├── .flake8
├── .github
│   ├── dependabot.yaml
│   └── workflows
│       ├── publish.yaml
│       └── test.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── RELEASE.md
├── docs
│   ├── Makefile
│   ├── make.bat
│   ├── requirements.txt
│   └── source
│       ├── _static
│       │   └── .gitkeep
│       ├── changelog.md
│       ├── conf.py
│       ├── index.md
│       ├── objects.md
│       ├── reflector.md
│       ├── spawner.md
│       ├── ssl.md
│       ├── templates.md
│       └── utils.md
├── jupyterhub_config.py
├── kubespawner
│   ├── __init__.py
│   ├── _version.py
│   ├── clients.py
│   ├── objects.py
│   ├── proxy.py
│   ├── reflector.py
│   ├── slugs.py
│   ├── spawner.py
│   ├── templates
│   │   ├── form.html
│   │   └── style.css
│   └── utils.py
├── pyproject.toml
└── tests
    ├── conftest.py
    ├── jupyterhub_config.py
    ├── test_clients.py
    ├── test_objects.py
    ├── test_profile.py
    ├── test_slugs.py
    ├── test_spawner.py
    └── test_utils.py

/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | # Ignore style and complexity
3 | # E: style errors
4 | # W: style warnings
5 | # C: complexity
6 | # F841: local variable assigned but never used
7 | ignore = E, C, W, F841
8 | builtins = c, get_config
9 | exclude =
10 |     .cache,
11 |     .github,
12 |     docs,
13 |     __init__.py
--------------------------------------------------------------------------------
/.github/dependabot.yaml:
--------------------------------------------------------------------------------
1 | # dependabot.yaml reference: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
2 | #
3 | # Notes:
4 | # - Status and logs from dependabot are provided at
5 | #   https://github.com/jupyterhub/kubespawner/network/updates.
6 | # - YAML anchors are not supported here or in GitHub Workflows.
7 | #
8 | version: 2
9 | updates:
10 |   # Maintain dependencies in our GitHub Workflows
11 |   - package-ecosystem: github-actions
12 |     directory: /
13 |     labels: [ci]
14 |     schedule:
15 |       interval: monthly
16 |       time: "05:00"
17 |       timezone: Etc/UTC
--------------------------------------------------------------------------------
/.github/workflows/publish.yaml:
--------------------------------------------------------------------------------
1 | # This is a GitHub workflow defining a set of jobs with a set of steps.
2 | # ref: https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions
3 | #
4 | name: Publish
5 |
6 | on:
7 |   pull_request:
8 |     paths-ignore:
9 |       - "docs/**"
10 |       - "**.md"
11 |       - ".github/workflows/*"
12 |       - "!.github/workflows/publish.yaml"
13 |   push:
14 |     paths-ignore:
15 |       - "docs/**"
16 |       - "**.md"
17 |       - ".github/workflows/*"
18 |       - "!.github/workflows/publish.yaml"
19 |     branches-ignore:
20 |       - "dependabot/**"
21 |       - "pre-commit-ci-update-config"
22 |     tags:
23 |       - "**"
24 |   workflow_dispatch:
25 |
26 | jobs:
27 |   publish-to-pypi:
28 |     runs-on: ubuntu-22.04
29 |
30 |     steps:
31 |       - uses: actions/checkout@v4
32 |       - uses: actions/setup-python@v5
33 |         with:
34 |           python-version: "3.10"
35 |
36 |       - name: install build package
37 |         run: |
38 |           pip install build
39 |           pip freeze
40 |
41 |       - name: build release
42 |         run: |
43 |           python -m build --sdist --wheel .
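          # the sdist and wheel produced by the build land in ./dist/; list
          # them so the artifacts are visible in the build log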
44 |           ls -alh dist
45 |
46 |       # ref: https://github.com/pypa/gh-action-pypi-publish
47 |       - name: publish to pypi
48 |         if: startsWith(github.ref, 'refs/tags/')
49 |         uses: pypa/gh-action-pypi-publish@release/v1
50 |         with:
51 |           user: __token__
52 |           password: ${{ secrets.pypi_password }}
53 |
--------------------------------------------------------------------------------
/.github/workflows/test.yaml:
--------------------------------------------------------------------------------
1 | # This is a GitHub workflow defining a set of jobs with a set of steps.
2 | # ref: https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions
3 | #
4 | name: Test
5 |
6 | on:
7 |   pull_request:
8 |     paths-ignore:
9 |       - "docs/**"
10 |       - "**.md"
11 |       - ".github/workflows/*"
12 |       - "!.github/workflows/test.yaml"
13 |   push:
14 |     paths-ignore:
15 |       - "docs/**"
16 |       - "**.md"
17 |       - ".github/workflows/*"
18 |       - "!.github/workflows/test.yaml"
19 |     branches-ignore:
20 |       - "dependabot/**"
21 |       - "pre-commit-ci-update-config"
22 |   workflow_dispatch:
23 |
24 | jobs:
25 |   run-pytest:
26 |     runs-on: ubuntu-22.04
27 |     timeout-minutes: 10
28 |
29 |     strategy:
30 |       # Keep running even if one variation of the job fails
31 |       fail-fast: false
32 |       matrix:
33 |         # We run this job multiple times with the different parameterizations
34 |         # specified below; these parameters have no meaning on their own and
35 |         # gain meaning from how the job steps use them.
36 |         #
37 |         # k3s-channel: https://update.k3s.io/v1-release/channels
38 |         # kubernetes_asyncio: https://github.com/tomplus/kubernetes_asyncio/tags
39 |         #
40 |         include:
41 |           # Tests with oldest supported Python, jupyterhub, k8s, and k8s client
42 |           #
43 |           # NOTE: If lower bounds are updated, also update our test for the
44 |           #       lower bounds in pyproject.toml.
45 |           #
46 |           - python: "3.7"
47 |             k3s: v1.24
48 |             test_dependencies: >-
49 |               jupyterhub==4.0.0
50 |               kubernetes_asyncio==24.2.3
51 |
52 |           # Test with modern python and k8s versions
53 |           - python: "3.11"
54 |             k3s: v1.27
55 |           - python: "3.12"
56 |             k3s: v1.28
57 |
58 |           # Test with latest python and JupyterHub in main branch
59 |           - python: "3.X"
60 |             k3s: latest
61 |             test_dependencies: git+https://github.com/jupyterhub/jupyterhub
62 |
63 |     steps:
64 |       - uses: actions/checkout@v4
65 |       - uses: actions/setup-python@v5
66 |         with:
67 |           python-version: "${{ matrix.python }}"
68 |
69 |       - name: Install package and test dependencies
70 |         run: |
71 |           pip install -e ".[test]" ${{ matrix.test_dependencies }}
72 |           pip freeze
73 |
74 |       # Starts a k8s cluster with NetworkPolicy enforcement and installs both
75 |       # kubectl and helm. We won't need network policy enforcement or helm
76 |       # though.
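      # (The action is also expected to write cluster credentials — a
      # kubeconfig — for the runner, so kubectl and the pytest suite below
      # can reach the cluster.)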
77 | # 78 | # ref: https://github.com/jupyterhub/action-k3s-helm/ 79 | - uses: jupyterhub/action-k3s-helm@v4 80 | with: 81 | k3s-channel: ${{ matrix.k3s }} 82 | metrics-enabled: false 83 | traefik-enabled: false 84 | docker-enabled: false 85 | 86 | - name: Run pytest 87 | run: | 88 | pytest --cov kubespawner 89 | 90 | # ref: https://github.com/jupyterhub/action-k8s-namespace-report 91 | - name: Kubernetes namespace report 92 | uses: jupyterhub/action-k8s-namespace-report@v1 93 | if: always() 94 | 95 | # ref: https://github.com/codecov/codecov-action 96 | - uses: codecov/codecov-action@v4 97 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Manually added parts to .gitignore 2 | # ---------------------------------- 3 | # 4 | # Artifacts from running kubespawner tests with minikube 5 | bin/ 6 | lib64 7 | minikube-linux-amd64 8 | node_modules/ 9 | package.json 10 | package-lock.json 11 | pyvenv.cfg 12 | share/ 13 | pip-selfcheck.json 14 | 15 | # JupyterHub running from kubespawner folder for development purposes 16 | jupyterhub-proxy.pid 17 | jupyterhub.sqlite 18 | jupyterhub_cookie_secret 19 | 20 | # Editors 21 | .vscode 22 | .idea 23 | 24 | 25 | # Python .gitignore from https://github.com/github/gitignore/blob/HEAD/Python.gitignore 26 | # ------------------------------------------------------------------------------------- 27 | # 28 | # Byte-compiled / optimized / DLL files 29 | __pycache__/ 30 | *.py[cod] 31 | *$py.class 32 | 33 | # C extensions 34 | *.so 35 | 36 | # Distribution / packaging 37 | .Python 38 | build/ 39 | develop-eggs/ 40 | dist/ 41 | downloads/ 42 | eggs/ 43 | .eggs/ 44 | lib/ 45 | lib64/ 46 | parts/ 47 | sdist/ 48 | var/ 49 | wheels/ 50 | share/python-wheels/ 51 | *.egg-info/ 52 | .installed.cfg 53 | *.egg 54 | MANIFEST 55 | 56 | # PyInstaller 57 | # Usually these files are written by a python script from a template 58 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 59 | *.manifest 60 | *.spec 61 | 62 | # Installer logs 63 | pip-log.txt 64 | pip-delete-this-directory.txt 65 | 66 | # Unit test / coverage reports 67 | htmlcov/ 68 | .tox/ 69 | .nox/ 70 | .coverage 71 | .coverage.* 72 | .cache 73 | nosetests.xml 74 | coverage.xml 75 | *.cover 76 | *.py,cover 77 | .hypothesis/ 78 | .pytest_cache/ 79 | cover/ 80 | 81 | # Translations 82 | *.mo 83 | *.pot 84 | 85 | # Django stuff: 86 | *.log 87 | local_settings.py 88 | db.sqlite3 89 | db.sqlite3-journal 90 | 91 | # Flask stuff: 92 | instance/ 93 | .webassets-cache 94 | 95 | # Scrapy stuff: 96 | .scrapy 97 | 98 | # Sphinx documentation 99 | docs/_build/ 100 | 101 | # PyBuilder 102 | .pybuilder/ 103 | target/ 104 | 105 | # Jupyter Notebook 106 | .ipynb_checkpoints 107 | 108 | # IPython 109 | profile_default/ 110 | ipython_config.py 111 | 112 | # pyenv 113 | # For a library or package, you might want to ignore these files since the code is 114 | # intended to run in multiple environments; otherwise, check them in: 115 | # .python-version 116 | 117 | # pipenv 118 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 119 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 120 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 121 | # install all needed dependencies. 
122 | #Pipfile.lock 123 | 124 | # poetry 125 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 126 | # This is especially recommended for binary packages to ensure reproducibility, and is more 127 | # commonly ignored for libraries. 128 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 129 | #poetry.lock 130 | 131 | # pdm 132 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 133 | #pdm.lock 134 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 135 | # in version control. 136 | # https://pdm.fming.dev/#use-with-ide 137 | .pdm.toml 138 | 139 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 140 | __pypackages__/ 141 | 142 | # Celery stuff 143 | celerybeat-schedule 144 | celerybeat.pid 145 | 146 | # SageMath parsed files 147 | *.sage.py 148 | 149 | # Environments 150 | .env 151 | .venv 152 | env/ 153 | venv/ 154 | ENV/ 155 | env.bak/ 156 | venv.bak/ 157 | 158 | # Spyder project settings 159 | .spyderproject 160 | .spyproject 161 | 162 | # Rope project settings 163 | .ropeproject 164 | 165 | # mkdocs documentation 166 | /site 167 | 168 | # mypy 169 | .mypy_cache/ 170 | .dmypy.json 171 | dmypy.json 172 | 173 | # Pyre type checker 174 | .pyre/ 175 | 176 | # pytype static type analyzer 177 | .pytype/ 178 | 179 | # Cython debug symbols 180 | cython_debug/ 181 | 182 | # PyCharm 183 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 184 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 185 | # and can be added to the global gitignore or merged into this file. For a more nuclear 186 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 187 | #.idea/ 188 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # pre-commit is a tool to perform a predefined set of tasks manually and/or 2 | # automatically before git commits are made. 3 | # 4 | # Config reference: https://pre-commit.com/#pre-commit-configyaml---top-level 5 | # 6 | # Common tasks 7 | # 8 | # - Run on all files: pre-commit run --all-files 9 | # - Register git hooks: pre-commit install --install-hooks 10 | # 11 | repos: 12 | # Autoformat: Python code, syntax patterns are modernized 13 | - repo: https://github.com/asottile/pyupgrade 14 | rev: v3.20.0 15 | hooks: 16 | - id: pyupgrade 17 | args: 18 | - --py37-plus 19 | 20 | # Autoformat: Python code 21 | - repo: https://github.com/pycqa/isort 22 | rev: 6.0.1 23 | hooks: 24 | - id: isort 25 | 26 | # Autoformat: Python code 27 | - repo: https://github.com/psf/black 28 | rev: 25.1.0 29 | hooks: 30 | - id: black 31 | 32 | # Autoformat: markdown, yaml 33 | - repo: https://github.com/pre-commit/mirrors-prettier 34 | rev: v4.0.0-alpha.8 35 | hooks: 36 | - id: prettier 37 | # Don't run prettier on our jinja2 template files. We run djlint instead 38 | exclude: "kubespawner/templates/.*\\.html" 39 | 40 | # Misc... 41 | - repo: https://github.com/pre-commit/pre-commit-hooks 42 | rev: v5.0.0 43 | # ref: https://github.com/pre-commit/pre-commit-hooks#hooks-available 44 | hooks: 45 | # Autoformat: Makes sure files end in a newline and only a newline. 
46 |       - id: end-of-file-fixer
47 |
48 |       # Autoformat: Sorts entries in requirements.txt.
49 |       - id: requirements-txt-fixer
50 |
51 |       # Lint: Check for files with names that would conflict on a
52 |       # case-insensitive filesystem like MacOS HFS+ or Windows FAT.
53 |       - id: check-case-conflict
54 |
55 |       # Lint: Checks that non-binary executables have a proper shebang.
56 |       - id: check-executables-have-shebangs
57 |
58 |   # Lint: Python code
59 |   - repo: https://github.com/PyCQA/flake8
60 |     rev: "7.2.0"
61 |     hooks:
62 |       - id: flake8
63 |
64 |   # Lint our jinja2 templates
65 |   - repo: https://github.com/Riverside-Healthcare/djLint
66 |     rev: v1.36.4
67 |     hooks:
68 |       - id: djlint-jinja
69 |         files: "kubespawner/templates/.*\\.html"
70 |         types_or:
71 |           - html
72 |         args:
73 |           - --reformat
74 |
75 | # pre-commit.ci config reference: https://pre-commit.ci/#configuration
76 | ci:
77 |   autoupdate_schedule: monthly
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Configuration on how ReadTheDocs (RTD) builds our documentation
2 | # ref: https://readthedocs.org/projects/jupyterhub-kubespawner/
3 | # ref: https://docs.readthedocs.io/en/stable/config-file/v2.html
4 | #
5 | version: 2
6 |
7 | sphinx:
8 |   configuration: docs/source/conf.py
9 |
10 | build:
11 |   os: ubuntu-22.04
12 |   tools:
13 |     python: "3.10"
14 |
15 | python:
16 |   install:
17 |     - requirements: docs/requirements.txt
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | :sparkles: Thank you for thinking about contributing to kubespawner! :sparkles:
4 |
5 | Welcome! As a [Jupyter](https://jupyter.org) project, we follow the [Jupyter contributor guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html).
6 |
7 | ## Types of contribution
8 |
9 | There are many ways to contribute to kubespawner; here are some of them:
10 |
11 | - **Update the documentation.**
12 |   If you're reading a page or docstring and it doesn't make sense (or doesn't exist!), please let us know by opening a bug report.
13 |   It's even more amazing if you can give us a suggested change.
14 | - **Fix bugs or add requested features.**
15 |   Have a look through the [issue tracker](https://github.com/jupyterhub/kubespawner/issues) and see if there are any tagged as ["help wanted"](https://github.com/jupyterhub/kubespawner/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22).
16 |   As the label suggests, we'd love your help!
17 | - **Report a bug.**
18 |   If kubespawner isn't doing what you thought it would do, then open a [bug report](https://github.com/jupyterhub/kubespawner/issues/new).
19 |   Please provide details on what you were trying to do, what goal you were trying to achieve, and how we can reproduce the problem.
20 | - **Suggest a new feature.**
21 |   We know that there are lots of ways to extend kubespawner!
22 |   If you're interested in adding a feature then please open a [feature request](https://github.com/jupyterhub/kubespawner/issues/new?template=feature_request.md).
23 |   Try to explain what the feature is, what alternatives you have thought about, what skills are required to work on this task, and how big a task you estimate it to be.
24 | - **Review someone's Pull Request.**
25 |   Whenever somebody proposes changes to the kubespawner codebase, the community reviews
26 |   the changes, and provides feedback, edits, and suggestions. Check out the
27 |   [open pull requests](https://github.com/jupyterhub/kubespawner/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc)
28 |   and provide feedback that helps improve the PR and get it merged. Please keep your
29 |   feedback positive and constructive!
30 | - **Tell people about kubespawner.**
31 |   Kubespawner is built by and for its community.
32 |   If you know anyone who would like to use kubespawner, please tell them about the project!
33 |   You could give a talk about it, run a demonstration, or make a poster.
34 |   The sky is the limit :rocket::star2:.
35 |
36 | ## Setting up for documentation changes
37 |
38 | We use Sphinx to build the kubespawner documentation. You can make changes to
39 | the documentation with any text editor and directly through the GitHub website.
40 |
41 | For small changes (like typos) you do not need to set up anything locally. For
42 | larger changes we recommend you build the documentation locally so you can see
43 | the end product in its full glory.
44 |
45 | To make edits through the GitHub website visit https://github.com/jupyterhub/kubespawner/tree/HEAD/docs/source, open the file you would like to edit, and then click "edit". GitHub will
46 | walk you through the process of proposing your change ("making a Pull Request").
47 |
48 | A brief guide to setting up for local development:
49 |
50 | ```sh
51 | pip install -r docs/requirements.txt
52 |
53 | cd docs
54 | make html
55 | ```
56 |
57 | ## Setting up a local development environment
58 |
59 | To work on kubespawner's code you can run JupyterHub locally on your computer,
60 | using an editable installation of kubespawner that interacts with pods in a
61 | local kubernetes cluster!
62 |
63 | You need to have a local kubernetes cluster and be able to edit networking
64 | rules on your computer. We will now walk you through the steps to get going:
65 |
66 | 1. Install [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)
67 |    with a container manager such as
68 |    [docker](https://minikube.sigs.k8s.io/docs/drivers/docker/) (or a virtual
69 |    machine manager).
70 |
71 | 1. Run `minikube start --driver=docker`. This will start your kubernetes
72 |    cluster if it isn't already up. Run `kubectl get node` to make sure it is.
73 |
74 |    Note that the `minikube start --driver=docker` command will also set up
75 |    `kubectl` on your host machine to interact with the kubernetes cluster, along
76 |    with a `~/.kube/config` file with credentials for connecting to this
77 |    cluster.
78 |
79 | 1. Set up a networking route so that a program on your host can talk to the
80 |    pods inside minikube.
81 |
82 |    ```bash
83 |    # Linux
84 |    sudo ip route add $(kubectl get node minikube -o jsonpath="{.spec.podCIDR}") via $(minikube ip)
85 |    # later on you can undo this with
86 |    sudo ip route del $(kubectl get node minikube -o jsonpath="{.spec.podCIDR}")
87 |
88 |    # macOS
89 |    sudo route -n add -net $(kubectl get node minikube -o jsonpath="{.spec.podCIDR}") $(minikube ip)
90 |    # later on you can undo this with
91 |    sudo route delete -net $(kubectl get node minikube -o jsonpath="{.spec.podCIDR}")
92 |    ```
93 |
94 |    ### Troubleshooting
95 |
96 |    Got an error like below?
97 |
98 |    ```
99 |    RTNETLINK answers: File exists
100 |    ```
101 |
102 |    It most likely means you have Docker running on your host using the same
103 |    IP range minikube is using. You can fix this by editing your
104 |    `/etc/docker/daemon.json` file to add the following:
105 |
106 |    ```json
107 |    {
108 |      "bip": "172.19.1.1/16"
109 |    }
110 |    ```
111 |
112 |    If some JSON already exists in that file, make sure to just add the
113 |    `bip` key rather than replace it all. The final file needs to be valid
114 |    JSON.
115 |
116 |    Once edited, restart docker with `sudo systemctl restart docker`. It
117 |    should come up using a different IP range, and you can run the
118 |    `sudo ip route add` command again. Note that restarting docker will
119 |    restart all your running containers by default.
120 |
121 | 1. Clone this repository
122 |
123 |    ```sh
124 |    git clone https://github.com/jupyterhub/kubespawner.git
125 |    ```
126 |
127 | 1. Set up a virtual environment. After cloning the repository, you should set up an
128 |    isolated environment to install libraries required for running / developing
129 |    kubespawner.
130 |
131 |    There are many ways of doing this: conda envs, virtualenv, pipenv, etc. Pick
132 |    your favourite. We show you how to use venv:
133 |
134 |    ```sh
135 |    cd kubespawner
136 |
137 |    python3 -m venv .
138 |    source bin/activate
139 |    ```
140 |
141 | 1. Install a locally editable version of kubespawner and its dependencies for
142 |    running it and testing it.
143 |
144 |    ```sh
145 |    pip install -e ".[test]"
146 |    ```
147 |
148 | 1. Install the nodejs based [Configurable HTTP Proxy
149 |    (CHP)](https://github.com/jupyterhub/configurable-http-proxy), and make it
150 |    accessible to JupyterHub.
151 |
152 |    ```sh
153 |    npm install configurable-http-proxy
154 |    export PATH=$(pwd)/node_modules/.bin:$PATH
155 |    ```
156 |
157 | 1. Start JupyterHub
158 |
159 |    ```sh
160 |    # Run this from the kubespawner repo's root directory where the preconfigured
161 |    # jupyterhub_config.py file resides!
162 |    jupyterhub
163 |    ```
164 |
165 | 1. Visit [http://localhost:8000/](http://localhost:8000/)!
166 |
167 | You should now have a JupyterHub running directly on your computer outside of
168 | the Kubernetes cluster, using a locally editable kubespawner code base. The
169 | JupyterHub is set up with
170 | [DummyAuthenticator](http://github.com/yuvipanda/jupyterhub-dummy-authenticator),
171 | so any user + password combination will allow you to log in. You can make changes to
172 | kubespawner and restart the jupyterhub, and rapidly iterate :)
173 |
174 | ## Running tests
175 |
176 | To run our automated test-suite you need to have a local development setup.
177 |
178 | Run all tests with:
179 |
180 | ```sh
181 | pytest
182 | ```
183 |
184 | ### Troubleshooting
185 |
186 | If you get a huge amount of errors, make sure your minikube is up and running, and see if it helps to clear your .eggs
187 | directory.
188 |
189 | ```sh
190 | rm -rf .eggs
191 | ```
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017, Project Jupyter Contributors
4 | All rights reserved.
5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | graft kubespawner/templates 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [kubespawner](https://github.com/jupyterhub/kubespawner) (jupyterhub-kubespawner @ PyPI) 2 | 3 | [![Latest PyPI version](https://img.shields.io/pypi/v/jupyterhub-kubespawner?logo=pypi)](https://pypi.python.org/pypi/jupyterhub-kubespawner) 4 | [![Latest conda-forge version](https://img.shields.io/conda/vn/conda-forge/jupyterhub-kubespawner?logo=conda-forge)](https://anaconda.org/conda-forge/jupyterhub-kubespawner) 5 | [![Documentation status](https://img.shields.io/readthedocs/jupyterhub-kubespawner?logo=read-the-docs)](https://jupyterhub-kubespawner.readthedocs.io/en/latest/?badge=latest) 6 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/jupyterhub/kubespawner/test.yaml?logo=github&label=tests)](https://github.com/jupyterhub/kubespawner/actions) 7 | [![Code coverage](https://codecov.io/gh/jupyterhub/kubespawner/branch/main/graph/badge.svg)](https://codecov.io/gh/jupyterhub/kubespawner) 8 | 9 | The _kubespawner_ (also known as the JupyterHub Kubernetes Spawner) enables JupyterHub to spawn 10 | single-user notebook servers on a [Kubernetes](https://kubernetes.io/) 11 | cluster. 12 | 13 | See the [KubeSpawner documentation](https://jupyterhub-kubespawner.readthedocs.io) for more 14 | information about features and usage. In particular, here is [a list of all the spawner options](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#module-kubespawner.spawner). 
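
Enabling it is a one-line change to your JupyterHub configuration. A minimal
sketch — the image below is illustrative and taken from this repo's demo
config; see the spawner options linked above for everything you can tune:

```python
# jupyterhub_config.py
c.JupyterHub.spawner_class = "kubespawner.KubeSpawner"
# assumption: any single-user notebook image works here
c.KubeSpawner.image = "jupyter/base-notebook:latest"
```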
15 |
16 | ## Features
17 |
18 | Kubernetes is an open-source system for automating deployment, scaling, and
19 | management of containerized applications. If you want to run a JupyterHub
20 | setup that needs to scale across multiple nodes (anything with over ~50
21 | simultaneous users), Kubernetes is a wonderful way to do it. Features include:
22 |
23 | - Easily and elastically run anywhere between 2 and thousands of nodes with the
24 |   same set of powerful abstractions. Scale up and down as required by simply
25 |   adding or removing nodes.
26 |
27 | - Run JupyterHub itself inside Kubernetes easily. This allows you to manage
28 |   many JupyterHub deployments with only Kubernetes, without requiring an extra
29 |   layer of Ansible / Puppet / Bash scripts. This also provides easy integrated
30 |   monitoring and failover for the hub process itself.
31 |
32 | - Spawn multiple hubs in the same kubernetes cluster, with support for
33 |   [namespaces](https://kubernetes.io/docs/admin/namespaces/). You can limit the
34 |   amount of resources each namespace can use, effectively limiting the amount
35 |   of resources a single JupyterHub (and its users) can use. This allows
36 |   organizations to easily maintain multiple JupyterHubs with just one
37 |   kubernetes cluster, allowing for easy maintenance & high resource
38 |   utilization.
39 |
40 | - Provide guarantees and limits on the amount of resources (CPU / RAM) that
41 |   single-user notebooks can use. Kubernetes has comprehensive [resource control](https://kubernetes.io/docs/user-guide/compute-resources/) that can
42 |   be used from the spawner.
43 |
44 | - Mount various types of [persistent volumes](https://kubernetes.io/docs/user-guide/persistent-volumes/)
45 |   onto the single-user notebook's container.
46 |
47 | - Control various security parameters (such as userid/groupid, SELinux, etc.)
48 |   via flexible [Pod Security Policies](https://kubernetes.io/docs/user-guide/pod-security-policy/).
49 |
50 | - Run easily in multiple clouds (or on your own machines). Helps avoid vendor
51 |   lock-in. You can even spread out your cluster across
52 |   [multiple clouds at the same time](https://kubernetes.io/docs/user-guide/federation/).
53 |
54 | In general, Kubernetes provides a ton of well thought out, useful features -
55 | and you can use all of them along with this spawner.
56 |
57 | ## Requirements
58 |
59 | ### JupyterHub
60 |
61 | Requires JupyterHub 4.0+.
62 |
63 | ### Kubernetes
64 |
65 | Everything should work from Kubernetes v1.24+.
66 |
67 | The [Kube DNS addon](https://kubernetes.io/docs/user-guide/connecting-applications/#dns)
68 | is not strictly required - the spawner uses
69 | [environment variable](https://kubernetes.io/docs/user-guide/connecting-applications/#environment-variables)
70 | based discovery instead. Your kubernetes cluster will need to be configured to
71 | support the types of volumes you want to use.
72 |
73 | If you are just getting started and want a kubernetes cluster to play with,
74 | [Google Container Engine](https://cloud.google.com/container-engine/) is
75 | probably the nicest option. For AWS/Azure,
76 | [kops](https://github.com/kubernetes/kops) is probably the way to go.
77 |
78 | ## Getting help
79 |
80 | We encourage you to ask questions on the
81 | [Jupyter mailing list](https://groups.google.com/forum/#!forum/jupyter).
82 | You can also participate in development discussions or get live help on
83 | [Gitter](https://gitter.im/jupyterhub/jupyterhub).
84 |
85 | ## License
86 |
87 | We use a shared copyright model that enables all contributors to maintain the
88 | copyright on their contributions.
89 |
90 | All code is licensed under the terms of the revised BSD license.
91 |
92 | ## Resources
93 |
94 | #### JupyterHub and kubespawner
95 |
96 | - [Reporting Issues](https://github.com/jupyterhub/kubespawner/issues)
97 | - [Documentation for JupyterHub](https://jupyterhub.readthedocs.io)
98 | - [Documentation for JupyterHub's REST API](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyter/jupyterhub/master/docs/rest-api.yml#/default)
99 |
100 | #### Jupyter
101 |
102 | - [Documentation for Project Jupyter](https://jupyter.readthedocs.io/en/latest/index.html) | [PDF](https://media.readthedocs.org/pdf/jupyter/latest/jupyter.pdf)
103 | - [Project Jupyter website](https://jupyter.org)
--------------------------------------------------------------------------------
/RELEASE.md:
--------------------------------------------------------------------------------
1 | # How to make a release
2 |
3 | `jupyterhub-kubespawner` is a package available on [PyPI][] and [conda-forge][].
4 | These are instructions on how to make a release.
5 |
6 | ## Pre-requisites
7 |
8 | - Push rights to [github.com/jupyterhub/kubespawner][]
9 | - Push rights to [conda-forge/jupyterhub-kubespawner-feedstock][]
10 |
11 | ## Steps to make a release
12 |
13 | 1. Create a PR updating `docs/source/changelog.md` with [github-activity][] and
14 |    continue only when it's merged.
15 |
16 | 1. Check out main and make sure it is up to date.
17 |
18 |    ```shell
19 |    git checkout main
20 |    git fetch origin main
21 |    git reset --hard origin/main
22 |    ```
23 |
24 | 1. Update the version, make commits, and push a git tag with `tbump`.
25 |
26 |    ```shell
27 |    pip install tbump
28 |    tbump --dry-run ${VERSION}
29 |
30 |    # run
31 |    tbump ${VERSION}
32 |    ```
33 |
34 |    Following this, the [CI system][] will build and publish a release.
35 |
36 | 1. Reset the version back to dev, e.g. `2.0.1.dev0` after releasing `2.0.0`.
37 |
38 |    ```shell
39 |    tbump --no-tag ${NEXT_VERSION}.dev0
40 |    ```
41 |
42 | 1. Following the release to PyPI, an automated PR should arrive at
43 |    [conda-forge/jupyterhub-kubespawner-feedstock][] with instructions.
44 |
45 | [github-activity]: https://github.com/executablebooks/github-activity
46 | [github.com/jupyterhub/kubespawner]: https://github.com/jupyterhub/kubespawner
47 | [pypi]: https://pypi.org/project/jupyterhub-kubespawner/
48 | [conda-forge]: https://anaconda.org/conda-forge/jupyterhub-kubespawner
49 | [conda-forge/jupyterhub-kubespawner-feedstock]: https://github.com/conda-forge/jupyterhub-kubespawner-feedstock
50 | [ci system]: https://github.com/jupyterhub/kubespawner/actions/workflows/publish.yaml
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation generated by sphinx-quickstart
2 | # ----------------------------------------------------------------------------
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS    ?=
7 | SPHINXBUILD   ?= sphinx-build
8 | SOURCEDIR     = source
9 | BUILDDIR      = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) 21 | 22 | 23 | # Manually added commands 24 | # ---------------------------------------------------------------------------- 25 | 26 | # For local development: 27 | # - builds and rebuilds html on changes to source 28 | # - starts a livereload enabled webserver and opens up a browser 29 | devenv: 30 | sphinx-autobuild -b html --open-browser "$(SOURCEDIR)" "$(BUILDDIR)/html" $(SPHINXOPTS) 31 | 32 | # For local development and CI: 33 | # - verifies that links are valid 34 | linkcheck: 35 | $(SPHINXBUILD) -b linkcheck "$(SOURCEDIR)" "$(BUILDDIR)/linkcheck" $(SPHINXOPTS) 36 | @echo 37 | @echo "Link check complete; look for any errors in the above output " \ 38 | "or in $(BUILDDIR)/linkcheck/output.txt." 39 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | if "%1" == "devenv" goto devenv 15 | if "%1" == "linkcheck" goto linkcheck 16 | goto default 17 | 18 | 19 | :default 20 | %SPHINXBUILD% >NUL 2>NUL 21 | if errorlevel 9009 ( 22 | echo. 23 | echo.The 'sphinx-build' command was not found. Open and read README.md! 24 | exit /b 1 25 | ) 26 | %SPHINXBUILD% -M %1 "%SOURCEDIR%" "%BUILDDIR%" %SPHINXOPTS% 27 | goto end 28 | 29 | 30 | :help 31 | %SPHINXBUILD% -M help "%SOURCEDIR%" "%BUILDDIR%" %SPHINXOPTS% 32 | goto end 33 | 34 | 35 | :devenv 36 | sphinx-autobuild >NUL 2>NUL 37 | if errorlevel 9009 ( 38 | echo. 39 | echo.The 'sphinx-autobuild' command was not found. Open and read README.md! 40 | exit /b 1 41 | ) 42 | sphinx-autobuild -b html --open-browser "%SOURCEDIR%" "%BUILDDIR%/html" %SPHINXOPTS% 43 | goto end 44 | 45 | 46 | :linkcheck 47 | %SPHINXBUILD% -b linkcheck "%SOURCEDIR%" "%BUILDDIR%/linkcheck" %SPHINXOPTS% 48 | echo. 49 | echo.Link check complete; look for any errors in the above output 50 | echo.or in "%BUILDDIR%/linkcheck/output.txt". 51 | goto end 52 | 53 | 54 | :end 55 | popd 56 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | # This requirements.txt file must be installed from the project's root folder: 2 | # 3 | # pip install -r docs/requirements.txt 4 | # 5 | # Install the package to help autodoc-traits inspect and generate documentation. 6 | # 7 | --editable . 
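# Documentation build dependencies: the Sphinx extensions and theme listed
# below are what docs/source/conf.py relies on.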
8 |
9 | autodoc-traits
10 | myst-parser>=0.17.0
11 | sphinx-book-theme
12 | sphinx-copybutton
13 | sphinxext-rediraffe
--------------------------------------------------------------------------------
/docs/source/_static/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jupyterhub/kubespawner/37e22e8ee2062d79cd0c9dc4a0e855bd0cd22875/docs/source/_static/.gitkeep
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for Sphinx to build our documentation to HTML.
2 | #
3 | # Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html
4 | #
5 | import datetime
6 |
7 | import kubespawner
8 |
9 | # -- Project information -----------------------------------------------------
10 | # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
11 | #
12 | project = "KubeSpawner"
13 | copyright = f"{datetime.date.today().year}, Project Jupyter Contributors"
14 | author = "Project Jupyter Contributors"
15 | version = "%i.%i" % kubespawner.version_info[:2]
16 | release = kubespawner.__version__
17 |
18 |
19 | # -- General Sphinx configuration --------------------------------------------
20 | # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
21 | #
22 | extensions = [
23 |     'sphinx.ext.autodoc',
24 |     'sphinx.ext.intersphinx',
25 |     'sphinx.ext.napoleon',
26 |     'sphinxext.rediraffe',
27 |     'autodoc_traits',
28 |     'myst_parser',
29 | ]
30 |
31 | root_doc = "index"
32 | source_suffix = [".md", ".rst"]
33 |
34 | # default_role is set for use with reStructuredText that we still need to use in
35 | # docstrings in the autodoc_traits inspected Python module. It makes single
36 | # backticks around text, like `my_function`, behave as in typical Markdown.
37 | default_role = "literal"
38 |
39 |
40 | # -- MyST configuration ------------------------------------------------------
41 | # ref: https://myst-parser.readthedocs.io/en/latest/configuration.html
42 | #
43 | myst_enable_extensions = [
44 |     # available extensions: https://myst-parser.readthedocs.io/en/latest/syntax/optional.html
45 |     "colon_fence",
46 | ]
47 |
48 |
49 | # -- Options for HTML output -------------------------------------------------
50 | # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
51 | #
52 | html_title = "Kubespawner"
53 | html_theme = "sphinx_book_theme"
54 | html_theme_options = {
55 |     "repository_url": "https://github.com/jupyterhub/kubespawner",
56 |     "use_issues_button": True,
57 |     "use_repository_button": True,
58 |     "use_edit_page_button": True,
59 | }
60 |
61 | # Add any paths that contain custom static files (such as style sheets) here,
62 | # relative to this directory. They are copied after the builtin static files,
63 | # so a file named "default.css" will overwrite the builtin "default.css".
64 | html_static_path = ['_static']
65 |
66 |
67 | # -- Options for intersphinx extension ---------------------------------------
68 | # ref: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration
69 | #
70 | # The extension makes us able to link to other projects like below.
71 | #
72 | #   rST  - :external:py:class:`jupyterhub.spawner.Spawner`
73 | #   MyST - {external:py:class}`jupyterhub.spawner.Spawner`
74 | #
75 | # To see what we can link to, do the following where "objects.inv" is appended
76 | # to the sphinx based website:
77 | #
78 | #   python -m sphinx.ext.intersphinx https://jupyterhub.readthedocs.io/en/stable/objects.inv
79 | #
80 | intersphinx_mapping = {
81 |     "jupyterhub": ("https://jupyterhub.readthedocs.io/en/stable/", None),
82 | }
83 |
84 | # intersphinx_disabled_reftypes set based on recommendation in
85 | # https://docs.readthedocs.io/en/stable/guides/intersphinx.html#using-intersphinx
86 | intersphinx_disabled_reftypes = ["*"]
87 |
88 |
89 | # -- Options for linkcheck builder -------------------------------------------
90 | # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder
91 | #
92 | linkcheck_ignore = [
93 |     r"(.*)github\.com(.*)#",  # javascript based anchors
94 |     r"(.*)/#%21(.*)/(.*)",  # /#!forum/jupyter - encoded anchor edge case
95 |     r"https://github.com/[^/]*$",  # too many github usernames / searches in changelog
96 |     "https://github.com/jupyterhub/kubespawner/pull/",  # too many pull requests in changelog
97 |     "https://github.com/jupyterhub/kubespawner/compare/",  # too many ref comparisons in changelog
98 | ]
99 | linkcheck_anchors_ignore = [
100 |     "/#!",
101 |     "/#%21",
102 | ]
103 |
104 |
105 | # -- Options for the rediraffe extension -------------------------------------
106 | # ref: https://github.com/wpilibsuite/sphinxext-rediraffe#readme
107 | #
108 | # This extension helps us relocate content without breaking links. If a
109 | # document is moved internally, put its path as a dictionary key in the
110 | # redirects dictionary below and its new location in the value.
111 | #
112 | # If the changelog has been moved to live under reference/, then you'd add this
113 | # entry to the rediraffe_redirects dictionary:
114 | #
115 | #   "changelog": "reference/changelog",
116 | #
117 | rediraffe_branch = "main"
118 | rediraffe_redirects = {}
--------------------------------------------------------------------------------
/docs/source/index.md:
--------------------------------------------------------------------------------
1 | (front-page)=
2 |
3 | # Kubespawner
4 |
5 | The _kubespawner_ (also known as the JupyterHub Kubernetes Spawner) enables JupyterHub to spawn
6 | single-user notebook servers on a [Kubernetes](https://kubernetes.io/)
7 | cluster.
8 |
9 | ## Features
10 |
11 | Kubernetes is an open-source system for automating deployment, scaling, and
12 | management of containerized applications. If you want to run a JupyterHub
13 | setup that needs to scale across multiple nodes (anything with over ~50
14 | simultaneous users), Kubernetes is a wonderful way to do it. Features include:
15 |
16 | - Easily and elastically run anywhere between 2 and thousands of nodes with the
17 |   same set of powerful abstractions. Scale up and down as required by simply
18 |   adding or removing nodes.
19 |
20 | - Run JupyterHub itself inside Kubernetes easily. This allows you to manage
21 |   many JupyterHub deployments with only Kubernetes, without requiring an extra
22 |   layer of Ansible / Puppet / Bash scripts. This also provides easy integrated
23 |   monitoring and failover for the hub process itself.
24 |
25 | - Spawn multiple hubs in the same kubernetes cluster, with support for
26 |   [namespaces](https://kubernetes.io/docs/tasks/administer-cluster/namespaces/). You can limit the
27 |   amount of resources each namespace can use, effectively limiting the amount
28 |   of resources a single JupyterHub (and its users) can use. This allows
29 |   organizations to easily maintain multiple JupyterHubs with just one
30 |   kubernetes cluster, allowing for easy maintenance & high resource
31 |   utilization.
32 |
33 | - Provide guarantees and limits on the amount of resources (CPU / RAM) that
34 |   single-user notebooks can use. Kubernetes has comprehensive [resource control](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) that can
35 |   be used from the spawner.
36 |
37 | - Mount various types of
38 |   [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
39 |   onto the single-user notebook's container.
40 |
41 | - Control various security parameters (such as userid/groupid, SELinux, etc.)
42 |   via flexible [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).
43 |
44 | - Run easily in multiple clouds (or on your own machines). Helps avoid vendor
45 |   lock-in. You can even spread out your cluster across
46 |   [multiple clouds at the same time](https://kubernetes.io/docs/concepts/cluster-administration/federation/).
47 |
48 | - Internal SSL configuration supported
49 |
50 | In general, Kubernetes provides a ton of well thought out, useful features -
51 | and you can use all of them along with this spawner.
52 |
53 | ## Requirements
54 |
55 | ### Kubernetes
56 |
57 | Everything should work from Kubernetes v1.24+.
58 |
59 | The [Kube DNS addon](https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#dns)
60 | is not strictly required - the spawner uses
61 | [environment variable](https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables)
62 | based discovery instead. Your kubernetes cluster will need to be configured to
63 | support the types of volumes you want to use.
64 |
65 | If you are just getting started and want a kubernetes cluster to play with,
66 | [Google Container Engine](https://cloud.google.com/kubernetes-engine/) is
67 | probably the nicest option. For AWS/Azure,
68 | [kops](https://github.com/kubernetes/kops) is probably the way to go.
69 |
70 | ```{toctree}
71 | :maxdepth: 2
72 | :caption: API Documentation
73 |
74 | spawner
75 | objects
76 | reflector
77 | ssl
78 | utils
79 | ```
80 |
81 | ```{toctree}
82 | :maxdepth: 2
83 | :caption: Reference
84 | templates
85 | changelog
86 | ```
--------------------------------------------------------------------------------
/docs/source/objects.md:
--------------------------------------------------------------------------------
1 | # Objects
2 |
3 | ```{eval-rst}
4 | .. automodule:: kubespawner.objects
5 | ```
6 |
7 | ```{eval-rst}
8 | .. autofunction:: kubespawner.objects.make_pod
9 | ```
10 |
11 | ```{eval-rst}
12 | .. autofunction:: kubespawner.objects.make_pvc
13 | ```
--------------------------------------------------------------------------------
/docs/source/reflector.md:
--------------------------------------------------------------------------------
1 | # Reflectors
2 |
3 | ```{eval-rst}
4 | .. automodule:: kubespawner.reflector
5 | ```
6 |
7 | ```{eval-rst}
8 | .. autoclass:: kubespawner.reflector.NamespacedResourceReflector
9 | ```
--------------------------------------------------------------------------------
/docs/source/spawner.md:
--------------------------------------------------------------------------------
1 | # KubeSpawner
2 |
3 | ```{eval-rst}
4 | .. automodule:: kubespawner
5 | ```
6 |
7 | ```{eval-rst}
8 | .. autoconfigurable:: KubeSpawner
9 | ```
--------------------------------------------------------------------------------
/docs/source/ssl.md:
--------------------------------------------------------------------------------
1 | # Internal SSL
2 |
3 | JupyterHub 1.0 introduces the internal_ssl configuration for encryption and authentication of all internal communication via mutual TLS.
4 |
5 | If enabled, KubeSpawner will mount the internal_ssl certificates as Kubernetes secrets into the jupyter user's pod.
6 |
7 | ## Setup
8 |
9 | To enable, use the following settings:
10 |
11 | ```python
12 | c.JupyterHub.internal_ssl = True
13 |
14 | c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
15 | ```
16 |
17 | Further configuration can be specified with the following (listed with their default values):
18 |
19 | ```python
20 | c.KubeSpawner.secret_name_template = "{pod_name}"
21 |
22 | c.KubeSpawner.secret_mount_path = "/etc/jupyterhub/ssl/"
23 | ```
24 |
25 | KubeSpawner sets the `JUPYTERHUB_SSL_KEYFILE`, `JUPYTERHUB_SSL_CERTFILE` and `JUPYTERHUB_SSL_CLIENT_CA` environment variables, with the appropriate paths, on the user's notebook server.
--------------------------------------------------------------------------------
/docs/source/templates.md:
--------------------------------------------------------------------------------
1 | (templates)=
2 |
3 | # Templated fields
4 |
5 | Several fields in KubeSpawner can be resolved as string templates,
6 | so each user server can get distinct values from the same configuration.
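
As a rough illustration — this is not KubeSpawner's actual implementation,
which additionally applies the escaping and slug rules described below —
template fields behave like Python `str.format` placeholders:

```python
# Hypothetical sketch of how the default pod name template resolves
# for one user's server (values here are illustrative only).
template = "jupyter-{user_server}"
print(template.format(user_server="user--server"))  # -> jupyter-user--server
```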
7 | 8 | String templates use the Python formatting convention of `f"{fieldname}"`, 9 | so for example the default `pod_name_template` of `"jupyter-{user_server}"` will produce: 10 | 11 | | username | server name | pod name | 12 | | ---------------- | ----------- | ---------------------------------------------- | 13 | | `user` | `''` | `jupyter-user` | 14 | | `user` | `server` | `jupyter-user--server` | 15 | | `user@email.com` | `Some Name` | `jupyter-user-email-com--some-name---0c1fe94b` | 16 | 17 | ## templated properties 18 | 19 | Templated fields include: 20 | 21 | - [extra_annotations](#KubeSpawner.extra_annotations) 22 | - [extra_containers](#KubeSpawner.extra_containers) 23 | - [extra_labels](#KubeSpawner.extra_labels) 24 | - [pod_connect_ip](#KubeSpawner.pod_connect_ip) 25 | - [pod_name_template](#KubeSpawner.pod_name_template) 26 | - [pvc_name_template](#KubeSpawner.pvc_name_template) 27 | - [storage_extra_annotations](#KubeSpawner.storage_extra_annotations) 28 | - [storage_extra_labels](#KubeSpawner.storage_extra_labels) 29 | - [storage_selector](#KubeSpawner.storage_selector) 30 | - [user_namespace_annotations](#KubeSpawner.user_namespace_annotations) 31 | - [user_namespace_labels](#KubeSpawner.user_namespace_labels) 32 | - [user_namespace_template](#KubeSpawner.user_namespace_template) 33 | - [volume_mounts](#KubeSpawner.volume_mounts) 34 | - [volumes](#KubeSpawner.volumes) 35 | - [working_dir](#KubeSpawner.working_dir) 36 | 37 | ## fields 38 | 39 | The following fields are available in templates: 40 | 41 | | field | description | 42 | | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | 43 | | `{username}` | the username passed through the configured slug scheme | 44 | | `{servername}` | the name of the server passed through the configured slug scheme (`''` for the user's default server) | 45 | | `{user_server}` | the username and servername together as a single slug. This should be used most places for a unique string for a given user's server (new in kubespawner 7). 
| 46 | | `{unescaped_username}` | the actual username without escaping (no guarantees about value, except as enforced by your Authenticator) | 47 | | `{unescaped_servername}` | the actual server name without escaping (no guarantees about value) | 48 | | `{pod_name}` | the resolved pod name, often a good choice if you need a starting point for other resources (new in kubespawner 7) | 49 | | `{pvc_name}` | the resolved PVC name (new in kubespawner 7) | 50 | | `{namespace}` | the kubernetes namespace of the server (new in kubespawner 7) | 51 | | `{hubnamespace}` | the kubernetes namespace of the Hub | 52 | 53 | Because there are two escaping schemes for `username`, `servername`, and `user_server`, you can explicitly select one or the other on a per-template-field basis with the prefix `safe_` or `escaped_`: 54 | 55 | | field | description | 56 | | ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | 57 | | `{escaped_username}` | the username passed through the old 'escape' slug scheme (new in kubespawner 7) | 58 | | `{escaped_servername}` | the server name passed through the 'escape' slug scheme (new in kubespawner 7) | 59 | | `{escaped_user_server}` | the username and servername together as a single slug, identical to `"{escaped_username}--{escaped_servername}".rstrip("-")` (new in kubespawner 7) | 60 | | `{safe_username}` | the username passed through the 'safe' slug scheme (new in kubespawner 7) | 61 | | `{safe_servername}` | the server name passed through the 'safe' slug scheme (new in kubespawner 7) | 62 | | `{safe_user_server}` | the username and server name together as a 'safe' slug (new in kubespawner 7) | 63 | 64 | These may be useful during a transition upgrading a deployment from an earlier version of kubespawner. 65 | 66 | The value of the unprefixed `username`, etc. is goverend by the [](#KubeSpawner.slug_scheme) configuration, and always matches exactly one of these values. 67 | 68 | ## Template tips 69 | 70 | In general, these guidelines should help you pick fields to use in your template strings: 71 | 72 | - use `{user_server}` when a string should be unique _per server_ (e.g. pod name) 73 | - use `{username}` when it should be unique per user, but shared across named servers (sometimes chosen for PVCs) 74 | - use `{escaped_}` prefix if you need to keep certain values unchanged in a deployment upgrading from kubespawner \< 7 75 | - `{pod_name}` can be re-used anywhere you want to create more resources associated with a given pod, 76 | to avoid repeating yourself 77 | 78 | ## Changing template configuration 79 | 80 | Changing configuration should not generally affect _running_ servers. 81 | However, when changing a property that may need to persist across user server restarts, special consideration may be required. 82 | For example, changing `pvc_name` or `working_dir` could result in disconnecting a user's server from data loaded in previous sessions. 83 | This may be your intention or not! KubeSpawner cannot know. 84 | 85 | `pvc_name` is handled specially, to avoid losing access to data. 86 | If `KubeSpawner.remember_pvc_name` is True, once a server has started, a server's PVC name cannot be changed by configuration. 87 | Any future launch will use the previous `pvc_name`, regardless of change in configuration. 
88 | If you _want_ to change the names of mounted PVCs, you can set 89 | 90 | ```python 91 | c.KubeSpawner.remember_pvc_name = False 92 | ``` 93 | 94 | This handling isn't general for PVCs, only specifically the default `pvc_name`. 95 | If you have defined your own volumes, you need to handle changes to these yourself. 96 | 97 | (templates:upgrading-from-less-than-7)= 98 | 99 | ## Upgrading from kubespawner \< 7 100 | 101 | Prior to kubespawner 7, an escaping scheme was used that ensured values were _unique_, 102 | but did not always ensure fields were _valid_. 103 | In particular: 104 | 105 | - start/end rules were not enforced 106 | - length was not enforced 107 | 108 | This meant that e.g. usernames that start with a capital letter or were very long could result in servers failing to start because the escaping scheme produced an invalid label. 109 | To solve this, a new 'safe' scheme has been added in kubespawner 7 for computing template strings, 110 | which aims to guarantee to always produce valid object names and labels. 111 | The new scheme is the default in kubespawner 7. 112 | 113 | You can select the scheme with: 114 | 115 | ```python 116 | c.KubeSpawner.slug_scheme = "escape" # no changes from kubespawner 6 117 | c.KubeSpawner.slug_scheme = "safe" # default for kubespawner 7 118 | ``` 119 | 120 | You can also adjust individual template fields to expand independent of 121 | configured slug scheme. If you for example previously have mounted a folder 122 | named `{username}` for a single NFS volume with all user's home folders, a 123 | change of slug scheme could lead to mounting a different folder. To avoid this, 124 | you can stick with the previous behavior for the volume mount specifically by 125 | referencing `{escaped_username}` instead. 126 | 127 | ```python 128 | c.KubeSpawner.volume_mounts = { 129 | "name": "home", 130 | "mountPath": "/home/jovyan", 131 | "subPath": "{escaped_username}", # matches "{username}" in kubespawner 6 132 | } 133 | ``` 134 | 135 | The new scheme has the following rules: 136 | 137 | - the length of any _single_ template field is limited to 48 characters (the total length of the string is not enforced) 138 | - the result will only contain lowercase ascii letters, numbers, and `-` 139 | - it will always start and end with a letter or number 140 | - if a name is 'safe', it is used unmodified 141 | - if any escaping is required, a truncated safe subset of characters is used, followed by `---{hash}` where `{hash}` is a checksum of the original input string 142 | - `-` shall not occur in sequences of more than one consecutive `-`, except where inserted by the escaping mechanism 143 | - if no safe characters are present, 'x' is used for the 'safe' subset 144 | 145 | Since length requirements are applied on a per-field basis, a new `{user_server}` field is added, 146 | which computes a single valid slug following the above rules which is unique for a given user server. 147 | The general form is: 148 | 149 | ``` 150 | {username}--{servername}---{hash} 151 | ``` 152 | 153 | where 154 | 155 | - `--{servername}` is only present for non-empty server names 156 | - `---{hash}` is only present if escaping is required for _either_ username or servername, and hashes the combination of user and server. 157 | 158 | This `{user_server}` is the recommended value to use in pod names, etc. 
159 | In the escape scheme, `{user_server}` is identical to the previous value used in default templates: `{username}--{servername}`,
160 | so it should be safe to upgrade previous templates using `{username}--{servername}` to `{user_server}` or `{escaped_user_server}`.
161 |
162 | In the vast majority of cases (where no escaping is required), the 'safe' scheme produces identical results to the 'escape' scheme.
163 | Probably the most common case where the two differ is in the presence of single `-` characters, which the 'escape' scheme escapes to `-2d`, while the 'safe' scheme leaves them unchanged.
164 |
165 | Examples:
166 |
167 | | name | escape scheme | safe scheme |
168 | | ---- | ------------- | ----------- |
169 | | `username` | `username` | `username` |
170 | | `has-hyphen` | `has-2dhyphen` | `has-hyphen` |
171 | | `Capital` | `-43apital` (error) | `capital---1a1cf792` |
172 | | `user@email.com` | `user-40email-2ecom` | `user-email-com---0925f997` |
173 | | `a-very-long-name-that-is-too-long-for-sixty-four-character-labels` | `a-2dvery-2dlong-2dname-2dthat-2dis-2dtoo-2dlong-2dfor-2dsixty-2dfour-2dcharacter-2dlabels` (error) | `a-very-long-name-that-is-too-long-for---29ac5fd2` |
174 | | `ALLCAPS` | `-41-4c-4c-43-41-50-53` (error) | `allcaps---27c6794c` |
175 |
176 | Most changed names won't have a practical effect.
177 | However, to avoid `pvc_name` changing even though KubeSpawner 6 didn't persist it,
178 | on first launch (for each server) after an upgrade, KubeSpawner checks if:
179 |
180 | 1. `pvc_name_template` produces a different result with `scheme='escape'`
181 | 1. a PVC with the old 'escaped' name exists
182 |
183 | and if such a PVC exists, the older name is used instead of the new one (it is then remembered for subsequent launches, according to `remember_pvc_name`).
184 | This is an attempt to respect the `remember_pvc_name` configuration, even though the old name is not technically recorded.
185 | We can infer the old value, as long as configuration has not changed.
186 | This will only work if upgrading KubeSpawner does not _also_ coincide with a change in the `pvc_name_template` configuration.
187 |
-------------------------------------------------------------------------------- /docs/source/utils.md: --------------------------------------------------------------------------------
1 | # Utilities
2 |
3 | ```{eval-rst}
4 | .. automodule:: kubespawner.utils
5 | ```
6 |
7 | ```{eval-rst}
8 | .. autofunction:: kubespawner.utils.generate_hashed_slug
9 | ```
10 |
-------------------------------------------------------------------------------- /jupyterhub_config.py: --------------------------------------------------------------------------------
1 | import os
2 | import socket
3 |
4 | c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
5 |
6 | c.JupyterHub.ip = '127.0.0.1'
7 | c.JupyterHub.hub_ip = '127.0.0.1'
8 |
9 | # Don't try to cleanup servers on exit - since in general for k8s, we want
10 | # the hub to be able to restart without losing user containers
11 | c.JupyterHub.cleanup_servers = False
12 |
13 | # A small user image with jupyterlab that is easy to test against, assumed to be
14 | # downloadable in less than 60 seconds.
15 | c.KubeSpawner.image = 'jupyter/base-notebook:latest' 16 | c.KubeSpawner.start_timeout = 60 17 | 18 | if os.environ.get("CI"): 19 | # In the CI system we use k3s which will be accessible on localhost. 20 | c.JupyterHub.hub_connect_ip = "127.0.0.1" 21 | else: 22 | # Find the IP of the machine that minikube is most likely able to talk to 23 | # Graciously used from https://stackoverflow.com/a/166589 24 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 25 | s.connect(("8.8.8.8", 80)) 26 | host_ip = s.getsockname()[0] 27 | s.close() 28 | 29 | c.JupyterHub.hub_connect_ip = host_ip 30 | 31 | # Simplify testing by using a dummy authenticator class where any username 32 | # password combination will work and where we don't provide persistent storage. 33 | c.JupyterHub.authenticator_class = 'dummy' 34 | c.KubeSpawner.storage_pvc_ensure = False 35 | 36 | c.JupyterHub.allow_named_servers = True 37 | 38 | c.KubeSpawner.profile_list = [ 39 | { 40 | 'display_name': 'Demo - profile_list entry 1', 41 | 'description': 'Demo description for profile_list entry 1, and it should look good even though it is a bit lengthy.', 42 | 'slug': 'demo-1', 43 | 'default': True, 44 | 'profile_options': { 45 | 'image': { 46 | 'display_name': 'Image', 47 | 'choices': { 48 | 'base': { 49 | 'display_name': 'jupyter/base-notebook:latest', 50 | 'kubespawner_override': { 51 | 'image': 'jupyter/base-notebook:latest' 52 | }, 53 | }, 54 | 'minimal': { 55 | 'display_name': 'jupyter/minimal-notebook:latest', 56 | 'default': True, 57 | 'kubespawner_override': { 58 | 'image': 'jupyter/minimal-notebook:latest' 59 | }, 60 | }, 61 | }, 62 | 'unlisted_choice': { 63 | 'enabled': True, 64 | 'display_name': 'Other image', 65 | 'validation_regex': '^jupyter/.+:.+$', 66 | 'validation_message': 'Must be an image matching ^jupyter/:$', 67 | 'kubespawner_override': {'image': '{value}'}, 68 | }, 69 | }, 70 | }, 71 | 'kubespawner_override': { 72 | 'default_url': '/lab', 73 | }, 74 | }, 75 | { 76 | 'display_name': 'Demo - profile_list entry 2', 77 | 'slug': 'demo-2', 78 | 'kubespawner_override': { 79 | 'extra_resource_guarantees': {"nvidia.com/gpu": "1"}, 80 | }, 81 | }, 82 | ] 83 | -------------------------------------------------------------------------------- /kubespawner/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | JupyterHub Spawner to spawn user notebooks on a Kubernetes cluster. 3 | 4 | After installation, you can enable it by adding:: 5 | 6 | c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner' 7 | 8 | in your `jupyterhub_config.py` file. 9 | """ 10 | 11 | # We export KubeSpawner specifically here. This simplifies import for users. 12 | # Users can simply import kubespawner.KubeSpawner in their applications 13 | # instead of the more verbose import kubespawner.spawner.KubeSpawner. 14 | from ._version import __version__, version_info 15 | from .spawner import KubeSpawner 16 | -------------------------------------------------------------------------------- /kubespawner/_version.py: -------------------------------------------------------------------------------- 1 | # __version__ should be updated using tbump, based on configuration in 2 | # pyproject.toml, according to instructions in RELEASE.md. 
3 | # 4 | __version__ = "7.0.0" 5 | 6 | # version_info looks like (1, 2, 3, "dev") if __version__ is 1.2.3.dev 7 | version_info = tuple(int(p) if p.isdigit() else p for p in __version__.split(".")) 8 | -------------------------------------------------------------------------------- /kubespawner/clients.py: -------------------------------------------------------------------------------- 1 | """Configures and instantiates REST API clients of various kinds to 2 | communicate with a Kubernetes api-server, but only one instance per kind is 3 | instantiated. 4 | 5 | The instances of these REST API clients are also patched to avoid the creation 6 | of unused threads. 7 | """ 8 | 9 | import asyncio 10 | from concurrent.futures import ThreadPoolExecutor 11 | from functools import lru_cache 12 | from unittest.mock import Mock 13 | 14 | import kubernetes_asyncio.client 15 | from kubernetes_asyncio.client import Configuration, api_client 16 | 17 | # FIXME: Remove this workaround when instantiating a k8s client doesn't 18 | # automatically create a ThreadPool with 1 thread that we won't use 19 | # anyhow. To know if that has happened, reading 20 | # https://github.com/jupyterhub/kubespawner/issues/567 may be helpful. 21 | # 22 | # The workaround is to monkeypatch ThreadPool in the kubernetes 23 | # api_client to avoid ThreadPools. This is known to work with both 24 | # `kubernetes` and `kubernetes_asyncio`. 25 | # 26 | _dummy_pool = Mock() 27 | api_client.ThreadPool = lambda *args, **kwargs: _dummy_pool 28 | 29 | _client_cache = {} 30 | 31 | 32 | def shared_client(ClientType, *args, **kwargs): 33 | """Return a shared kubernetes client instance 34 | based on the provided arguments. 35 | 36 | Cache is one client per running loop per combination of input args. 37 | 38 | Client will be closed when the loop closes. 39 | """ 40 | kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs)) 41 | cache_key = (asyncio.get_running_loop(), ClientType, args, kwarg_key) 42 | client = _client_cache.get(cache_key, None) 43 | 44 | if client is None: 45 | # Kubernetes client configuration is handled globally and should already 46 | # be configured from spawner.py or proxy.py via the load_config function 47 | # prior to a shared_client being instantiated. 48 | Client = getattr(kubernetes_asyncio.client, ClientType) 49 | client = Client(*args, **kwargs) 50 | 51 | _client_cache[cache_key] = client 52 | 53 | # create a task that will close the client when it is cancelled 54 | # relies on JupyterHub's task cleanup at shutdown 55 | async def close_client_task(): 56 | try: 57 | async with client.api_client: 58 | while True: 59 | await asyncio.sleep(300) 60 | except asyncio.CancelledError: 61 | pass 62 | finally: 63 | _client_cache.pop(cache_key, None) 64 | 65 | asyncio.create_task(close_client_task()) 66 | 67 | return client 68 | 69 | 70 | @lru_cache() 71 | def load_config(host=None, ssl_ca_cert=None, verify_ssl=None): 72 | """ 73 | Loads global configuration for the Python client we use to communicate with 74 | a Kubernetes API server, and optionally tweaks that configuration based on 75 | specific settings on the passed caller object. 76 | 77 | This needs to be called before creating a kubernetes client, so practically 78 | before the shared_client function is called. 
79 | """ 80 | try: 81 | kubernetes_asyncio.config.load_incluster_config() 82 | except kubernetes_asyncio.config.ConfigException: 83 | # avoid making this async just for load-config 84 | # run async load_kube_config in a background thread, 85 | # blocking this thread until it's done 86 | with ThreadPoolExecutor(1) as pool: 87 | load_sync = lambda: asyncio.run( 88 | kubernetes_asyncio.config.load_kube_config() 89 | ) 90 | future = pool.submit(load_sync) 91 | # blocking wait for load to complete 92 | future.result() 93 | 94 | if ssl_ca_cert: 95 | global_conf = Configuration.get_default_copy() 96 | global_conf.ssl_ca_cert = ssl_ca_cert 97 | Configuration.set_default(global_conf) 98 | if host: 99 | global_conf = Configuration.get_default_copy() 100 | global_conf.host = host 101 | Configuration.set_default(global_conf) 102 | if verify_ssl is not None: 103 | global_conf = Configuration.get_default_copy() 104 | global_conf.verify_ssl = verify_ssl 105 | Configuration.set_default(global_conf) 106 | -------------------------------------------------------------------------------- /kubespawner/proxy.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import os 4 | import string 5 | 6 | import escapism 7 | from jupyterhub.proxy import Proxy 8 | from jupyterhub.utils import exponential_backoff 9 | from kubernetes_asyncio import client 10 | from traitlets import Bool, Dict, List, Unicode 11 | 12 | from .clients import load_config, shared_client 13 | from .objects import make_ingress 14 | from .reflector import ResourceReflector 15 | from .slugs import escape_slug 16 | from .utils import generate_hashed_slug 17 | 18 | 19 | class IngressReflector(ResourceReflector): 20 | kind = 'ingresses' 21 | api_group_name = 'NetworkingV1Api' 22 | 23 | @property 24 | def ingresses(self): 25 | return self.resources 26 | 27 | 28 | class ServiceReflector(ResourceReflector): 29 | kind = 'services' 30 | 31 | @property 32 | def services(self): 33 | return self.resources 34 | 35 | 36 | class EndpointsReflector(ResourceReflector): 37 | kind = 'endpoints' 38 | 39 | @property 40 | def endpoints(self): 41 | return self.resources 42 | 43 | 44 | class KubeIngressProxy(Proxy): 45 | """ 46 | DISCLAIMER: 47 | 48 | This JupyterHub Proxy class is not maintained thoroughly with tests and 49 | documentation, or actively used in any official distribution of 50 | JupyterHub. 51 | 52 | KubeIngressProxy was originally developed by @yuvipanda in preparation 53 | to support a JupyterHub that could end up with ~20k simultaneous users. 54 | The single ConfigurableHTTPProxy server that is controlled by the 55 | default JupyterHub Proxy class could bottleneck for such heavy load, 56 | even though it has been found to be good enough for ~2k simultaneous 57 | users. By instead using this JupyterHub Proxy class that would create 58 | k8s resources (Ingress, Service, Endpoint) that in turn controlled how a 59 | flexible set of proxy servers would proxy traffic, the bottleneck could 60 | be removed. 61 | 62 | KubeIngressProxy's efficiency relates greatly to the performance of the 63 | k8s api-server and the k8s controller that routes traffic based on 64 | changes to Ingress resources registered by the k8s api-server. This 65 | means users of KubeIngressProxy may have greatly varying experiences of 66 | using it depending on the performance of their k8s cluster setup. 
67 |
68 | Use of KubeIngressProxy as a JupyterHub Proxy class is entirely
69 | independent of use of KubeSpawner as a JupyterHub Spawner class. For
70 | reasons related to sharing code with KubeSpawner in reflectors.py, it
71 | was made, and has remained, part of the jupyterhub/kubespawner project.
72 |
73 | Related issues:
74 | - Need for tests: https://github.com/jupyterhub/kubespawner/issues/496
75 | - Need for docs: https://github.com/jupyterhub/kubespawner/issues/163
76 |
77 | ---
78 |
79 | KubeIngressProxy is an implementation of the JupyterHub Proxy class that
80 | JupyterHub can be configured to rely on:
81 |
82 | c.JupyterHub.proxy_class = "kubespawner.proxy:KubeIngressProxy"
83 |
84 | Like all JupyterHub Proxy implementations, KubeIngressProxy will know
85 | how to respond to hub requests like `get_all_routes`, `add_route`, and
86 | `delete_route` in a way that ensures traffic gets routed to the user
87 | pods or JupyterHub-registered external services. For reference, the official
88 | documentation on writing a custom Proxy class like this is available here:
89 | https://jupyterhub.readthedocs.io/en/stable/reference/proxy.html.
90 |
91 | KubeIngressProxy communicates with the k8s api-server in order to
92 | create/delete Ingress resources according to the hub's `add_route`/`delete_route`
93 | requests. It doesn't route traffic by itself, but instead relies on the k8s cluster's
94 | ability to route traffic according to these Ingress resources.
95 |
96 | Because KubeIngressProxy interacts with a k8s api-server that manages
97 | Ingress resources, it must have permissions to do so as well. These
98 | permissions should be granted to the k8s service account used where
99 | JupyterHub is running, as that is also where the KubeIngressProxy class
100 | instance will run its code.
101 |
102 | FIXME: Verify what k8s RBAC permissions are required for KubeIngressProxy
103 | to function.
104 |
105 | - The IngressReflector, ServiceReflector, and EndpointsReflector
106 | require permission to read/list/watch those resources.
107 | - `add_route` and `delete_route` require permission to
108 | create/update/patch/delete Ingress, Service, and Endpoints
109 | resources.
110 |
111 | These permissions are needed on a k8s Role resource bound to the k8s
112 | ServiceAccount (via a k8s RoleBinding) used on the k8s Pod where
113 | JupyterHub runs:
114 |
115 | ```yaml
116 | kind: Role
117 | apiVersion: rbac.authorization.k8s.io/v1
118 | metadata:
119 | name: kube-ingress-proxy
120 | rules:
121 | - apiGroups: [""]
122 | resources: ["endpoints", "services"]
123 | verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
124 | - apiGroups: ["networking.k8s.io"]
125 | resources: ["ingresses"]
126 | verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
127 | ```
128 | """
129 |
130 | # JupyterHub should not try to start or stop this proxy
131 | should_start = False
132 |
133 | namespace = Unicode(
134 | config=True,
135 | help="""
136 | Kubernetes namespace to spawn ingresses for single-user servers in.
137 |
138 | If running inside a kubernetes cluster with service accounts enabled,
139 | defaults to the current namespace.
If not, defaults to 'default'.
140 | """,
141 | )
142 |
143 | def _namespace_default(self):
144 | """
145 | Set namespace default to current namespace if running in a k8s cluster
146 |
147 | If not in a k8s cluster with service accounts enabled, default to
148 | 'default'
149 | """
150 | ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
151 | if os.path.exists(ns_path):
152 | with open(ns_path) as f:
153 | return f.read().strip()
154 | return 'default'
155 |
156 | component_label = Unicode(
157 | 'singleuser-server',
158 | config=True,
159 | help="""
160 | The value of the labels app.kubernetes.io/component and component, used
161 | to identify user pods kubespawner is to manage.
162 |
163 | This can be used to override the spawner behavior when dealing with
164 | multiple hub instances in the same namespace. Usually helpful for CI
165 | workflows.
166 | """,
167 | )
168 |
169 | common_labels = Dict(
170 | {
171 | 'app.kubernetes.io/name': 'jupyterhub',
172 | 'app.kubernetes.io/managed-by': 'kubespawner',
173 | # app and heritage are older variants of the modern
174 | # app.kubernetes.io labels
175 | 'app': 'jupyterhub',
176 | 'heritage': 'jupyterhub',
177 | },
178 | config=True,
179 | help="""
180 | Extra kubernetes labels to set on all created objects.
181 |
182 | The keys and values must both be strings that match the kubernetes
183 | label key / value constraints.
184 |
185 | See `the Kubernetes documentation `__
186 | for more info on what labels are and why you might want to use them!
187 |
188 | `{username}`, `{servername}`, `{servicename}`, `{routespec}`, `{hubnamespace}`,
189 | `{unescaped_username}`, `{unescaped_servername}`, `{unescaped_servicename}` and `{unescaped_routespec}` will be expanded if
190 | found within strings of this configuration.
191 |
192 | Names have to be escaped to follow the `DNS label standard
193 | `__.
194 | """,
195 | )
196 |
197 | ingress_extra_labels = Dict(
198 | config=True,
199 | help="""
200 | Extra kubernetes labels to set on Ingress objects.
201 |
202 | The keys and values must both be strings that match the kubernetes
203 | label key / value constraints.
204 |
205 | See `the Kubernetes documentation `__
206 | for more info on what labels are and why you might want to use them!
207 |
208 | `{username}`, `{servername}`, `{servicename}`, `{routespec}`, `{hubnamespace}`,
209 | `{unescaped_username}`, `{unescaped_servername}`, `{unescaped_servicename}` and `{unescaped_routespec}` will be expanded if
210 | found within strings of this configuration.
211 |
212 | Names have to be escaped to follow the `DNS label standard
213 | `__.
214 | """,
215 | )
216 |
217 | ingress_extra_annotations = Dict(
218 | config=True,
219 | help="""
220 | Extra kubernetes annotations to set on the Ingress object.
221 |
222 | The keys and values must both be strings.
223 |
224 | See `the Kubernetes documentation `__
225 | for more info on what annotations are and why you might want to use them!
226 |
227 | `{username}`, `{servername}`, `{servicename}`, `{routespec}`, `{hubnamespace}`,
228 | `{unescaped_username}`, `{unescaped_servername}`, `{unescaped_servicename}` and `{unescaped_routespec}` will be expanded if
229 | found within strings of this configuration.
230 |
231 | Names have to be escaped to follow the `DNS label standard
232 | `__.
233 | """, 234 | ) 235 | 236 | ingress_class_name = Unicode( 237 | config=True, 238 | help=""" 239 | Default value for 'ingressClassName' field in Ingress specification 240 | """, 241 | ) 242 | 243 | ingress_specifications = List( 244 | trait=Dict, 245 | config=True, 246 | help=""" 247 | Specifications for ingress routes. List of dicts of the following format: 248 | 249 | [{'host': 'host.example.domain'}] 250 | [{'host': 'host.example.domain', 'tlsSecret': 'tlsSecretName'}] 251 | [{'host': 'jh.{hubnamespace}.domain', 'tlsSecret': 'tlsSecretName'}] 252 | 253 | Wildcard might not work, refer to your ingress controller documentation. 254 | 255 | `{routespec}`, `{hubnamespace}`, and `{unescaped_routespec}` will be expanded if 256 | found within strings of this configuration. 257 | 258 | Names have to be are escaped to follow the `DNS label standard 259 | `__. 260 | """, 261 | ) 262 | 263 | reuse_existing_services = Bool( 264 | False, 265 | config=True, 266 | help=""" 267 | If `True`, proxy will try to reuse existing services created by `KubeSpawner.services_enabled=True` 268 | or `KubeSpawner.internal_ssl=True`. 269 | If `False` (default), KubeSpawner creates service with type `ExternalName`, pointing to the pod's service. 270 | 271 | Sometimes `ExternalName` could lead to issues with accessing pod, like `500 Redirect loop detected`, 272 | so setting this option to `True` could solve this issue. 273 | 274 | By default, KubeSpawner does not create service for a pod at all (`service_enabled=False`, `internal_ssl=False`). 275 | In such a case KubeSpawner creates service with type `ClusterIP`, pointing to the pod IP and port. 276 | 277 | If KubeSpawner creates pod in a different namespace, this option is ignored, 278 | because Ingress(namespace=hub) cannot point to Service(namespace=user). 279 | """, 280 | ) 281 | 282 | k8s_api_ssl_ca_cert = Unicode( 283 | "", 284 | config=True, 285 | help=""" 286 | Location (absolute filepath) for CA certs of the k8s API server. 287 | 288 | Typically this is unnecessary, CA certs are picked up by 289 | config.load_incluster_config() or config.load_kube_config. 290 | 291 | In rare non-standard cases, such as using custom intermediate CA 292 | for your cluster, you may need to mount root CA's elsewhere in 293 | your Pod/Container and point this variable to that filepath 294 | """, 295 | ) 296 | 297 | k8s_api_host = Unicode( 298 | "", 299 | config=True, 300 | help=""" 301 | Full host name of the k8s API server ("https://hostname:port"). 302 | 303 | Typically this is unnecessary, the hostname is picked up by 304 | config.load_incluster_config() or config.load_kube_config. 305 | """, 306 | ) 307 | 308 | k8s_api_verify_ssl = Bool( 309 | None, 310 | allow_none=True, 311 | config=True, 312 | help=""" 313 | Verify TLS certificates when connecting to the k8s master. 314 | 315 | Set this to false to skip verifying SSL certificate when calling API 316 | from https server. 317 | """, 318 | ) 319 | 320 | def __init__(self, *args, **kwargs): 321 | super().__init__(*args, **kwargs) 322 | load_config( 323 | host=self.k8s_api_host, 324 | ssl_ca_cert=self.k8s_api_ssl_ca_cert, 325 | verify_ssl=self.k8s_api_verify_ssl, 326 | ) 327 | self.core_api = shared_client('CoreV1Api') 328 | self.networking_api = shared_client('NetworkingV1Api') 329 | 330 | labels = { 331 | # NOTE: We monitor resources with the old component label instead of 332 | # the modern app.kubernetes.io/component label. 
333 | # is only non-breaking if we can assume the running resources
334 | # monitored can be detected by either old or new labels.
335 | #
336 | # The modern labels were added to resources created by
337 | # KubeSpawner 6.3, first adopted in z2jh 4.0.
338 | #
339 | # Related to https://github.com/jupyterhub/kubespawner/issues/834
340 | #
341 | 'component': self.component_label,
342 | 'hub.jupyter.org/proxy-route': 'true',
343 | }
344 | self.ingress_reflector = IngressReflector(
345 | parent=self, namespace=self.namespace, labels=labels
346 | )
347 | self.service_reflector = ServiceReflector(
348 | parent=self, namespace=self.namespace, labels=labels
349 | )
350 | self.endpoint_reflector = EndpointsReflector(
351 | parent=self, namespace=self.namespace, labels=labels
352 | )
353 |
354 | # schedule our reflectors to start in the event loop,
355 | # a reflector's first load can be awaited with:
356 | #
357 | # await some_reflector._first_load_future
358 | #
359 | asyncio.ensure_future(self.ingress_reflector.start())
360 | asyncio.ensure_future(self.service_reflector.start())
361 | asyncio.ensure_future(self.endpoint_reflector.start())
362 |
363 | def _safe_name_for_routespec(self, routespec):
364 | # FIXME: escape_slug isn't exactly what's done here, because we aren't
365 | # calling .lower(). It may have been fine to just transition to
366 | # escape_slug, but it wasn't obviously a safe change, so it
367 | # wasn't done.
368 | safe_chars = set(string.ascii_lowercase + string.digits)
369 | safe_name = generate_hashed_slug(
370 | 'jupyter-'
371 | + escapism.escape(routespec, safe=safe_chars, escape_char='-')
372 | + '-route'
373 | )
374 | return safe_name
375 |
376 | def _expand_user_properties(self, template, routespec, data):
377 | raw_servername = data.get('server_name') or ''
378 | safe_servername = escape_slug(raw_servername)
379 |
380 | hub_namespace = self._namespace_default()
381 | if hub_namespace == "default":
382 | hub_namespace = "user"
383 |
384 | raw_username = data.get('user') or ''
385 | safe_username = escape_slug(raw_username)
386 |
387 | raw_servicename = data.get('services') or ''
388 | safe_servicename = escape_slug(raw_servicename)
389 |
390 | raw_routespec = routespec
391 | safe_routespec = self._safe_name_for_routespec(routespec)
392 |
393 | rendered = template.format(
394 | username=safe_username,
395 | unescaped_username=raw_username,
396 | servername=safe_servername,
397 | unescaped_servername=raw_servername,
398 | servicename=safe_servicename,
399 | unescaped_servicename=raw_servicename,
400 | routespec=safe_routespec,
401 | unescaped_routespec=raw_routespec,
402 | hubnamespace=hub_namespace,
403 | )
404 | # strip trailing - delimiter in case of empty servername.
405 | # k8s object names cannot have trailing - 406 | return rendered.rstrip("-") 407 | 408 | def _expand_all(self, src, routespec, data): 409 | if isinstance(src, list): 410 | return [self._expand_all(i, routespec, data) for i in src] 411 | elif isinstance(src, dict): 412 | return {k: self._expand_all(v, routespec, data) for k, v in src.items()} 413 | elif isinstance(src, str): 414 | return self._expand_user_properties(src, routespec, data) 415 | else: 416 | return src 417 | 418 | async def _delete_if_exists(self, kind, safe_name, future): 419 | try: 420 | await future 421 | self.log.info('Deleted %s/%s', kind, safe_name) 422 | except client.rest.ApiException as e: 423 | if e.status != 404: 424 | raise 425 | self.log.warn("Could not delete %s/%s: does not exist", kind, safe_name) 426 | 427 | async def add_route(self, routespec, target, data): 428 | # Create a route with the name being escaped routespec 429 | # Use full routespec in label 430 | # 'data' is JSON encoded and put in an annotation - we don't need to query for it 431 | 432 | safe_name = self._safe_name_for_routespec(routespec).lower() 433 | full_name = f'{self.namespace}/{safe_name}' 434 | 435 | common_labels = self._expand_all(self.common_labels, routespec, data) 436 | common_labels.update( 437 | { 438 | 'app.kubernetes.io/component': self.component_label, 439 | 'component': self.component_label, 440 | } 441 | ) 442 | 443 | ingress_extra_labels = self._expand_all( 444 | self.ingress_extra_labels, routespec, data 445 | ) 446 | ingress_extra_annotations = self._expand_all( 447 | self.ingress_extra_annotations, routespec, data 448 | ) 449 | 450 | ingress_specifications = self._expand_all( 451 | self.ingress_specifications, 452 | routespec, 453 | data, 454 | ) 455 | 456 | endpoint, service, ingress = make_ingress( 457 | name=safe_name, 458 | routespec=routespec, 459 | target=target, 460 | data=data, 461 | namespace=self.namespace, 462 | common_labels=common_labels, 463 | ingress_extra_labels=ingress_extra_labels, 464 | ingress_extra_annotations=ingress_extra_annotations, 465 | ingress_class_name=self.ingress_class_name, 466 | ingress_specifications=ingress_specifications, 467 | reuse_existing_services=self.reuse_existing_services, 468 | ) 469 | 470 | async def ensure_object(create_func, patch_func, body, kind): 471 | try: 472 | await create_func(namespace=self.namespace, body=body) 473 | self.log.info('Created %s/%s', kind, safe_name) 474 | except client.rest.ApiException as e: 475 | if e.status == 409: 476 | # This object already exists, we should patch it to make it be what we want 477 | self.log.warn( 478 | "Trying to patch %s/%s, it already exists", kind, safe_name 479 | ) 480 | await patch_func( 481 | namespace=self.namespace, 482 | body=body, 483 | name=body.metadata.name, 484 | ) 485 | else: 486 | raise 487 | 488 | if endpoint is not None: 489 | await ensure_object( 490 | self.core_api.create_namespaced_endpoints, 491 | self.core_api.patch_namespaced_endpoints, 492 | body=endpoint, 493 | kind='endpoints', 494 | ) 495 | 496 | await exponential_backoff( 497 | lambda: full_name in self.endpoint_reflector.endpoints, 498 | 'Could not find endpoints/%s after creating it' % safe_name, 499 | ) 500 | else: 501 | delete_endpoint = self.core_api.delete_namespaced_endpoints( 502 | name=safe_name, 503 | namespace=self.namespace, 504 | body=client.V1DeleteOptions(grace_period_seconds=0), 505 | ) 506 | await self._delete_if_exists('endpoint', safe_name, delete_endpoint) 507 | 508 | if service is not None: 509 | await ensure_object( 510 | 
self.core_api.create_namespaced_service, 511 | self.core_api.patch_namespaced_service, 512 | body=service, 513 | kind='service', 514 | ) 515 | await exponential_backoff( 516 | lambda: full_name in self.service_reflector.services, 517 | 'Could not find services/%s after creating it' % safe_name, 518 | ) 519 | else: 520 | delete_service = self.core_api.delete_namespaced_service( 521 | name=safe_name, 522 | namespace=self.namespace, 523 | body=client.V1DeleteOptions(grace_period_seconds=0), 524 | ) 525 | await self._delete_if_exists('service', safe_name, delete_service) 526 | 527 | await ensure_object( 528 | self.networking_api.create_namespaced_ingress, 529 | self.networking_api.patch_namespaced_ingress, 530 | body=ingress, 531 | kind='ingress', 532 | ) 533 | 534 | await exponential_backoff( 535 | lambda: full_name in self.ingress_reflector.ingresses, 536 | 'Could not find ingress/%s after creating it' % safe_name, 537 | ) 538 | 539 | async def delete_route(self, routespec): 540 | # We just ensure that these objects are deleted. 541 | # This means if some of them are already deleted, we just let it 542 | # be. 543 | 544 | safe_name = self._safe_name_for_routespec(routespec).lower() 545 | 546 | delete_options = client.V1DeleteOptions(grace_period_seconds=0) 547 | 548 | delete_endpoint = self.core_api.delete_namespaced_endpoints( 549 | name=safe_name, 550 | namespace=self.namespace, 551 | body=delete_options, 552 | ) 553 | 554 | delete_service = self.core_api.delete_namespaced_service( 555 | name=safe_name, 556 | namespace=self.namespace, 557 | body=delete_options, 558 | ) 559 | 560 | delete_ingress = self.networking_api.delete_namespaced_ingress( 561 | name=safe_name, 562 | namespace=self.namespace, 563 | body=delete_options, 564 | grace_period_seconds=0, 565 | ) 566 | 567 | # This seems like cleanest way to parallelize all three of these while 568 | # also making sure we only ignore the exception when it's a 404. 569 | # The order matters for endpoint & service - deleting the service deletes 570 | # the endpoint in the background. This can be racy however, so we do so 571 | # explicitly ourselves as well. In the future, we can probably try a 572 | # foreground cascading deletion (https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#foreground-cascading-deletion) 573 | # instead, but for now this works well enough. 
574 | await asyncio.gather(
575 | self._delete_if_exists('endpoint', safe_name, delete_endpoint),
576 | self._delete_if_exists('service', safe_name, delete_service),
577 | self._delete_if_exists('ingress', safe_name, delete_ingress),
578 | )
579 |
580 | async def get_all_routes(self):
581 | if not self.ingress_reflector.first_load_future.done():
582 | await self.ingress_reflector.first_load_future
583 |
584 | routes = {
585 | ingress["metadata"]["annotations"]['hub.jupyter.org/proxy-routespec']: {
586 | 'routespec': ingress["metadata"]["annotations"][
587 | 'hub.jupyter.org/proxy-routespec'
588 | ],
589 | 'target': ingress["metadata"]["annotations"][
590 | 'hub.jupyter.org/proxy-target'
591 | ],
592 | 'data': json.loads(
593 | ingress["metadata"]["annotations"]['hub.jupyter.org/proxy-data']
594 | ),
595 | }
596 | for ingress in self.ingress_reflector.ingresses.values()
597 | }
598 |
599 | return routes
600 |
-------------------------------------------------------------------------------- /kubespawner/reflector.py: --------------------------------------------------------------------------------
1 | # specifically use concurrent.futures for threadsafety
2 | # asyncio Futures cannot be used across threads
3 | import asyncio
4 | import json
5 | import time
6 | from functools import partial
7 |
8 | from kubernetes_asyncio import client, watch
9 | from traitlets import Any, Bool, Dict, Int, Unicode
10 | from traitlets.config import LoggingConfigurable
11 | from urllib3.exceptions import ReadTimeoutError
12 |
13 | from .clients import shared_client
14 |
15 | # This is kubernetes client implementation specific, but we need to know
16 | # whether it was a network or watch timeout.
17 |
18 |
19 | class ResourceReflector(LoggingConfigurable):
20 | """Base class for keeping a local up-to-date copy of a set of
21 | kubernetes resources.
22 |
23 | Must be subclassed once per kind of resource that needs watching.
24 |
25 | Creating a reflector should be done with the create() classmethod,
26 | since that, in addition to creating the instance, starts the watch task.
27 |
28 | Shutting down a reflector should be done by awaiting its stop() method.
29 |
30 | KubeSpawner does not do this, because its reflectors are singleton
31 | instances shared among multiple spawners. The watch task therefore runs
32 | until JupyterHub exits.
33 | """
34 |
35 | labels = Dict(
36 | {},
37 | config=True,
38 | help="""
39 | Labels to reflect onto local cache
40 | """,
41 | )
42 |
43 | fields = Dict(
44 | {},
45 | config=True,
46 | help="""
47 | Fields to restrict the reflected objects
48 | """,
49 | )
50 |
51 | resources = Dict(
52 | {},
53 | help="""
54 | Dictionary of resource names to the appropriate resource objects.
55 |
56 | This can be accessed across threads safely.
57 | """,
58 | )
59 |
60 | kind = Unicode(
61 | 'resource',
62 | help="""
63 | Human readable name for kind of object we're watching for.
64 |
65 | Used for diagnostic messages.
66 | """,
67 | )
68 |
69 | omit_namespace = Bool(
70 | False,
71 | config=True,
72 | help="""
73 | Set this to true if the reflector is to operate across
74 | multiple namespaces.
75 | """,
76 | )
77 |
78 | namespace = Unicode(
79 | None,
80 | allow_none=True,
81 | help="""
82 | Namespace to watch for resources in; leave at 'None' for
83 | multi-namespace reflectors.
84 | """,
85 | )
86 |
87 | list_method_name = Unicode(
88 | "",
89 | help="""
90 | Name of function (on apigroup represented by
91 | `api_group_name`) that is to be called to list resources.
92 |
93 | This will be passed a label selector.
94 |
95 | If self.omit_namespace is False, you want something of the form
96 | list_namespaced_ - for example,
97 | `list_namespaced_pod` will give you a PodReflector. It will
98 | take its namespace from self.namespace (which therefore should
99 | not be None).
100 |
101 | If self.omit_namespace is True, you want
102 | list__for_all_namespaces.
103 |
104 | This must be set by a subclass.
105 |
106 | It is not necessary to set it for pod or event reflectors, because
107 | __init__ will figure it out. If you create your own reflector
108 | subclass you probably want to add the logic to choose the method
109 | name to that class's __init__().
110 | """,
111 | )
112 |
113 | api_group_name = Unicode(
114 | 'CoreV1Api',
115 | help="""
116 | Name of class that represents the apigroup on which
117 | `list_method_name` is to be found.
118 |
119 | Defaults to CoreV1Api, which has everything in the 'core' API group. If
120 | you want to watch Ingresses you would have to use NetworkingV1Api.
121 | """,
122 | )
123 |
124 | request_timeout = Int(
125 | 60,
126 | config=True,
127 | help="""
128 | Network timeout for kubernetes watch.
129 |
130 | Trigger watch reconnect when a given request is taking too long,
131 | which can indicate network issues.
132 | """,
133 | )
134 |
135 | timeout_seconds = Int(
136 | 10,
137 | config=True,
138 | help="""
139 | Timeout for kubernetes watch.
140 |
141 | Trigger watch reconnect when no watch event has been received.
142 | This will cause a full reload of the currently existing resources
143 | from the API server.
144 | """,
145 | )
146 |
147 | restart_seconds = Int(
148 | 30,
149 | config=True,
150 | help="""
151 | Maximum time before restarting a watch.
152 |
153 | The watch will be restarted at least this often,
154 | even if events are still arriving.
155 | Avoids trusting kubernetes watch to yield all events,
156 | which seems not to be a safe assumption.
157 | """,
158 | )
159 |
160 | on_failure = Any(help="""Function to be called when the reflector gives up.""")
161 |
162 | _stopping = Bool(False)
163 |
164 | def __init__(self, *args, **kwargs):
165 | super().__init__(*args, **kwargs)
166 |
167 | # Client configuration for kubernetes, as done via the load_config
168 | # function, has already taken place in KubeSpawner or KubeIngressProxy
169 | # initialization steps.
170 | self.api = shared_client(self.api_group_name)
171 |
172 | # FIXME: Protect against malicious labels?
173 | self.label_selector = ','.join([f'{k}={v}' for k, v in self.labels.items()])
174 | self.field_selector = ','.join([f'{k}={v}' for k, v in self.fields.items()])
175 |
176 | self.first_load_future = asyncio.Future()
177 |
178 | # Make sure that we know kind, whether we should omit the
179 | # namespace, and what our list_method_name is. For the things
180 | # we already know about, we can derive list_method_name from
181 | # those two things. New reflector types should also update
182 | # their __init__() methods to derive list_method_name, but you
183 | # could just set it directly in the subclass.
184 | if not self.list_method_name: 185 | plural_to_singular = { 186 | "endpoints": "endpoints", 187 | "events": "event", 188 | "ingresses": "ingress", 189 | "pods": "pod", 190 | "services": "service", 191 | } 192 | 193 | if self.kind in plural_to_singular: 194 | if self.omit_namespace: 195 | self.list_method_name = ( 196 | f"list_{plural_to_singular[self.kind]}_for_all_namespaces" 197 | ) 198 | else: 199 | self.list_method_name = ( 200 | f"list_namespaced_{plural_to_singular[self.kind]}" 201 | ) 202 | 203 | # Make sure we have the required values. 204 | if not self.kind: 205 | raise RuntimeError("Reflector kind must be set!") 206 | if not self.list_method_name: 207 | raise RuntimeError("Reflector list_method_name must be set!") 208 | 209 | self.watch_task = None 210 | 211 | async def _list_and_update(self, resource_version=None): 212 | """ 213 | Update current list of resources by doing a full fetch. 214 | 215 | Overwrites all current resource info. 216 | """ 217 | initial_resources = None 218 | kwargs = dict( 219 | label_selector=self.label_selector, 220 | field_selector=self.field_selector, 221 | _request_timeout=self.request_timeout, 222 | _preload_content=False, 223 | ) 224 | if resource_version is not None: 225 | kwargs["resource_version"] = resource_version 226 | kwargs["resource_version_match"] = "NotOlderThan" 227 | if not self.omit_namespace: 228 | kwargs["namespace"] = self.namespace 229 | 230 | list_method = getattr(self.api, self.list_method_name) 231 | 232 | try: 233 | initial_resources_raw = await list_method(**kwargs) 234 | if not initial_resources_raw.ok: 235 | raise client.ApiException( 236 | status=initial_resources_raw.status, 237 | reason=initial_resources_raw.reason, 238 | ) 239 | except client.ApiException: 240 | self.log.exception( 241 | f'An error occurred when calling Kubernetes API.' 242 | f' Status: {initial_resources_raw.status} {initial_resources_raw.reason}.' 243 | f' Message: {(await initial_resources_raw.json())["message"]}' 244 | ) 245 | raise 246 | 247 | # This is an atomic operation on the dictionary! 248 | initial_resources = json.loads(await initial_resources_raw.read()) 249 | self.resources = { 250 | f'{p["metadata"]["namespace"]}/{p["metadata"]["name"]}': p 251 | for p in initial_resources["items"] 252 | } 253 | if not self.first_load_future.done(): 254 | # signal that we've loaded our initial data at least once 255 | self.first_load_future.set_result(None) 256 | # return the resource version so we can hook up a watch 257 | return initial_resources["metadata"]["resourceVersion"] 258 | 259 | async def _watch_and_update(self): 260 | """ 261 | Keeps the current list of resources up-to-date 262 | 263 | We first fetch the list of current resources, and store that. Then we 264 | register to be notified of changes to those resources, and keep our 265 | local store up-to-date based on these notifications. 266 | 267 | We also perform exponential backoff, giving up after we hit 32s 268 | wait time. This should protect against network connections dropping 269 | and intermittent unavailability of the api-server. Every time we 270 | recover from an exception we also do a full fetch, to pick up 271 | changes that might've been missed in the time we were not doing 272 | a watch. 273 | 274 | Since the resources are read-only in the Spawner (where they are 275 | used), then this is safe. The Spawner's view of the world might be 276 | out-of-date, but it's not going to corrupt any data. 
277 | """ 278 | selectors = [] 279 | if self.label_selector: 280 | selectors.append("label selector=%r" % self.label_selector) 281 | if self.field_selector: 282 | selectors.append("field selector=%r" % self.field_selector) 283 | log_selector = ', '.join(selectors) 284 | 285 | # fetch Any (=api-server cached) data from apiserver on initial fetch 286 | # see https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions 287 | # for more information 288 | resource_version = "0" 289 | 290 | cur_delay = 0.1 291 | 292 | if self.omit_namespace: 293 | ns_str = "all namespaces" 294 | else: 295 | ns_str = f"namespace {self.namespace}" 296 | 297 | self.log.info( 298 | "watching for %s with %s in %s", 299 | self.kind, 300 | log_selector, 301 | ns_str, 302 | ) 303 | while True: 304 | self.log.debug("Connecting %s watcher", self.kind) 305 | start = time.monotonic() 306 | w = watch.Watch() 307 | try: 308 | resource_version = await self._list_and_update(resource_version) 309 | cur_delay = 0.1 310 | watch_args = { 311 | "label_selector": self.label_selector, 312 | "field_selector": self.field_selector, 313 | "resource_version": resource_version, 314 | } 315 | if not self.omit_namespace: 316 | watch_args["namespace"] = self.namespace 317 | if self.request_timeout: 318 | # set network receive timeout 319 | watch_args['_request_timeout'] = self.request_timeout 320 | if self.timeout_seconds: 321 | # set watch timeout 322 | watch_args['timeout_seconds'] = self.timeout_seconds 323 | # Calling the method with _preload_content=False is a performance 324 | # optimization making the Kubernetes client do less work. See 325 | # https://github.com/jupyterhub/kubespawner/pull/424. 326 | method = partial( 327 | getattr(self.api, self.list_method_name), _preload_content=False 328 | ) 329 | async with w.stream(method, **watch_args) as stream: 330 | async for watch_event in stream: 331 | # in case of timeout_seconds, the w.stream just exits (no exception thrown) 332 | # -> we stop the watcher and start a new one 333 | # Remember that these events are k8s api related WatchEvents 334 | # objects, not k8s Event or Pod representations, they will 335 | # reside in the WatchEvent's object field depending on what 336 | # kind of resource is watched. 337 | # 338 | # ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#watchevent-v1-meta 339 | # ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#event-v1-core 340 | cur_delay = 0.1 341 | resource = watch_event['raw_object'] 342 | ref_key = "{}/{}".format( 343 | resource["metadata"]["namespace"], 344 | resource["metadata"]["name"], 345 | ) 346 | if watch_event['type'] == 'DELETED': 347 | # This is an atomic delete operation on the dictionary! 348 | self.resources.pop(ref_key, None) 349 | else: 350 | # This is an atomic operation on the dictionary! 
351 | self.resources[ref_key] = resource 352 | resource_version = resource["metadata"]["resourceVersion"] 353 | if self._stopping: 354 | self.log.info("%s watcher stopped: inner", self.kind) 355 | break 356 | watch_duration = time.monotonic() - start 357 | if watch_duration >= self.restart_seconds: 358 | self.log.debug( 359 | "Restarting %s watcher after %i seconds", 360 | self.kind, 361 | watch_duration, 362 | ) 363 | break 364 | 365 | except ReadTimeoutError: 366 | # network read time out, just continue and restart the watch 367 | # this could be due to a network problem or just low activity 368 | self.log.warning("Read timeout watching %s, reconnecting", self.kind) 369 | continue 370 | except asyncio.CancelledError: 371 | self.log.debug("Cancelled watching %s", self.kind) 372 | raise 373 | except Exception: 374 | # ensure we request a valid resource version on retry, 375 | # needed on 410 Gone errors 376 | resource_version = "0" 377 | cur_delay = cur_delay * 2 378 | if cur_delay > 30: 379 | self.log.exception("Watching resources never recovered, giving up") 380 | if self.on_failure: 381 | self.on_failure() 382 | return 383 | self.log.exception( 384 | "Error when watching resources, retrying in %ss", cur_delay 385 | ) 386 | await asyncio.sleep(cur_delay) 387 | continue 388 | else: 389 | # no events on watch, reconnect 390 | self.log.debug("%s watcher timeout", self.kind) 391 | finally: 392 | w.stop() 393 | await w.close() 394 | if self._stopping: 395 | self.log.info("%s watcher stopped: outer", self.kind) 396 | break 397 | self.log.warning("%s watcher finished", self.kind) 398 | 399 | async def start(self): 400 | """ 401 | Start the reflection process! 402 | 403 | We'll do a blocking read of all resources first, so that we don't 404 | race with any operations that are checking the state of the pod 405 | store - such as polls. This should be called only once at the 406 | start of program initialization (when the singleton is being created), 407 | and not afterwards! 408 | """ 409 | if self.watch_task and not self.watch_task.done(): 410 | raise RuntimeError(f"Task watching for {self.kind} is already running") 411 | try: 412 | await self._list_and_update() 413 | except Exception as e: 414 | self.log.exception(f"Initial list of {self.kind} failed") 415 | if not self.first_load_future.done(): 416 | # anyone awaiting our first load event should fail 417 | self.first_load_future.set_exception(e) 418 | raise 419 | 420 | self.watch_task = asyncio.create_task(self._watch_and_update()) 421 | 422 | async def stop(self): 423 | """ 424 | Cleanly shut down the watch task. 425 | """ 426 | self._stopping = True 427 | if self.watch_task and not self.watch_task.done(): 428 | # cancel the task, wait for it to complete 429 | self.watch_task.cancel() 430 | try: 431 | timeout = 5 432 | await asyncio.wait_for(self.watch_task, timeout) 433 | except asyncio.TimeoutError: 434 | # Raising the TimeoutError will cancel the task. 435 | self.log.warning( 436 | f"Watch task did not finish in {timeout}s and was cancelled" 437 | ) 438 | self.watch_task = None 439 | 440 | 441 | class NamespacedResourceReflector(ResourceReflector): 442 | """ 443 | Watches for resources in a particular namespace. The list_methods 444 | want both a method name and a namespace. 445 | """ 446 | 447 | omit_namespace = False 448 | 449 | 450 | class MultiNamespaceResourceReflector(ResourceReflector): 451 | """ 452 | Watches for resources across all namespaces. The list_methods 453 | want only a method name. 
Note that this requires the service account
454 | to be significantly more powerful, since it must be bound to ClusterRoles
455 | rather than just Roles, and therefore this is inherently more
456 | dangerous.
457 | """
458 |
459 | omit_namespace = True
460 |
-------------------------------------------------------------------------------- /kubespawner/slugs.py: --------------------------------------------------------------------------------
1 | """Tools for generating slugs like k8s object names and labels
2 |
3 | Requirements:
4 |
5 | - always valid for arbitrary strings
6 | - no collisions
7 | """
8 |
9 | import hashlib
10 | import re
11 | import string
12 |
13 | import escapism
14 |
15 | _alphanum = tuple(string.ascii_letters + string.digits)
16 | _alpha_lower = tuple(string.ascii_lowercase)
17 | _alphanum_lower = tuple(string.ascii_lowercase + string.digits)
18 | _lower_plus_hyphen = _alphanum_lower + ('-',)
19 |
20 | # patterns _do not_ need to cover length or start/end conditions,
21 | # which are handled separately
22 | _object_pattern = re.compile(r'^[a-z0-9\-]+$')
23 | _label_pattern = re.compile(r'^[a-z0-9\.\-_]+$', flags=re.IGNORECASE)
24 |
25 | # match anything that's not lowercase alphanumeric (will be stripped, replaced with '-')
26 | _non_alphanum_pattern = re.compile(r'[^a-z0-9]+')
27 |
28 | # length of hash suffix
29 | _hash_length = 8
30 |
31 | # Make sure username and servername match the restrictions for DNS labels
32 | # Note: '-' is not in safe_chars, as it is being used as escape character
33 | _escape_slug_safe_chars = set(string.ascii_lowercase + string.digits)
34 |
35 |
36 | def escape_slug(name):
37 | """Generate a slug with the legacy system; safe_slug is preferred."""
38 | return escapism.escape(
39 | name,
40 | safe=_escape_slug_safe_chars,
41 | escape_char='-',
42 | ).lower()
43 |
44 |
45 | def _is_valid_general(
46 | s, starts_with=None, ends_with=None, pattern=None, min_length=None, max_length=None
47 | ):
48 | """General is_valid check
49 |
50 | Checks the rules given as keyword arguments: start/end characters, regex pattern, and min/max length.
51 | """
52 | if min_length and len(s) < min_length:
53 | return False
54 | if max_length and len(s) > max_length:
55 | return False
56 | if starts_with and not s.startswith(starts_with):
57 | return False
58 | if ends_with and not s.endswith(ends_with):
59 | return False
60 | if pattern and not pattern.match(s):
61 | return False
62 | return True
63 |
64 |
65 | def is_valid_object_name(s):
66 | """is_valid check for object names
67 |
68 | Ensures all strictest object rules apply,
69 | satisfying both RFC 1035 and RFC 1123 DNS label name rules
70 |
71 | - at most 63 characters
72 | - starts with letter, ends with letter or number
73 | - only lowercase letters, numbers, '-'
74 | """
75 | # object rules: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
76 | return _is_valid_general(
77 | s,
78 | starts_with=_alpha_lower,
79 | ends_with=_alphanum_lower,
80 | pattern=_object_pattern,
81 | max_length=63,
82 | min_length=1,
83 | )
84 |
85 |
86 | def is_valid_label(s):
87 | """is_valid check for label values"""
88 | # label rules: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
89 | if not s:
90 | # empty strings are valid labels
91 | return True
92 | return _is_valid_general(
93 | s,
94 | starts_with=_alphanum,
95 | ends_with=_alphanum,
96 | pattern=_label_pattern,
97 | max_length=63,
98 | )
99 |
100 |
101 | def is_valid_default(s):
102 | """Strict is_valid
103 |
104 | Returns True if it's valid for _all_ our known uses
105 |
106 |
Currently, this is the same as is_valid_object_name, 107 | which produces a valid DNS label under RFC1035 AND RFC 1123, 108 | which is always also a valid label value. 109 | """ 110 | return is_valid_object_name(s) 111 | 112 | 113 | def _extract_safe_name(name, max_length): 114 | """Generate safe substring of a name 115 | 116 | Guarantees: 117 | 118 | - always starts with a lowercase letter 119 | - always ends with a lowercase letter or number 120 | - never more than one hyphen in a row (no '--') 121 | - only contains lowercase letters, numbers, and hyphens 122 | - length at least 1 ('x' if other rules strips down to empty string) 123 | - max length not exceeded 124 | """ 125 | # compute safe slug from name (don't worry about collisions, hash handles that) 126 | # cast to lowercase 127 | # replace any sequence of non-alphanumeric characters with a single '-' 128 | safe_name = _non_alphanum_pattern.sub("-", name.lower()) 129 | # truncate to max_length chars, strip '-' off ends 130 | safe_name = safe_name.lstrip("-")[:max_length].rstrip("-") 131 | # ensure starts with lowercase letter 132 | if safe_name and not safe_name.startswith(_alpha_lower): 133 | safe_name = "x-" + safe_name[: max_length - 2] 134 | if not safe_name: 135 | # make sure it's non-empty 136 | safe_name = 'x' 137 | return safe_name 138 | 139 | 140 | def strip_and_hash(name, max_length=32): 141 | """Generate an always-safe, unique string for any input 142 | 143 | truncates name to max_length - len(hash_suffix) to fit in max_length 144 | after adding hash suffix 145 | """ 146 | name_length = max_length - (_hash_length + 3) 147 | if name_length < 1: 148 | raise ValueError(f"Cannot make safe names shorter than {_hash_length + 4}") 149 | # quick, short hash to avoid name collisions 150 | name_hash = hashlib.sha256(name.encode("utf8")).hexdigest()[:_hash_length] 151 | safe_name = _extract_safe_name(name, name_length) 152 | # due to stripping of '-' in _extract_safe_name, 153 | # the result will always have _exactly_ '---', never '--' nor '----' 154 | # use '---' to avoid colliding with `{username}--{servername}` template join 155 | return f"{safe_name}---{name_hash}" 156 | 157 | 158 | def safe_slug(name, is_valid=is_valid_default, max_length=None): 159 | """Always generate a safe slug 160 | 161 | is_valid should be a callable that returns True if a given string follows appropriate rules, 162 | and False if it does not. 163 | 164 | Given a string, if it's already valid, use it. 165 | If it's not valid, follow a safe encoding scheme that ensures: 166 | 167 | 1. validity, and 168 | 2. 
no collisions 169 | """ 170 | if '--' in name: 171 | # don't accept any names that could collide with the safe slug 172 | return strip_and_hash(name, max_length=max_length or 32) 173 | # allow max_length override for truncated sub-strings 174 | if is_valid(name) and (max_length is None or len(name) <= max_length): 175 | return name 176 | else: 177 | return strip_and_hash(name, max_length=max_length or 32) 178 | 179 | 180 | def multi_slug(names, max_length=48): 181 | """multi-component slug with single hash on the end 182 | 183 | same as strip_and_hash, but name components are joined with '--', 184 | so it looks like: 185 | 186 | {name1}--{name2}---{hash} 187 | 188 | In order to avoid hash collisions on boundaries, use `\\xFF` as delimiter 189 | """ 190 | hasher = hashlib.sha256() 191 | hasher.update(names[0].encode("utf8")) 192 | for name in names[1:]: 193 | # \xFF can't occur as a start byte in UTF8 194 | # so use it as a word delimiter to make sure overlapping words don't collide 195 | hasher.update(b"\xff") 196 | hasher.update(name.encode("utf8")) 197 | hash = hasher.hexdigest()[:_hash_length] 198 | 199 | name_slugs = [] 200 | available_chars = max_length - (_hash_length + 1) 201 | # allocate equal space per name 202 | # per_name accounts for '{name}--', so really two less 203 | per_name = available_chars // len(names) 204 | name_max_length = per_name - 2 205 | if name_max_length < 2: 206 | raise ValueError(f"Not enough characters for {len(names)} names: {max_length}") 207 | for name in names: 208 | name_slugs.append(_extract_safe_name(name, name_max_length)) 209 | 210 | # by joining names with '--', this cannot collide with single-hashed names, 211 | # which can only contain '-' and the '---' hash delimiter once 212 | return f"{'--'.join(name_slugs)}---{hash}" 213 | -------------------------------------------------------------------------------- /kubespawner/templates/form.html: -------------------------------------------------------------------------------- 1 | 2 |
3 | {%- for profile in profile_list %} 4 | {#- Wrap everything in a label tag so clicking anywhere selects the option #} 5 | 53 | {%- endfor %} 54 |
55 |
99 |
-------------------------------------------------------------------------------- /kubespawner/templates/style.css: --------------------------------------------------------------------------------
1 | /*
2 | .profile divs hold two div tags: one for a radio button, and one
3 | for the profile's content.
4 | */
5 | #kubespawner-profiles-list .profile {
6 | display: flex;
7 | flex-direction: row;
8 | font-weight: normal;
9 | border-bottom: 1px solid #ccc;
10 | padding-bottom: 12px;
11 | }
12 |
13 | #kubespawner-profiles-list .profile .radio {
14 | padding: 12px;
15 | }
16 |
17 | /* .option divs hold a label and a select tag */
18 | #kubespawner-profiles-list .profile .option {
19 | display: flex;
20 | flex-direction: row;
21 | align-items: center;
22 | padding-bottom: 12px;
23 | }
24 |
25 | #kubespawner-profiles-list .profile .option label {
26 | font-weight: normal;
27 | margin-right: 8px;
28 | min-width: 96px;
29 | }
30 |
-------------------------------------------------------------------------------- /kubespawner/utils.py: --------------------------------------------------------------------------------
1 | """
2 | Misc. general utility functions, not tied to KubeSpawner directly
3 | """
4 |
5 | import copy
6 | import hashlib
7 |
8 |
9 | def generate_hashed_slug(slug, limit=63, hash_length=6):
10 | """
11 | Generate a unique name that's within a certain length limit
12 |
13 | Most k8s objects have a 63 char name limit. We want to be able to compress
14 | larger names down to that if required, while still maintaining some
15 | amount of legibility about what the objects really are.
16 |
17 | If the length of the slug is shorter than the limit - hash_length, we just
18 | return the slug directly. If not, we truncate the slug to (limit - hash_length)
19 | characters, hash the slug, and append hash_length characters from the hash
20 | to the end of the truncated slug. This ensures that these names are always
21 | unique no matter what.
22 | """
23 | if len(slug) < (limit - hash_length):
24 | return slug
25 |
26 | slug_hash = hashlib.sha256(slug.encode('utf-8')).hexdigest()
27 |
28 | return '{prefix}-{hash}'.format(
29 | prefix=slug[: limit - hash_length - 1],
30 | hash=slug_hash[:hash_length],
31 | ).lower()
32 |
33 |
34 | def update_k8s_model(target, changes, logger=None, target_name=None, changes_name=None):
35 | """
36 | Takes a model instance such as V1PodSpec() and updates it with another
37 | model, which is allowed to be a dict or another model instance of the same
38 | type. The logger is used to warn if any truthy value in the target is
39 | overridden. The target_name parameter can, for example, be "pod.spec", and
40 | the changes_name parameter could be "extra_pod_config". These parameters allow
41 | the logger to write out something more meaningful to the user whenever
42 | something is about to be overridden.
 43 |     """
 44 |     model_type = type(target)
 45 |     if not hasattr(target, 'attribute_map'):
 46 |         raise AttributeError(
 47 |             "Attribute 'target' ({}) must be an object (such as 'V1PodSpec') with an attribute 'attribute_map'.".format(
 48 |                 model_type.__name__
 49 |             )
 50 |         )
 51 |     if not isinstance(changes, model_type) and not isinstance(changes, dict):
 52 |         raise AttributeError(
 53 |             "Attribute 'changes' ({}) must be an object of the same type as 'target' ({}) or a 'dict'.".format(
 54 |                 type(changes).__name__, model_type.__name__
 55 |             )
 56 |         )
 57 | 
 58 |     changes_dict = _get_k8s_model_dict(model_type, changes)
 59 |     for key, value in changes_dict.items():
 60 |         if key not in target.attribute_map:
 61 |             raise ValueError(
 62 |                 "The attribute 'changes' ({}) contained '{}' not modeled by '{}'.".format(
 63 |                     type(changes).__name__, key, model_type.__name__
 64 |                 )
 65 |             )
 66 | 
 67 |         # If changes are passed as a dict, they will only have a few keys/value
 68 |         # pairs representing the specific changes. If the changes parameter is a
 69 |         # model instance on the other hand, the changes parameter will have a
 70 |         # lot of default values as well. These default values, which are also
 71 |         # falsy, should not be used to override the target's values.
 72 |         if isinstance(changes, dict) or value:
 73 |             if getattr(target, key):
 74 |                 if logger and changes_name:
 75 |                     msg = "'{}.{}' current value: '{}' is overridden with '{}', which is the value of '{}.{}'.".format(
 76 |                         target_name, key, getattr(target, key), value, changes_name, key
 77 |                     )
 78 |                     logger.info(msg)
 79 |             setattr(target, key, value)
 80 | 
 81 |     return target
 82 | 
 83 | 
 84 | def get_k8s_model(model_type, model_dict):
 85 |     """
 86 |     Returns an instance of the specified model_type from a model instance or
 87 |     a representative dictionary.
 88 |     """
 89 |     model_dict = copy.deepcopy(model_dict)
 90 | 
 91 |     if isinstance(model_dict, model_type):
 92 |         return model_dict
 93 |     elif isinstance(model_dict, dict):
 94 |         # convert the dictionary's camelCase keys to snake_case keys
 95 |         model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict)
 96 |         # use the dictionary keys to initialize a model of the given type
 97 |         return model_type(**model_dict)
 98 |     else:
 99 |         raise AttributeError(
100 |             "Expected object of type 'dict' (or '{}') but got '{}'.".format(
101 |                 model_type.__name__, type(model_dict).__name__
102 |             )
103 |         )
104 | 
105 | 
106 | def _get_k8s_model_dict(model_type, model):
107 |     """
108 |     Returns a dictionary representation of a provided model
109 |     """
110 |     model = copy.deepcopy(model)
111 | 
112 |     if isinstance(model, model_type):
113 |         return model.to_dict()
114 |     elif isinstance(model, dict):
115 |         return _map_dict_keys_to_model_attributes(model_type, model)
116 |     else:
117 |         raise AttributeError(
118 |             "Expected object of type '{}' (or 'dict') but got '{}'.".format(
119 |                 model_type.__name__, type(model).__name__
120 |             )
121 |         )
122 | 
123 | 
124 | def _map_dict_keys_to_model_attributes(model_type, model_dict):
125 |     """
126 |     Maps a dict's keys to the provided model's attributes using its attribute_map
127 |     attribute. This is (always?) the same as converting camelCase to snake_case.
128 |     Note that the function will not influence nested objects' keys.
129 |     """
130 | 
131 |     new_dict = {}
132 |     for key, value in model_dict.items():
133 |         new_dict[_get_k8s_model_attribute(model_type, key)] = value
134 | 
135 |     return new_dict
136 | 
137 | 
138 | def _get_k8s_model_attribute(model_type, field_name):
139 |     """
140 |     Takes a model type and a Kubernetes API resource field name (such as
141 |     "serviceAccount") and returns a related attribute name (such as
142 |     "service_account") to be used with kubernetes.client.models objects. It is
143 |     impossible to prove a negative, but this seems to always be a matter of
144 |     converting camelCase to snake_case; by using the provided 'attribute_map' we
145 |     also ensure that the fields actually exist.
146 | 
147 |     Example of V1PodSpec's attribute_map:
148 |     {
149 |         'active_deadline_seconds': 'activeDeadlineSeconds',
150 |         'affinity': 'affinity',
151 |         'automount_service_account_token': 'automountServiceAccountToken',
152 |         'containers': 'containers',
153 |         'dns_policy': 'dnsPolicy',
154 |         'host_aliases': 'hostAliases',
155 |         'host_ipc': 'hostIPC',
156 |         'host_network': 'hostNetwork',
157 |         'host_pid': 'hostPID',
158 |         'hostname': 'hostname',
159 |         'image_pull_secrets': 'imagePullSecrets',
160 |         'init_containers': 'initContainers',
161 |         'node_name': 'nodeName',
162 |         'node_selector': 'nodeSelector',
163 |         'priority': 'priority',
164 |         'priority_class_name': 'priorityClassName',
165 |         'restart_policy': 'restartPolicy',
166 |         'scheduler_name': 'schedulerName',
167 |         'security_context': 'securityContext',
168 |         'service_account': 'serviceAccount',
169 |         'service_account_name': 'serviceAccountName',
170 |         'subdomain': 'subdomain',
171 |         'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
172 |         'tolerations': 'tolerations',
173 |         'volumes': 'volumes'
174 |     }
175 |     """
176 |     # if we get "service_account", return it unchanged
177 |     if field_name in model_type.attribute_map:
178 |         return field_name
179 | 
180 |     # if we get "serviceAccount", then return "service_account"
181 |     for key, value in model_type.attribute_map.items():
182 |         if value == field_name:
183 |             return key
184 |     else:
185 |         raise ValueError(
186 |             "'{}' did not have an attribute matching '{}'".format(
187 |                 model_type.__name__, field_name
188 |             )
189 |         )
190 | 
191 | 
192 | def host_matching(host: str, wildcard: str) -> bool:
193 |     # user.example.com == user.example.com
194 |     # user.example.com != wrong.example.com
195 |     # user.example.com != example.com
196 |     if not wildcard.startswith("*."):
197 |         return host == wildcard
198 | 
199 |     host_parts = host.split(".")
200 |     wildcard_parts = wildcard.split(".")
201 | 
202 |     # user.example.com =~ *.example.com
203 |     # user.example.com !~ *.user.example.com
204 |     # user.example.com !~ *.example
205 |     return host_parts[1:] == wildcard_parts[1:]
206 | 
207 | 
208 | # From https://github.com/jupyter-server/jupyter_server/blob/fc0ac3236fdd92778ea765db6e8982212c8389ee/jupyter_server/config_manager.py#L14
209 | def recursive_update(target, new):
210 |     """
211 |     Recursively update one dictionary in-place using another.
212 | 
213 |     None values will delete their keys.
214 |     """
215 |     for k, v in new.items():
216 |         if isinstance(v, dict):
217 |             if k not in target:
218 |                 target[k] = {}
219 |             recursive_update(target[k], v)
220 | 
221 |         elif v is None:
222 |             target.pop(k, None)
223 | 
224 |         else:
225 |             target[k] = v
226 | 
227 | 
228 | class IgnoreMissing(dict):
229 |     """
230 |     Dictionary subclass for use with format_map
231 | 
232 |     Returns missing dictionary keys' values as "{key}", so format strings with
233 |     missing values just get rendered as is.
234 | 
235 |     Stolen from https://docs.python.org/3/library/stdtypes.html#str.format_map
236 |     """
237 | 
238 |     def __missing__(self, key):
239 |         return f"{{{key}}}"
240 | 
241 | 
242 | def recursive_format(format_object, **kwargs):
243 |     """
244 |     Recursively format given object with values provided as keyword arguments.
245 | 
246 |     If the given object (string, list, set, or dict) has items that do not have
247 |     placeholders for passed in kwargs, no formatting is performed.
248 | 
249 |     recursive_format("{v}", v=5) -> Returns "5"
250 |     recursive_format("{a}") -> Returns "{a}" rather than erroring, as is
251 |              the behavior of "format"
252 |     """
253 |     if isinstance(format_object, str):
254 |         return format_object.format_map(IgnoreMissing(kwargs))
255 |     elif isinstance(format_object, list):
256 |         return [recursive_format(i, **kwargs) for i in format_object]
257 |     elif isinstance(format_object, set):
258 |         return {recursive_format(i, **kwargs) for i in format_object}
259 |     elif isinstance(format_object, dict):
260 |         return {
261 |             recursive_format(k, **kwargs): recursive_format(v, **kwargs)
262 |             for k, v in format_object.items()
263 |         }
264 |     else:
265 |         # Everything else just gets returned as is, unformatted
266 |         return format_object
267 | 


--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
  1 | # build-system
  2 | # - ref: https://peps.python.org/pep-0517/
  3 | #
  4 | [build-system]
  5 | requires = ["hatchling"]
  6 | build-backend = "hatchling.build"
  7 | 
  8 | 
  9 | # project
 10 | # - ref 1: https://peps.python.org/pep-0621/
 11 | # - ref 2: https://hatch.pypa.io/latest/config/metadata/#project-metadata
 12 | #
 13 | [project]
 14 | name = "jupyterhub-kubespawner"
 15 | description = "JupyterHub Spawner for Kubernetes"
 16 | readme = "README.md"
 17 | requires-python = ">=3.7"
 18 | license = {file = "LICENSE"}
 19 | keywords = ["jupyterhub", "spawner"]
 20 | authors = [
 21 |     {name = "Jupyter Contributors", email = "jupyter@googlegroups.com"},
 22 | ]
 23 | classifiers = [
 24 |     "Development Status :: 5 - Production/Stable",
 25 |     "Programming Language :: Python :: 3",
 26 | ]
 27 | dependencies = [
 28 |     # NOTE: If lower bounds are updated, also update our test for the lower
 29 |     #       bounds in .github/workflows/test.yaml.
 30 |     "escapism",
 31 |     "jinja2",
 32 |     "jupyterhub>=4.0.0",
 33 |     "kubernetes_asyncio>=24.2.3",
 34 |     "python-slugify",
 35 |     "pyYAML",
 36 |     "traitlets",
 37 |     "urllib3",
 38 | ]
 39 | dynamic = ["version"]
 40 | 
 41 | [project.optional-dependencies]
 42 | test = [
 43 |     "kubernetes>=11",
 44 |     "pytest>=5.4",
 45 |     "pytest-cov",
 46 |     # FIXME: unpin pytest-asyncio
 47 |     "pytest-asyncio>=0.17,<0.23",
 48 | ]
 49 | 
 50 | [project.urls]
 51 | Documentation = "https://jupyterhub-kubespawner.readthedocs.io"
 52 | Source = "https://github.com/jupyterhub/kubespawner"
 53 | Issues = "https://github.com/jupyterhub/kubespawner/issues"
 54 | 
 55 | # Explicitly include our profile_list templates, as hatch doesn't
 56 | # respect MANIFEST.in.
 57 | # Documentation: https://hatch.pypa.io/latest/config/build/#artifacts
 58 | [tool.hatch.build]
 59 | artifacts = [
 60 |     "kubespawner/templates/*"
 61 | ]
 62 | # include is required since the project name doesn't match the folder name
 63 | include = ["kubespawner"]
 64 | 
 65 | [tool.hatch.build.targets.wheel]
 66 | # packages is required when building wheels since the project name doesn't match
 67 | # the folder name.
 68 | packages = ["kubespawner"]
 69 | 
 70 | # black is used for autoformatting Python code
 71 | #
 72 | # ref: https://black.readthedocs.io/en/stable/
 73 | #
 74 | [tool.black]
 75 | skip-string-normalization = true
 76 | # target-version should be all supported versions, see
 77 | # https://github.com/psf/black/issues/751#issuecomment-473066811
 78 | target_version = [
 79 |     "py37",
 80 |     "py38",
 81 |     "py39",
 82 |     "py310",
 83 |     "py311",
 84 | ]
 85 | 
 86 | 
 87 | # hatch ref: https://hatch.pypa.io/latest/
 88 | #
 89 | [tool.hatch.version]
 90 | path = "kubespawner/_version.py"
 91 | 
 92 | 
 93 | # isort is used for autoformatting Python code
 94 | #
 95 | # ref: https://pycqa.github.io/isort/
 96 | #
 97 | [tool.isort]
 98 | profile = "black"
 99 | 
100 | 
101 | # pytest is used for running Python based tests
102 | #
103 | # ref: https://docs.pytest.org/en/stable/
104 | #
105 | [tool.pytest.ini_options]
106 | addopts = "--verbose --color=yes --durations=10"
107 | asyncio_mode = "auto"
108 | # Ignore thousands of tests in dependencies installed in a virtual environment
109 | norecursedirs = "lib lib64"
110 | 
111 | 
112 | # tbump is used to simplify and standardize the release process when updating
113 | # the version, making a git commit and tag, and pushing changes.
114 | #
115 | # ref: https://github.com/your-tools/tbump#readme
116 | #
117 | [tool.tbump]
118 | github_url = "https://github.com/jupyterhub/kubespawner"
119 | 
120 | [tool.tbump.version]
121 | current = "7.0.0"
122 | regex = '''
123 |     (?P<major>\d+)
124 |     \.
125 |     (?P<minor>\d+)
126 |     \.
127 |     (?P<patch>\d+)
128 |     (?P<pre>((a|b|rc)\d+)|)
129 |     \.?
130 |     (?P<dev>(?<=\.)dev\d*|)
131 | '''
132 | 
133 | [tool.tbump.git]
134 | message_template = "Bump to {new_version}"
135 | tag_template = "{new_version}"
136 | 
137 | [[tool.tbump.file]]
138 | src = "kubespawner/_version.py"
139 | 
140 | # djlint is used for autoformatting jinja templates
141 | #
142 | # ref: https://www.djlint.com/docs/formatter/
143 | #
144 | [tool.djlint]
145 | indent = 2
146 | profile = "jinja"
147 | 
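
A self-contained illustration (editor's addition, not part of the repo) of which
version strings the tbump regex above accepts. It assumes the triple-quoted,
multi-line pattern is compiled with re.VERBOSE, as tbump appears to do:

    import re

    VERSION_RE = re.compile(
        r"""
        (?P<major>\d+)
        \.
        (?P<minor>\d+)
        \.
        (?P<patch>\d+)
        (?P<pre>((a|b|rc)\d+)|)
        \.?
        (?P<dev>(?<=\.)dev\d*|)
        """,
        re.VERBOSE,
    )

    # "7.0.0"        -> pre="",   dev=""
    # "7.0.0b2"      -> pre="b2", dev=""
    # "7.0.0b2.dev3" -> pre="b2", dev="dev3"
    assert VERSION_RE.fullmatch("7.0.0b2.dev3").group("pre") == "b2"
    assert VERSION_RE.fullmatch("7.0.0").group("dev") == ""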


--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
  1 | """pytest fixtures for kubespawner"""
  2 | 
  3 | import asyncio
  4 | import base64
  5 | import inspect
  6 | import io
  7 | import logging
  8 | import os
  9 | import sys
 10 | import tarfile
 11 | from functools import partial
 12 | 
 13 | import pytest
 14 | import pytest_asyncio
 15 | from jupyterhub.app import JupyterHub
 16 | from jupyterhub.objects import Hub
 17 | from kubernetes.client import CoreV1Api as sync_CoreV1Api
 18 | from kubernetes.config import load_kube_config as sync_load_kube_config
 19 | from kubernetes.stream import stream as sync_stream
 20 | from kubernetes_asyncio.client import (
 21 |     V1ConfigMap,
 22 |     V1Namespace,
 23 |     V1Pod,
 24 |     V1PodSpec,
 25 |     V1Secret,
 26 |     V1Service,
 27 |     V1ServicePort,
 28 |     V1ServiceSpec,
 29 | )
 30 | from kubernetes_asyncio.client.rest import ApiException
 31 | from kubernetes_asyncio.config import load_kube_config
 32 | from kubernetes_asyncio.watch import Watch
 33 | from traitlets.config import Config
 34 | 
 35 | from kubespawner import KubeSpawner
 36 | from kubespawner.clients import shared_client
 37 | 
 38 | here = os.path.abspath(os.path.dirname(__file__))
 39 | jupyterhub_config_py = os.path.join(here, "jupyterhub_config.py")
 40 | 
 41 | # We do these to set up the synchronous client, needed for executing
 42 | # python inside pods.
 43 | sync_load_kube_config()
 44 | sync_corev1api = sync_CoreV1Api()
 45 | 
 46 | 
 47 | async def cancel_tasks():
 48 |     """Cancel long-running tasks
 49 | 
 50 |     This is copied from JupyterHub's shutdown_cancel_tasks (as of 2.1.1)
 51 |     to emulate JupyterHub's cleanup of cancelled tasks at shutdown.
 52 | 
 53 |     shared_client's cleanup relies on this.
 54 |     """
 55 |     tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
 56 |     log = logging.getLogger("traitlets")
 57 |     if tasks:
 58 |         log.debug(f"Cancelling {len(tasks)} pending tasks")
 59 |         [t.cancel() for t in tasks]
 60 | 
 61 |         try:
 62 |             await asyncio.wait(tasks)
 63 |         except asyncio.CancelledError as e:
 64 |             log.debug("Caught Task CancelledError. Ignoring")
 65 |         except Exception:
 66 |             log.exception("Caught Exception in cancelled task")
 67 | 
 68 |         tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
 69 |         for t in tasks:
 70 |             log.debug("Task status: %s", t)
 71 | 
 72 | 
 73 | @pytest.fixture(scope="session")
 74 | def event_loop():
 75 |     loop = asyncio.get_event_loop_policy().new_event_loop()
 76 |     yield loop
 77 |     # cancel tasks, as is done in JupyterHub
 78 |     loop.run_until_complete(cancel_tasks())
 79 |     loop.close()
 80 | 
 81 | 
 82 | @pytest.fixture(autouse=True)
 83 | def traitlets_logging():
 84 |     """Ensure traitlets default logging is enabled
 85 | 
 86 |     so KubeSpawner logs are captured by pytest.
 87 |     By default, there is a "NullHandler" so no logs are produced.
 88 |     """
 89 |     logger = logging.getLogger('traitlets')
 90 |     logger.setLevel(logging.DEBUG)
 91 |     logger.handlers = []
 92 | 
 93 | 
 94 | @pytest.fixture(scope="session")
 95 | def kube_ns():
 96 |     """Fixture for the kubernetes namespace"""
 97 |     return os.environ.get("KUBESPAWNER_TEST_NAMESPACE") or "kubespawner-test"
 98 | 
 99 | 
100 | @pytest.fixture(scope="session")
101 | def kube_another_ns():
102 |     """Fixture for another kubernetes namespace"""
103 |     return os.environ.get("KUBESPAWNER_ANOTHER_NAMESPACE") or "kubespawner-another"
104 | 
105 | 
106 | @pytest.fixture
107 | def config(kube_ns):
108 |     """Return a traitlets Config object
109 | 
110 |     The base configuration for testing.
111 |     Use when constructing Spawners for tests
112 |     """
113 |     cfg = Config()
114 |     cfg.KubeSpawner.namespace = kube_ns
115 |     cfg.KubeSpawner.cmd = ["jupyterhub-singleuser"]
116 |     cfg.KubeSpawner.start_timeout = 180
117 |     # prevent spawners from exiting early due to missing env
118 |     cfg.KubeSpawner.environment = {
119 |         "JUPYTERHUB_API_TOKEN": "test-secret-token",
120 |         "JUPYTERHUB_CLIENT_ID": "ignored",
121 |     }
122 |     return cfg
123 | 
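# Editor's note (illustration only, not part of this file): tests typically
# combine this fixture with the `hub` fixture further down, roughly:
#
#     spawner = KubeSpawner(hub=hub, user=some_user, config=config)
#     url = await spawner.start()
#
# where `some_user` stands in for a JupyterHub user object.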
124 | 
125 | @pytest.fixture(scope="session")
126 | def ssl_app(tmpdir_factory, kube_ns):
127 |     """Partially instantiate a JupyterHub instance to generate ssl certificates
128 | 
129 |     Generates ssl certificates on the host,
130 |     which will then be staged into the hub pod via a secret
131 | 
132 |     This is not a fully instantiated Hub,
133 |     but it will have internal_ssl-related attributes such as
134 |     .internal_trust_bundles and .internal_certs_location initialized.
135 |     """
136 |     tmpdir = tmpdir_factory.mktemp("ssl")
137 |     tmpdir.chdir()
138 |     config = Config()
139 |     config.JupyterHub.internal_ssl = True
140 |     tmpdir.mkdir("internal-ssl")
141 |     # use relative path for ssl certs
142 |     config.JupyterHub.internal_certs_location = "internal-ssl"
143 |     config.JupyterHub.trusted_alt_names = [
144 |         "DNS:hub-ssl",
145 |         f"DNS:hub-ssl.{kube_ns}",
146 |         f"DNS:hub-ssl.{kube_ns}.svc",
147 |         f"DNS:hub-ssl.{kube_ns}.svc.cluster.local",
148 |     ]
149 |     app = JupyterHub(config=config)
150 |     app.init_internal_ssl()
151 |     return app
152 | 
153 | 
154 | async def watch_logs(kube_client, pod_info):
155 |     """Stream a single pod's logs
156 | 
157 |     pod logs are streamed directly to sys.stdout,
158 |     so that pytest capture can deal with it.
159 | 
160 |     Called for each new pod from watch_kubernetes
161 |     """
162 |     watch = Watch()
163 |     while True:
164 |         try:
165 |             async for event in watch.stream(
166 |                 func=kube_client.read_namespaced_pod_log,
167 |                 namespace=pod_info.namespace,
168 |                 name=pod_info.name,
169 |             ):
170 |                 print(f"[{pod_info.name}]: {event}")
171 |         except ApiException as e:
172 |             if e.status == 400:
173 |                 # 400 can occur if the container is not yet ready
174 |                 # wait and retry
175 |                 await asyncio.sleep(1)
176 |                 continue
177 |             elif e.status == 404:
178 |                 # pod is gone, we are done
179 |                 return
180 |             else:
181 |                 # unexpected error
182 |                 print(f"Error watching logs for {pod_info.name}: {e}", file=sys.stderr)
183 |                 raise
184 |         else:
185 |             break
186 | 
187 | 
188 | async def watch_kubernetes(kube_client, kube_ns):
189 |     """Stream kubernetes events to stdout
190 | 
191 |     so that pytest io capturing can include k8s events and logs
192 | 
193 |     All events are streamed to stdout
194 | 
195 |     When a new pod is started, spawn an additional task to watch its logs
196 |     """
197 | 
198 |     watch = Watch()
199 |     watch_task = {}
200 | 
201 |     try:
202 |         async for event in watch.stream(
203 |             func=kube_client.list_namespaced_event,
204 |             namespace=kube_ns,
205 |         ):
206 |             resource = event['object']
207 |             obj = resource.involved_object
208 |             print(
209 |                 f"k8s event ({event['type']} {obj.kind}/{obj.name}): {resource.message}"
210 |             )
211 | 
212 |             # new pod appeared, start streaming its logs
213 |             if (
214 |                 obj.kind == "Pod"
215 |                 and event["type"] == "ADDED"
216 |                 and obj.name not in watch_task
217 |             ):
218 |                 watch_task[obj.name] = asyncio.create_task(
219 |                     watch_logs(
220 |                         kube_client,
221 |                         obj,
222 |                     ),
223 |                 )
224 | 
225 |     except asyncio.CancelledError as exc:
226 |         # kube_client cleanup cancelled us.  In turn, we should cancel
227 |         # the individual watch tasks.
228 |         for t in watch_task:
229 |             if watch_task[t] and not watch_task[t].done():
230 |                 try:
231 |                     watch_task[t].cancel()
232 |                 except asyncio.CancelledError:
233 |                     # Swallow these; they are what we expect.
234 |                     pass
235 |         # And re-raise so kube_client can finish cleanup
236 |         raise exc
237 | 
238 | 
239 | async def _delete_namespace(client, namespace):
240 |     await client.delete_namespace(namespace, body={}, grace_period_seconds=0)
241 |     for _ in range(20):  # Usually finishes a good deal faster
242 |         try:
243 |             await client.read_namespace(namespace)
244 |         except ApiException as e:
245 |             if e.status == 404:
246 |                 return
247 |             else:
248 |                 raise
249 |         else:
250 |             print("waiting for %s to delete" % namespace)
251 |             await asyncio.sleep(1)
252 |     raise Exception(f"Namespace {namespace} not deleted after 20 s")
253 | 
254 | 
255 | @pytest_asyncio.fixture(scope="session")
256 | async def kube_client(request, kube_ns, kube_another_ns):
257 |     """fixture for the Kubernetes client object.
258 |     skips tests that require kubernetes if kubernetes cannot be contacted
259 |     - Ensures kube_ns and kube_another_ns namespaces do exist
260 |     - Hooks up kubernetes events and logs to pytest capture
261 |     - Cleans up kubernetes namespace on exit
262 |     """
263 |     await load_kube_config()
264 |     client = shared_client("CoreV1Api")
265 | 
266 |     expected_namespaces = [kube_ns, kube_another_ns]
267 |     try:
268 |         namespaces = await client.list_namespace(_request_timeout=3)
269 |     except Exception as e:
270 |         pytest.skip("Kubernetes not found: %s" % e)
271 | 
272 |     for namespace in expected_namespaces:
273 |         if not any(ns.metadata.name == namespace for ns in namespaces.items):
274 |             print("Creating namespace %s" % namespace)
275 |             await client.create_namespace(V1Namespace(metadata=dict(name=namespace)))
276 |         else:
277 |             print("Using existing namespace %s" % namespace)
278 | 
279 |     # begin streaming all logs and events in our test namespace
280 |     log_tasks = [
281 |         asyncio.create_task(watch_kubernetes(client, namespace))
282 |         for namespace in expected_namespaces
283 |     ]
284 | 
285 |     yield client
286 | 
287 |     # Clean up at close by sending a cancel to watch_kubernetes and letting
288 |     # it handle the signal, cancel the tasks *it* started, and then raising
289 |     # it back to us.
290 |     for task in log_tasks:
291 |         try:
292 |             task.cancel()
293 |         except asyncio.CancelledError:
294 |             pass
295 | 
296 |     # allow opting out of namespace cleanup, for post-mortem debugging
297 |     if not os.environ.get("KUBESPAWNER_DEBUG_NAMESPACE"):
298 |         # Delete in parallel so that if one deletion fails we still clean up the others
299 |         ns_deletions = asyncio.gather(
300 |             *[_delete_namespace(client, ns) for ns in expected_namespaces]
301 |         )
302 |         await ns_deletions
303 | 
304 | 
305 | async def wait_for_pod(kube_client, kube_ns, pod_name, timeout=90):
306 |     """Wait for a pod to be ready"""
307 |     conditions = {}
308 |     for i in range(int(timeout)):
309 |         pod = await kube_client.read_namespaced_pod(namespace=kube_ns, name=pod_name)
310 |         for condition in pod.status.conditions or []:
311 |             conditions[condition.type] = condition.status
312 | 
313 |         if conditions.get("Ready") != "True":
314 |             print(
315 |                 f"Waiting for pod {kube_ns}/{pod_name}; current status: {pod.status.phase}; {conditions}"
316 |             )
317 |             await asyncio.sleep(1)
318 |         else:
319 |             break
320 | 
321 |     if conditions.get("Ready") != "True":
322 |         raise TimeoutError(f"pod {kube_ns}/{pod_name} failed to start: {pod.status}")
323 |     return pod
324 | 
325 | 
326 | async def ensure_not_exists(kube_client, kube_ns, name, resource_type, timeout=30):
327 |     """Ensure an object doesn't exist
328 | 
329 |     Request deletion and wait for it to be gone
330 |     """
331 |     delete = getattr(kube_client, f"delete_namespaced_{resource_type}")
332 |     read = getattr(kube_client, f"read_namespaced_{resource_type}")
333 |     try:
334 |         await delete(namespace=kube_ns, name=name)
335 |     except ApiException as e:
336 |         if e.status != 404:
337 |             raise
338 | 
339 |     while True:
340 |         # wait for delete
341 |         try:
342 |             await read(namespace=kube_ns, name=name)
343 |         except ApiException as e:
344 |             if e.status == 404:
345 |                 # deleted
346 |                 break
347 |             else:
348 |                 raise
349 |         else:
350 |             print(f"waiting for {resource_type}/{name} to delete")
351 |             await asyncio.sleep(1)
352 | 
353 | 
354 | async def create_resource(
355 |     kube_client, kube_ns, resource_type, manifest, delete_first=True
356 | ):
357 |     """Create a kubernetes resource
358 | 
359 |     handling 409 errors and others that can occur due to rapid startup
360 |     (typically: default service account doesn't exist yet
361 |     (typically: default service account doesn't exist yet)
362 |     name = manifest.metadata["name"]
363 |     if delete_first:
364 |         await ensure_not_exists(kube_client, kube_ns, name, resource_type)
365 |     print(f"Creating {resource_type} {name}")
366 |     create = getattr(kube_client, f"create_namespaced_{resource_type}")
367 |     error = None
368 |     for i in range(10):
369 |         try:
370 |             await create(
371 |                 body=manifest,
372 |                 namespace=kube_ns,
373 |             )
374 |         except ApiException as e:
375 |             if e.status == 409:
376 |                 break
377 |             error = e
378 |             # need to retry since this can fail if run too soon after namespace creation
379 |             print(e, file=sys.stderr)
380 |             await asyncio.sleep(int(e.headers.get("Retry-After", 1)))
381 |         else:
382 |             break
383 |     else:
384 |         raise error
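    # NOTE (editor): `raise error` above is the `for`/`else` fall-through:
    # it only runs when all 10 attempts raised ApiExceptions other than 409,
    # i.e. the loop never reached a `break`.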
385 | 
386 | 
387 | async def create_hub_pod(kube_client, kube_ns, pod_name="hub", ssl=False):
388 |     config_map_name = pod_name + "-config"
389 |     secret_name = pod_name + "-secret"
390 |     with open(jupyterhub_config_py) as f:
391 |         config = f.read()
392 | 
393 |     config_map_manifest = V1ConfigMap(
394 |         metadata={"name": config_map_name}, data={"jupyterhub_config.py": config}
395 |     )
396 | 
397 |     config_map = await create_resource(
398 |         kube_client,
399 |         kube_ns,
400 |         "config_map",
401 |         config_map_manifest,
402 |         delete_first=True,
403 |     )
404 | 
405 |     volumes = [{"name": "config", "configMap": {"name": config_map_name}}]
406 |     volume_mounts = [
407 |         {
408 |             "mountPath": "/etc/jupyterhub/jupyterhub_config.py",
409 |             "subPath": "jupyterhub_config.py",
410 |             "name": "config",
411 |         }
412 |     ]
413 |     if ssl:
414 |         volumes.append({"name": "secret", "secret": {"secretName": secret_name}})
415 |         volume_mounts.append(
416 |             {
417 |                 "mountPath": "/etc/jupyterhub/secret",
418 |                 "name": "secret",
419 |             }
420 |         )
421 | 
422 |     pod_manifest = V1Pod(
423 |         metadata={
424 |             "name": pod_name,
425 |             "labels": {"component": "hub", "hub-name": pod_name},
426 |         },
427 |         spec=V1PodSpec(
428 |             volumes=volumes,
429 |             containers=[
430 |                 {
431 |                     "image": "quay.io/jupyterhub/jupyterhub:latest",
432 |                     "name": "hub",
433 |                     "volumeMounts": volume_mounts,
434 |                     "args": [
435 |                         "jupyterhub",
436 |                         "-f",
437 |                         "/etc/jupyterhub/jupyterhub_config.py",
438 |                     ],
439 |                     "env": [{"name": "PYTHONUNBUFFERED", "value": "1"}],
440 |                     "readinessProbe": {
441 |                         "tcpSocket": {
442 |                             "port": 8081,
443 |                         },
444 |                         "periodSeconds": 1,
445 |                     },
446 |                 }
447 |             ],
448 |         ),
449 |     )
450 |     pod = await create_resource(kube_client, kube_ns, "pod", pod_manifest)
451 |     return await wait_for_pod(kube_client, kube_ns, pod_name)
452 | 
453 | 
454 | @pytest_asyncio.fixture(scope="session")
455 | async def hub_pod(kube_client, kube_ns):
456 |     """Create and return a pod running jupyterhub"""
457 |     return await create_hub_pod(kube_client, kube_ns)
458 | 
459 | 
460 | @pytest.fixture
461 | def hub(hub_pod):
462 |     """Return the jupyterhub Hub object for passing to Spawner constructors
463 | 
464 |     Ensures the hub_pod is running
465 |     """
466 |     return Hub(ip=hub_pod.status.pod_ip, port=8081)
467 | 
468 | 
469 | @pytest_asyncio.fixture(scope="session")
470 | async def hub_pod_ssl(kube_client, kube_ns, ssl_app):
471 |     """Start a hub pod with internal_ssl enabled"""
472 |     # load ssl dir to tarfile
473 |     buf = io.BytesIO()
474 |     tf = tarfile.TarFile(fileobj=buf, mode="w")
475 |     tf.add(ssl_app.internal_certs_location, arcname="internal-ssl", recursive=True)
476 | 
477 |     # store tarfile in a secret
478 |     b64_certs = base64.b64encode(buf.getvalue()).decode("ascii")
479 |     secret_name = "hub-ssl-secret"
480 |     secret_manifest = V1Secret(
481 |         metadata={"name": secret_name}, data={"internal-ssl.tar": b64_certs}
482 |     )
483 |     await create_resource(kube_client, kube_ns, "secret", secret_manifest)
484 | 
485 |     name = "hub-ssl"
486 | 
487 |     service_manifest = V1Service(
488 |         metadata=dict(name=name),
489 |         spec=V1ServiceSpec(
490 |             type="ClusterIP",
491 |             ports=[V1ServicePort(port=8081, target_port=8081)],
492 |             selector={"hub-name": name},
493 |         ),
494 |     )
495 | 
496 |     await create_resource(kube_client, kube_ns, "service", service_manifest)
497 | 
498 |     return await create_hub_pod(
499 |         kube_client,
500 |         kube_ns,
501 |         pod_name=name,
502 |         ssl=True,
503 |     )
504 | 
505 | 
506 | @pytest.fixture
507 | def hub_ssl(kube_ns, hub_pod_ssl):
508 |     """Return the Hub object for connecting to a running hub pod with internal_ssl enabled"""
509 |     return Hub(
510 |         proto="https",
511 |         ip=f"{hub_pod_ssl.metadata.name}.{kube_ns}",
512 |         port=8081,
513 |         base_url="/hub/",
514 |     )
515 | 
516 | 
517 | class ExecError(Exception):
518 |     """Error raised when a kubectl exec fails"""
519 | 
520 |     def __init__(self, exit_code, message="", command="exec"):
521 |         self.exit_code = exit_code
522 |         self.message = message
523 |         self.command = command
524 | 
525 |     def __str__(self):
526 |         return "{command} exited with status {exit_code}: {message}".format(
527 |             command=self.command,
528 |             exit_code=self.exit_code,
529 |             message=self.message,
530 |         )
531 | 
532 | 
533 | async def _exec_python_in_pod(
534 |     kube_client, kube_ns, pod_name, code, kwargs=None, _retries=0
535 | ):
536 |     """Run simple Python code in a pod
537 | 
538 |     code can be a str of code, or a 'simple' Python function
539 |     whose source can be extracted (i.e. self-contained, imports inside, etc.)
540 | 
541 |     kwargs are passed to the function, if it is given.
542 |     """
543 |     pod = await wait_for_pod(kube_client, kube_ns, pod_name)
544 |     original_code = code
545 |     if not isinstance(code, str):
546 |         # allow simple self-contained (no globals or args) functions
547 |         func = code
548 |         code = "\n".join(
549 |             [
550 |                 inspect.getsource(func),
551 |                 "_kw = %r" % (kwargs or {}),
552 |                 f"{func.__name__}(**_kw)",
553 |                 "",
554 |             ]
555 |         )
556 |     elif kwargs:
557 |         raise ValueError("kwargs can only be passed to functions, not code strings.")
558 | 
559 |     exec_command = [
560 |         "python3",
561 |         "-c",
562 |         code,
563 |     ]
564 |     print(f"Running {code} in {pod_name}")
565 |     # need to create ws client to get returncode,
566 |     # see https://github.com/kubernetes-client/python/issues/812
567 |     #
568 |     # That's why we are using the synchronous Kubernetes client here
569 |     # and why we imported them in the first place: kubernetes_asyncio
570 |     # does not yet support multichannel ws clients, which are needed
571 |     # to get the return code.
572 |     # cf https://github.com/tomplus/kubernetes_asyncio/issues/12
573 |     client = sync_stream(
574 |         sync_corev1api.connect_get_namespaced_pod_exec,
575 |         pod_name,
576 |         namespace=kube_ns,
577 |         command=exec_command,
578 |         stderr=True,
579 |         stdin=False,
580 |         stdout=True,
581 |         tty=False,
582 |         _preload_content=False,
583 |     )
584 |     client.run_forever(timeout=60)
585 | 
586 |     # let pytest capture stderr
587 |     stderr = client.read_stderr()
588 |     print(stderr, file=sys.stderr)
589 | 
590 |     returncode = client.returncode
591 |     if returncode:
592 |         print(client.read_stdout())
593 |         if _retries == 0:
594 |             raise ExecError(exit_code=returncode, message=stderr, command=code)
595 |         else:
596 |             # retry
597 |             await asyncio.sleep(1)
598 |             return await _exec_python_in_pod(
599 |                 kube_client,
600 |                 kube_ns,
601 |                 pod_name,
602 |                 code,
603 |                 _retries=_retries - 1,
604 |             )
605 |     else:
606 |         return client.read_stdout().rstrip()
607 | 
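# Editor's note (illustration only, not part of this file): both accepted
# `code` forms, given some already-running pod:
#
#     out = await _exec_python_in_pod(kube_client, kube_ns, pod_name, "print(6 * 7)")
#
#     def greet(name="world"):
#         print(f"hello {name}")
#
#     out = await _exec_python_in_pod(
#         kube_client, kube_ns, pod_name, greet, kwargs={"name": "kubespawner"}
#     )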
608 | 
609 | @pytest.fixture
610 | def exec_python_pod(kube_client, kube_ns):
611 |     """Fixture to return callable to execute python in a pod by name
612 | 
613 |     Used as a fixture to contain references to client, namespace
614 |     """
615 |     return partial(_exec_python_in_pod, kube_client, kube_ns)
616 | 
617 | 
618 | @pytest_asyncio.fixture(scope="session")
619 | async def exec_python(kube_client, kube_ns):
620 |     """Return a callable to execute Python code in a pod in the test namespace
621 | 
622 |     This fixture creates a dedicated pod for executing commands
623 |     """
624 | 
625 |     # note: this was created when there were only single-user pods running,
626 |     # but now there's always a hub pod where we could be running,
627 |     # and the ssl case *must* run from the hub pod for access to certs
628 |     # Note: we could do without this feature if we always ran
629 | 
630 |     pod_name = "kubespawner-test-exec"
631 |     pod_manifest = V1Pod(
632 |         metadata={"name": pod_name},
633 |         spec=V1PodSpec(
634 |             containers=[
635 |                 {
636 |                     "image": "python:3.12-slim",
637 |                     "name": "python",
638 |                     "args": ["/bin/sh", "-c", "while true; do sleep 5; done"],
639 |                 }
640 |             ],
641 |             termination_grace_period_seconds=0,
642 |         ),
643 |     )
644 |     pod = await create_resource(kube_client, kube_ns, "pod", pod_manifest)
645 | 
646 |     yield partial(_exec_python_in_pod, kube_client, kube_ns, pod_name)
647 | 
648 | 
649 | @pytest.fixture(scope="function")
650 | async def reset_pod_reflectors():
651 |     """
652 |     Resets the class state KubeSpawner.reflectors before and after the
653 |     test function executes. This enables us to start fresh if a test needs to
654 |     test configuration influencing the pod reflector options.
655 |     """
656 | 
657 |     await KubeSpawner._stop_all_reflectors()
658 |     yield
659 |     await KubeSpawner._stop_all_reflectors()
660 | 


--------------------------------------------------------------------------------
/tests/jupyterhub_config.py:
--------------------------------------------------------------------------------
 1 | """Minimal jupyterhub config for hub pod"""
 2 | 
 3 | import json
 4 | import os
 5 | import socket
 6 | import tarfile
 7 | 
 8 | c = get_config()  # noqa
 9 | 
10 | c.JupyterHub.hub_ip = "0.0.0.0"
11 | c.JupyterHub.hub_connect_ip = socket.gethostname()
12 | c.JupyterHub.log_level = "DEBUG"
13 | 
14 | import pprint
15 | 
16 | pprint.pprint(dict(os.environ))
17 | 
18 | print("before")
19 | for root, dirs, files in os.walk("/etc/jupyterhub"):
20 |     for name in files:
21 |         print(os.path.join(root, name))
22 |     for name in dirs:
23 |         print(os.path.join(root, name) + "/")
24 | 
25 | ssl_tar_file = "/etc/jupyterhub/secret/internal-ssl.tar"
26 | if os.path.exists(ssl_tar_file):
27 |     print("Enabling internal SSL")
28 |     c.JupyterHub.internal_ssl = True
29 |     ssl_dir = "/etc/jupyterhub/internal-ssl"
30 |     c.JupyterHub.internal_certs_location = ssl_dir
31 | 
32 |     with tarfile.open(ssl_tar_file) as tf:
33 |         tf.extractall(path="/etc/jupyterhub")
34 | 
35 |     for root, dirs, files in os.walk("/etc/jupyterhub"):
36 |         for name in files:
37 |             print(os.path.join(root, name))
38 |         for name in dirs:
39 |             print(os.path.join(root, name) + "/")
40 | 
 41 |     # rewrite paths in the certipy config, which was created on the host with different paths
42 |     certipy_config = os.path.join(c.JupyterHub.internal_certs_location, "certipy.json")
43 |     with open(certipy_config) as f:
44 |         cfg = json.load(f)
45 |     print("cfg before", cfg)
46 |     path = cfg["hub-internal"]["files"]["key"]
47 |     prefix_len = path.index("/hub-internal")
48 |     prefix = path[:prefix_len]
49 |     print(f"relocating certipy {prefix} -> {ssl_dir}")
50 |     for name, service in cfg.items():
51 |         for key in list(service["files"]):
52 |             path = service["files"][key]
53 |             if path.startswith(prefix):
54 |                 service["files"][key] = ssl_dir + path[prefix_len:]
55 |             # path = service["files"][key]
56 |             # print(name, key, path)
57 |             # if path:
58 |             #     new_abs_path = ssl_dir + path[path.index("/" + name):]
59 |             #     print(path, new_abs_path)
60 |             #     service["files"][key] = new_abs_path
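    # e.g. (editor's illustration; the host prefix is whatever tmpdir certipy wrote into):
    #   ".../internal-ssl/hub-internal/hub-internal.key"
    #     -> "/etc/jupyterhub/internal-ssl/hub-internal/hub-internal.key"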
61 | 
62 |     print(cfg)
63 | 
64 |     with open(certipy_config, "w") as f:
65 |         json.dump(cfg, f)
66 | 
67 |     # c.JupyterHub.trusted_alt_names = socket.gethostname()
68 | 
69 | c.JupyterHub.services = [
70 |     {"name": "test", "admin": True, "api_token": "test-secret-token"},
71 | ]
72 | 
73 | print("after")
74 | for root, dirs, files in os.walk("/etc/jupyterhub"):
75 |     for name in files:
76 |         print(os.path.join(root, name))
77 |     for name in dirs:
78 |         print(os.path.join(root, name) + "/")
79 | 


--------------------------------------------------------------------------------
/tests/test_clients.py:
--------------------------------------------------------------------------------
 1 | import asyncio
 2 | 
 3 | from conftest import cancel_tasks
 4 | 
 5 | from kubespawner.clients import load_config, shared_client
 6 | 
 7 | 
 8 | async def test_shared_client():
 9 |     load_config()
10 |     core = shared_client("CoreV1Api")
11 |     core2 = shared_client("CoreV1Api")
12 |     assert core2 is core
13 |     ext = shared_client("NetworkingV1Api")
14 |     ext2 = shared_client("NetworkingV1Api")
15 |     assert ext is ext2
16 |     assert ext is not core
17 | 
18 | 
19 | def test_shared_client_close():
20 |     load_config()
21 |     # this test must be sync so we can create and drive our own event loop
22 |     core = None
23 | 
24 |     async def test():
25 |         nonlocal core
26 |         core = shared_client("CoreV1Api")
27 | 
28 |     loop = asyncio.new_event_loop()
29 |     loop.run_until_complete(test())
30 |     loop.run_until_complete(cancel_tasks())
31 |     loop.close()
32 |     # asyncio.run(test())
33 |     assert core is not None
34 |     # no public API to check if it's closed
35 |     assert core.api_client._pool is None
36 | 


--------------------------------------------------------------------------------
/tests/test_profile.py:
--------------------------------------------------------------------------------
  1 | import pytest
  2 | 
  3 | from kubespawner import KubeSpawner
  4 | 
  5 | 
  6 | @pytest.mark.parametrize(
  7 |     "unfilled_profile_list,filled_profile_list",
  8 |     [
  9 |         (
 10 |             [
 11 |                 {
 12 |                     'display_name': 'Something without a slug',
 13 |                     'kubespawner_override': {},
 14 |                 },
 15 |                 {
 16 |                     'display_name': 'Something with a slug',
 17 |                     'slug': 'sluggity-slug',
 18 |                     'kubespawner_override': {},
 19 |                 },
 20 |             ],
 21 |             [
 22 |                 {
 23 |                     'display_name': 'Something without a slug',
 24 |                     'slug': 'something-without-a-slug',
 25 |                     'default': True,
 26 |                     'kubespawner_override': {},
 27 |                 },
 28 |                 {
 29 |                     'display_name': 'Something with a slug',
 30 |                     'slug': 'sluggity-slug',
 31 |                     'kubespawner_override': {},
 32 |                 },
 33 |             ],
 34 |         ),
 35 |         (
 36 |             [
 37 |                 {
 38 |                     'display_name': 'Something without choices',
 39 |                     'kubespawner_override': {},
 40 |                 },
 41 |                 {
 42 |                     'display_name': 'Something with choices',
 43 |                     'kubespawner_override': {},
 44 |                     'default': True,
 45 |                     'profile_options': {
 46 |                         'no-defaults': {
 47 |                             'display_name': 'Some choice without a default set',
 48 |                             'choices': {
 49 |                                 'option-1': {
 50 |                                     'display_name': 'Option 1',
 51 |                                     'kubespawner_override': {},
 52 |                                 },
 53 |                                 'option-2': {
 54 |                                     'display_name': 'Option 2',
 55 |                                     'kubespawner_override': {},
 56 |                                 },
 57 |                             },
 58 |                         },
 59 |                         'only-unlisted': {
 60 |                             'display_name': 'Some option without any choices set',
 61 |                             'unlisted_choice': {'enabled': True},
 62 |                         },
 63 |                         'explicit-defaults': {
 64 |                             'display_name': 'Some choice with a default set',
 65 |                             'choices': {
 66 |                                 'option-1': {
 67 |                                     'display_name': 'Option 1',
 68 |                                     'kubespawner_override': {},
 69 |                                 },
 70 |                                 'option-2': {
 71 |                                     'display_name': 'Option 2',
 72 |                                     'default': True,
 73 |                                     'kubespawner_override': {},
 74 |                                 },
 75 |                             },
 76 |                         },
 77 |                     },
 78 |                 },
 79 |             ],
 80 |             [
 81 |                 {
 82 |                     'display_name': 'Something without choices',
 83 |                     'slug': 'something-without-choices',
 84 |                     'kubespawner_override': {},
 85 |                 },
 86 |                 {
 87 |                     'display_name': 'Something with choices',
 88 |                     'slug': 'something-with-choices',
 89 |                     'default': True,
 90 |                     'kubespawner_override': {},
 91 |                     'profile_options': {
 92 |                         'no-defaults': {
 93 |                             'display_name': 'Some choice without a default set',
 94 |                             'unlisted_choice': {'enabled': False},
 95 |                             'choices': {
 96 |                                 'option-1': {
 97 |                                     'display_name': 'Option 1',
 98 |                                     'default': True,
 99 |                                     'kubespawner_override': {},
100 |                                 },
101 |                                 'option-2': {
102 |                                     'display_name': 'Option 2',
103 |                                     'kubespawner_override': {},
104 |                                 },
105 |                             },
106 |                         },
107 |                         'only-unlisted': {
108 |                             'display_name': 'Some option without any choices set',
109 |                             'unlisted_choice': {
110 |                                 'enabled': True,
111 |                                 'display_name_in_choices': 'Other...',
112 |                             },
113 |                         },
114 |                         'explicit-defaults': {
115 |                             'display_name': 'Some choice with a default set',
116 |                             'unlisted_choice': {'enabled': False},
117 |                             'choices': {
118 |                                 'option-1': {
119 |                                     'display_name': 'Option 1',
120 |                                     'kubespawner_override': {},
121 |                                 },
122 |                                 'option-2': {
123 |                                     'display_name': 'Option 2',
124 |                                     'default': True,
125 |                                     'kubespawner_override': {},
126 |                                 },
127 |                             },
128 |                         },
129 |                     },
130 |                 },
131 |             ],
132 |         ),
133 |         ([], []),
134 |     ],
135 | )
136 | async def test_profile_missing_defaults_populated(
137 |     unfilled_profile_list, filled_profile_list
138 | ):
139 |     """
140 |     Tests that missing profileList values are populated
141 |     """
142 |     spawner = KubeSpawner(_mock=True)
143 |     assert (
144 |         spawner._get_initialized_profile_list(unfilled_profile_list)
145 |         == filled_profile_list
146 |     )
147 | 
148 | 
149 | @pytest.mark.parametrize(
150 |     "profile_list,slug,selected_profile",
151 |     [
152 |         (
153 |             [
154 |                 {
155 |                     'display_name': 'profile 1',
156 |                     'kubespawner_override': {},
157 |                 },
158 |                 {
159 |                     'display_name': 'profile 2',
160 |                     'kubespawner_override': {},
161 |                 },
162 |             ],
163 |             'profile-2',
164 |             {
165 |                 'display_name': 'profile 2',
166 |                 'slug': 'profile-2',
167 |                 'kubespawner_override': {},
168 |             },
169 |         ),
170 |         (
171 |             [
172 |                 {
173 |                     'display_name': 'profile 1',
174 |                     'kubespawner_override': {},
175 |                 },
176 |                 {
177 |                     'display_name': 'profile 2',
178 |                     'default': True,
179 |                     'kubespawner_override': {},
180 |                 },
181 |             ],
182 |             None,
183 |             {
184 |                 'display_name': 'profile 2',
185 |                 'slug': 'profile-2',
186 |                 'default': True,
187 |                 'kubespawner_override': {},
188 |             },
189 |         ),
190 |         (
191 |             [
192 |                 {
193 |                     'display_name': 'profile 1',
194 |                     'kubespawner_override': {},
195 |                 },
196 |                 {
197 |                     'display_name': 'profile 2',
198 |                     'default': True,
199 |                     'kubespawner_override': {},
200 |                 },
201 |             ],
202 |             '',
203 |             {
204 |                 'display_name': 'profile 2',
205 |                 'slug': 'profile-2',
206 |                 'default': True,
207 |                 'kubespawner_override': {},
208 |             },
209 |         ),
210 |     ],
211 | )
212 | async def test_find_slug(profile_list, slug, selected_profile):
213 |     """
214 |     Test that we can find the profile we expect given slugs
215 |     """
216 |     spawner = KubeSpawner(_mock=True)
217 |     profile_list = spawner._get_initialized_profile_list(profile_list)
218 |     assert spawner._get_profile(slug, profile_list) == selected_profile
219 | 
220 | 
221 | async def test_find_slug_exception():
222 |     """
223 |     Test that looking for a slug that doesn't exist gives us an exception
224 |     """
225 |     spawner = KubeSpawner(_mock=True)
226 |     profile_list = [
227 |         {
228 |             'display_name': 'profile 1',
229 |             'kubespawner_override': {},
230 |         },
231 |         {
232 |             'display_name': 'profile 2',
233 |             'kubespawner_override': {},
234 |         },
235 |     ]
236 |     profile_list = spawner._get_initialized_profile_list(profile_list)
237 |     with pytest.raises(ValueError):
238 |         spawner._get_profile('does-not-exist', profile_list)
239 | 
240 | 
241 | async def test_unlisted_choice_non_string_override():
242 |     profiles = [
243 |         {
244 |             'display_name': 'CPU only',
245 |             'slug': 'cpu',
246 |             'profile_options': {
247 |                 'image': {
248 |                     'display_name': 'Image',
249 |                     'unlisted_choice': {
250 |                         'enabled': True,
251 |                         'display_name': 'Image Location',
252 |                         'validation_regex': '^pangeo/.*$',
253 |                         'validation_message': 'Must be a pangeo image, matching ^pangeo/.*$',
254 |                         'kubespawner_override': {
255 |                             'image': '{value}',
256 |                             'environment': {
257 |                                 'CUSTOM_IMAGE_USED': 'yes',
258 |                                 'CUSTOM_IMAGE': '{value}',
259 |                                 # This should just be passed through, as JUPYTER_USER is not replaced
260 |                                 'USER': '${JUPYTER_USER}',
261 |                                 # This should render as ${JUPYTER_USER}, as the {{ and }} escape them.
262 |                                 # this matches existing behavior for other replacements elsewhere
263 |                                 'USER_TEST': '${{JUPYTER_USER}}',
264 |                             },
265 |                             "init_containers": [
266 |                                 {
267 |                                     "name": "testing",
268 |                                     "image": "{value}",
269 |                                     "securityContext": {"runAsUser": 1000},
270 |                                 }
271 |                             ],
272 |                         },
273 |                     },
274 |                 }
275 |             },
276 |         },
277 |     ]
278 |     spawner = KubeSpawner(_mock=True)
279 |     spawner.profile_list = profiles
280 | 
281 |     image = "pangeo/pangeo-notebook:latest"
282 |     # Set user option for image directly
283 |     spawner.user_options = {"profile": "cpu", "image--unlisted-choice": image}
284 | 
285 |     # this shouldn't error
286 |     await spawner.load_user_options()
287 | 
288 |     assert spawner.image == image
289 |     assert spawner.environment == {
290 |         'CUSTOM_IMAGE_USED': 'yes',
291 |         'CUSTOM_IMAGE': image,
292 |         'USER': '${JUPYTER_USER}',
293 |         'USER_TEST': '${JUPYTER_USER}',
294 |     }
295 |     assert spawner.init_containers == [
296 |         {"name": "testing", "image": image, 'securityContext': {'runAsUser': 1000}}
297 |     ]
298 | 
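# Editor's note: the environment assertions above follow from kubespawner's
# str.format-based templating (see recursive_format / IgnoreMissing in
# kubespawner/utils.py): '{{' and '}}' are literal braces, so
# '${{JUPYTER_USER}}' renders as '${JUPYTER_USER}', while '${JUPYTER_USER}'
# passes through unchanged because unknown keys like JUPYTER_USER are left
# unformatted.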
299 | 
300 | async def test_empty_user_options_and_profile_options_api():
301 |     profiles = [
302 |         {
303 |             'display_name': 'CPU only',
304 |             'profile_options': {
305 |                 'image': {
306 |                     'display_name': 'Image',
307 |                     'unlisted_choice': {
308 |                         'enabled': True,
309 |                         'display_name': 'Image Location',
310 |                         'validation_regex': '^pangeo/.*$',
311 |                         'validation_message': 'Must be a pangeo image, matching ^pangeo/.*$',
312 |                         'kubespawner_override': {'image': '{value}'},
313 |                     },
314 |                     "choices": {
315 |                         'op-1': {
316 |                             'display_name': 'Option 1',
317 |                             'kubespawner_override': {
318 |                                 'image': 'pangeo/pangeo-notebook:ebeb9dd'
319 |                             },
320 |                         },
321 |                         'op-2': {
322 |                             'display_name': 'Option 2',
323 |                             'kubespawner_override': {
324 |                                 'image': 'pangeo/pangeo-notebook:latest'
325 |                             },
326 |                         },
327 |                     },
328 |                 }
329 |             },
330 |         },
331 |     ]
332 |     spawner = KubeSpawner(_mock=True)
333 |     spawner.profile_list = profiles
334 |     # set user_options directly (e.g. via api)
335 |     spawner.user_options = {}
336 | 
337 |     # nothing should be loaded yet
338 |     assert spawner.cpu_limit is None
339 | 
340 |     # this shouldn't error
341 |     await spawner.load_user_options()
342 | 
343 |     # implicit defaults should be used
344 |     assert spawner.image == "pangeo/pangeo-notebook:ebeb9dd"
345 | 
346 | 
347 | @pytest.mark.parametrize(
348 |     "profile_list, formdata",
349 |     [
350 |         (
351 |             [
352 |                 {
353 |                     "display_name": "short",
354 |                     "slug": "short",
355 |                     "profile_options": {
356 |                         "relevant": {
357 |                             "choices": {
358 |                                 "choice-a": {
359 |                                     "kubespawner_override": {},
360 |                                 },
361 |                             },
362 |                         },
363 |                     },
364 |                 },
365 |                 {
366 |                     "display_name": "short-plus",
367 |                     "slug": "short-plus",
368 |                     "profile_options": {
369 |                         "irrelevant": {
370 |                             "choices": {
371 |                                 "choice-b": {
372 |                                     "kubespawner_override": {},
373 |                                 },
374 |                             },
375 |                         },
376 |                     },
377 |                 },
378 |             ],
379 |             # The formdata below is hardcoded based on the profiles above and on
380 |             # how the HTML form is currently rendered. If either changes, the
381 |             # formdata needs to change as well.
382 |             {
383 |                 'profile': ['short'],
384 |                 'profile-option-short--relevant': ['choice-a'],
385 |                 'profile-option-short-plus--irrelevant': ['choice-b'],
386 |             },
387 |         ),
388 |     ],
389 | )
390 | async def test_profile_slug_and_option_slug_mixup(profile_list, formdata):
391 |     """
392 |     If we have a profile list with two entries, their respective profile_options
393 |     should not be mixed up with each other. This has previously happened when
394 |     one profile's slug was a prefix of another's, as with "short" and "short-plus".
395 |     """
396 |     spawner = KubeSpawner(_mock=True)
397 |     spawner.profile_list = profile_list
398 | 
399 |     user_options = spawner.options_from_form(formdata)
400 |     assert user_options.get("profile") == "short"
401 |     assert user_options.get("relevant") == "choice-a"
402 |     assert not user_options.get("plus-irrelevant")
403 | 
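# A rough sketch of how such a mixup can occur when option fields are matched
# by a bare string prefix (illustrative only, not KubeSpawner's actual code):
#
#     prefix = "profile-option-short-"
#     [k for k in formdata if k.startswith(prefix)]
#     # -> ['profile-option-short--relevant',
#     #     'profile-option-short-plus--irrelevant']
#
# Matching against the full "profile-option-<slug>--" delimiter instead keeps
# the two profiles' options apart.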


--------------------------------------------------------------------------------
/tests/test_slugs.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | 
 3 | from kubespawner.slugs import is_valid_label, safe_slug
 4 | 
 5 | 
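# Based on the expectations below: names that are already valid lowercase
# DNS-1123 labels pass through unchanged, while anything else is lowercased,
# stripped of invalid characters (with an "x-"/"x" fallback where nothing
# safe remains), and suffixed with "---" plus 8 hex characters of what is
# presumably a content hash, so distinct inputs cannot collide on one slug.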
 6 | @pytest.mark.parametrize(
 7 |     "name, expected",
 8 |     [
 9 |         ("jupyter-alex", "jupyter-alex"),
10 |         ("jupyter-Alex", "jupyter-alex---3a1c285c"),
11 |         ("jupyter-üni", "jupyter-ni---a5aaf5dd"),
12 |         ("endswith-", "endswith---165f1166"),
13 |         ("user@email.com", "user-email-com---0925f997"),
14 |         ("user-_@_emailß.com", "user-email-com---7e3a7efd"),
15 |         ("has.dot", "has-dot---03e27fdf"),
16 |         ("z9", "z9"),
17 |         ("9z9", "x-9z9---224de202"),
18 |         ("-start", "start---f587e2dc"),
19 |         ("üser", "ser---73506260"),
20 |         ("username--servername", "username-servername---d957f1de"),
21 |         ("start---f587e2dc", "start-f587e2dc---cc5bb9c9"),
22 |         pytest.param("x" * 63, "x" * 63, id="x63"),
23 |         pytest.param("x" * 64, "xxxxxxxxxxxxxxxxxxxxx---7ce10097", id="x64"),
24 |         pytest.param("x" * 65, "xxxxxxxxxxxxxxxxxxxxx---9537c5fd", id="x65"),
25 |         ("", "x---e3b0c442"),
26 |     ],
27 | )
28 | def test_safe_slug(name, expected):
29 |     slug = safe_slug(name)
30 |     assert slug == expected
31 | 
32 | 
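# The "---" separator plus the 8-hex-character hash already take 11
# characters, so max_length=11 leaves no room for the name itself and (per
# the case below) raises ValueError, while max_length=12 keeps exactly one
# leading character of the original name.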
33 | @pytest.mark.parametrize(
34 |     "max_length, length, expected",
35 |     [
36 |         (16, 16, "x" * 16),
37 |         (16, 17, "xxxxx---d04fd59f"),
38 |         (11, 16, "error"),
39 |         (12, 16, "x---9c572959"),
40 |     ],
41 | )
42 | def test_safe_slug_max_length(max_length, length, expected):
43 |     name = "x" * length
44 |     if expected == "error":
45 |         with pytest.raises(ValueError):
46 |             safe_slug(name, max_length=max_length)
47 |         return
48 | 
49 |     slug = safe_slug(name, max_length=max_length)
50 |     assert slug == expected
51 | 
52 | 
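# Passing is_valid=is_valid_label relaxes the check to Kubernetes label-value
# rules: per the cases below, uppercase letters, leading digits, interior
# dots, and the empty string are all accepted unchanged.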
53 | @pytest.mark.parametrize(
54 |     "name, expected",
55 |     [
56 |         ("", ""),
57 |         ("x", "x"),
58 |         ("a-b", "a-b"),
59 |         ("9a", "9a"),
60 |         ("9.", "x-9---99a1b84b"),
61 |         ("AbC", "AbC"),
62 |         ("AbC.", "abc---dbe8c5d1"),
63 |         ("ab.c", "ab.c"),
64 |         ("a@b.c", "a-b-c---d648b243"),
65 |         ("-x", "x---a4209624"),
66 |         ("x-", "x---c8b60efc"),
67 |         pytest.param("x" * 63, "x" * 63, id="x63"),
68 |         pytest.param("x" * 64, "xxxxxxxxxxxxxxxxxxxxx---7ce10097", id="x64"),
69 |         pytest.param("x" * 65, "xxxxxxxxxxxxxxxxxxxxx---9537c5fd", id="x65"),
70 |     ],
71 | )
72 | def test_safe_slug_label(name, expected):
73 |     slug = safe_slug(name, is_valid=is_valid_label)
74 |     assert slug == expected
75 | 


--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
  1 | import copy
  2 | 
  3 | import pytest
  4 | from conftest import ExecError
  5 | from kubernetes_asyncio.client.models import (
  6 |     V1Capabilities,
  7 |     V1Container,
  8 |     V1Lifecycle,
  9 |     V1PodSpec,
 10 |     V1SecurityContext,
 11 | )
 12 | 
 13 | from kubespawner.utils import _get_k8s_model_attribute, get_k8s_model, update_k8s_model
 14 | 
 15 | 
 16 | class MockLogger:
 17 |     """Trivial class to store logs for inspection after a test run."""
 18 | 
 19 |     def __init__(self):
 20 |         self.info_logs = []
 21 | 
 22 |     def info(self, message):
 23 |         self.info_logs.append(message)
 24 | 
 25 | 
 26 | def print_hello():
 27 |     print("hello!")
 28 | 
 29 | 
 30 | def exec_error():
 31 |     1 / 0
 32 | 
 33 | 
 34 | async def test_exec(exec_python):
 35 |     """Test the exec fixture itself"""
 36 |     r = await exec_python(print_hello)
 37 |     print("result: %r" % r)
 38 | 
 39 | 
 40 | async def test_exec_error(exec_python):
 41 |     """Test the exec fixture error handling"""
 42 |     with pytest.raises(ExecError):
 43 |         await exec_python(exec_error)
 44 | 
 45 | 
 46 | def test__get_k8s_model_attribute():
 47 |     """Verifies fundamental behavior"""
 48 |     assert _get_k8s_model_attribute(V1PodSpec, "service_account") == "service_account"
 49 |     assert _get_k8s_model_attribute(V1PodSpec, "serviceAccount") == "service_account"
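    # Both the snake_case (Python client) and camelCase (Kubernetes YAML)
    # spellings resolve to the model's snake_case attribute name.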
 50 | 
 51 | 
 52 | def test_update_k8s_model():
 53 |     """Ensure update_k8s_model does what it should. The test first updates
 54 |     attributes using the function and then manually verifies that the
 55 |     correct changes have been made."""
 56 |     manually_updated_target = V1Container(
 57 |         name="mock_name",
 58 |         image="mock_image",
 59 |         command=['iptables'],
 60 |         security_context=V1SecurityContext(
 61 |             privileged=True,
 62 |             run_as_user=0,
 63 |             capabilities=V1Capabilities(add=['NET_ADMIN']),
 64 |         ),
 65 |     )
 66 |     target = copy.deepcopy(manually_updated_target)
 67 |     source = {"name": "new_mock_name"}
 68 |     update_k8s_model(target, source)
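    # No return value is captured: update_k8s_model mutates `target` in
    # place, which the comparison below relies on.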
 69 | 
 70 |     manually_updated_target.name = "new_mock_name"
 71 | 
 72 |     assert target == manually_updated_target
 73 | 
 74 | 
 75 | def test_update_k8s_models_logger_message():
 76 |     """Ensure that the update_k8s_model function uses the logger to warn about
 77 |     overwriting previous values."""
 78 |     target = V1Container(name="mock_name")
 79 |     source = {"name": "new_mock_name", "image_pull_policy": "Always"}
 80 |     mock_logger = MockLogger()
 81 |     update_k8s_model(
 82 |         target,
 83 |         source,
 84 |         logger=mock_logger,
 85 |         target_name="notebook_container",
 86 |         changes_name="extra_container_config",
 87 |     )
 88 | 
 89 |     assert (
 90 |         mock_logger.info_logs[-1].find(
 91 |             "'notebook_container.name' current value: 'mock_name' is overridden with 'new_mock_name', which is the value of 'extra_container_config.name'"
 92 |         )
 93 |         != -1
 94 |     )
 95 | 
 96 | 
 97 | def test_get_k8s_model():
  98 |     """Test that passing either a kubernetes.client.models object or a
  99 |     dictionary representing it to get_k8s_model works."""
100 |     # verify get_k8s_model for when passing dict objects
101 |     v1_lifecycle_from_dict = get_k8s_model(
102 |         V1Lifecycle,
103 |         {'preStop': {'exec': {'command': ['/bin/sh', 'test']}}},
104 |     )
105 | 
106 |     assert isinstance(v1_lifecycle_from_dict, V1Lifecycle)
107 |     assert v1_lifecycle_from_dict.to_dict() == {
108 |         'post_start': None,
109 |         'pre_stop': {'exec': {'command': ['/bin/sh', 'test']}},
110 |     }
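    # Note that the camelCase 'preStop' key in the input dict surfaces as the
    # snake_case 'pre_stop' attribute in to_dict().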
111 | 
112 |     # verify get_k8s_model for when passing model objects
113 |     v1_lifecycle_from_model_object = get_k8s_model(V1Lifecycle, v1_lifecycle_from_dict)
114 | 
115 |     assert isinstance(v1_lifecycle_from_model_object, V1Lifecycle)
116 |     assert v1_lifecycle_from_model_object.to_dict() == {
117 |         'post_start': None,
118 |         'pre_stop': {'exec': {'command': ['/bin/sh', 'test']}},
119 |     }
120 | 


--------------------------------------------------------------------------------