├── .github └── workflows │ ├── release.yml │ └── test.yml ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── envkernel.py ├── requirements-dev.txt ├── requirements.txt ├── setup.py ├── test-data └── env │ └── bin │ └── .gitkeep └── test_envkernel.py /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: pypi-publish 2 | on: 3 | release: 4 | types: [created] 5 | 6 | jobs: 7 | release: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | # https://github.com/actions/setup-python 12 | - name: Set up Python 13 | uses: actions/setup-python@v1 14 | with: 15 | python-version: '3.x' 16 | - name: Install dependencies 17 | run: | 18 | pip install -r requirements.txt -r requirements-dev.txt 19 | pip install . 20 | pip install setuptools wheel twine 21 | - uses: rkdarst/action-verify-python-version@main 22 | - name: Build 23 | run: | 24 | python setup.py sdist bdist_wheel 25 | - name: Publish a Python distribution to PyPI 26 | uses: pypa/gh-action-pypi-publish@release/v1 27 | with: 28 | user: __token__ 29 | password: ${{ secrets.PYPI_API_TOKEN_RKDARST }} 30 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Test 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python 3.10 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: "3.10" 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install flake8 pytest 27 | pip install -r requirements.txt 28 | pip install -r requirements-dev.txt 29 | - name: Lint with flake8 30 | run: | 31 | # stop the build if there are Python syntax errors or undefined names 32 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 33 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 34 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 35 | - name: Test with pytest 36 | run: | 37 | pytest 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints/ 2 | /.nbgrader.log 3 | Untitled*.ipynb 4 | /build 5 | /dist/ 6 | /envkernel.egg-info/ 7 | __pycache__/ 8 | .cache 9 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | # - "2.7" # [..., *list, ...] is needed to support <= 3.4. 4 | # - "3.3" 5 | # - "3.4" 6 | - "3.5" 7 | - "3.6" 8 | - "3.7" 9 | install: 10 | - pip install -r requirements.txt -r requirements-dev.txt 11 | script: 12 | - pytest . 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 | 
3 | Copyright (c) 2019, Aalto Science IT and other contributors
4 | All rights reserved.
5 | 
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 | 
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 | 
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 | 
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 | 
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Switch environments before running Jupyter kernels
2 | 
3 | Sometimes, one needs to execute Jupyter kernels in a different
4 | environment. Say you want to execute the kernel in a conda
5 | environment (that's easy, but the easy way misses setting certain
6 | environment variables). Or run it inside a Docker container. One
7 | could manually adjust the kernelspec files to set environment
8 | variables or run commands before starting the kernel, but envkernel
9 | automates this process.
10 | 
11 | envkernel is equally useful for end users (on their own systems or
12 | clusters) who want easy access to environments in Jupyter, and for
13 | sysadmins deploying this access on systems they administer.
14 | 
15 | In general, there are two phases: First, install the kernel, e.g.
16 | `envkernel virtualenv --name=my-venv /path/to/venv`. This parses some
17 | options and writes a kernelspec file with the `--name` you
18 | specify. When Jupyter later starts this kernel, the second phase
19 | runs: the kernelspec file re-executes `envkernel` in run mode,
20 | which does whatever is needed to set up the environment (in this
21 | case, it prepends `/path/to/venv/bin/` to `PATH`). Then it starts
22 | the normal IPython kernel.
23 | 
24 | 
25 | Available modes:
26 | * `conda`: Activate a [conda environment](https://docs.conda.io/) first.
27 | * `virtualenv`: Activate a virtualenv first.
28 | * `docker`: Run the kernel in a Docker container.
29 | * `singularity`: Run the kernel in a [singularity container](https://www.sylabs.io/docs/).
30 | * `lmod`: Activate [Lmod](https://lmod.readthedocs.io/) modules first.
31 | 
32 | 
33 | 
34 | 
35 | 
36 | ## Installation
37 | 
38 | Available on PyPI: `pip install envkernel`.
39 | 
40 | Or, you can install the latest version from GitHub in the usual way: `pip install https://github.com/NordicHPC/envkernel/archive/master.zip`
41 | 
42 | This is a single-file script and can be copied directly and added to
43 | `PATH` as well. By design, there are no dependencies except the basic
44 | Jupyter client (not notebook or any UI), and that is only needed at
45 | kernel-setup time, not at kernel-runtime. The script must be
46 | available both when a kernel is set up and
47 | each time the kernel is started (and it is currently assumed to be in
48 | the same location both times).
49 | 
50 | 
51 | 
52 | 
53 | 
54 | ## General usage and common arguments
55 | 
56 | General invocation:
57 | 
58 | ```shell
59 | envkernel [mode] [envkernel options] [mode-specific-options]
60 | ```
61 | 
62 | 
63 | General arguments usable by *all* modes during the setup phase:
64 | 
65 | These options directly map to normal Jupyter kernel install options:
66 | 
67 | * `mode`: `singularity`, `docker`, `lmod`, or whatever mode is desired.
68 | * `--name $name`: Name of kernel to install (**required**).
69 | * `--user`: Install kernel into user directory.
70 | * `--sys-prefix`: Install to the current Python's `sys.prefix` (the Python which is running envkernel).
71 | * `--prefix`: Same as the normal kernel install option.
72 | * `--display-name NAME`: Human-readable name.
73 | * `--replace`: Replace existing kernel (Jupyter option, unsure what this means).
74 | * `--language`: What language to tag this kernel (default `python`).
75 | 
76 | These are envkernel-specific options:
77 | 
78 | * `--verbose`, `-v`: Print more debugging information when installing
79 | the kernel. It is always in verbose mode when actually running the
80 | kernel.
81 | * `--python`: Python interpreter to use when invoking inside the
82 | environment. (Default `python`. Unlike other kernels, this defaults
83 | to a relative path because the point of envkernel is to set up PATH
84 | properly.) If this is the special value `SELF`, it will be replaced
85 | with the value of `sys.executable` of the Python running envkernel.
86 | * `--kernel=NAME`: Auto-set `--language` and `--kernel-cmd` to
87 | the values needed for these well-known kernels. Options include
88 | `ipykernel` (the default), `ir`, or `imatlab`. But all of these
89 | hard-code a kernel command line and could possibly be wrong some
90 | day.
91 | * `--kernel-cmd`: A string giving the kernel command to start:
92 | space-separated, no shell quoting; it will be split when saving. The
93 | default is `python -m ipykernel_launcher -f {connection_file}`,
94 | which is suitable for IPython. For example, to start an R kernel in
95 | the environment use `R --slave -e IRkernel::main() --args
96 | {connection_file}` as the value, being careful to quote
97 | the spaces only once. To find what the strings should be, copy from
98 | some existing kernels. `--kernel=NAME` provides shortcuts for some
99 | popular kernels.
100 | * `--kernel-template`: An already-installed kernel name which is used
101 | as a template for the new envkernel. This is searched using the
102 | normal Jupyter search paths. This kernel's json file is loaded and
103 | used as a template for all kernel options (`--language`,
104 | `--kernel-cmd`, etc). Also, any other files in this directory (such
105 | as logos) are copied to the new kernel (like kernel.js in irkernel).
106 | * `--kernel-make-path-relative`: Removes a leading absolute path from the
107 | kernel command (mainly useful with `--kernel-template`). This is
108 | useful, for example, when you are setting up an Lmod install and
109 | the absolute path of the module might change, but you want it to
110 | always run Python relative to that module anyway.
111 | * `--env=NAME=VALUE`: Set these environment variables when running
112 | the kernel. These are actually just saved in the `kernel.json` file
113 | under the `env` key, which is used by Jupyter itself. So, this is
114 | just a shorthand for adding variables there; it is not used at the
115 | envkernel run stage at all.
116 | 
117 | Order of precedence of options (later in the list overrides earlier):
118 | `--kernel-template`, `--kernel`, `--kernel-cmd`, `--language`,
119 | `--python`, `--display-name`.
120 | 
121 | 
122 | 
123 | 
124 | ## Conda
125 | 
126 | The conda envkernel activates a conda environment (it sets the `PATH`,
127 | `CPATH`, `LD_LIBRARY_PATH`, and `LIBRARY_PATH` environment variables).
128 | This is done manually; if anyone knows a better way to do this, please
129 | inform us.
130 | 
131 | ### Conda example
132 | 
133 | This will load the `anaconda3` environment before invoking an IPython
134 | kernel using the name `python`, which will presumably be the one
135 | inside the `anaconda3` environment.
136 | 
137 | ```shell
138 | envkernel conda --name=conda-anaconda3 /path/to/anaconda3
139 | ```
140 | 
141 | ### Conda mode arguments
142 | 
143 | General invocation:
144 | 
145 | ```shell
146 | envkernel conda --name=NAME [envkernel options] conda-env-full-path
147 | ```
148 | 
149 | * `conda-env-full-path`: Full path to the conda environment to load.
150 | 
151 | 
152 | 
153 | 
154 | 
155 | ## Virtualenv
156 | 
157 | This operates identically to the `conda` mode, but is named `virtualenv`
158 | and works on virtualenvs.
159 | 
160 | ### Virtualenv example
161 | 
162 | ```shell
163 | envkernel virtualenv --name=my-venv /path/to/venv
164 | ```
165 | 
166 | 
167 | 
168 | 
169 | 
170 | ## Docker
171 | 
172 | Docker is a containerization system that runs as a system service.
173 | 
174 | Note: docker has not been fully tested, but has been reported to work.
175 | 
176 | 
177 | ### Docker example
178 | 
179 | ```shell
180 | envkernel docker --name=NAME --pwd --mount=type=bind,src=/m/jh/coursedata/,dst=/coursedata IMAGE
181 | ```
182 | 
183 | ### Docker mode arguments
184 | 
185 | General invocation:
186 | 
187 | ```shell
188 | envkernel docker --name=NAME [envkernel options] [docker options] [image]
189 | ```
190 | 
191 | * `image`: Required positional argument: name of the Docker image to run.
192 | 
193 | * `--pwd`: Bind-mount the current working directory and use it as the
194 | current working directory inside the notebook. This is usually
195 | useful.
196 | 
197 | * A few more yet-undocumented and untested arguments...
198 | 
199 | Any unknown argument is passed directly to the `docker run` call, and
200 | thus can be any normal Docker argument. If `,copy` is included in the
201 | `--mount` option, the directory will be copied before
202 | mounting. This may be useful if the directory is on a network mount
203 | which the root-running Docker daemon can't access. It is recommended to always use
204 | the form of options with `=`, such as `--option=X`, rather than
205 | separating them with a space, to avoid problems with argument/option
206 | detection.
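For example, a kernel that bind-mounts the current notebook directory and copies a data directory before mounting it could be installed like this (a sketch only: the kernel name, paths, and image name are placeholders, not files from this repository):

```shell
envkernel docker --name=my-docker-kernel --pwd \
    --mount=type=bind,src=/path/to/coursedata,dst=/coursedata,copy \
    IMAGE
```

Here `--name` and `--pwd` are consumed by envkernel itself, while the `--mount` option is passed through to `docker run`; at kernel start, envkernel strips the `,copy` flag, copies the source directory to a temporary location, and substitutes that copy as the mount source.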
207 | 
208 | 
209 | 
210 | 
211 | 
212 | ## Singularity
213 | 
214 | [Singularity](https://www.sylabs.io/docs/) is a containerization
215 | system somewhat similar to Docker, but designed for user-mode usage
216 | without root, and with a mindset of using user software instead of
217 | system services.
218 | 
219 | 
220 | ### Singularity example
221 | 
222 | ```shell
223 | envkernel singularity --name=NAME --contain --bind /m/jh/coursedata/:/coursedata /path/to/image.simg
224 | ```
225 | 
226 | ### Singularity mode arguments
227 | 
228 | General invocation:
229 | 
230 | ```shell
231 | envkernel singularity --name=NAME [envkernel options] [singularity options] [image]
232 | ```
233 | 
234 | * `image`: Required positional argument: name of the Singularity image to
235 | run.
236 | 
237 | * `--pwd`: Bind-mount the current working directory and use it as the
238 | current working directory inside the notebook. This may happen by
239 | default if you don't use `--contain`.
240 | 
241 | Any unknown argument is passed directly to the `singularity exec`
242 | call, and thus can be any normal Singularity argument. It is
243 | recommended to always use the form of options with `=`, such as
244 | `--bind=X`, rather than separating them with a space, to avoid
245 | problems with argument/option detection. The most useful Singularity
246 | options are (nothing envkernel-specific here):
247 | 
248 | * `--contain` or `-c`: Don't share any filesystems by default.
249 | 
250 | * `--bind src:dest[:ro]`: Bind-mount `src` from the host to `dest` in
251 | the container. `:ro` is optional, and defaults to `rw`.
252 | 
253 | * `--cleanenv`: Clean the environment before executing.
254 | 
255 | * `--net` or `-n`: Run in a new network namespace. This does **NOT**
256 | work with Jupyter kernels, because localhost must currently be
257 | shared. So don't use this unless a proper network gateway is created.
258 | 
259 | 
260 | 
261 | 
262 | 
263 | ## Lmod
264 | 
265 | The Lmod envkernel will load/unload
266 | [Lmod](https://lmod.readthedocs.io/) modules before running a normal
267 | IPython kernel.
268 | 
269 | Using envkernel is better than the naive (but functional) method of
270 | modifying a kernel to invoke a particular Python binary, because that
271 | will invoke the right Python interpreter but not set other relevant
272 | environment variables (so, for example, subprocesses won't be in the
273 | right environment).
274 | 
275 | ### Lmod example
276 | 
277 | This will run `module purge` and then `module load anaconda3` before
278 | invoking an IPython kernel using the name `python`, which will
279 | presumably be the one inside the `anaconda3` environment.
280 | 
281 | ```shell
282 | envkernel lmod --name=anaconda3 --purge anaconda3
283 | ```
284 | 
285 | ### Lmod mode arguments
286 | 
287 | General invocation:
288 | 
289 | ```shell
290 | envkernel lmod --name=NAME [envkernel options] [module ...]
291 | ```
292 | 
293 | * `module ...`: Modules to load (positional argument). Note that if
294 | a module is prefixed with `-`, it is actually unloaded (this is an
295 | Lmod feature).
296 | 
297 | * `--purge`: Purge all modules before loading the new modules. This
298 | can be safer, because sometimes users may automatically load modules
299 | from their `.bashrc`, which will cause failures if you try to load
300 | conflicting ones.
301 | 
302 | 
303 | 
304 | 
305 | 
306 | ## Other kernels
307 | 
308 | Envkernel isn't specific to the IPython kernel.
It defaults to
309 | ipykernel, but by using the `--kernel-template` option you can make it
310 | work with any other kernel without having to understand the internals.
311 | First, you install your other kernel normally, with some name (in this
312 | case, `R-3.6.1`). Then, you run envkernel with
313 | `--kernel-template=R-3.6.1`, which clones that kernel (with all its support
314 | files from the kernel directory, its argv, and so on), and (in this case)
315 | saves it to the same name with the `--name=R-3.6.1` option.
316 | 
317 | ```shell
318 | # Load modules and install the IRkernel normally, without envkernel
319 | module load r-irkernel/1.1-python3
320 | module load jupyterhub/live
321 | Rscript -e "library(IRkernel); IRkernel::installspec(name='R-3.6.1', displayname='R 3.6 module')"
322 | 
323 | # Use envkernel --kernel-template
324 | # - Do the normal Lmod envkernel setup
325 | # - Copy the existing kernel, including argv, kernel.js, icon, and display name
326 | # - Save it again, to the same name, with the envkernel wrapper.
327 | envkernel lmod --user --kernel-template=R-3.6.1 --name=R-3.6.1 r-irkernel/1.1-python3
328 | ```
329 | 
330 | This way, you can wrap any arbitrary kernel to run under envkernel.
331 | Also, you can always use `--kernel-cmd` to explicitly set your kernel
332 | command to whatever is needed for any other kernel (but you have to
333 | figure out that command yourself...).
334 | 
335 | 
336 | 
337 | 
338 | 
339 | ## How it works
340 | 
341 | When envkernel is first run, it sets up a kernelspec that will re-invoke
342 | envkernel when the kernel starts. Some options are consumed at install
343 | time (the kernelspec name and install options), while most are passed
344 | straight through into the kernelspec; envkernel re-reads them at run time.
345 | 
346 | Example envkernel setup command. This makes a new Jupyter kernel
347 | (`envkernel singularity` selects the singularity setup mode) named
348 | `testcourse-0.5.9` out of the image `/l/simg/0.5.9.simg`, with the
349 | Singularity options `--contain` (don't share filesystems by default) and
350 | `--bind` (bind-mount a directory).
351 | 
352 | ```shell
353 | envkernel singularity --sys-prefix --name=testcourse-0.5.9 /l/simg/0.5.9.simg --contain --bind /m/jh/coursedata/:/coursedata
354 | ```
355 | 
356 | That will create this kernelspec. Note that most of the arguments are passed through:
357 | 
358 | ```json
359 | {
360 |  "argv": [
361 |   "/opt/conda-nbserver-0.5.9/bin/envkernel",
362 |   "singularity",
363 |   "run",
364 |   "--connection-file",
365 |   "{connection_file}",
366 |   "--contain",
367 |   "--bind",
368 |   "/m/jh/coursedata/:/coursedata",
369 |   "/l/simg/0.5.9.simg",
370 |   "--",
371 |   "python",
372 |   "-m",
373 |   "ipykernel_launcher",
374 |   "-f",
375 |   "{connection_file}"
376 |  ],
377 |  "display_name": "Singularity with /l/simg/0.5.9.simg",
378 |  "language": "python"
379 | }
380 | ```
381 | 
382 | When this runs, it runs `singularity exec --contain --bind
383 | /m/jh/coursedata/:/coursedata /l/simg/0.5.9.simg`. Inside the image,
384 | it runs `python -m ipykernel_launcher -f {connection_file}`.
385 | envkernel parses and manipulates these arguments as needed.
386 | 
387 | 
388 | 
389 | 
390 | 
391 | ## Running multiple modes
392 | 
393 | envkernel doesn't support running multiple modes - for example,
394 | `conda` and `lmod` at the same time. But because of its general
395 | design, you should be able to layer it yourself. The following
396 | example uses the `conda` mode to create an envkernel.
Then, it uses
397 | `--kernel-template` to re-read that kernel and wrap it in `lmod`:
398 | 
399 | ```
400 | envkernel conda --name=test1 conda_path
401 | envkernel lmod --name=test1 --kernel-template=test1 lmod_module
402 | ```
403 | 
404 | There is nothing really special here; it is just layering one envkernel
405 | execution on top of another. If you notice problems with this, please
406 | try to debug a bit and then send feedback/improvements, since this is a
407 | relatively new feature.
408 | 
409 | 
410 | 
411 | 
412 | 
413 | ## Use with nbgrader
414 | 
415 | envkernel was originally inspired by the need for nbgrader to securely
416 | contain students' code while autograding. To do this, set up a
417 | contained kernel as above - it's up to you to figure out how to do
418 | this properly with your chosen method (docker or singularity). Then
419 | autograde like normal, but add the `--ExecutePreprocessor.kernel_name`
420 | option.
421 | 
422 | Set up a kernel:
423 | 
424 | ```shell
425 | envkernel docker --user --name=testcourse-0.5.9 --pwd --mount=type=bind,src=/mnt/jupyter/course/testcourse/data/,dst=/coursedata aaltoscienceit/notebook-server:0.5.9
426 | ```
427 | 
428 | Run the autograding:
429 | 
430 | ```shell
431 | nbgrader autograde --ExecutePreprocessor.kernel_name=testcourse-0.5.9 R1_Introduction
432 | ```
433 | 
434 | 
435 | 
436 | 
437 | 
438 | 
439 | ## Kernel quick reference
440 | 
441 | * `jupyter kernelspec list`
442 | * `jupyter kernelspec remove NAME`
443 | 
444 | 
445 | 
446 | 
447 | 
448 | ## See also
449 | 
450 | * General
451 |   * [a2km, "Assistant to the kernel manager"](https://github.com/minrk/a2km) is a command line tool for dealing with kernels, including making kernels which activate conda/venv environments. And some other handy kernel-manipulation stuff. Unfortunately written in Ruby.
452 |   * https://github.com/Anaconda-Platform/nb_conda_kernels - automatically creates kernels from conda environments. Uses a KernelSpecManager, so it possibly overrides everything at once, and also defaults to all kernels.
453 |   * The direct way to make a conda/virtualenv available in Jupyter is to activate the environment, then run `python -m ipykernel install [--user|--prefix=/path/to/other/env/]`. But this does *not* set up `PATH`, so calling other executables doesn't work... thus the benefit of envkernel.
454 |   * [This thread](https://groups.google.com/forum/#!topic/jupyter/kQ9ZDX4rDEE) was the clue to getting a kernel inside Docker working.
455 | 
456 | * The following commands are essential for kernel management:
457 |   * `jupyter kernelspec list`
458 |   * `jupyter --paths` - each `$data_path/kernels` dir is searched for kernels.
459 | 
460 | 
461 | 
462 | 
463 | ## Development and contributions
464 | 
465 | Developed at Aalto University Science-IT. Primary contact: Richard
466 | Darst. Contributions welcome from anyone. As of mid 2019, it is
467 | usable, but there may be bugs as it gets used at more
468 | sites.
469 | -------------------------------------------------------------------------------- /envkernel.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import copy 5 | import glob 6 | import json 7 | import logging 8 | import os 9 | from os.path import join as pjoin 10 | import re 11 | import shlex 12 | import shutil 13 | import subprocess 14 | import sys 15 | import tempfile 16 | import textwrap 17 | 18 | LOG = logging.getLogger('envkernel') 19 | LOG.setLevel(logging.INFO) 20 | logging.lastResort.setLevel(logging.DEBUG) 21 | 22 | 23 | version_info = (1, 1, 0,) # 'dev0') 24 | __version__ = '.'.join(str(x) for x in version_info) 25 | 26 | KNOWN_KERNELS = { 27 | 'ipykernel': { 28 | 'language': 'python', 29 | 'argv': ['python', 30 | "-m", 31 | "ipykernel_launcher", 32 | "-f", 33 | "{connection_file}"], 34 | }, 35 | 'ir': { 36 | 'language': 'R', 37 | 'argv': ['R', 38 | '--slave', 39 | '-e', 40 | 'IRkernel::main()', 41 | '--args', 42 | '{connection_file}'], 43 | }, 44 | 'imatlab': { 45 | 'language': 'matlab', 46 | 'argv': ['python', 47 | '-m', 48 | 'imatlab', 49 | '-f', 50 | '{connection_file}'], 51 | } 52 | } 53 | 54 | 55 | def split_doubledash(argv, maxsplit=None): 56 | """Split on '--', for spearating arguments""" 57 | new = [ ] 58 | last = 0 59 | nsplit = 0 60 | for i, x in enumerate(argv): 61 | if x == '--': 62 | new.append(argv[last:i]) 63 | last = i + 1 64 | nsplit += 1 65 | if maxsplit is not None and nsplit >= maxsplit: 66 | break 67 | new.append(argv[last:]) 68 | return new 69 | 70 | 71 | 72 | def find_connection_file(args): 73 | for i, a in enumerate(args): 74 | if a == '-f': 75 | return args[i+1] 76 | 77 | 78 | 79 | def path_join(*args): 80 | """Join the arguments using ':', like the PATH environment variable""" 81 | if len(args) == 1: 82 | return args[0] 83 | if args[1] is None or args[1] == '': 84 | return path_join(args[0], *args[2:]) 85 | path = os.pathsep.join([args[0], args[1]]) 86 | return path_join(path, *args[2:]) 87 | 88 | 89 | def printargs(args): 90 | return ' '.join(shlex.quote(x) for x in args) 91 | 92 | 93 | 94 | class envkernel(): 95 | execvp = staticmethod(os.execvp) 96 | def __init__(self, argv): 97 | LOG.debug('envkernel: cli args: %s', argv) 98 | self.argv = argv 99 | def setup(self): 100 | parser = argparse.ArgumentParser() 101 | parser.add_argument('--name', required=True, 102 | help="Kernel name to install as") 103 | parser.add_argument('--display-name', 104 | help="Display name of kernel") 105 | parser.add_argument('--user', action='store_true', default=False, 106 | help="Install kernel to user dir") 107 | parser.add_argument('--sys-prefix', action='store_true', 108 | help="Install kernel to this Python's sys.prefix") 109 | parser.add_argument('--prefix', 110 | help="Install kernel to this prefix") 111 | parser.add_argument('--replace', action='store_true', 112 | help="Replace existing kernel") 113 | parser.add_argument('--kernel', 114 | help="Kernel to install, options are ipykernel or ir (default ipykernel). This " 115 | "simply sets the --kernel-cmd and --language options to the proper " 116 | "values for these well-known kernels. It could break, however. --kernel-cmd " 117 | "overrides this.") 118 | parser.add_argument('--kernel-template') 119 | parser.add_argument('--python', default=None, 120 | help="Python command to run (default 'python')") 121 | parser.add_argument('--kernel-cmd', 122 | help="Kernel command to run, separated by spaces. 
If this is given, --python is not used.") 123 | parser.add_argument('--kernel-make-path-relative', action='store_true', 124 | help="Remove any leading absolute path from the kernel command. Mainly " 125 | "useful with --kernel-template.") 126 | parser.add_argument('--language', 127 | help="Language to put into kernel file (default based on --kernel)") 128 | parser.add_argument('--env', action='append', default=[], 129 | help="Environment to add, format NAME=VAL. Can be given multiple times. " 130 | "These are statically embedded in the kernel.json file") 131 | parser.add_argument('--verbose', '-v', action='store_true', 132 | help="Print more debugging information") 133 | args, unknown_args = parser.parse_known_args(self.argv) 134 | if args.verbose: 135 | LOG.setLevel(logging.DEBUG) 136 | 137 | LOG.debug('setup: envkernel setup args: %s', args) 138 | LOG.debug('setup: kernel-specific args: %s', unknown_args) 139 | self.setup_args = args 140 | self.name = args.name 141 | self.user = args.user 142 | if args.sys_prefix: 143 | self.prefix = sys.prefix 144 | else: 145 | self.prefix = args.prefix 146 | self.replace = args.replace 147 | self.copy_files = { } 148 | 149 | # Setting the kernel. Go through least-specific to 150 | # most-specific, updating self.kernel with the latest (most 151 | # specific) attributes. If *nothing* is specificed, the 152 | # --kernel option is taken as the default of ipykernel and 153 | # sets the minimal working set of things. 154 | 155 | # Existing kernel as a template. 156 | self.kernel = { } 157 | if args.kernel_template: 158 | import jupyter_client.kernelspec 159 | template = jupyter_client.kernelspec.KernelSpecManager().get_kernel_spec(args.kernel_template) 160 | template_dir = template.resource_dir 161 | self.copy_files.update({x: pjoin(template_dir, x) for x in os.listdir(template_dir)}) 162 | self.kernel = json.loads(template.to_json()) 163 | # --kernel which sets to default well-known kernels. 
164 | if args.kernel is None and 'argv' not in self.kernel: 165 | args.kernel = 'ipykernel' 166 | if args.kernel: 167 | if args.kernel in KNOWN_KERNELS: 168 | self.kernel.update(copy.deepcopy(KNOWN_KERNELS[args.kernel])) 169 | else: 170 | LOG.critical("Unknown kernel: %s", args.kernel) 171 | # kernelcmd 172 | if args.kernel_cmd: 173 | self.kernel['argv'] = args.kernel_cmd.split() 174 | # language 175 | if args.language: 176 | self.kernel['language'] = args.language 177 | # python 178 | if args.python == 'SELF': 179 | self.kernel['argv'][0] = sys.executable 180 | elif args.python: 181 | self.kernel['argv'][0] = args.python 182 | if args.display_name: 183 | self.kernel['display_name'] = args.display_name 184 | # Make the kernel path relative 185 | if args.kernel_make_path_relative: 186 | self.kernel['argv'][0] = self.kernel['argv'][0].rsplit('/', 1)[-1] 187 | # Copy logos from upstream packages, if exists 188 | self.logos = None 189 | if self.kernel['language'] == 'python': 190 | try: 191 | import ipykernel 192 | ipykernel_dir = os.path.dirname(ipykernel.__file__) 193 | logos = glob.glob(pjoin(ipykernel_dir, 'resources', '*')) 194 | for fullpath in logos: 195 | fname = os.path.basename(fullpath) 196 | if fname not in self.copy_files: 197 | self.copy_files[fname] = fullpath 198 | except ImportError: 199 | LOG.debug("Could not automatically find ipykernel logos") 200 | # env 201 | for env in args.env: 202 | name, value = env.split('=', 1) 203 | self.kernel.setdefault('env', {})[name] = value 204 | # 205 | self.argv = unknown_args 206 | 207 | def _get_parser(self): 208 | pass 209 | 210 | def get_kernel(self): 211 | return copy.deepcopy(self.kernel) 212 | 213 | def install_kernel(self, kernel, name, user=False, replace=None, prefix=None, logos=None): 214 | """Install a kernel (as given by json) to a kernel directory 215 | 216 | A thin wrapper around jupyter_client.kernelspec.KernelSpecManager().install_kernel_spec. 217 | 218 | kernel: kernel JSON 219 | name: kernel name 220 | """ 221 | import jupyter_client.kernelspec 222 | #jupyter_client.kernelspec.KernelSpecManager().get_kernel_spec('python3').argv 223 | 224 | # 'replace' has been depricated since 2015 225 | if jupyter_client.version_info >= (4, 0, 0): 226 | replace = None 227 | 228 | with tempfile.TemporaryDirectory(prefix='jupyter-kernel-secure-') \ 229 | as kernel_dir: 230 | # Apply umask 231 | umask = os.umask(0) 232 | os.umask(umask) # Restore previous, this is just how it works... 233 | os.chmod(kernel_dir, 0o777& (~umask)) 234 | # Copy files from template. This also copies kernel.json, 235 | # but we will overwrite that next. 
236 | for fname, fullpath in self.copy_files.items(): 237 | shutil.copy(fullpath, pjoin(kernel_dir, fname)) 238 | # Write kernel.json 239 | open(pjoin(kernel_dir, 'kernel.json'), 'w').write( 240 | json.dumps(kernel, sort_keys=True, indent=1)) 241 | jupyter_client.kernelspec.KernelSpecManager().install_kernel_spec( 242 | kernel_dir, kernel_name=name, 243 | user=user, replace=replace, prefix=prefix) 244 | 245 | LOG.info("") 246 | LOG.info(" Kernel command: %s", kernel['argv']) 247 | try: 248 | LOG.info(" Success: Kernel saved to {}".format(jupyter_client.kernelspec.KernelSpecManager().get_kernel_spec(name).resource_dir)) 249 | except jupyter_client.kernelspec.NoSuchKernel: 250 | LOG.info(" Note: Kernel not detected with current search path.") 251 | #LOG.info(" Kernel file:\n%s", textwrap.indent(json.dumps(kernel, sort_keys=True, indent=1), ' ')) 252 | 253 | def run(self): 254 | """Hook that gets run before kernel invoked""" 255 | # User does not directly see this (except interleaved in 256 | # normal jupyter logging output), so we can set it to debug 257 | # by default. 258 | LOG.setLevel(logging.DEBUG) 259 | 260 | 261 | 262 | class lmod(envkernel): 263 | def setup(self): 264 | super().setup() 265 | kernel = self.get_kernel() 266 | kernel['argv'] = [ 267 | os.path.realpath(sys.argv[0]), 268 | self.__class__.__name__, 'run', 269 | *self.argv, 270 | '--', 271 | *kernel['argv'], 272 | ] 273 | if 'display_name' not in kernel: 274 | kernel['display_name'] = "{}".format(' '.join(self.argv)) 275 | self.install_kernel(kernel, name=self.name, user=self.user, 276 | replace=self.replace, prefix=self.prefix) 277 | 278 | def run(self): 279 | """load modules and run: 280 | 281 | before '--': the modules to load 282 | after '--': the Python command to run after loading""" 283 | super().run() 284 | argv, rest = split_doubledash(self.argv, 1) 285 | parser = argparse.ArgumentParser() 286 | parser.add_argument('--purge', action='store_true', default=False, help="Purge existing modules first") 287 | parser.add_argument('module', nargs='+') 288 | args, unknown_args = parser.parse_known_args(argv) 289 | 290 | #print(args) 291 | #print('stderr', args, file=sys.stderr) 292 | LOG.debug('run: args: %s', args) 293 | LOG.debug('run: remaining args: %s', unknown_args) 294 | 295 | #LMOD_INIT = os.environ['LMOD_PKG']+'/init/env_modules_python.py' 296 | #exec(compile(open(LMOD_INIT).read(), LMOD_INIT, 'exec')) 297 | def module(command, *arguments): 298 | """Copy of the lmod command above, but works on python2&3 299 | 300 | ... to work around old lmod installations that don't have 301 | python3 support. 
302 | """ 303 | commands = os.popen( 304 | '%s/libexec/lmod python %s %s'\ 305 | % (os.environ['LMOD_PKG'], command, ' '.join(arguments))).read() 306 | exec(commands) 307 | if args.purge: 308 | LOG.debug('Lmod purging') 309 | module('purge') 310 | LOG.debug('Lmod loading ' + ' '.join(args.module)) 311 | module('load', *args.module) 312 | 313 | LOG.debug('envkernel running: %s', printargs(rest)) 314 | LOG.debug('PATH: %s', os.environ['PATH']) 315 | self.execvp(rest[0], rest) 316 | 317 | 318 | 319 | class conda(envkernel): 320 | def setup(self): 321 | super().setup() 322 | parser = argparse.ArgumentParser() 323 | parser.add_argument('path') 324 | args, unknown_args = parser.parse_known_args(self.argv) 325 | 326 | kernel = self.get_kernel() 327 | path = args.path 328 | path = os.path.abspath(path) 329 | kernel['argv'] = [ 330 | os.path.realpath(sys.argv[0]), 331 | self.__class__.__name__, 'run', 332 | *unknown_args, 333 | path, 334 | '--', 335 | *kernel['argv'], 336 | ] 337 | if 'display_name' not in kernel: 338 | kernel['display_name'] = "{} ({}, {})".format( 339 | os.path.basename(args.path.strip('/')), 340 | self.__class__.__name__, 341 | path) 342 | if args.path != 'TESTTARGET': # un-expanded 343 | if not os.path.exists(path): 344 | print(self.notfound_message%(self.__class__.__name__, path)) 345 | LOG.critical("ERROR: %s does not exist: %s", self.__class__.__name__, path) 346 | sys.exit(1) 347 | if not os.path.exists(pjoin(path, 'bin')): 348 | print(self.notfound_message%(self.__class__.__name__, path+'/bin')) 349 | LOG.critical("ERROR: %s bin does not exist: %s/bin", self.__class__.__name__, path) 350 | sys.exit(1) 351 | self.install_kernel(kernel, name=self.name, user=self.user, 352 | replace=self.replace, prefix=self.prefix) 353 | 354 | notfound_message = """\ 355 | ERROR: %s path does not exist: %s 356 | 357 | You need to give a path to the environment, not just the name. (A 358 | relative path gets expanded to an absolute path.) 
359 | 360 | For conda, you can get this from: 361 | conda env list 362 | 363 | If it's a conda environment and it is activated, you can use 364 | $CONDA_PREFIX like such: 365 | envkernel conda [other arguments] $CONDA_PREFIX 366 | """ 367 | 368 | 369 | def run(self): 370 | """load modules and run: 371 | 372 | before '--': the modules to load 373 | after '--': the Python command to run after loading""" 374 | super().run() 375 | argv, rest = split_doubledash(self.argv, 1) 376 | parser = argparse.ArgumentParser() 377 | #parser.add_argument('--purge', action='store_true', default=False, help="Purge existing modules first") 378 | parser.add_argument('path') 379 | args, unknown_args = parser.parse_known_args(argv) 380 | 381 | #print(args) 382 | #print('stderr', args, file=sys.stderr) 383 | LOG.debug('run: args: %s', args) 384 | LOG.debug('run: remaining args: %s', unknown_args) 385 | 386 | path = args.path 387 | if not os.path.exists(path): 388 | LOG.critical("%s path does not exist: %s", self.__class__.__name__, path) 389 | raise RuntimeError("envkernel: {} path {} does not exist".format(self.__class__.__name__, path)) 390 | if not os.path.exists(pjoin(path, 'bin')): 391 | LOG.critical("%s bin does not exist: %s/bin", self.__class__.__name__, path) 392 | raise RuntimeError("envkernel: {} path {} does not exist".format(self.__class__.__name__, path+'/bin')) 393 | 394 | self._run(args, rest) 395 | 396 | def _run(self, args, rest): 397 | path = args.path 398 | os.environ['PATH'] = path_join(pjoin(path, 'bin' ), os.environ.get('PATH', None)) 399 | os.environ['CPATH'] = path_join(pjoin(path, 'include'), os.environ.get('CPATH', None)) 400 | os.environ['LD_LIBRARY_PATH'] = path_join(pjoin(path, 'lib' ), os.environ.get('LD_LIBRARY_PATH', None)) 401 | os.environ['LIBRARY_PATH'] = path_join(pjoin(path, 'lib' ), os.environ.get('LIBRARY_PATH', None)) 402 | 403 | self.execvp(rest[0], rest) 404 | 405 | 406 | 407 | class virtualenv(conda): 408 | def _run(self, args, rest): 409 | path = args.path 410 | os.environ.pop('PYTHONHOME', None) 411 | os.environ['PATH'] = path_join(pjoin(path, 'bin'), os.environ.get('PATH', None)) 412 | if 'PS1' in os.environ: 413 | os.environ['PS1'] = "(venv3) " + os.environ['PS1'] 414 | 415 | self.execvp(rest[0], rest) 416 | notfound_message = """\ 417 | ERROR: %s path does not exist: %s 418 | 419 | You need to give a path to the environment, not just the name. A 420 | relative path gets expanded to an absolute path. 
421 | """ 422 | 423 | 424 | 425 | class venv(virtualenv): 426 | pass 427 | 428 | 429 | 430 | class docker(envkernel): 431 | def setup(self): 432 | super().setup() 433 | parser = argparse.ArgumentParser() 434 | parser.add_argument('image') 435 | args, unknown_args = parser.parse_known_args(self.argv) 436 | LOG.debug('setup: %s', args) 437 | 438 | kernel = self.get_kernel() 439 | kernel['argv'] = [ 440 | os.path.realpath(sys.argv[0]), 441 | 'docker', 442 | 'run', 443 | '--connection-file', '{connection_file}', 444 | args.image, 445 | *unknown_args, 446 | '--', 447 | *kernel['argv'], 448 | ] 449 | if 'display_name' not in kernel: 450 | kernel['display_name'] = "Docker with {}".format(args.image), 451 | self.install_kernel(kernel, name=self.name, user=self.user, 452 | replace=self.replace, prefix=self.prefix) 453 | 454 | def run(self): 455 | super().run() 456 | argv, rest = split_doubledash(self.argv, 1) 457 | parser = argparse.ArgumentParser() 458 | parser.add_argument('image', help='Docker image name') 459 | #parser.add_argument('--mount', '-m', action='append', default=[], 460 | # help='mount to set up, format hostDir:containerMountPoint') 461 | parser.add_argument('--copy-workdir', default=False, action='store_true') 462 | parser.add_argument('--pwd', action='store_true', 463 | help="Also mount the Jupyter working directory (containing the notebook) " 464 | "in the image. This is needed if you want to access data from this dir.") 465 | parser.add_argument('--workdir', help='Location to mount working dir inside the container') 466 | parser.add_argument('--connection-file', help="Do not use, internal use.") 467 | 468 | args, unknown_args = parser.parse_known_args(argv) 469 | 470 | extra_mounts = [ ] 471 | 472 | # working dir 473 | if args.pwd or args.workdir: 474 | workdir = os.getcwd() 475 | if args.workdir: 476 | workdir = args.workdir 477 | # src = host data, dst=container mountpoint 478 | extra_mounts.extend(["--mount", "type=bind,source={},destination={},ro={}{}".format(os.getcwd(), workdir, 'false', ',copy' if args.copy_workdir else '')]) 479 | 480 | cmd = [ 481 | "docker", "run", "--rm", "-i", 482 | "--user", "%d:%d"%(os.getuid(), os.getgid()), 483 | ] 484 | 485 | # Parse connection file 486 | connection_file = args.connection_file 487 | connection_data = json.load(open(connection_file)) 488 | # Find all the (five) necessary ports 489 | for var in ('shell_port', 'iopub_port', 'stdin_port', 'control_port', 'hb_port'): 490 | # Forward each port to itself 491 | port = connection_data[var] 492 | #expose_ports.append((connection_data[var], connection_data[var])) 493 | cmd.extend(['--expose={}'.format(port), "-p", "{}:{}".format(port, port)]) 494 | # Mount the connection file inside the container 495 | extra_mounts.extend(["--mount", 496 | "type=bind,source={},destination={},ro={}".format( 497 | connection_file, connection_file, 'false' 498 | ) 499 | ]) 500 | #expose_mounts.append(dict(src=json_file, dst=json_file)) 501 | 502 | # Change connection_file to bind to all IPs. 
503 | connection_data['ip'] = '0.0.0.0' 504 | open(connection_file, 'w').write(json.dumps(connection_data)) 505 | 506 | # Add options to expose the ports 507 | # for port_host, port_container in expose_ports: 508 | # cmd.extend(['--expose={}'.format(port_container), "-p", "{}:{}".format(port_host, port_container)]) 509 | 510 | ## Add options for exposing mounts 511 | #tmpdirs = [ ] # keep reference to clean up later 512 | #for mount in expose_mounts: 513 | # src = mount['src'] # host data 514 | # dst = mount['dst'] # container mountpoint 515 | # if mount.get('copy'): 516 | # tmpdir = tempfile.TemporaryDirectory(prefix='jupyter-secure-') 517 | # tmpdirs.append(tmpdir) 518 | # src = tmpdir.name + '/copy' 519 | # shutil.copytree(mount['src'], src) 520 | # cmd.extend(["--mount", "type=bind,source={},destination={},ro={}".format(src, dst, 'true' if mount.get('ro') else 'false')]) # ro=true 521 | #cmd.extend(("--workdir", workdir)) 522 | 523 | # Process all of our mounts, to do two things: 524 | # Substitute {workdir} with 525 | unknown_args.extend(extra_mounts) 526 | tmpdirs = [] 527 | for i, arg in enumerate(unknown_args): 528 | if '{workdir}' in arg and args.copy_workdir: 529 | arg = arg + ',copy' 530 | arg.format(workdir=os.getcwd) 531 | if ',copy' in arg: 532 | src_original = re.search('src=([^,]+)', arg).group(1) 533 | # Copy the source directory 534 | tmpdir = tempfile.TemporaryDirectory(prefix='jupyter-secure-') 535 | tmpdirs.append(tmpdir) 536 | src = tmpdir.name + '/copy' 537 | shutil.copytree(src_original, src) 538 | # 539 | newarg = re.sub('src=([^,]+)', 'src='+src, arg) # add in new src 540 | newarg = re.sub(',copy', '', newarg) # remove ,copy 541 | unknown_args[i] = newarg 542 | 543 | # Image name 544 | # cmd.append(args.image) 545 | 546 | # Remainder of all other arguments from the kernel specification 547 | cmd.extend([ 548 | *unknown_args, 549 | # '--debug', 550 | args.image, 551 | *rest, 552 | ]) 553 | 554 | # Run... 
555 | LOG.info('docker: running cmd = %s', printargs(cmd)) 556 | ret = self.execvp(cmd[0], cmd) 557 | 558 | # Clean up all temparary directories 559 | for tmpdir in tmpdirs: 560 | tmpdir.cleanup() 561 | return(ret) 562 | 563 | 564 | class singularity(envkernel): 565 | def setup(self): 566 | """Install a new singularity kernelspec""" 567 | super().setup() 568 | parser = argparse.ArgumentParser() 569 | parser.add_argument('image') 570 | args, unknown_args = parser.parse_known_args(self.argv) 571 | LOG.debug('setup: args: %s', args) 572 | LOG.debug('setup: remaining args: %s', unknown_args) 573 | 574 | kernel = self.get_kernel() 575 | image = os.path.abspath(args.image) 576 | kernel['argv'] = [ 577 | os.path.realpath(sys.argv[0]), 578 | 'singularity', 'run', 579 | '--connection-file', '{connection_file}', 580 | #*[ '--mount={}'.format(x) for x in args.mount], 581 | image, 582 | *unknown_args, 583 | '--', 584 | *kernel['argv'], 585 | ] 586 | if 'display_name' not in kernel: 587 | kernel['display_name'] = "Singularity with {}".format(args.image) 588 | self.install_kernel(kernel, name=self.name, user=self.user, 589 | replace=self.replace, prefix=self.prefix) 590 | 591 | def run(self): 592 | super().run() 593 | argv, rest = split_doubledash(self.argv, 1) 594 | parser = argparse.ArgumentParser() 595 | parser.add_argument('image', help='image name') 596 | #parser.add_argument('--mount', '-m', action='append', default=[], 597 | # help='mount to set up, format hostDir:containerMountPoint') 598 | #parser.add_argument('--copy-pwd', default=False, action='store_true') 599 | parser.add_argument('--pwd', action='store_true') 600 | parser.add_argument('--connection-file') 601 | args, unknown_args = parser.parse_known_args(argv) 602 | LOG.debug('run: args: %s', args) 603 | LOG.debug('run: remaining args: %s', unknown_args) 604 | LOG.debug('run: rest: %s', rest) 605 | 606 | extra_args = [ ] 607 | 608 | # Find connection file and mount it: 609 | connection_file = args.connection_file 610 | # If we mount with --contain, then /tmp is missing and *can't* 611 | # have any extra files mounted inside of it. This means that 612 | # we have to relocate the connection file to "/" or somewhere. 613 | new_connection_file = "/"+os.path.basename(connection_file) 614 | if False: 615 | # Re-copy connection file to /tmp 616 | # Doesn't work now! 617 | f = tempfile.NamedTemporaryFile( 618 | suffix='-'+os.path.basename(connection_file)) 619 | f.write(open(connection_file, 'rb').read()) 620 | f.close() 621 | i = rest.index(connection_file) 622 | rest[i] = f.name 623 | else: 624 | # enable overlay = yes 625 | # We can directly mount the connection file in... 626 | extra_args.extend(['--bind', connection_file+":"+new_connection_file]) 627 | 628 | if args.pwd: 629 | extra_args.extend(['--bind', os.getcwd()]) 630 | extra_args.extend(['--pwd', os.getcwd()]) 631 | 632 | # Replace the connection file path with the new location. 633 | idx = rest.index(connection_file) 634 | rest[idx] = new_connection_file 635 | 636 | # In Singularity 2.4 at least, --pwd does not work when used 637 | # with --contain. This manually does it using a bash cd + exec. 
638 | if ('-c' in unknown_args or '--contain' in unknown_args ) and args.pwd: 639 | rest = ["bash", "-c", "cd %s"%os.getcwd() + " ; exec "+(" ".join(shlex.quote(x) for x in rest))] 640 | 641 | cmd = [ 642 | 'singularity', 643 | 'exec', 644 | *extra_args, 645 | *unknown_args, 646 | args.image, 647 | *rest, 648 | ] 649 | 650 | LOG.debug('singularity: running cmd= %s', printargs(cmd)) 651 | ret = self.execvp(cmd[0], cmd) 652 | return(ret) 653 | 654 | 655 | 656 | def main(argv=sys.argv): 657 | mod = argv[1] 658 | if mod in {'-h', '--help'}: 659 | all_mods = {name for (name, x) in globals().items() 660 | if isinstance(x, type) and issubclass(x, envkernel) and x!=envkernel} 661 | print("envkernel must be called with the name of a module as the first argument.") 662 | print("Currently help does not show mode-options for each module, please see the") 663 | print("README.") 664 | print("") 665 | print("available modules:", *sorted(all_mods)) 666 | print("") 667 | print("General usage: envkernel [envkernel-options] [mode-options]") 668 | print("") 669 | print("envkernel-options:") 670 | print("") 671 | envkernel(sys.argv).setup() 672 | exit(0) 673 | cls = globals()[mod] 674 | if len(argv) > 2 and argv[2] == 'run': 675 | return cls(argv[3:]).run() 676 | else: 677 | cls(argv[2:]).setup() 678 | return 0 679 | 680 | if __name__ == '__main__': 681 | exit(main()) 682 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | ipykernel 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | jupyter_client 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | # Get version 4 | print("in setup.py") 5 | main_ns = {} 6 | with open('envkernel.py') as ver_file: 7 | exec(ver_file.read(), main_ns) 8 | 9 | print(main_ns['version_info'], main_ns['__version__']) 10 | 11 | 12 | with open("README.md", "r") as fh: 13 | long_description = fh.read() 14 | 15 | setuptools.setup( 16 | name="envkernel", 17 | version=main_ns['__version__'], 18 | author="Richard Darst", 19 | author_email="rkd@zgib.net", 20 | description="Jupyter kernels manipulation and in other environments (docker, Lmod, etc.)", 21 | long_description=long_description, 22 | long_description_content_type="text/markdown", 23 | url="https://github.com/NordicHPC/envkernel", 24 | #packages=setuptools.find_packages(), 25 | py_modules=["envkernel"], 26 | keywords='jupyter kernelspec', 27 | python_requires='>=3.5', 28 | entry_points={ 29 | 'console_scripts': [ 30 | 'envkernel=envkernel:main', 31 | ], 32 | }, 33 | classifiers=[ 34 | "Programming Language :: Python :: 3", 35 | "License :: OSI Approved :: MIT License", 36 | "Operating System :: OS Independent", 37 | "Development Status :: 4 - Beta", 38 | "Framework :: Jupyter", 39 | "Environment :: Console", 40 | "Intended Audience :: System Administrators", 41 | "Intended Audience :: Developers", 42 | "Intended Audience :: Science/Research", 43 | ], 44 | ) 45 | -------------------------------------------------------------------------------- /test-data/env/bin/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/NordicHPC/envkernel/47ab80ae0c5b4a2b87c0c9f745ae9242099e96a8/test-data/env/bin/.gitkeep -------------------------------------------------------------------------------- /test_envkernel.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | from os.path import join as pjoin 5 | import pytest 6 | import shlex 7 | import subprocess 8 | import sys 9 | import tempfile 10 | 11 | import envkernel 12 | 13 | sys.argv[0] = 'envkernel' 14 | 15 | TEST_CONNECTION_FILE = """\ 16 | { 17 | "shell_port": 10000, 18 | "iopub_port": 10001, 19 | "stdin_port": 10002, 20 | "control_port": 10003, 21 | "hb_port": 10004, 22 | "ip": "127.0.0.1", 23 | "key": "00000000-000000000000000000000000", 24 | "transport": "tcp", 25 | "signature_scheme": "hmac-sha256", 26 | "kernel_name": "" 27 | } 28 | """ 29 | ALL_MODULES = ["conda", "virtualenv", "venv", "lmod", "docker", "singularity"] 30 | 31 | 32 | def install(d, argv, name='testkernel'): 33 | """Run envkernel setup, return dict with properties""" 34 | if isinstance(argv, str): 35 | argv = shlex.split(argv) 36 | argv.insert(0, 'envkernel') 37 | argv[2:2] = ['--name', name, '--prefix', d] 38 | envkernel.main(argv) 39 | return get(d, name) 40 | 41 | def get(d, name): 42 | """From an installed kernel, return dict with properties for testing.""" 43 | dir_ = pjoin(d, 'share/jupyter/kernels/', name) 44 | kernel = json.load(open(pjoin(dir_, 'kernel.json'))) 45 | return { 46 | 'dir': dir_, 47 | 'kernel': kernel, 48 | 'ek': envkernel.split_doubledash(kernel['argv'])[0], 49 | 'k': envkernel.split_doubledash(kernel['argv'])[1], 50 | } 51 | 52 | def run(d, kern, execvp=lambda _argv0, argv: 0): 53 | """Start envkernel in "run" mode to see if it can run successfully. 54 | """ 55 | connection_file = pjoin(d, 'connection.json') 56 | open(connection_file, 'w').write(TEST_CONNECTION_FILE) 57 | # Do basic tests 58 | argv = kern['kernel']['argv'] 59 | clsname = argv[1] 60 | assert argv[2] == 'run' 61 | # Replace connecton file 62 | argv = [ replace_conn_file(x, connection_file) for x in argv ] 63 | # Setup object, override the execvp for the function, run. 
64 | ek = getattr(envkernel, clsname)(argv[3:]) 65 | ek.execvp = execvp 66 | ek.run() 67 | 68 | @pytest.fixture(scope='function') 69 | def d(): 70 | """Temporary directory for test""" 71 | with tempfile.TemporaryDirectory() as dir_: 72 | yield dir_ 73 | 74 | def replace_conn_file(arg, connection_file): 75 | if isinstance(arg, list): 76 | return [ replace_conn_file(x, connection_file) for x in arg ] 77 | if arg == '{connection_file}': 78 | return connection_file 79 | else: 80 | return arg 81 | 82 | def all_modes(modes=None): 83 | if not modes: 84 | modes = ALL_MODULES 85 | return pytest.mark.parametrize("mode", modes) 86 | 87 | def is_sublist(list_, sublist): 88 | """A sublist is part of a given list""" 89 | return any(list_[i:i+len(sublist)] == sublist 90 | for i in range(len(list_)-len(sublist)+1)) 91 | 92 | 93 | 94 | 95 | @all_modes() 96 | def test_basic(d, mode): 97 | kern = install(d, "%s TESTTARGET"%mode) 98 | #assert kern['argv'][0] == 'envkernel' # defined above 99 | assert kern['ek'][1:3] == [mode, 'run'] 100 | 101 | @all_modes() 102 | def test_display_name(d, mode): 103 | kern = install(d, "%s --display-name=AAA TESTTARGET"%mode) 104 | assert kern['kernel']['display_name'] == 'AAA' 105 | 106 | @all_modes(['conda']) 107 | def test_template(d, mode): 108 | os.environ['JUPYTER_PATH'] = pjoin(d, 'share/jupyter') 109 | subprocess.call("python -m ipykernel install --name=aaa-ipy --display-name=BBB --prefix=%s"%d, shell=True) 110 | #os.environ['ENVKERNEL_TESTPATH'] = os.path.join(d, 'share/jupyter/kernels') 111 | kern = install(d, "%s --kernel-template aaa-ipy TESTTARGET"%mode) 112 | assert kern['kernel']['display_name'] == 'BBB' 113 | 114 | @all_modes(['conda']) 115 | def test_template_copyfiles(d, mode): 116 | os.environ['JUPYTER_PATH'] = pjoin(d, 'share/jupyter') 117 | subprocess.call("python -m ipykernel install --name=aaa-ipy --display-name=BBB --prefix=%s"%d, shell=True) 118 | f = open(pjoin(d, 'share/jupyter/kernels/', 'aaa-ipy', 'A.txt'), 'w') 119 | f.write('LMNO') 120 | f.close() 121 | kern = install(d, "%s --kernel-template aaa-ipy TESTTARGET"%mode) 122 | assert os.path.exists(pjoin(kern['dir'], 'A.txt')) 123 | assert open(pjoin(kern['dir'], 'A.txt')).read() == 'LMNO' 124 | 125 | @all_modes(['conda']) 126 | def test_template_make_path_relative(d, mode): 127 | os.environ['JUPYTER_PATH'] = pjoin(d, 'share/jupyter') 128 | subprocess.call("python -m ipykernel install --name=aaa-ipy --display-name=BBB --prefix=%s"%d, shell=True) 129 | # First test it without, ensure it has the full path 130 | kern = install(d, "%s --kernel-template aaa-ipy TESTTARGET"%mode) 131 | assert kern['k'][0] != 'python' # This is an absolete path 132 | # Now test it with --kernel-make-path-relative and ensure it's relative 133 | kern = install(d, "%s --kernel-template aaa-ipy --kernel-make-path-relative TESTTARGET"%mode) 134 | assert kern['k'][0] == 'python' # This is an absolete path 135 | 136 | def test_help(): 137 | """Test that the global -h option works and prints module names""" 138 | p = subprocess.Popen("python -m envkernel -h", shell=True, stdout=subprocess.PIPE) 139 | stdout = p.stdout.read().decode() 140 | p.wait() 141 | for mod in ALL_MODULES: 142 | assert mod in stdout, "%s not found in --help output"%mod 143 | assert p.returncode == 0 144 | 145 | def test_logging(d, caplog): 146 | """Test that the global -v option works and increases debugging 147 | 148 | Run first without -v and make sure some stuff isn't printed. 149 | Then, run with -v and ensure that the argument processing is output. 
150 | """ 151 | cmd = "python3 -m envkernel lmod --name=ABC --display-name=AAA LMOD --prefix=%s"%d 152 | print(d) 153 | env = os.environ.copy() 154 | env['JUPYTER_PATH'] = pjoin(d, 'share/jupyter') 155 | # First, test non-verbose (should have minimal output) 156 | p = subprocess.Popen(cmd, env=env, 157 | shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 158 | stdout = p.stdout.read().decode() 159 | p.wait() 160 | print(stdout) 161 | assert 'Namespace' not in stdout 162 | assert 'kernel-specific' not in stdout 163 | assert 'Kernel command:' in stdout 164 | # Now test verbose (should have some debugging info) 165 | p = subprocess.Popen(cmd+' -v', env=env, 166 | shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 167 | stdout = p.stdout.read().decode() 168 | p.wait() 169 | #print(stdout) 170 | assert 'Namespace' in stdout 171 | assert 'kernel-specific' in stdout 172 | assert 'Kernel command:' in stdout 173 | 174 | def test_umask(d): 175 | orig_umask = os.umask(0) 176 | # Test multiple umasks, and kerneldir + kernel.json 177 | for umask in [0, 0o022]: 178 | os.umask(umask) 179 | kern = install(d, "lmod TESTTARGET") 180 | assert os.stat(kern['dir']).st_mode & 0o777 == 0o777&(~umask) 181 | assert os.stat(pjoin(kern['dir'], 'kernel.json')).st_mode & 0o777 == 0o666&(~umask) 182 | os.umask(orig_umask) 183 | 184 | 185 | @all_modes() 186 | def test_set_python(d, mode): 187 | kern = install(d, "%s --python=AAA TESTTARGET"%mode) 188 | assert kern['k'][0] == 'AAA' 189 | 190 | @all_modes() 191 | def test_set_kernel_cmd(d, mode): 192 | kern = install(d, "%s --kernel-cmd='a b c d' TESTTARGET"%mode) 193 | assert kern['k'] == ['a', 'b', 'c', 'd'] 194 | 195 | @all_modes() 196 | def test_language(d, mode): 197 | kern = install(d, "%s --language=AAA TESTTARGET"%mode) 198 | assert kern['kernel']['language'] == 'AAA' 199 | 200 | @all_modes() 201 | def test_env(d, mode): 202 | kern = install(d, "%s --env=AAA=BBB --env=CCC=DDD TESTTARGET"%mode) 203 | assert kern['kernel']['env']['AAA'] == 'BBB' 204 | assert kern['kernel']['env']['CCC'] == 'DDD' 205 | 206 | 207 | def test_recursive_run(d): 208 | """Test envkernel being called on itself. 
209 | 
210 |     Roughly runs this, which installs a second envkernel kernel using the first as its template:
211 | 
212 |         envkernel lmod --name=test1 LMOD
213 |         envkernel conda --kernel-template=test1 --name=test1 CPATH
214 |     """
215 |     # Must set JUPYTER_PATH to be able to load the new kernel template
216 |     os.environ['JUPYTER_PATH'] = pjoin(d, 'share/jupyter')
217 |     kern1 = install(d, "lmod LMOD", name='test1')
218 |     #print(kern1['kernel']['argv'])
219 |     # Test the conda install
220 |     CPATH = pjoin(d, 'test-conda')
221 |     os.mkdir(CPATH)
222 |     os.mkdir(pjoin(CPATH, 'bin'))
223 |     kern2 = install(d, "conda --kernel-template=test1 %s"%CPATH, name='test1')
224 |     #print(kern2['kernel']['argv'])
225 |     def test_exec(_file, _args):
226 |         #print(kern1['kernel']['argv'])
227 |         # Exclude the last argument, which is the connection file
228 |         assert _args[:-1] == kern1['kernel']['argv'][:-1]
229 |     run(d, kern2, test_exec)
230 | 
231 | 
232 | 
233 | # Languages
234 | @all_modes()
235 | def test_default_language(d, mode):
236 |     # The default kernel is ipykernel, so the language is python
237 |     kern = install(d, "%s TESTTARGET"%mode)
238 |     assert kern['kernel']['language'] == 'python'
239 |     assert kern['k'][0] == 'python'
240 |     assert kern['k'][1:4] == ['-m', 'ipykernel_launcher', '-f']
241 | 
242 | @all_modes()
243 | def test_ipykernel(d, mode):
244 |     kern = install(d, "%s --kernel=ipykernel TESTTARGET"%mode)
245 |     assert kern['kernel']['language'] == 'python'
246 |     assert kern['k'][0] == 'python'
247 |     assert kern['k'][1:4] == ['-m', 'ipykernel_launcher', '-f']
248 | 
249 | @all_modes()
250 | def test_ir(d, mode):
251 |     kern = install(d, "%s --kernel=ir TESTTARGET"%mode)
252 |     assert kern['kernel']['language'] == 'R'
253 |     assert kern['k'][0] == 'R'
254 |     assert kern['k'][1:5] == ['--slave', '-e', 'IRkernel::main()', '--args']
255 | 
256 | @all_modes()
257 | def test_imatlab(d, mode):
258 |     kern = install(d, "%s --kernel=imatlab TESTTARGET"%mode)
259 |     assert kern['kernel']['language'] == 'matlab'
260 |     assert kern['k'][0].endswith('python')
261 |     assert kern['k'][1:4] == ['-m', 'imatlab', '-f']
262 | 
263 | 
264 | 
265 | # Test setting up specific kernels
266 | def test_lmod(d):
267 |     kern = install(d, "lmod MOD1 MOD2")
268 |     #assert kern['argv'][0] == 'envkernel' # defined above
269 |     assert kern['ek'][1:3] == ['lmod', 'run']
270 |     assert kern['ek'][-2] == 'MOD1'
271 |     assert kern['ek'][-1] == 'MOD2'
272 | 
273 | def test_lmod_purge(d):
274 |     kern = install(d, "lmod --purge MOD3")
275 |     #assert kern['argv'][0] == 'envkernel' # defined above
276 |     assert '--purge' in kern['ek'][3:]
277 |     assert kern['ek'][-1] == 'MOD3'
278 | 
279 | def test_conda(d):
280 |     kern = install(d, "conda test-data/env")
281 |     #assert kern['argv'][0] == 'envkernel' # defined above
282 |     assert kern['ek'][1:3] == ['conda', 'run']
283 |     assert kern['ek'][-1].endswith('test-data/env')
284 | 
285 | def test_virtualenv(d):
286 |     kern = install(d, "virtualenv test-data/env")
287 |     #assert kern['argv'][0] == 'envkernel' # defined above
288 |     assert kern['ek'][1:3] == ['virtualenv', 'run']
289 |     assert kern['ek'][-1].endswith('test-data/env')
290 | 
291 | def test_docker(d):
292 |     kern = install(d, "docker --some-arg=AAA TESTIMAGE")
293 |     #assert kern['argv'][0] == 'envkernel' # defined above
294 |     assert kern['ek'][1:3] == ['docker', 'run']
295 |     assert kern['ek'][-2] == 'TESTIMAGE'
296 |     assert '--some-arg=AAA' in kern['ek']
297 | 
298 | def test_singularity(d):
299 |     kern = install(d, "singularity --some-arg=AAA /PATH/TO/TESTIMAGE2")
300 |     #assert kern['argv'][0] == 'envkernel' # defined above
301 |     assert kern['ek'][1:3] == ['singularity', 'run']
302 |     assert kern['ek'][-2] == '/PATH/TO/TESTIMAGE2'
303 |     assert '--some-arg=AAA' in kern['ek']
304 | 
305 | 
306 | 
307 | # Test running kernels
308 | def test_run_conda(d):
309 |     PATH = pjoin(d, 'test-conda')
310 |     os.mkdir(PATH)
311 |     os.mkdir(pjoin(PATH, 'bin'))
312 | 
313 |     def test_exec(_file, _args):
314 |         assert pjoin(PATH, 'bin') in os.environ['PATH'].split(':')
315 |         assert pjoin(PATH, 'include') in os.environ['CPATH'].split(':')
316 |         assert pjoin(PATH, 'lib') in os.environ['LD_LIBRARY_PATH'].split(':')
317 |         assert pjoin(PATH, 'lib') in os.environ['LIBRARY_PATH'].split(':')
318 |     kern = install(d, "conda %s"%PATH)
319 |     run(d, kern, test_exec)
320 | 
321 | def test_run_venv(d):
322 |     PATH = pjoin(d, 'test-venv')
323 |     os.mkdir(PATH)
324 |     os.mkdir(pjoin(PATH, 'bin'))
325 | 
326 |     def test_exec(_file, _args):
327 |         assert pjoin(PATH, 'bin') in os.environ['PATH'].split(':')
328 |     kern = install(d, "virtualenv %s"%PATH)
329 |     run(d, kern, test_exec)
330 | 
331 | # Requires lmod installed... and a module to exist
332 | #def test_run_lmod(d):
333 | 
334 | def test_run_docker(d):
335 |     def test_exec(_file, argv):
336 |         assert argv[0:5] == ['docker', 'run', '--rm', '-i', '--user']
337 |         for port in range(10000, 10005):
338 |             assert '--expose=%d'%port in argv
339 |             assert is_sublist(argv, ['-p', '%d:%d'%(port, port)])
340 |         assert '--some-arg=AAA' in argv
341 |         assert 'IMAGE' in argv
342 |         i = argv.index('-f')
343 |         assert json.loads(open(argv[i+1]).read())['ip'] == '0.0.0.0'
344 |         connection_file = pjoin(d, 'connection.json')
345 |         assert is_sublist(argv, ["--mount", "type=bind,source=%s,destination=%s,ro=false"%(connection_file, connection_file)])
346 |     kern = install(d, "docker --some-arg=AAA IMAGE")
347 |     run(d, kern, test_exec)
348 | 
349 |     # Test the --pwd option, and also that the mount is not added when --pwd is NOT present
350 |     pwd_mount = ["--mount", "type=bind,source=%s,destination=%s,ro=false"%(os.getcwd(), os.getcwd())]
351 |     def test_exec(_file, argv):
352 |         assert is_sublist(argv, pwd_mount)
353 |     kern = install(d, "docker --some-arg=AAA --pwd IMAGE")
354 |     run(d, kern, test_exec)
355 |     # Same test as above, but reversed: without --pwd, the mount must be absent
356 |     def test_exec(_file, argv):
357 |         assert not is_sublist(argv, pwd_mount)
358 |     kern = install(d, "docker --some-arg=AAA IMAGE")
359 |     run(d, kern, test_exec)
360 |     # Custom workdir
361 |     pwd_mount = ["--mount", "type=bind,source=%s,destination=%s,ro=false"%(os.getcwd(), '/WORKDIR')]
362 |     def test_exec(_file, argv):
363 |         assert is_sublist(argv, pwd_mount)
364 |     kern = install(d, "docker --some-arg=AAA --workdir=/WORKDIR IMAGE")
365 |     run(d, kern, test_exec)
366 | 
367 | 
368 | def test_run_singularity(d):
369 |     def test_exec(_file, argv):
370 |         assert argv[0] == 'singularity'
371 |         assert '--some-arg=AAA' in argv
372 |         assert os.path.join(os.getcwd(), 'IMAGE') in argv
373 |     kern = install(d, "singularity --some-arg=AAA IMAGE")
374 |     run(d, kern, test_exec)
375 | 
376 |     def test_exec(_file, argv):
377 |         assert argv[0] == 'singularity'
378 |         assert is_sublist(argv, ['--bind', os.getcwd()])
379 |         assert is_sublist(argv, ['--pwd', os.getcwd()])
380 |         #assert is_sublist(argv, ['--bind', '/PATH/AAA:/PATH/AAA'])
381 |     kern = install(d, "singularity --pwd IMAGE")
382 |     run(d, kern, test_exec)
383 | 
--------------------------------------------------------------------------------
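A minimal usage sketch (not part of the repository) of the is_sublist helper defined in test_envkernel.py above. It assumes the repository root is on sys.path and the test module's dependencies (pytest, ipykernel, etc.) are installed, so that the module can be imported:

# Illustration only: is_sublist() checks whether the elements of `sublist`
# appear as one contiguous run inside `list_`, which is how the docker and
# singularity tests above verify argument pairs like ['-p', '10000:10000'].
from test_envkernel import is_sublist  # assumes test_envkernel.py is importable

argv = ['docker', 'run', '--rm', '-i', '--user', '1000']
assert is_sublist(argv, ['run', '--rm', '-i'])   # contiguous slice -> True
assert not is_sublist(argv, ['docker', '--rm'])  # present but not adjacent -> False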