├── .github └── workflows │ ├── code-quality.yml │ └── main.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .prettierignore ├── .releash.py ├── LICENSE ├── MANIFEST.in ├── README.md ├── RELEASE.md ├── binder └── requirements.txt ├── dev_environment.yml ├── docs ├── Makefile ├── environment.yml ├── make.bat └── source │ ├── AudioRecorder.ipynb │ ├── AudioStream.ipynb │ ├── Big.Buck.Bunny.mp3 │ ├── Big.Buck.Bunny.mp4 │ ├── CameraStream.ipynb │ ├── ImageRecorder.ipynb │ ├── VideoRecorder.ipynb │ ├── VideoStream.ipynb │ ├── WidgetStream.ipynb │ ├── api.rst │ ├── conf.py │ ├── index.rst │ └── ipyvolume.mp4 ├── etc └── jupyter │ └── nbconfig │ └── notebook.d │ └── jupyter-webrtc.json ├── ipywebrtc ├── __init__.py ├── _version.py └── webrtc.py ├── js ├── README.md ├── karma.conf.js ├── package-lock.json ├── package.json ├── src │ ├── embed.js │ ├── extension.js │ ├── index.js │ ├── labplugin.js │ ├── utils.js │ └── webrtc.js ├── test │ ├── dummy-manager.ts │ ├── image-recorder.ts │ ├── index.ts │ ├── jupyter.jpg │ ├── mediastream.ts │ └── widget-utils.ts ├── tsconfig.json ├── tslint.json ├── webpack.config.js └── webpack.config.lab3.js ├── pyproject.toml ├── readthedocs.yml ├── setup.cfg └── setup.py /.github/workflows/code-quality.yml: -------------------------------------------------------------------------------- 1 | name: code-quality 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [main] 7 | 8 | defaults: 9 | run: 10 | shell: bash -l {0} 11 | 12 | jobs: 13 | pre-commit: 14 | runs-on: ubuntu-22.04 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v2 18 | 19 | - name: Install Conda environment with Micromamba 20 | uses: mamba-org/provision-with-micromamba@main 21 | with: 22 | environment-name: ipywebrtc-dev 23 | environment-file: dev_environment.yml 24 | python-version: ${{ matrix.python-version }} 25 | mamba-version: "*" 26 | auto-activate-base: false 27 | channels: conda-forge 28 | 29 | - name: Install dependencies 30 | run: | 31 | pip install 
".[dev]" 32 | pre-commit install 33 | - name: run pre-commit 34 | run: | 35 | pre-commit run --all-files 36 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | defaults: 12 | run: 13 | shell: bash -l {0} 14 | 15 | jobs: 16 | tests: 17 | runs-on: ${{ matrix.os }} 18 | 19 | strategy: 20 | fail-fast: false 21 | matrix: 22 | os: [ubuntu-latest, macos-latest] 23 | python-version: [3.9] 24 | 25 | steps: 26 | - name: Checkout 27 | uses: actions/checkout@v2 28 | 29 | - name: Install Conda environment with Micromamba 30 | uses: mamba-org/provision-with-micromamba@main 31 | with: 32 | environment-name: ipywebrtc-dev 33 | environment-file: dev_environment.yml 34 | python-version: ${{ matrix.python-version }} 35 | mamba-version: "*" 36 | auto-activate-base: false 37 | channels: conda-forge 38 | 39 | - name: Install ipywebrtc 40 | run: pip install . 
41 | 42 | - name: Check installation files 43 | run: | 44 | test -d $CONDA_PREFIX/share/jupyter/nbextensions/jupyter-webrtc 45 | test -f $CONDA_PREFIX/share/jupyter/nbextensions/jupyter-webrtc/extension.js 46 | test -f $CONDA_PREFIX/share/jupyter/nbextensions/jupyter-webrtc/index.js 47 | test -d $CONDA_PREFIX/share/jupyter/labextensions/jupyter-webrtc 48 | test -f $CONDA_PREFIX/share/jupyter/labextensions/jupyter-webrtc/package.json 49 | 50 | - name: Check nbextension and labextension 51 | run: | 52 | jupyter nbextension list 2>&1 | grep -ie "jupyter-webrtc/extension.*enabled" - 53 | jupyter labextension list 2>&1 | grep -ie "jupyter-webrtc.*enabled.*ok" - 54 | 55 | - name: Run js tests 56 | run: | 57 | npm install 58 | npm run test 59 | working-directory: js 60 | 61 | - name: Build docs (Only on MacOS for build speed) 62 | if: matrix.os == 'macos-latest' 63 | run: | 64 | cd docs/source/ 65 | sphinx-build . _build/html 66 | cd ../.. 67 | 68 | build: 69 | runs-on: ubuntu-latest 70 | steps: 71 | - name: Checkout 72 | uses: actions/checkout@v2 73 | 74 | - name: Install Conda environment with Micromamba 75 | uses: mamba-org/provision-with-micromamba@main 76 | with: 77 | environment-name: ipywebrtc-dev 78 | environment-file: dev_environment.yml 79 | python-version: ${{ matrix.python-version }} 80 | mamba-version: "*" 81 | auto-activate-base: false 82 | channels: conda-forge 83 | 84 | - name: Build packages 85 | run: | 86 | python setup.py sdist bdist_wheel 87 | cd dist 88 | sha256sum * | tee SHA256SUMS 89 | - name: Upload builds 90 | uses: actions/upload-artifact@v2 91 | with: 92 | name: dist ${{ github.run_number }} 93 | path: ./dist 94 | 95 | install: 96 | runs-on: ${{ matrix.os }}-latest 97 | needs: [build] 98 | 99 | strategy: 100 | fail-fast: false 101 | matrix: 102 | os: [ubuntu, macos, windows] 103 | python: ["3.6", "3.9"] 104 | include: 105 | - python: "3.6" 106 | dist: "ipywebrtc*.tar.gz" 107 | - python: "3.9" 108 | dist: "ipywebrtc*.whl" 109 | 110 | steps: 111 
| - name: Checkout 112 | uses: actions/checkout@v2 113 | 114 | - name: Install Conda environment with Micromamba 115 | uses: mamba-org/provision-with-micromamba@main 116 | with: 117 | environment-name: ipywebrtc-dev 118 | environment-file: dev_environment.yml 119 | python-version: ${{ matrix.python-version }} 120 | mamba-version: "*" 121 | auto-activate-base: false 122 | channels: conda-forge 123 | 124 | - uses: actions/download-artifact@v2 125 | with: 126 | name: dist ${{ github.run_number }} 127 | path: ./dist 128 | 129 | - name: Install the package 130 | run: | 131 | cd dist 132 | pip install -vv ${{ matrix.dist }} 133 | 134 | - name: Test installation files 135 | run: | 136 | test -d $CONDA_PREFIX/share/jupyter/nbextensions/jupyter-webrtc 137 | test -f $CONDA_PREFIX/share/jupyter/nbextensions/jupyter-webrtc/extension.js 138 | test -f $CONDA_PREFIX/share/jupyter/nbextensions/jupyter-webrtc/index.js 139 | test -d $CONDA_PREFIX/share/jupyter/labextensions/jupyter-webrtc 140 | test -f $CONDA_PREFIX/share/jupyter/labextensions/jupyter-webrtc/package.json 141 | test -d $CONDA_PREFIX/share/jupyter/labextensions/jupyter-webrtc/static 142 | 143 | - name: Validate the nbextension 144 | run: jupyter nbextension list 2>&1 | grep "jupyter-webrtc/extension" 145 | 146 | - name: Validate the labextension 147 | run: jupyter labextension list 2>&1 | grep jupyter-webrtc 148 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info/ 2 | .ipynb_checkpoints/ 3 | dist/ 4 | build/ 5 | *.py[cod] 6 | node_modules/ 7 | 8 | # Compiled javascript 9 | ipywebrtc/static/ 10 | js/test_js/ 11 | share 12 | 13 | # OS X 14 | .DS_Store 15 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: 
"https://github.com/pre-commit/mirrors-prettier" 3 | rev: "v3.1.0" 4 | hooks: 5 | - id: prettier 6 | types: [javascript] 7 | stages: [commit] 8 | - repo: https://github.com/charliermarsh/ruff-pre-commit 9 | rev: "v0.1.9" 10 | hooks: 11 | - id: ruff 12 | stages: [commit] 13 | - id: ruff-format 14 | stages: [commit] 15 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | **/node_modules 3 | **/lib 4 | **/package.json -------------------------------------------------------------------------------- /.releash.py: -------------------------------------------------------------------------------- 1 | import glob 2 | 3 | from releash import * 4 | 5 | # these objects only tag when they are exe 6 | gitpush = ReleaseTargetGitPush() 7 | 8 | 9 | filenames_python = glob.glob("*") 10 | filenames_python.remove("js") 11 | # filenames_python.remove('notebooks') 12 | package_python = add_package(".", "py", distribution_name="ipywebrtc", filenames=filenames_python) 13 | 14 | version_python = VersionSource(package_python, "{path}/ipywebrtc/_version.py") 15 | gittag = ReleaseTargetGitTagVersion(version_source=version_python) 16 | 17 | package_python.version_source = version_python 18 | package_python.version_targets.append(VersionTarget(package_python, "{path}/ipywebrtc/_version.py")) 19 | 20 | package_python.release_targets.append(gittag) 21 | package_python.release_targets.append(ReleaseTargetSourceDist(package_python)) 22 | # core.release_targets.append(gitpush) 23 | # package_python.release_targets.append(ReleaseTargetCondaForge(package_python, '../feedstocks/ipywebrtc-feedstock')) 24 | 25 | 26 | # js part 27 | package_js = add_package("js", "js", distribution_name="ipywebrtc") 28 | 29 | version_js = VersionSource( 30 | package_js, 31 | "{path}/../ipywebrtc/_version.py", 32 | tuple_variable_name="__version_tuple_js__", 33 | ) 34 | 35 | 
package_js.version_source = version_js 36 | package_js.version_targets.append( 37 | VersionTarget( 38 | package_js, 39 | "{path}/../ipywebrtc/_version.py", 40 | tuple_variable_name="__version_tuple_js__", 41 | string_variable_name="__version_js__", 42 | ) 43 | ) 44 | package_js.version_targets.append(VersionTargetJson(package_js, "{path}/package.json")) 45 | 46 | gittag_js = ReleaseTargetGitTagVersion(version_source=version_js, postfix="_js") 47 | package_js.release_targets.append(gittag_js) 48 | package_js.release_targets.append(ReleaseTargetNpm(package_js)) 49 | # core.release_targets.append(gitpush) 50 | # package_python.release_targets.append(ReleaseTargetCondaForge(package_python, '../feedstocks/ipyvolume-feedstock')) 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Maarten Breddels 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | # include only by default in setuptools 43.0.0+ 3 | include pyproject.toml -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ipywebrtc 2 | 3 | [![Travis](https://travis-ci.org/maartenbreddels/ipywebrtc.svg?branch=master)](https://travis-ci.org/maartenbreddels/ipywebrtc) 4 | [![Documentation](https://readthedocs.org/projects/ipywebrtc/badge/?version=latest)](https://ipywebrtc.readthedocs.io/en/latest/) 5 | [![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/maartenbreddels/ipywebrtc/master?filepath=docs/source) 6 | [![Chat](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/jupyter-widgets/Lobby) 7 | 8 | WebRTC and MediaStream API exposed in the Jupyter notebook/lab. 9 | 10 | [See this tweet](https://twitter.com/maartenbreddels/status/1027995865024262144) for a demo screencast. 11 | 12 | # Why use ipywebrtc? 13 | 14 | Using ipywebrtc you can create a [MediaStream](api.html#ipywebrtc.webrtc.MediaStream) out of: 15 | 16 | - [Any ipywidget](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.WidgetStream). 17 | - A [video](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.VideoStream) file. 18 | - An [image](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.ImageStream) file. 19 | - An [audio](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.AudioStream) file. 
20 | - Your [webcam/camera](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.CameraStream). 21 | 22 | From this MediaStream you can: 23 | 24 | - [Record a movie](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.VideoRecorder). 25 | - [Record an image snapshot](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.ImageRecorder). 26 | - [Record an audio fragment](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.webrtc.AudioRecorder). 27 | - Stream it to peers using the simple [chat function](https://ipywebrtc.readthedocs.io/en/latest/api.html#ipywebrtc.chat) 28 | - [Use it as a texture in ipyvolume](https://twitter.com/maartenbreddels/status/894983501996584961) 29 | 30 | # Demos 31 | 32 | ## WebRTC and [ipyvolume](https://github.com/maartenbreddels/ipyvolume/) 33 | 34 | Use remote MediaStreams and show them in 3d using [ipyvolume](https://github.com/maartenbreddels/ipyvolume/). 35 | 36 | ![webrtc](https://user-images.githubusercontent.com/1765949/43977008-03dbfac0-9ce3-11e8-9bb9-4a5f8f2cc79a.gif) 37 | 38 | ## ImageRecorder 39 | 40 | Record and image from _any_ stream for postprocessing. 41 | 42 | ![recorder](https://user-images.githubusercontent.com/1765949/43978560-fe0bf500-9ce7-11e8-81b9-9d30d26d7492.gif) 43 | 44 | ## WidgetStream 45 | 46 | Turn _any_ widget into a MediaStream. 47 | 48 | ![widget-stream](https://user-images.githubusercontent.com/1765949/43977992-1668d534-9ce6-11e8-8fab-783105476e98.gif) 49 | 50 | # Installation 51 | 52 | To install: 53 | 54 | ``` 55 | $ pip install ipywebrtc # will auto enable for notebook >= 5.3 56 | ``` 57 | 58 | For a development installation (requires npm), 59 | 60 | ``` 61 | $ git clone https://github.com/maartenbreddels/ipywebrtc 62 | $ cd ipywebrtc 63 | $ pip install -e . 64 | $ jupyter nbextension install --py --symlink --sys-prefix ipywebrtc 65 | $ jupyter nbextension enable --py --sys-prefix ipywebrtc 66 | $ jupyter labextension develop . 
--overwrite 67 | ``` 68 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | - To release a new version of ipywebrtc on PyPI: 2 | 3 | Update \_version.py (set release version, remove 'dev') 4 | git add and git commit 5 | python setup.py sdist upload 6 | python setup.py bdist_wheel upload 7 | git tag -a X.X.X -m 'comment' 8 | Update \_version.py (add 'dev' and increment minor) 9 | git add and git commit 10 | git push 11 | git push --tags 12 | 13 | - To release a new version of jupyter-webrtc on NPM: 14 | 15 | # nuke the `dist` and `node_modules` 16 | 17 | git clean -fdx 18 | npm install 19 | npm publish 20 | -------------------------------------------------------------------------------- /binder/requirements.txt: -------------------------------------------------------------------------------- 1 | # for mybinder 2 | ipyleaflet 3 | pythreejs 4 | scikit-image 5 | pillow 6 | notebook 7 | ipywidgets 8 | ipywebrtc 9 | -------------------------------------------------------------------------------- /dev_environment.yml: -------------------------------------------------------------------------------- 1 | name: ipywebrtc-dev 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - pip 6 | - nodejs=16.* 7 | - yarn 8 | - jupyter-packaging 9 | - jupyterlab=3 10 | - notebook 11 | - ipywidgets>=7.6 12 | - ruff 13 | - nbsphinx 14 | - sphinx 15 | - sphinx_rtd_theme 16 | - pygments==2.6.1 17 | - jupyter-sphinx 18 | - pre-commit 19 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = ipywebrtc 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/environment.yml: -------------------------------------------------------------------------------- 1 | name: ipywebrtc_docs 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - ipywidgets 7 | - nbformat 8 | - nbsphinx 9 | - notebook>=4.2 10 | - python=3.6 11 | - sphinx>=1.4.6 12 | - sphinx_rtd_theme 13 | - tornado 14 | - python-dateutil 15 | - recommonmark 16 | - traitlets 17 | - ipywidgets 18 | - nodejs 19 | - pip 20 | - pip: 21 | - ipywebrtc 22 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | set SPHINXPROJ=ipywebrtc 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 
24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/source/AudioRecorder.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# AudioRecorder\n", 8 | "A [AudioRecorder](api.rst#ipywebrtc.webrtc.AudioRecorder) allows you to record audio from almost any stream object, e.g. from:\n", 9 | " \n", 10 | " * [VideoStream](api.rst#ipywebrtc.webrtc.VideoStream)\n", 11 | " * [AudioStream](api.rst#ipywebrtc.webrtc.AudioStream)\n", 12 | " * [WidgetStream](api.rst#ipywebrtc.webrtc.WidgetStream)\n", 13 | " * [CameraStream](api.rst#ipywebrtc.webrtc.CameraStream)" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "from ipywebrtc import VideoStream, AudioStream, AudioRecorder" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "video = VideoStream.from_url('./Big.Buck.Bunny.mp4')\n", 32 | "video" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "recorder = AudioRecorder(stream=video)\n", 42 | "recorder" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "video.playing = False" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "recorder.audio" 61 | ] 62 | }, 63 | { 64 | 
"cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "recorder.save('example.webm')" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [ 78 | "from ipywidgets import Audio \n", 79 | "\n", 80 | "example = Audio.from_file('example.webm')\n", 81 | "example" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "metadata": {}, 88 | "outputs": [], 89 | "source": [ 90 | "audio_stream = AudioStream.from_file('example.webm')\n", 91 | "audio_stream" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "recorder2 = AudioRecorder(stream=audio_stream)\n", 101 | "recorder2" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "audio_stream.playing = False" 111 | ] 112 | } 113 | ], 114 | "metadata": { 115 | "kernelspec": { 116 | "display_name": "Python 3 (ipykernel)", 117 | "language": "python", 118 | "name": "python3" 119 | }, 120 | "language_info": { 121 | "codemirror_mode": { 122 | "name": "ipython", 123 | "version": 3 124 | }, 125 | "file_extension": ".py", 126 | "mimetype": "text/x-python", 127 | "name": "python", 128 | "nbconvert_exporter": "python", 129 | "pygments_lexer": "ipython3", 130 | "version": "3.9.15" 131 | } 132 | }, 133 | "nbformat": 4, 134 | "nbformat_minor": 2 135 | } 136 | -------------------------------------------------------------------------------- /docs/source/AudioStream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# AudioStream\n", 8 | "A [AudioStream](api.rst#ipywebrtc.webrtc.AudioStream) is similar to the VideoStream, but for audio only.\n", 9 | " " 10 | ] 11 
| }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "from ipywebrtc import AudioStream" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": null, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "audio = AudioStream.from_url('Big.Buck.Bunny.mp3')\n", 28 | "audio" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "audio.playing = False" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": null, 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [] 46 | } 47 | ], 48 | "metadata": { 49 | "kernelspec": { 50 | "display_name": "Python 3 (ipykernel)", 51 | "language": "python", 52 | "name": "python3" 53 | }, 54 | "language_info": { 55 | "codemirror_mode": { 56 | "name": "ipython", 57 | "version": 3 58 | }, 59 | "file_extension": ".py", 60 | "mimetype": "text/x-python", 61 | "name": "python", 62 | "nbconvert_exporter": "python", 63 | "pygments_lexer": "ipython3", 64 | "version": "3.9.15" 65 | } 66 | }, 67 | "nbformat": 4, 68 | "nbformat_minor": 2 69 | } 70 | -------------------------------------------------------------------------------- /docs/source/Big.Buck.Bunny.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maartenbreddels/ipywebrtc/568595763af55bc93fdc424be3899242eebbda0c/docs/source/Big.Buck.Bunny.mp3 -------------------------------------------------------------------------------- /docs/source/Big.Buck.Bunny.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maartenbreddels/ipywebrtc/568595763af55bc93fdc424be3899242eebbda0c/docs/source/Big.Buck.Bunny.mp4 -------------------------------------------------------------------------------- /docs/source/CameraStream.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# CameraStream\n", 8 | "A [CameraStream](api.rst#ipywebrtc.webrtc.CameraStream) is a [MediaStream](api.rst#ipywebrtc.webrtc.MediaStream) from an attached camera device or webcam." 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": null, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "from ipywebrtc import CameraStream, ImageRecorder" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "## With constraints" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "You can pass [constraints](api.rst#ipywebrtc.webrtc.CameraStream.constraints) to the camera:" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "camera = CameraStream(constraints=\n", 41 | " {'facing_mode': 'user',\n", 42 | " 'audio': False,\n", 43 | " 'video': { 'width': 640, 'height': 480 }\n", 44 | " })\n", 45 | "camera" 46 | ] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "metadata": {}, 51 | "source": [ 52 | "## Front and back camera" 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "Or use the two convenience methods:\n", 60 | "\n", 61 | "* [CameraStream.facing_user](http://localhost:8000/api.rst#ipywebrtc.webrtc.CameraStream.facing_user)\n", 62 | "* [CameraStream.facing_environment](http://localhost:8000/api.rst#ipywebrtc.webrtc.CameraStream.facing_environment)" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "# this is a shorter way to get the user facing camera\n", 72 | "front_camera = CameraStream.facing_user(audio=False)\n", 73 | "# or the back facing camera\n", 74 | 
"back_camera = CameraStream.facing_environment(audio=False)" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "back_camera" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": {}, 89 | "source": [ 90 | "## Record images from the camera" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [ 99 | "image_recorder = ImageRecorder(stream=camera)\n", 100 | "image_recorder" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "import PIL.Image\n", 110 | "import PIL.ImageFilter\n", 111 | "import io\n", 112 | "im = PIL.Image.open(io.BytesIO(image_recorder.image.value))" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "im.filter(PIL.ImageFilter.BLUR)" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": { 128 | "scrolled": true 129 | }, 130 | "outputs": [], 131 | "source": [ 132 | "import numpy as np\n", 133 | "im_array = np.array(im)\n", 134 | "im_array" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [] 143 | } 144 | ], 145 | "metadata": { 146 | "kernelspec": { 147 | "display_name": "Python 3 (ipykernel)", 148 | "language": "python", 149 | "name": "python3" 150 | }, 151 | "language_info": { 152 | "codemirror_mode": { 153 | "name": "ipython", 154 | "version": 3 155 | }, 156 | "file_extension": ".py", 157 | "mimetype": "text/x-python", 158 | "name": "python", 159 | "nbconvert_exporter": "python", 160 | "pygments_lexer": "ipython3", 161 | "version": "3.9.15" 162 | } 163 | }, 164 | "nbformat": 4, 165 | "nbformat_minor": 2 166 | } 167 | 
-------------------------------------------------------------------------------- /docs/source/ImageRecorder.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# ImageRecorder\n", 8 | "A [ImageRecorder](api.rst#ipywebrtc.webrtc.ImageRecorder) allows you to record a screenshot from any stream object, e.g. from:\n", 9 | " \n", 10 | " * [VideoStream](api.rst#ipywebrtc.webrtc.VideoStream)\n", 11 | " * [WidgetStream](api.rst#ipywebrtc.webrtc.WidgetStream)\n", 12 | " * [CameraStream](api.rst#ipywebrtc.webrtc.CameraStream)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "import ipywidgets as widgets\n", 22 | "from ipywebrtc import ImageRecorder, VideoStream" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "video = VideoStream.from_url('Big.Buck.Bunny.mp4')\n", 32 | "video" 33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "metadata": {}, 38 | "source": [ 39 | "Using the image recorder, you can take screenshot of the stream clicking the camera button" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": { 46 | "scrolled": true 47 | }, 48 | "outputs": [], 49 | "source": [ 50 | "image_recorder = ImageRecorder(stream=video)\n", 51 | "image_recorder" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "image_recorder.image" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "Or do it, programatically:" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "image_recorder.recording = True" 77 | ] 78 
| }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "image_recorder.autosave = False" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "image_recorder.download()" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "image_recorder.image.height" 104 | ] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "metadata": {}, 109 | "source": [ 110 | "The data is PNG encoded (by default), so we show how to use PIL to read in the data" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "import PIL.Image\n", 120 | "import PIL.ImageFilter\n", 121 | "import io\n", 122 | "im = PIL.Image.open(io.BytesIO(image_recorder.image.value))" 123 | ] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "metadata": {}, 128 | "source": [ 129 | "PIL Images display by default as image in the notebook. Calling the filter methods returns a new image which gets displayed directly." 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "im.filter(PIL.ImageFilter.BLUR)" 139 | ] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "metadata": {}, 144 | "source": [ 145 | "## Example with scikit image\n", 146 | "We first convert the png encoded data to raw pixel values (as a numpy array)." 
147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": { 153 | "scrolled": true 154 | }, 155 | "outputs": [], 156 | "source": [ 157 | "import numpy as np\n", 158 | "im_array = np.array(im)\n", 159 | "im_array" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "Now we can do easy manipulatios, such as reordering the channels (red, green, blue, alpha)" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "PIL.Image.fromarray(im_array[...,::-1])" 176 | ] 177 | }, 178 | { 179 | "cell_type": "markdown", 180 | "metadata": {}, 181 | "source": [ 182 | "Or build a slightly more sophisticated example using scikit-image (run this notebook with a live kernel, such as mybinder for this to work)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": null, 188 | "metadata": { 189 | "scrolled": false 190 | }, 191 | "outputs": [], 192 | "source": [ 193 | "from skimage.filters import roberts, sobel, scharr, prewitt\n", 194 | "from skimage.color import rgb2gray\n", 195 | "from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value\n", 196 | "from skimage import filters\n", 197 | "\n", 198 | "\n", 199 | "image = widgets.Image()\n", 200 | "output = widgets.Output()\n", 201 | "filter_options = [('roberts', roberts), ('sobel', sobel), ('scharr', scharr), ('prewitt', prewitt)]\n", 202 | "filter_widget = widgets.ToggleButtons(options=filter_options)\n", 203 | "\n", 204 | "@output.capture()\n", 205 | "def update_image(change):\n", 206 | " # turn into nparray\n", 207 | " im_in = PIL.Image.open(io.BytesIO(image_recorder.image.value))\n", 208 | " im_array = np.array(im_in)[...,:3] # no alpha\n", 209 | " \n", 210 | " # filter\n", 211 | " filter_function = filter_widget.value\n", 212 | " im_array_edges = adapt_rgb(each_channel)(filter_function)(im_array)\n", 213 | " 
im_array_edges = ((1-im_array_edges) * 255).astype(np.uint8)\n", 214 | " im_out = PIL.Image.fromarray(im_array_edges)\n", 215 | " \n", 216 | " # store in image widget\n", 217 | " f = io.BytesIO()\n", 218 | " im_out.save(f, format='png')\n", 219 | " image.value = f.getvalue()\n", 220 | "\n", 221 | "image_recorder.image.observe(update_image, 'value')\n", 222 | "filter_widget.observe(update_image, 'value')\n", 223 | "widgets.VBox([filter_widget, video, widgets.HBox([image_recorder, image]), output])" 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": null, 229 | "metadata": {}, 230 | "outputs": [], 231 | "source": [] 232 | } 233 | ], 234 | "metadata": { 235 | "kernelspec": { 236 | "display_name": "Python 3 (ipykernel)", 237 | "language": "python", 238 | "name": "python3" 239 | }, 240 | "language_info": { 241 | "codemirror_mode": { 242 | "name": "ipython", 243 | "version": 3 244 | }, 245 | "file_extension": ".py", 246 | "mimetype": "text/x-python", 247 | "name": "python", 248 | "nbconvert_exporter": "python", 249 | "pygments_lexer": "ipython3", 250 | "version": "3.9.15" 251 | } 252 | }, 253 | "nbformat": 4, 254 | "nbformat_minor": 2 255 | } 256 | -------------------------------------------------------------------------------- /docs/source/VideoRecorder.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# VideoRecorder\n", 8 | "A [VideoRecorder](api.rst#ipywebrtc.webrtc.VideoRecorder) allows you to record any stream object, e.g. 
from:\n", 9 | " \n", 10 | " * [VideoStream](api.rst#ipywebrtc.webrtc.VideoStream)\n", 11 | " * [WidgetStream](api.rst#ipywebrtc.webrtc.WidgetStream)\n", 12 | " * [CameraStream](api.rst#ipywebrtc.webrtc.CameraStream)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "from ipywebrtc import VideoStream, VideoRecorder" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "video = VideoStream.from_url('./Big.Buck.Bunny.mp4')" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": null, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "video" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": { 46 | "scrolled": true 47 | }, 48 | "outputs": [], 49 | "source": [ 50 | "recorder = VideoRecorder(stream=video)\n", 51 | "recorder" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "video.playing = False" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "recorder.video" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "Use 'record' button for recording.\n", 77 | "Programatical control is available using the [MediaRecorder.record](api.rst#ipywebrtc.webrtc.MediaRecorder.record) trait." 
78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "recorder.recording = True" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "recorder.recording = False" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": {}, 101 | "source": [ 102 | "Saving can be done by clicking the download button, or programmatically using the save method. If [autosave](api.rst#ipywebrtc.webrtc.MediaRecorder.autosave) is True, the recording will be saved directly to disk." 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "recorder.save('example.webm')" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "from ipywidgets import Video \n", 121 | "\n", 122 | "example = Video.from_file('example.webm')\n", 123 | "example" 124 | ] 125 | } 126 | ], 127 | "metadata": { 128 | "kernelspec": { 129 | "display_name": "Python 3 (ipykernel)", 130 | "language": "python", 131 | "name": "python3" 132 | }, 133 | "language_info": { 134 | "codemirror_mode": { 135 | "name": "ipython", 136 | "version": 3 137 | }, 138 | "file_extension": ".py", 139 | "mimetype": "text/x-python", 140 | "name": "python", 141 | "nbconvert_exporter": "python", 142 | "pygments_lexer": "ipython3", 143 | "version": "3.9.15" 144 | } 145 | }, 146 | "nbformat": 4, 147 | "nbformat_minor": 2 148 | } 149 | -------------------------------------------------------------------------------- /docs/source/VideoStream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# VideoStream\n", 8 | "A 
[VideoStream](api.rst#ipywebrtc.webrtc.VideoStream) is a [MediaStream](api.rst#ipywebrtc.webrtc.MediaStream) from an attached video file or url." 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": null, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "from ipywebrtc import VideoStream" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "## Local file\n", 25 | "You can create a video stream from a local file, note that the content of the file is embedded in the widget, meaning your notebook file can become quite large." 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# commented out since it increases the size of the notebook a lot\n", 35 | "# video = VideoStream.from_file('./Big.Buck.Bunny.mp4')\n", 36 | "# video" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "metadata": {}, 43 | "outputs": [], 44 | "source": [ 45 | "# video" 46 | ] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "metadata": {}, 51 | "source": [ 52 | "## URL\n", 53 | "A URL is also supported, but it must respect the same-origin policy (e.g. it must be hosted from the same server as the Javascript is executed from)." 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "# video2 = VideoStream.from_url('http://localhost:8888/path_to_your_hosted_file.mp4')\n", 63 | "video2 = VideoStream.from_url('./Big.Buck.Bunny.mp4')\n", 64 | "video2" 65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [ 71 | "In this example, video2 does not include the data of the video itself, only the url." 
72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "metadata": {}, 77 | "source": [ 78 | "## Download\n", 79 | "For convenience, if a video is not same-origin, the below code will download it and put the content of the file in the widget (note again that the notebook will be large)." 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "# commented out since it increases the size of the notebook a lot\n", 89 | "# video3 = VideoStream.from_download('https://webrtc.github.io/samples/src/video/chrome.webm')\n", 90 | "# video3" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "metadata": {}, 96 | "source": [ 97 | "## Controlling\n", 98 | "You can control a video for intance by linking a ToggleButton to a VideoStream:" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "import ipywidgets as widgets\n", 108 | "\n", 109 | "play_button = widgets.ToggleButton(description=\"Play\")\n", 110 | "widgets.jslink((play_button, 'value'), (video2, 'playing'))\n", 111 | "widgets.VBox(children=[video2, play_button])" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [] 120 | } 121 | ], 122 | "metadata": { 123 | "kernelspec": { 124 | "display_name": "Python 3 (ipykernel)", 125 | "language": "python", 126 | "name": "python3" 127 | }, 128 | "language_info": { 129 | "codemirror_mode": { 130 | "name": "ipython", 131 | "version": 3 132 | }, 133 | "file_extension": ".py", 134 | "mimetype": "text/x-python", 135 | "name": "python", 136 | "nbconvert_exporter": "python", 137 | "pygments_lexer": "ipython3", 138 | "version": "3.9.15" 139 | } 140 | }, 141 | "nbformat": 4, 142 | "nbformat_minor": 2 143 | } 144 | -------------------------------------------------------------------------------- 
/docs/source/WidgetStream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# WidgetStream\n", 8 | "A [WidgetStream](api.rst#ipywebrtc.webrtc.VideoStream) creates a [MediaStream](api.rst#ipywebrtc.webrtc.MediaStream) out of any widget." 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": null, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "from ipywebrtc import WidgetStream, VideoStream" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "### Example with pythreejs: streaming of a webgl canvas" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "from pythreejs import Mesh, SphereGeometry, MeshLambertMaterial, PerspectiveCamera, DirectionalLight, Scene, AmbientLight, Renderer, OrbitControls\n", 34 | "ball = Mesh(\n", 35 | " geometry=SphereGeometry(radius=1), \n", 36 | " material=MeshLambertMaterial(color='red'),\n", 37 | " position=[2, 1, 0]\n", 38 | ")\n", 39 | "\n", 40 | "c = PerspectiveCamera(\n", 41 | " position=[0, 5, 5], up=[0, 1, 0],\n", 42 | " children=[DirectionalLight(color='white', position=[3, 5, 1], intensity=0.5)]\n", 43 | ")\n", 44 | "\n", 45 | "scene = Scene(children=[ball, c, AmbientLight(color='#777777')])\n", 46 | "\n", 47 | "renderer = Renderer(\n", 48 | " camera=c, \n", 49 | " scene=scene, \n", 50 | " controls=[OrbitControls(controlling=c)]\n", 51 | ")\n", 52 | "\n", 53 | "renderer" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "# the webgl_stream will be updated after the scene has changed (so drag the above ball around if nothing happens)\n", 63 | "webgl_stream = WidgetStream(widget=renderer)\n", 64 | "webgl_stream" 65 | ] 66 | }, 67 | { 68 | 
"cell_type": "code", 69 | "execution_count": null, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "# You can limit the fps\n", 74 | "webgl_stream2 = WidgetStream(widget=renderer, max_fps=5)\n", 75 | "webgl_stream2" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "metadata": {}, 81 | "source": [ 82 | "### Example with ipywidgets: streaming of a slider widget" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "from ipywidgets import FloatSlider\n", 92 | "slider = FloatSlider(\n", 93 | " value=7.5,\n", 94 | " step=0.1,\n", 95 | " description='Test:',\n", 96 | " disabled=False,\n", 97 | " continuous_update=False,\n", 98 | " orientation='horizontal',\n", 99 | " readout=True,\n", 100 | " readout_format='.1f',\n", 101 | ")\n", 102 | "\n", 103 | "slider" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [ 112 | "widget_stream = WidgetStream(widget=slider, max_fps=1)\n", 113 | "widget_stream" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": null, 119 | "metadata": {}, 120 | "outputs": [], 121 | "source": [ 122 | "# Close the stream\n", 123 | "widget_stream.close()" 124 | ] 125 | }, 126 | { 127 | "cell_type": "markdown", 128 | "metadata": {}, 129 | "source": [ 130 | "### Example with ipyleaflet: streaming of a map widget" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "from ipyleaflet import Map\n", 140 | "m = Map(center=(46, 14), zoom=5)\n", 141 | "m" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "map_stream = WidgetStream(widget=m, max_fps=1)\n", 151 | "map_stream" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | 
"execution_count": null, 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [ 160 | "map_stream.close()" 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": null, 166 | "metadata": {}, 167 | "outputs": [], 168 | "source": [] 169 | } 170 | ], 171 | "metadata": { 172 | "kernelspec": { 173 | "display_name": "Python 3 (ipykernel)", 174 | "language": "python", 175 | "name": "python3" 176 | }, 177 | "language_info": { 178 | "codemirror_mode": { 179 | "name": "ipython", 180 | "version": 3 181 | }, 182 | "file_extension": ".py", 183 | "mimetype": "text/x-python", 184 | "name": "python", 185 | "nbconvert_exporter": "python", 186 | "pygments_lexer": "ipython3", 187 | "version": "3.9.15" 188 | } 189 | }, 190 | "nbformat": 4, 191 | "nbformat_minor": 2 192 | } 193 | -------------------------------------------------------------------------------- /docs/source/api.rst: -------------------------------------------------------------------------------- 1 | API docs 2 | ======== 3 | 4 | Note that :mod:`ipywebrtc.webrtc` is imported in the ipywebrtc namespace, to you can access ipywebrtc.CameraStream instead of :class:`ipywebrtc.webrtc.CameraStream`. 5 | 6 | 7 | ipywebrtc 8 | --------- 9 | 10 | .. automodule:: ipywebrtc 11 | :members: chat 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | ipywebrtc.webrtc 16 | ---------------- 17 | 18 | .. 
automodule:: ipywebrtc.webrtc 19 | :members: MediaStream, VideoStream, AudioStream, CameraStream, WidgetStream, ImageStream, Recorder, VideoRecorder, ImageRecorder, AudioRecorder, WebRTCPeer, WebRTCRoom, WebRTCRoomLocal, WebRTCRoomMqtt 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # ipywebrtc documentation build configuration file, created by 5 | # sphinx-quickstart on Thu Aug 10 19:59:03 2017. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | # 20 | # import os 21 | # import sys 22 | # sys.path.insert(0, os.path.abspath('.')) 23 | 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 28 | # 29 | # needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 
34 | extensions = [ 35 | "sphinx.ext.autodoc", 36 | "sphinx.ext.mathjax", 37 | "sphinx.ext.viewcode", 38 | "sphinx.ext.githubpages", 39 | "sphinx.ext.napoleon", # support for NumPy-style docstrings 40 | "nbsphinx", 41 | ] 42 | 43 | # Add any paths that contain templates here, relative to this directory. 44 | templates_path = ["_templates"] 45 | 46 | napoleon_use_rtype = False 47 | 48 | html_extra_path = ["ipyvolume.mp4", "Big.Buck.Bunny.mp3", "Big.Buck.Bunny.mp4"] 49 | # The suffix(es) of source filenames. 50 | # You can specify multiple suffix as a list of string: 51 | # 52 | source_suffix = [".rst"] 53 | 54 | # The master toctree document. 55 | master_doc = "index" 56 | 57 | # General information about the project. 58 | project = "ipywebrtc" 59 | copyright = "2017, Maarten Breddels" 60 | author = "Maarten Breddels" 61 | 62 | # The version info for the project you're documenting, acts as replacement for 63 | # |version| and |release|, also used in various other places throughout the 64 | # built documents. 65 | # 66 | # from https://github.com/ipython/ipywidgets/blob/master/docs/source/conf.py 67 | _release = {} 68 | exec( 69 | compile( 70 | open("../../ipywebrtc/_version.py").read(), 71 | "../../ipywebrtc/_version.py", 72 | "exec", 73 | ), 74 | _release, 75 | ) 76 | version = ".".join(map(str, _release["version_info"][:2])) 77 | release = _release["__version__"] 78 | 79 | # The language for content autogenerated by Sphinx. Refer to documentation 80 | # for a list of supported languages. 81 | # 82 | # This is also used if you do content translation via gettext catalogs. 83 | # Usually you set "language" from the command line for these cases. 84 | language = None 85 | 86 | # List of patterns, relative to source directory, that match files and 87 | # directories to ignore when looking for source files. 
88 | # This patterns also effect to html_static_path and html_extra_path 89 | exclude_patterns = [] 90 | 91 | # The name of the Pygments (syntax highlighting) style to use. 92 | pygments_style = "sphinx" 93 | 94 | # If true, `todo` and `todoList` produce output, else they produce nothing. 95 | todo_include_todos = False 96 | 97 | 98 | # -- Options for HTML output ---------------------------------------------- 99 | 100 | # The theme to use for HTML and HTML Help pages. See the documentation for 101 | # a list of builtin themes. 102 | # 103 | html_theme = "alabaster" 104 | try: 105 | import sphinx_rtd_theme 106 | 107 | html_theme = "sphinx_rtd_theme" 108 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 109 | except Exception: 110 | print("rtd theme not found") 111 | 112 | # Theme options are theme-specific and customize the look and feel of a theme 113 | # further. For a list of options available for each theme, see the 114 | # documentation. 115 | # 116 | # html_theme_options = {} 117 | 118 | # Add any paths that contain custom static files (such as style sheets) here, 119 | # relative to this directory. They are copied after the builtin static files, 120 | # so a file named "default.css" will overwrite the builtin "default.css". 121 | # html_static_path = ['_static'] 122 | 123 | 124 | # -- Options for HTMLHelp output ------------------------------------------ 125 | 126 | # Output file base name for HTML help builder. 127 | htmlhelp_basename = "ipywebrtcdoc" 128 | 129 | 130 | # -- Options for LaTeX output --------------------------------------------- 131 | 132 | latex_elements = { 133 | # The paper size ('letterpaper' or 'a4paper'). 134 | # 135 | # 'papersize': 'letterpaper', 136 | # The font size ('10pt', '11pt' or '12pt'). 137 | # 138 | # 'pointsize': '10pt', 139 | # Additional stuff for the LaTeX preamble. 
140 | # 141 | # 'preamble': '', 142 | # Latex figure (float) alignment 143 | # 144 | # 'figure_align': 'htbp', 145 | } 146 | 147 | # Grouping the document tree into LaTeX files. List of tuples 148 | # (source start file, target name, title, 149 | # author, documentclass [howto, manual, or own class]). 150 | latex_documents = [ 151 | ( 152 | master_doc, 153 | "ipywebrtc.tex", 154 | "ipywebrtc Documentation", 155 | "Maarten Breddels", 156 | "manual", 157 | ), 158 | ] 159 | 160 | 161 | # -- Options for manual page output --------------------------------------- 162 | 163 | # One entry per manual page. List of tuples 164 | # (source start file, name, description, authors, manual section). 165 | man_pages = [(master_doc, "ipywebrtc", "ipywebrtc Documentation", [author], 1)] 166 | 167 | 168 | # -- Options for Texinfo output ------------------------------------------- 169 | 170 | # Grouping the document tree into Texinfo files. List of tuples 171 | # (source start file, target name, title, author, 172 | # dir menu entry, description, category) 173 | texinfo_documents = [ 174 | ( 175 | master_doc, 176 | "ipywebrtc", 177 | "ipywebrtc Documentation", 178 | author, 179 | "ipywebrtc", 180 | "One line description of project.", 181 | "Miscellaneous", 182 | ), 183 | ] 184 | 185 | nbsphinx_allow_errors = True 186 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. ipywebrtc documentation master file, created by 2 | sphinx-quickstart on Thu Aug 10 19:59:03 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to IPyWebRTC's documentation! 7 | ===================================== 8 | 9 | WebRTC and MediaStream API exposed in the Jupyter notebook/lab. 10 | 11 | `IPyWebRTC's GitHub repo `_. 12 | 13 | `See this tweet `_ for a demo screencast. 
14 | 15 | Using ipywebrtc you can create a `MediaStream `_ out of: 16 | * `Any ipywidget `_. 17 | * A `video `_ file. 18 | * An `image `_ file. 19 | * An `audio `_ file. 20 | * Your `webcam/camera `_. 21 | 22 | From this MediaStream you can: 23 | 24 | * `Record a movie `_. 25 | * `Record an image snapshot `_. 26 | * `Record an audio fragment `_. 27 | * Stream it to peers using the simple `chat function `_. 28 | * `Use it as a texture in ipyvolume `_. 29 | 30 | 31 | Installation 32 | ============ 33 | 34 | Pip users:: 35 | 36 | $ pip install ipywebrtc # will auto enable for notebook >= 5.3 37 | $ jupyter labextension install jupyter-webrtc # for jupyter lab 38 | 39 | 40 | Conda users:: 41 | 42 | $ conda install -c conda-forge ipywebrtc 43 | $ jupyter labextension install jupyter-webrtc # for jupyter lab 44 | 45 | 46 | 47 | 48 | 49 | .. toctree:: 50 | :maxdepth: 2 51 | :caption: Examples and API docs: 52 | 53 | VideoStream.ipynb 54 | CameraStream.ipynb 55 | AudioStream.ipynb 56 | WidgetStream.ipynb 57 | VideoRecorder.ipynb 58 | ImageRecorder.ipynb 59 | AudioRecorder.ipynb 60 | api 61 | 62 | 63 | Demos 64 | ===== 65 | 66 | WebRTC and `ipyvolume`_ 67 | ----------------------- 68 | 69 | Use remote MediaStreams and show them in 3d using `ipyvolume`_. 70 | 71 | .. figure:: https://user-images.githubusercontent.com/1765949/43977008-03dbfac0-9ce3-11e8-9bb9-4a5f8f2cc79a.gif 72 | :alt: webrtc 73 | 74 | webrtc 75 | 76 | ImageRecorder 77 | ------------- 78 | 79 | Record and image from *any* stream for postprocessing. 80 | 81 | .. figure:: https://user-images.githubusercontent.com/1765949/43978560-fe0bf500-9ce7-11e8-81b9-9d30d26d7492.gif 82 | :alt: recorder 83 | 84 | recorder 85 | 86 | WidgetStream 87 | ------------ 88 | 89 | Turn *any* widget into a MediaStream. 90 | 91 | .. figure:: https://user-images.githubusercontent.com/1765949/43977992-1668d534-9ce6-11e8-8fab-783105476e98.gif 92 | :alt: widget-stream 93 | 94 | widget-stream 95 | 96 | .. 
_ipyvolume: https://github.com/maartenbreddels/ipyvolume/ 97 | 98 | 99 | Indices and tables 100 | ================== 101 | 102 | * :ref:`genindex` 103 | * :ref:`modindex` 104 | * :ref:`search` 105 | -------------------------------------------------------------------------------- /docs/source/ipyvolume.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maartenbreddels/ipywebrtc/568595763af55bc93fdc424be3899242eebbda0c/docs/source/ipyvolume.mp4 -------------------------------------------------------------------------------- /etc/jupyter/nbconfig/notebook.d/jupyter-webrtc.json: -------------------------------------------------------------------------------- 1 | { 2 | "load_extensions": { 3 | "jupyter-webrtc/extension": true 4 | } 5 | } -------------------------------------------------------------------------------- /ipywebrtc/__init__.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import ipywidgets as widgets 4 | from IPython.display import display 5 | 6 | from ._version import __version__, version_info # noqa 7 | from .webrtc import CameraStream, WebRTCRoomMqtt # noqa 8 | 9 | 10 | def _prefix(): 11 | import sys 12 | from pathlib import Path 13 | 14 | prefix = sys.prefix 15 | here = Path(__file__).parent 16 | # for when in dev mode 17 | if (here.parent / "share/jupyter/nbextensions/jupyter-webrtc").parent.exists(): 18 | prefix = here.parent 19 | return prefix 20 | 21 | 22 | def _jupyter_labextension_paths(): 23 | return [ 24 | { 25 | "src": f"{_prefix()}/share/jupyter/labextensions/jupyter-webrtc/", 26 | "dest": "jupyter-webrtc", 27 | } 28 | ] 29 | 30 | 31 | def _jupyter_nbextension_paths(): 32 | return [ 33 | { 34 | "section": "notebook", 35 | "src": f"{_prefix()}/share/jupyter/nbextensions/jupyter-webrtc/", 36 | "dest": "jupyter-webrtc", 37 | "require": "jupyter-webrtc/extension", 38 | } 39 | ] 40 | 41 | 42 | def _random_room(): 43 | 
def chat(room=None, stream=None, **kwargs):
    """Quick setup for a chatroom.

    :param str room: Roomname, if not given, a random sequence is generated and printed.
    :param MediaStream stream: The media stream to share, if not given a CameraStream will be created.
    :rtype: WebRTCRoom

    """
    # Fall back to a freshly generated room name, announcing it to the user.
    room_name = room if room is not None else _random_room()
    if room is None:
        print("room =", room_name)
    # Default media source is the local webcam.
    media = stream if stream is not None else CameraStream()
    webrtc_room = WebRTCRoomMqtt(stream=media, room=room_name)
    # Mirror the room's streams into an HBox so every peer's stream shows up.
    stream_box = widgets.HBox(children=[])
    widgets.jslink((webrtc_room, "streams"), (stream_box, "children"))
    display(stream_box)
    return webrtc_room
@register
class WidgetStream(MediaStream):
    """Represents a widget media source.

    Captures the rendered view of an arbitrary ``ipywidgets.DOMWidget`` and
    exposes it as a MediaStream that recorders or WebRTC rooms can consume.
    """

    _model_name = Unicode("WidgetStreamModel").tag(sync=True)
    _view_name = Unicode("WidgetStreamView").tag(sync=True)

    # The widget whose rendered view is the source of the stream.
    widget = Instance(
        DOMWidget,
        allow_none=False,
        help="An instance of ipywidgets.DOMWidget that will be the source of the MediaStream.",
    ).tag(sync=True, **widget_serialization)
    # None means: capture only when new data is rendered, not at a fixed rate.
    # NOTE: help string fixed ("valeus" -> "value", "amount" -> "number").
    max_fps = Int(
        None,
        allow_none=True,
        help="(int, default None) The maximum number of frames per second to capture, or only on new data when the value is None.",
    ).tag(sync=True)
    # Internal flag synced with the frontend; presumably triggers
    # html2canvas-based capture on the JS side — confirm in js/src/webrtc.js.
    _html2canvas_start_streaming = Bool(False).tag(sync=True)

    @validate("max_fps")
    def _valid_fps(self, proposal):
        """Reject negative frame rates; ``None`` (and 0) remain allowed."""
        if proposal["value"] is not None and proposal["value"] < 0:
            raise TraitError("max_fps attribute must be a positive integer")
        return proposal["value"]
135 | **kwargs 136 | Extra keyword arguments for `ImageStream` 137 | """ 138 | ext = os.path.splitext(url)[1] 139 | if ext: 140 | format = ext[1:] 141 | image = Image(value=urlopen(url).read(), format=format) 142 | return cls(image=image, **kwargs) 143 | 144 | 145 | @register 146 | class VideoStream(MediaStream): 147 | """Represent a stream of a video element""" 148 | 149 | _model_name = Unicode("VideoStreamModel").tag(sync=True) 150 | 151 | video = Instance( 152 | Video, 153 | allow_none=False, 154 | help="An ipywidgets.Video instance that will be the source of the media stream.", 155 | ).tag(sync=True, **widget_serialization) 156 | playing = Bool(True, help="Plays the videostream or pauses it.").tag(sync=True) 157 | 158 | @classmethod 159 | def from_file(cls, filename, **kwargs): 160 | """Create a `VideoStream` from a local file. 161 | 162 | Parameters 163 | ---------- 164 | filename: str 165 | The location of a file to read into the value from disk. 166 | **kwargs 167 | Extra keyword arguments for `VideoStream` 168 | """ 169 | video = Video.from_file(filename, autoplay=False, controls=False) 170 | return cls(video=video, **kwargs) 171 | 172 | @classmethod 173 | def from_url(cls, url, **kwargs): 174 | """Create a `VideoStream` from a url. 175 | 176 | This will create a `VideoStream` from a Video using its url 177 | 178 | Parameters 179 | ---------- 180 | url: str 181 | The url of the file that will be used for the .video trait. 182 | **kwargs 183 | Extra keyword arguments for `VideoStream` 184 | """ 185 | video = Video.from_url(url, autoplay=False, controls=False) 186 | return cls(video=video, **kwargs) 187 | 188 | @classmethod 189 | def from_download(cls, url, **kwargs): 190 | """Create a `VideoStream` from a url by downloading 191 | 192 | Parameters 193 | ---------- 194 | url: str 195 | The url of the file that will be downloaded and its bytes 196 | assigned to the value trait of the video trait.
197 | **kwargs 198 | Extra keyword arguments for `VideoStream` 199 | 200 | """ 201 | ext = os.path.splitext(url)[1] 202 | if ext: 203 | format = ext[1:] 204 | video = Video(value=urlopen(url).read(), format=format, autoplay=False, controls=False) 205 | return cls(video=video, **kwargs) 206 | 207 | 208 | @register 209 | class AudioStream(MediaStream): 210 | """Represent a stream of an audio element""" 211 | 212 | _model_name = Unicode("AudioStreamModel").tag(sync=True) 213 | _view_name = Unicode("AudioStreamView").tag(sync=True) 214 | 215 | audio = Instance( 216 | Audio, 217 | help="An ipywidgets.Audio instance that will be the source of the media stream.", 218 | ).tag(sync=True, **widget_serialization) 219 | playing = Bool(True, help="Plays the audiostream or pauses it.").tag(sync=True) 220 | 221 | @classmethod 222 | def from_file(cls, filename, **kwargs): 223 | """Create a `AudioStream` from a local file. 224 | 225 | Parameters 226 | ---------- 227 | filename: str 228 | The location of a file to read into the audio value from disk. 229 | **kwargs 230 | Extra keyword arguments for `AudioStream` 231 | """ 232 | audio = Audio.from_file(filename, autoplay=False, controls=False) 233 | return cls(audio=audio, **kwargs) 234 | 235 | @classmethod 236 | def from_url(cls, url, **kwargs): 237 | """Create a `AudioStream` from a url. 238 | 239 | This will create a `AudioStream` from an Audio using its url 240 | 241 | Parameters 242 | ---------- 243 | url: str 244 | The url of the file that will be used for the .audio trait. 
245 | **kwargs 246 | Extra keyword arguments for `AudioStream` 247 | """ 248 | audio = Audio.from_url(url, autoplay=False, controls=False) 249 | return cls(audio=audio, **kwargs) 250 | 251 | @classmethod 252 | def from_download(cls, url, **kwargs): 253 | """Create an `AudioStream` from a url by downloading 254 | 255 | Parameters 256 | ---------- 257 | url: str 258 | The url of the file that will be downloaded and its bytes 259 | assigned to the value trait of the audio trait. 260 | **kwargs 261 | Extra keyword arguments for `AudioStream` 262 | """ 263 | ext = os.path.splitext(url)[1] 264 | if ext: 265 | format = ext[1:] 266 | audio = Audio(value=urlopen(url).read(), format=format, autoplay=False, controls=False) 267 | return cls(audio=audio, **kwargs) 268 | 269 | 270 | @register 271 | class CameraStream(MediaStream): 272 | """Represents a media source by a camera/webcam/microphone using 273 | getUserMedia. See 274 | https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia 275 | for more detail.
276 | The constraints trait can be set to specify constraints for the camera or 277 | microphone, which is described in the documentation of getUserMedia, such 278 | as in the link above. 279 | Two convenience methods are available to easily get access to the 'front' 280 | and 'back' camera, when present. 281 | 282 | >>> CameraStream.facing_user(audio=False) 283 | >>> CameraStream.facing_environment(audio=False) 284 | """ 285 | 286 | _model_name = Unicode("CameraStreamModel").tag(sync=True) 287 | 288 | # Specify audio constraint and video constraint 289 | # see https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia 290 | constraints = Dict( 291 | {"audio": True, "video": True}, 292 | help="Constraints for the camera, see https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia for details.", 293 | ).tag(sync=True) 294 | 295 | @classmethod 296 | def facing_user(cls, audio=True, **kwargs): 297 | """Convenience method to get the camera facing the user (often front) 298 | 299 | Parameters 300 | ---------- 301 | audio: bool 302 | Capture audio or not 303 | **kwargs 304 | Extra keyword arguments passed to the `CameraStream` 305 | """ 306 | return cls._facing(facing_mode="user", audio=audio, **kwargs) 307 | 308 | @classmethod 309 | def facing_environment(cls, audio=True, **kwargs): 310 | """Convenience method to get the camera facing the environment (often the back) 311 | 312 | Parameters 313 | ---------- 314 | audio: bool 315 | Capture audio or not 316 | **kwargs 317 | Extra keyword arguments passed to the `CameraStream` 318 | """ 319 | return cls._facing(facing_mode="environment", audio=audio, **kwargs) 320 | 321 | @staticmethod 322 | def _facing(facing_mode, audio=True, **kwargs): 323 | kwargs = dict(kwargs) 324 | constraints = kwargs.pop("constraints", {}) 325 | if "audio" not in constraints: 326 | constraints["audio"] = audio 327 | if "video" not in constraints: 328 | constraints["video"] = {} 329 |
constraints["video"]["facingMode"] = facing_mode 330 | return CameraStream(constraints=constraints, **kwargs) 331 | 332 | 333 | class Recorder(DOMWidget): 334 | _model_module = Unicode("jupyter-webrtc").tag(sync=True) 335 | _view_module = Unicode("jupyter-webrtc").tag(sync=True) 336 | _view_module_version = Unicode(semver_range_frontend).tag(sync=True) 337 | _model_module_version = Unicode(semver_range_frontend).tag(sync=True) 338 | 339 | stream = Instance( 340 | MediaStream, 341 | allow_none=True, 342 | help="An instance of :class:`MediaStream` that is the source for recording.", 343 | ).tag(sync=True, **widget_serialization) 344 | filename = Unicode("record", help="The filename used for downloading or auto saving.").tag( 345 | sync=True 346 | ) 347 | format = Unicode("webm", help="The format of the recording.").tag(sync=True) 348 | recording = Bool( 349 | False, 350 | help="(boolean) Indicator and controller of the recorder state, i.e. putting the value to True will start recording.", 351 | ).tag(sync=True) 352 | autosave = Bool( 353 | False, 354 | help="If true, will save the data to a file once the recording is finished (based on filename and format)", 355 | ).tag(sync=True) 356 | _data_src = Unicode("").tag(sync=True) 357 | 358 | def download(self): 359 | """Download the recording (usually a popup appears in the browser)""" 360 | self.send({"msg": "download"}) 361 | 362 | 363 | @register 364 | class ImageRecorder(Recorder): 365 | """Creates a recorder which allows to grab an Image from a MediaStream widget.""" 366 | 367 | _model_name = Unicode("ImageRecorderModel").tag(sync=True) 368 | _view_name = Unicode("ImageRecorderView").tag(sync=True) 369 | 370 | image = Instance(Image).tag(sync=True, **widget_serialization) 371 | format = Unicode("png", help="The format of the image.").tag(sync=True) 372 | _width = Unicode().tag(sync=True) 373 | _height = Unicode().tag(sync=True) 374 | 375 | def __init__( 376 | self, 377 | format="png", 378 | 
filename=Recorder.filename.default_value, 379 | recording=False, 380 | autosave=False, 381 | **kwargs, 382 | ): 383 | super(ImageRecorder, self).__init__( 384 | format=format, 385 | filename=filename, 386 | recording=recording, 387 | autosave=autosave, 388 | **kwargs, 389 | ) 390 | if "image" not in kwargs: 391 | # Set up initial observer on child: 392 | self.image.observe(self._check_autosave, "value") 393 | 394 | @traitlets.default("image") 395 | def _default_image(self): 396 | return Image(width=self._width, height=self._height, format=self.format) 397 | 398 | @observe("_width") 399 | def _update_image_width(self, change): 400 | self.image.width = self._width 401 | 402 | @observe("_height") 403 | def _update_image_height(self, change): 404 | self.image.height = self._height 405 | 406 | @observe("format") 407 | def _update_image_format(self, change): 408 | self.image.format = self.format 409 | 410 | @observe("image") 411 | def _bind_image(self, change): 412 | if change.old: 413 | change.old.unobserve(self._check_autosave, "value") 414 | change.new.observe(self._check_autosave, "value") 415 | 416 | def _check_autosave(self, change): 417 | if len(self.image.value) and self.autosave: 418 | self.save() 419 | 420 | def save(self, filename=None): 421 | """Save the image to a file, if no filename is given it is based on the filename trait and the format. 422 | 423 | >>> recorder = ImageRecorder(filename='test', format='png') 424 | >>> ... 425 | >>> recorder.save() # will save to test.png 426 | >>> recorder.save('foo') # will save to foo.png 427 | >>> recorder.save('foo.dat') # will save to foo.dat 428 | 429 | """ 430 | filename = filename or self.filename 431 | if "." not in filename: 432 | filename += "." 
+ self.format 433 | if len(self.image.value) == 0: 434 | raise ValueError("No data, did you record anything?") 435 | with open(filename, "wb") as f: 436 | f.write(self.image.value) 437 | 438 | 439 | @register 440 | class VideoRecorder(Recorder): 441 | """Creates a recorder which allows to record a MediaStream widget, play the 442 | record in the Notebook, and download it or turn it into a Video widget. 443 | 444 | For help on supported values for the "codecs" attribute, see 445 | https://stackoverflow.com/questions/41739837/all-mime-types-supported-by-mediarecorder-in-firefox-and-chrome 446 | """ 447 | 448 | _model_name = Unicode("VideoRecorderModel").tag(sync=True) 449 | _view_name = Unicode("VideoRecorderView").tag(sync=True) 450 | 451 | video = Instance(Video).tag(sync=True, **widget_serialization) 452 | codecs = Unicode("", help='Optional codecs for the recording, e.g. "vp8" or "vp9, opus".').tag( 453 | sync=True 454 | ) 455 | 456 | def __init__( 457 | self, 458 | format="webm", 459 | filename=Recorder.filename.default_value, 460 | recording=False, 461 | autosave=False, 462 | **kwargs, 463 | ): 464 | super(VideoRecorder, self).__init__( 465 | format=format, 466 | filename=filename, 467 | recording=recording, 468 | autosave=autosave, 469 | **kwargs, 470 | ) 471 | if "video" not in kwargs: 472 | # Set up initial observer on child: 473 | self.video.observe(self._check_autosave, "value") 474 | 475 | @traitlets.default("video") 476 | def _default_video(self): 477 | return Video(format=self.format, controls=True) 478 | 479 | @observe("format") 480 | def _update_video_format(self, change): 481 | self.video.format = self.format 482 | 483 | @observe("video") 484 | def _bind_video(self, change): 485 | if change.old: 486 | change.old.unobserve(self._check_autosave, "value") 487 | change.new.observe(self._check_autosave, "value") 488 | 489 | def _check_autosave(self, change): 490 | if len(self.video.value) and self.autosave: 491 | self.save() 492 | 493 | def save(self, 
filename=None): 494 | """Save the video to a file, if no filename is given it is based on the filename trait and the format. 495 | 496 | >>> recorder = VideoRecorder(filename='test', format='mp4') 497 | >>> ... 498 | >>> recorder.save() # will save to test.mp4 499 | >>> recorder.save('foo') # will save to foo.mp4 500 | >>> recorder.save('foo.dat') # will save to foo.dat 501 | 502 | """ 503 | filename = filename or self.filename 504 | if "." not in filename: 505 | filename += "." + self.format 506 | if len(self.video.value) == 0: 507 | raise ValueError("No data, did you record anything?") 508 | with open(filename, "wb") as f: 509 | f.write(self.video.value) 510 | 511 | 512 | @register 513 | class AudioRecorder(Recorder): 514 | """Creates a recorder which allows to record the Audio of a MediaStream widget, play the 515 | record in the Notebook, and download it or turn it into an Audio widget. 516 | 517 | For help on supported values for the "codecs" attribute, see 518 | https://stackoverflow.com/questions/41739837/all-mime-types-supported-by-mediarecorder-in-firefox-and-chrome 519 | """ 520 | 521 | _model_name = Unicode("AudioRecorderModel").tag(sync=True) 522 | _view_name = Unicode("AudioRecorderView").tag(sync=True) 523 | 524 | audio = Instance(Audio).tag(sync=True, **widget_serialization) 525 | codecs = Unicode("", help='Optional codecs for the recording, e.g. 
"opus".').tag(sync=True) 526 | 527 | def __init__( 528 | self, 529 | format="webm", 530 | filename=Recorder.filename.default_value, 531 | recording=False, 532 | autosave=False, 533 | **kwargs, 534 | ): 535 | super(AudioRecorder, self).__init__( 536 | format=format, 537 | filename=filename, 538 | recording=recording, 539 | autosave=autosave, 540 | **kwargs, 541 | ) 542 | if "audio" not in kwargs: 543 | # Set up initial observer on child: 544 | self.audio.observe(self._check_autosave, "value") 545 | 546 | @traitlets.default("audio") 547 | def _default_audio(self): 548 | return Audio(format=self.format, controls=True) 549 | 550 | @observe("format") 551 | def _update_audio_format(self, change): 552 | self.audio.format = self.format 553 | 554 | @observe("audio") 555 | def _bind_audio(self, change): 556 | if change.old: 557 | change.old.unobserve(self._check_autosave, "value") 558 | change.new.observe(self._check_autosave, "value") 559 | 560 | def _check_autosave(self, change): 561 | if len(self.audio.value) and self.autosave: 562 | self.save() 563 | 564 | def save(self, filename=None): 565 | """Save the audio to a file, if no filename is given it is based on the filename trait and the format. 566 | 567 | >>> recorder = AudioRecorder(filename='test', format='mp3') 568 | >>> ... 569 | >>> recorder.save() # will save to test.mp3 570 | >>> recorder.save('foo') # will save to foo.mp3 571 | >>> recorder.save('foo.dat') # will save to foo.dat 572 | 573 | """ 574 | filename = filename or self.filename 575 | if "." not in filename: 576 | filename += "." 
+ self.format 577 | if len(self.audio.value) == 0: 578 | raise ValueError("No data, did you record anything?") 579 | with open(filename, "wb") as f: 580 | f.write(self.audio.value) 581 | 582 | 583 | @register 584 | class WebRTCPeer(DOMWidget): 585 | """A peer-to-peer webrtc connection""" 586 | 587 | _model_module = Unicode("jupyter-webrtc").tag(sync=True) 588 | _view_module = Unicode("jupyter-webrtc").tag(sync=True) 589 | _view_name = Unicode("WebRTCPeerView").tag(sync=True) 590 | _model_name = Unicode("WebRTCPeerModel").tag(sync=True) 591 | _view_module_version = Unicode(semver_range_frontend).tag(sync=True) 592 | _model_module_version = Unicode(semver_range_frontend).tag(sync=True) 593 | 594 | stream_local = Instance(MediaStream, allow_none=True).tag(sync=True, **widget_serialization) 595 | stream_remote = Instance(MediaStream, allow_none=True).tag(sync=True, **widget_serialization) 596 | id_local = Unicode("").tag(sync=True) 597 | id_remote = Unicode("").tag(sync=True) 598 | connected = Bool(False, read_only=True).tag(sync=True) 599 | failed = Bool(False, read_only=True).tag(sync=True) 600 | 601 | def connect(self): 602 | self.send({"msg": "connect"}) 603 | 604 | 605 | @register 606 | class WebRTCRoom(DOMWidget): 607 | """A 'chatroom', which consists of a list of :`WebRTCPeer` connections""" 608 | 609 | _model_module = Unicode("jupyter-webrtc").tag(sync=True) 610 | _view_module = Unicode("jupyter-webrtc").tag(sync=True) 611 | _model_name = Unicode("WebRTCRoomModel").tag(sync=True) 612 | _view_module_version = Unicode(semver_range_frontend).tag(sync=True) 613 | _model_module_version = Unicode(semver_range_frontend).tag(sync=True) 614 | 615 | room = Unicode("room").tag(sync=True) 616 | stream = Instance(MediaStream, allow_none=True).tag(sync=True, **widget_serialization) 617 | room_id = Unicode(read_only=True).tag(sync=True) 618 | nickname = Unicode("anonymous").tag(sync=True) 619 | peers = List(Instance(WebRTCPeer), [], allow_none=False).tag(sync=True, 
**widget_serialization) 620 | streams = List(Instance(MediaStream), [], allow_none=False).tag( 621 | sync=True, **widget_serialization 622 | ) 623 | 624 | 625 | @register 626 | class WebRTCRoomLocal(WebRTCRoom): 627 | _model_name = Unicode("WebRTCRoomLocalModel").tag(sync=True) 628 | 629 | 630 | @register 631 | class WebRTCRoomMqtt(WebRTCRoom): 632 | """Use a mqtt server to connect to other peers""" 633 | 634 | _model_name = Unicode("WebRTCRoomMqttModel").tag(sync=True) 635 | 636 | server = Unicode("wss://iot.eclipse.org:443/ws").tag(sync=True) 637 | 638 | 639 | # add all help strings to the __doc__ for the api docstrings 640 | for name, cls in list(vars().items()): 641 | try: 642 | if issubclass(cls, traitlets.HasTraits): 643 | for trait_name, trait in cls.class_traits().items(): 644 | if "help" in trait.metadata: 645 | trait.__doc__ = trait.metadata["help"] 646 | except TypeError: 647 | pass 648 | -------------------------------------------------------------------------------- /js/README.md: -------------------------------------------------------------------------------- 1 | WebRTC for Jupyter notebook/lab 2 | 3 | ## Package Install 4 | 5 | **Prerequisites** 6 | 7 | - [node](http://nodejs.org/) 8 | 9 | ```bash 10 | npm install --save jupyter-webrtc 11 | ``` 12 | -------------------------------------------------------------------------------- /js/karma.conf.js: -------------------------------------------------------------------------------- 1 | var webpackConfig = require("./webpack.config.js"); 2 | var webpack = require("webpack"); 3 | 4 | module.exports = function (config) { 5 | config.set({ 6 | basePath: "", 7 | frameworks: ["mocha", "chai", "sinon"], 8 | files: [{ pattern: "test_js/test/index.js" }], 9 | exclude: ["**/embed.js", "src/**"], 10 | preprocessors: { 11 | "test_js/test/index.js": ["webpack", "sourcemap"], 12 | }, 13 | webpack: { 14 | module: webpackConfig[1].module, 15 | devtool: "source-map", 16 | mode: "development", 17 | resolve: { 18 | 
extensions: [".js"], 19 | }, 20 | plugins: [ 21 | // see https://github.com/webpack-contrib/karma-webpack/issues/109#issuecomment-224961264 22 | new webpack.SourceMapDevToolPlugin({ 23 | filename: null, // if no value is provided the sourcemap is inlined 24 | test: /\.(js)($|\?)/i, // process .js files only 25 | }), 26 | new webpack.ProvidePlugin({ 27 | Buffer: ["buffer", "Buffer"], 28 | process: "process/browser", 29 | }), 30 | ], 31 | }, 32 | mochaReporter: { 33 | showDiff: true, 34 | }, 35 | reporters: ["progress", "mocha"], 36 | port: 9876, 37 | colors: true, 38 | // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG 39 | logLevel: config.LOG_INFO, 40 | autoWatch: true, 41 | // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher 42 | browsers: ["HeadlessChrome"], 43 | customLaunchers: { 44 | HeadlessChrome: { 45 | base: "Chrome", 46 | flags: ["--headless", "--disable-gpu", "--remote-debugging-port=9222"], 47 | }, 48 | }, 49 | // if true, Karma captures browsers, runs the tests and exits 50 | singleRun: true, 51 | // how many browser should be started simultaneous 52 | concurrency: Infinity, 53 | }); 54 | }; 55 | -------------------------------------------------------------------------------- /js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "jupyter-webrtc", 3 | "version": "0.6.0", 4 | "description": "WebRTC for Jupyter notebook/lab", 5 | "author": "Maarten Breddels", 6 | "main": "src/index.js", 7 | "repository": { 8 | "type": "git", 9 | "url": "https://github.com/maartenbreddels/ipywebrtc.git" 10 | }, 11 | "jupyterlab": { 12 | "extension": "src/labplugin", 13 | "webpackConfig": "webpack.config.lab3.js", 14 | "outputDir": "../share/jupyter/labextensions/jupyter-webrtc", 15 | "sharedPackages": { 16 | "@jupyter-widgets/base": { 17 | "bundled": false, 18 | "singleton": true 19 | } 20 | } 21 | }, 22 | "keywords": 
[ 23 | "jupyter", 24 | "widgets", 25 | "ipython", 26 | "ipywidgets" 27 | ], 28 | "scripts": { 29 | "build": "npm run build:labextension && npm run build:nbextension", 30 | "build:labextension": "jupyter labextension build .", 31 | "build:nbextension": "webpack --mode=production", 32 | "watch": "npm-run-all -p watch:*", 33 | "watch:nbextension": "webpack --watch --mode=development", 34 | "watch:labextension": "jupyter labextension watch .", 35 | "test": "tsc && copyfiles package.json test_js && karma start --single-run" 36 | }, 37 | "devDependencies": { 38 | "@jupyter-widgets/controls": "^1.5.0 || ^2 || ^3", 39 | "@jupyterlab/builder": "^3.0.1", 40 | "@jupyterlab/services": "^2.0.3", 41 | "@types/chai": "^4.1.4", 42 | "@types/expect.js": "^0.3.29", 43 | "@types/mocha": "^7.0.2", 44 | "@types/sinon": "^9.0.0", 45 | "arraybuffer-loader": "^1.0.6", 46 | "base64-image-loader": "^1.2.1", 47 | "chai": "^4.1.2", 48 | "copyfiles": "^2.4.1", 49 | "json-loader": "^0.5.4", 50 | "karma": "^5.0.2", 51 | "karma-chai": "^0.1.0", 52 | "karma-chrome-launcher": "^3.1.0", 53 | "karma-mocha": "^2.0.0", 54 | "karma-mocha-reporter": "^2.2.5", 55 | "karma-sinon": "^1.0.5", 56 | "karma-sourcemap-loader": "^0.3.7", 57 | "karma-webpack": "^5.0.0", 58 | "mocha": "^7.1.1", 59 | "npm-run-all": "^4.1.3", 60 | "rimraf": "^2.4.1", 61 | "sinon": "^9.0.2", 62 | "sinon-chai": "^3.3.0", 63 | "style-loader": "^0.13.1", 64 | "tslint": "^5.11.0", 65 | "typescript": "^3.5.2", 66 | "webpack": "^5", 67 | "webpack-cli": "^4.5.0" 68 | }, 69 | "dependencies": { 70 | "@jupyter-widgets/base": "*", 71 | "buffer": "^4.9.2", 72 | "html2canvas": "v1.0.0-alpha.12", 73 | "mqtt": "^2.11.0", 74 | "process": "^0.11.10", 75 | "underscore": "^1.8.3", 76 | "url": "^0.11.0", 77 | "util": "^0.12.3", 78 | "webrtc-adapter": "^4.2.2" 79 | }, 80 | "files": [ 81 | "dist/", 82 | "css/", 83 | "src/" 84 | ] 85 | } 86 | -------------------------------------------------------------------------------- /js/src/embed.js: 
-------------------------------------------------------------------------------- 1 | // Entry point for the unpkg bundle containing custom model definitions. 2 | // 3 | // It differs from the notebook bundle in that it does not need to define a 4 | // dynamic baseURL for the static assets and may load some css that would 5 | // already be loaded by the notebook otherwise. 6 | 7 | // Export widget models and views, and the npm package version number. 8 | module.exports = require("./webrtc.js"); 9 | module.exports["version"] = require("../package.json").version; 10 | -------------------------------------------------------------------------------- /js/src/extension.js: -------------------------------------------------------------------------------- 1 | // This file contains the javascript that is run when the notebook is loaded. 2 | // It contains some requirejs configuration and the `load_ipython_extension` 3 | // which is required for any notebook extension. 4 | 5 | // Configure requirejs 6 | if (window.require) { 7 | window.require.config({ 8 | map: { 9 | "*": { 10 | "jupyter-webrtc": "nbextensions/jupyter-webrtc/index", 11 | }, 12 | }, 13 | }); 14 | } 15 | 16 | // Export the required load_ipython_extention 17 | module.exports = { 18 | load_ipython_extension: function () {}, 19 | }; 20 | -------------------------------------------------------------------------------- /js/src/index.js: -------------------------------------------------------------------------------- 1 | // Entry point for the notebook bundle containing custom model definitions. 2 | // 3 | // Setup notebook base URL 4 | // 5 | // Some static assets may be required by the custom widget javascript. The base 6 | // url for the notebook is not known at build time and is therefore computed 7 | // dynamically. 
8 | // this sometimes gives issues with jupyter lab it seems, and doesn't seem to hurt to comment out 9 | // __webpack_public_path__ = document.querySelector('body').getAttribute('data-base-url') + 'nbextensions/jupyter-webrtc/'; 10 | 11 | // Export widget models and views, and the npm package version number. 12 | module.exports = require("./webrtc.js"); 13 | module.exports["version"] = require("../package.json").version; 14 | -------------------------------------------------------------------------------- /js/src/labplugin.js: -------------------------------------------------------------------------------- 1 | const jupyter_webrtc = require("./index"); 2 | const base = require("@jupyter-widgets/base"); 3 | 4 | module.exports = { 5 | id: "jupyter-webrtc", 6 | requires: [base.IJupyterWidgetRegistry], 7 | activate: function (app, widgets) { 8 | widgets.registerWidget({ 9 | name: "jupyter-webrtc", 10 | version: jupyter_webrtc.version, 11 | exports: jupyter_webrtc, 12 | }); 13 | }, 14 | autoStart: true, 15 | }; 16 | -------------------------------------------------------------------------------- /js/src/utils.js: -------------------------------------------------------------------------------- 1 | export function download(data, filename) { 2 | let a = document.createElement("a"); 3 | a.download = filename; 4 | a.href = data; 5 | // see https://stackoverflow.com/questions/18480474/how-to-save-an-image-from-canvas 6 | if (document.createEvent) { 7 | let e = document.createEvent("MouseEvents"); 8 | e.initMouseEvent( 9 | "click", 10 | true, 11 | true, 12 | window, 13 | 0, 14 | 0, 15 | 0, 16 | 0, 17 | 0, 18 | false, 19 | false, 20 | false, 21 | false, 22 | 0, 23 | null, 24 | ); 25 | 26 | a.dispatchEvent(e); 27 | } else if (lnk.fireEvent) { 28 | a.fireEvent("onclick"); 29 | } 30 | } 31 | 32 | export function downloadBlob(blob, filename) { 33 | let url = window.URL.createObjectURL(blob); 34 | download(url, filename); 35 | setTimeout(function () { 36 | 
window.URL.revokeObjectURL(url); 37 | }, 100); 38 | } 39 | 40 | export async function onCanPlay(videoElement) { 41 | // wait till a video element is ready to play, and can be drawn on a canvas 42 | return new Promise((resolve, reject) => { 43 | // see https://github.com/webrtc/samples/pull/853 44 | if (videoElement.readyState >= 3) { 45 | resolve(); 46 | } else { 47 | videoElement.addEventListener("canplay", resolve); 48 | videoElement.addEventListener("error", (event) => 49 | reject(new Error("cannot play video stream")), 50 | ); 51 | } 52 | }); 53 | } 54 | 55 | export async function onLoadedMetaData(videoElement) { 56 | // before the event is fired, videoHeight might be 0 57 | // see https://stackoverflow.com/questions/4129102/html5-video-dimensions 58 | return new Promise((resolve, reject) => { 59 | if (videoElement.videoHeight > 0) resolve(); 60 | else videoElement.addEventListener("loadedmetadata", resolve); 61 | }); 62 | } 63 | 64 | export async function canvasToBlob(canvas, mimeType) { 65 | return new Promise((resolve, reject) => { 66 | canvas.toBlob((blob) => resolve(blob), mimeType); 67 | }); 68 | } 69 | export async function blobToBytes(blob) { 70 | return new Promise((resolve, reject) => { 71 | const reader = new FileReader(); 72 | reader.readAsArrayBuffer(blob); 73 | reader.onloadend = () => { 74 | const bytes = new Uint8Array(reader.result); 75 | resolve(bytes); 76 | }; 77 | }); 78 | } 79 | export async function imageWidgetToCanvas(widget, canvas) { 80 | // this code should move to jupyter-widgets's ImageModel widget, so all this logic is in one place 81 | // returns when the image is drawn on the canvas 82 | let url; 83 | let format = widget.get("format"); 84 | let value = widget.get("value"); 85 | if (format !== "url") { 86 | let blob = new Blob([value], { type: `image/${widget.get("format")}` }); 87 | url = URL.createObjectURL(blob); 88 | } else { 89 | url = new TextDecoder("utf-8").decode(value.buffer); 90 | } 91 | 92 | let el = 
document.createElement("img"); 93 | el.src = url; 94 | let width = widget.get("width"); 95 | if (width !== undefined && width.length > 0) { 96 | el.setAttribute("width", width); 97 | } else { 98 | el.removeAttribute("width"); 99 | } 100 | 101 | let height = widget.get("height"); 102 | if (height !== undefined && height.length > 0) { 103 | el.setAttribute("height", height); 104 | } else { 105 | el.removeAttribute("height"); 106 | } 107 | let context = canvas.getContext("2d"); 108 | context.drawImage(el, 0, 0); 109 | return new Promise((resolve, reject) => { 110 | el.onload = () => { 111 | canvas.width = el.width; 112 | canvas.height = el.height; 113 | context.drawImage(el, 0, 0); 114 | if (typeof oldurl !== "string") { 115 | URL.revokeObjectURL(url); 116 | } 117 | resolve(); 118 | }; 119 | }); 120 | } 121 | -------------------------------------------------------------------------------- /js/src/webrtc.js: -------------------------------------------------------------------------------- 1 | import * as widgets from "@jupyter-widgets/base"; 2 | import * as html2canvas from "html2canvas"; 3 | import * as _ from "underscore"; 4 | require("webrtc-adapter"); 5 | 6 | // Workaround for JupyterLab: "ws" is not defined 7 | // https://github.com/maartenbreddels/ipywebrtc/issues/55 8 | window.ws = global.WebSocket; 9 | 10 | import * as mqtt from "mqtt"; 11 | import * as utils from "./utils"; 12 | const semver_range = "~" + require("../package.json").version; 13 | 14 | import { imageWidgetToCanvas } from "./utils"; 15 | 16 | export class MediaStreamModel extends widgets.DOMWidgetModel { 17 | defaults() { 18 | return { 19 | ...super.defaults(), 20 | _model_module: "jupyter-webrtc", 21 | _view_module: "jupyter-webrtc", 22 | _model_name: "MediaStreamModel", 23 | _view_name: "MediaStreamView", 24 | _model_module_version: semver_range, 25 | _view_module_version: semver_range, 26 | }; 27 | } 28 | 29 | get stream() { 30 | return this.captureStream(); 31 | } 32 | 33 | captureStream() { 
34 | throw new Error("Not implemented"); 35 | } 36 | } 37 | 38 | const captureStream = function (widget) { 39 | if (widget.captureStream) { 40 | return widget.captureStream(); 41 | } else { 42 | return widget.stream; 43 | } 44 | }; 45 | 46 | export class MediaStreamView extends widgets.DOMWidgetView { 47 | render() { 48 | super.render.apply(this, arguments); 49 | window.last_media_stream_view = this; 50 | this.video = document.createElement("video"); 51 | this.video.controls = true; 52 | this.pWidget.addClass("jupyter-widgets"); 53 | this.pWidget.addClass("widget-image"); 54 | 55 | this.initPromise = this.model.captureStream(); 56 | 57 | this.initPromise.then( 58 | (stream) => { 59 | this.video.srcObject = stream; 60 | this.el.appendChild(this.video); 61 | this.video.play(); 62 | }, 63 | (error) => { 64 | const text = document.createElement("div"); 65 | text.innerHTML = 66 | "Error creating view for mediastream: " + error.message; 67 | this.el.appendChild(text); 68 | }, 69 | ); 70 | } 71 | 72 | remove() { 73 | if (this.initPromise === null) { 74 | // Remove already called 75 | return; 76 | } 77 | this.initPromise.then((stream) => { 78 | this.video.pause(); 79 | this.video.srcObject = null; 80 | }); 81 | this.initPromise = null; 82 | return super.remove.apply(this, arguments); 83 | } 84 | } 85 | 86 | export class ImageStreamModel extends MediaStreamModel { 87 | defaults() { 88 | return { 89 | ...super.defaults(), 90 | _model_name: "ImageStreamModel", 91 | image: null, 92 | }; 93 | } 94 | 95 | initialize() { 96 | super.initialize.apply(this, arguments); 97 | window.last_image_stream = this; 98 | this.canvas = document.createElement("canvas"); 99 | this.context = this.canvas.getContext("2d"); 100 | 101 | this.canvas.width = this.get("width"); 102 | this.canvas.height = this.get("height"); 103 | // I was hoping this should do it 104 | imageWidgetToCanvas(this.get("image"), this.canvas); 105 | this.get("image").on("change:value", this.sync_image, this); 106 | } 107 | 
108 | sync_image() { 109 | // not sure if firefox uses moz prefix also on a canvas 110 | if (this.canvas.captureStream) { 111 | // TODO: add a fps trait 112 | // but for some reason we need to do it again 113 | imageWidgetToCanvas(this.get("image"), this.canvas); 114 | } else { 115 | throw new Error("captureStream not supported for this browser"); 116 | } 117 | } 118 | 119 | async captureStream() { 120 | this.sync_image(); 121 | return this.canvas.captureStream(); 122 | } 123 | } 124 | 125 | ImageStreamModel.serializers = { 126 | ...MediaStreamModel.serializers, 127 | image: { deserialize: widgets.unpack_models }, 128 | }; 129 | 130 | class StreamModel extends MediaStreamModel { 131 | defaults() { 132 | return { ...super.defaults(), playing: true }; 133 | } 134 | 135 | initialize() { 136 | super.initialize.apply(this, arguments); 137 | 138 | this.media = undefined; 139 | 140 | this.on("change:playing", this.updatePlay, this); 141 | } 142 | 143 | async captureStream() { 144 | if (!this.createView) { 145 | this.createView = _.once(() => { 146 | return this.widget_manager 147 | .create_view(this.get(this.type)) 148 | .then((view) => { 149 | this.media_wid = view; 150 | this.media = this.media_wid.el; 151 | }); 152 | }); 153 | } 154 | let widget = this.get(this.type); 155 | if (!widget) throw new Error("no media widget passed"); 156 | await this.createView(); 157 | if (this.media.captureStream || this.media.mozCaptureStream) { 158 | // following https://github.com/webrtc/samples/blob/gh-pages/src/content/capture/video-pc/js/main.js 159 | await utils.onCanPlay(this.media); 160 | 161 | this.updatePlay(); 162 | 163 | if (this.media.captureStream) { 164 | return this.media.captureStream(); 165 | } else if (this.media.mozCaptureStream) { 166 | return this.media.mozCaptureStream(); 167 | } 168 | } else { 169 | throw new Error("captureStream not supported for this browser"); 170 | } 171 | } 172 | 173 | updatePlay() { 174 | if (this.get("playing")) { 175 | this.media.play(); 
176 | } else { 177 | this.media.pause(); 178 | } 179 | } 180 | 181 | close() { 182 | const returnValue = super.close.apply(this, arguments); 183 | this.media.pause(); 184 | this.media_wid.close(); 185 | return returnValue; 186 | } 187 | } 188 | 189 | export class VideoStreamModel extends StreamModel { 190 | defaults() { 191 | return { 192 | ...super.defaults(), 193 | _model_name: "VideoStreamModel", 194 | video: null, 195 | }; 196 | } 197 | 198 | initialize() { 199 | super.initialize.apply(this, arguments); 200 | window.last_video_stream = this; 201 | 202 | this.type = "video"; 203 | } 204 | } 205 | 206 | VideoStreamModel.serializers = { 207 | ...StreamModel.serializers, 208 | video: { deserialize: widgets.unpack_models }, 209 | }; 210 | 211 | export class AudioStreamModel extends StreamModel { 212 | defaults() { 213 | return { 214 | ...super.defaults(), 215 | _model_name: "AudioStreamModel", 216 | _view_name: "AudioStreamView", 217 | audio: undefined, 218 | }; 219 | } 220 | 221 | initialize() { 222 | super.initialize.apply(this, arguments); 223 | window.last_audio_stream = this; 224 | 225 | this.type = "audio"; 226 | } 227 | } 228 | 229 | AudioStreamModel.serializers = { 230 | ...StreamModel.serializers, 231 | audio: { deserialize: widgets.unpack_models }, 232 | }; 233 | 234 | export class AudioStreamView extends widgets.DOMWidgetView { 235 | render() { 236 | super.render.apply(this, arguments); 237 | window.last_audio_stream_view = this; 238 | this.audio = document.createElement("audio"); 239 | this.audio.controls = true; 240 | this.pWidget.addClass("jupyter-widgets"); 241 | 242 | this.model.captureStream().then( 243 | (stream) => { 244 | this.audio.srcObject = stream; 245 | this.el.appendChild(this.audio); 246 | this.audio.play(); 247 | }, 248 | (error) => { 249 | const text = document.createElement("div"); 250 | text.innerHTML = 251 | "Error creating view for mediastream: " + error.message; 252 | this.el.appendChild(text); 253 | }, 254 | ); 255 | } 256 | 257 | 
remove() { 258 | this.model.captureStream().then((stream) => { 259 | this.audio.pause(); 260 | this.audio.srcObject = null; 261 | }); 262 | return widgets.super.remove.apply(this, arguments); 263 | } 264 | } 265 | 266 | export class WidgetStreamModel extends MediaStreamModel { 267 | defaults() { 268 | return { 269 | ...super.defaults(), 270 | _model_name: "WidgetStreamModel", 271 | _view_name: "WidgetStreamView", 272 | widget: null, 273 | max_fps: null, 274 | _html2canvas_start_streaming: false, 275 | }; 276 | } 277 | 278 | initialize() { 279 | super.initialize.apply(this, arguments); 280 | 281 | this.on( 282 | "change:_html2canvas_start_streaming", 283 | this.updateHTML2CanvasStreaming, 284 | this, 285 | ); 286 | this.rendered_view = null; 287 | 288 | // If the widget already has a captureStream -> use it 289 | if (typeof this.get("widget").captureStream === "function") { 290 | const fps = this.get("max_fps"); 291 | this.captureStream = () => { 292 | if (fps === null || fps === undefined) { 293 | return this.get("widget").captureStream(); 294 | } 295 | return this.get("widget").captureStream(fps); 296 | }; 297 | } 298 | // Else try to stream the first view of this widget 299 | else { 300 | this.captureStream = () => { 301 | const id_views = Object.keys(this.get("widget").views); 302 | if (id_views.length === 0) { 303 | return new Promise((resolve, reject) => { 304 | reject({ 305 | message: 306 | "Cannot create WidgetStream if the widget has no view rendered", 307 | }); 308 | }); 309 | } 310 | 311 | const first_view = this.get("widget").views[id_views[0]]; 312 | return first_view.then((view) => { 313 | this.rendered_view = view; 314 | 315 | // If the widget view is a canvas or a video element 316 | const capturable_obj = this.find_capturable_obj( 317 | this.rendered_view.el, 318 | ); 319 | if (capturable_obj) { 320 | return this._captureStream(capturable_obj); 321 | } 322 | 323 | // Else use html2canvas 324 | this.canvas = document.createElement("canvas"); 325 | 
this.set("_html2canvas_start_streaming", true); 326 | return this._captureStream(this.canvas); 327 | }); 328 | }; 329 | } 330 | } 331 | 332 | _captureStream(capturable_obj) { 333 | return new Promise((resolve, reject) => { 334 | const fps = this.get("max_fps"); 335 | 336 | if (capturable_obj.captureStream) { 337 | if (fps === null || fps === undefined) { 338 | resolve(capturable_obj.captureStream()); 339 | } else { 340 | resolve(capturable_obj.captureStream(fps)); 341 | } 342 | } 343 | 344 | if (capturable_obj.mozCaptureStream) { 345 | if (fps === null || fps === undefined) { 346 | resolve(capturable_obj.mozCaptureStream()); 347 | } else { 348 | resolve(capturable_obj.mozCaptureStream(fps)); 349 | } 350 | } 351 | 352 | reject(new Error("captureStream not supported for this browser")); 353 | }); 354 | } 355 | 356 | find_capturable_obj(element) { 357 | const nb_children = element.children.length; 358 | for (let child_idx = 0; child_idx < nb_children; child_idx++) { 359 | const child = element.children[child_idx]; 360 | if (child.captureStream || child.mozCaptureStream) { 361 | return child; 362 | } 363 | 364 | const capturable_obj = this.find_capturable_obj(child); 365 | if (capturable_obj) { 366 | return capturable_obj; 367 | } 368 | } 369 | } 370 | 371 | updateHTML2CanvasStreaming() { 372 | if ( 373 | this.get("_html2canvas_start_streaming") && 374 | !this.html2CanvasStreaming 375 | ) { 376 | this.html2CanvasStreaming = true; 377 | 378 | let lastTime; 379 | const updateStream = (currentTime) => { 380 | if (!this._closed) { 381 | if (!lastTime) { 382 | lastTime = currentTime; 383 | } 384 | const timeSinceLastFrame = currentTime - lastTime; 385 | lastTime = currentTime; 386 | 387 | const fps = this.get("max_fps"); 388 | if (fps === 0) { 389 | /* TODO: maybe implement the same behavior as here: 390 | https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream */ 391 | } else { 392 | let waitingTime = 0; 393 | if (fps !== null && fps !== undefined) 
{ 394 | waitingTime = 1000 / fps - timeSinceLastFrame; 395 | if (waitingTime < 0) { 396 | waitingTime = 0; 397 | } 398 | } 399 | 400 | setTimeout(() => { 401 | html2canvas(this.rendered_view.el, { 402 | canvas: this.canvas, 403 | logging: false, 404 | useCORS: true, 405 | ignoreElements: (element) => { 406 | return !( 407 | // Do not ignore if the element contains what we want to render 408 | ( 409 | element.contains(this.rendered_view.el) || 410 | // Do not ignore if the element is contained by what we want to render 411 | this.rendered_view.el.contains(element) || 412 | // Do not ignore if the element is contained by the head (style and scripts) 413 | document.head.contains(element) 414 | ) 415 | ); 416 | }, 417 | }).then(() => { 418 | window.requestAnimationFrame(updateStream); 419 | }); 420 | }, waitingTime); 421 | } 422 | } 423 | }; 424 | window.requestAnimationFrame(updateStream); 425 | } 426 | } 427 | } 428 | 429 | WidgetStreamModel.serializers = { 430 | ...MediaStreamModel.serializers, 431 | widget: { deserialize: widgets.unpack_models }, 432 | }; 433 | 434 | export class WidgetStreamView extends MediaStreamView {} 435 | 436 | export class CameraStreamModel extends MediaStreamModel { 437 | defaults() { 438 | return { 439 | ...super.defaults(), 440 | _model_name: "CameraStreamModel", 441 | constraints: { audio: true, video: true }, 442 | }; 443 | } 444 | 445 | captureStream() { 446 | if (!this.cameraStream) { 447 | this.cameraStream = navigator.mediaDevices.getUserMedia( 448 | this.get("constraints"), 449 | ); 450 | } 451 | return this.cameraStream; 452 | } 453 | 454 | close() { 455 | if (this.cameraStream) { 456 | this.cameraStream.then((stream) => { 457 | stream.getTracks().forEach((track) => { 458 | track.stop(); 459 | }); 460 | }); 461 | } 462 | return super.close.apply(this, arguments); 463 | } 464 | } 465 | 466 | class RecorderModel extends widgets.DOMWidgetModel { 467 | defaults() { 468 | return { 469 | ...super.defaults(), 470 | _model_module: 
"jupyter-webrtc", 471 | _view_module: "jupyter-webrtc", 472 | _model_module_version: semver_range, 473 | _view_module_version: semver_range, 474 | stream: null, 475 | filename: "record", 476 | format: "webm", 477 | codecs: "", 478 | recording: false, 479 | _data_src: "", 480 | }; 481 | } 482 | 483 | initialize() { 484 | super.initialize.apply(this, arguments); 485 | 486 | this.on("msg:custom", this.handleCustomMessage, this); 487 | this.on("change:recording", this.updateRecord, this); 488 | 489 | this.mediaRecorder = null; 490 | this.chunks = []; 491 | this.stopping = null; 492 | } 493 | 494 | handleCustomMessage(content) { 495 | if (content.msg === "download") { 496 | this.download(); 497 | } 498 | } 499 | 500 | get mimeType() { 501 | const codecs = this.get("codecs") || ""; 502 | let mimeType = `${this.type}/${this.get("format")}`; 503 | if (codecs) { 504 | mimeType += `; codecs="${codecs}"`; 505 | } 506 | return mimeType; 507 | } 508 | 509 | updateRecord() { 510 | const source = this.get("stream"); 511 | if (!source) { 512 | throw new Error("No stream specified"); 513 | } 514 | 515 | const mimeType = this.mimeType; 516 | if (!MediaRecorder.isTypeSupported(mimeType)) { 517 | throw new Error( 518 | `The mimeType ${mimeType} is not supported for record on this browser`, 519 | ); 520 | } 521 | 522 | if (this.get("recording")) { 523 | this.chunks = []; 524 | 525 | captureStream(source).then((stream) => { 526 | this.mediaRecorder = new MediaRecorder(stream, { 527 | audioBitsPerSecond: 128000, 528 | videoBitsPerSecond: 2500000, 529 | mimeType: mimeType, 530 | }); 531 | this.mediaRecorder.start(); 532 | this.mediaRecorder.ondataavailable = (event) => { 533 | this.chunks.push(event.data); 534 | }; 535 | }); 536 | } else { 537 | this.stopping = new Promise((resolve, reject) => { 538 | this.mediaRecorder.onstop = (e) => { 539 | if (this.get("_data_src") !== "") { 540 | URL.revokeObjectURL(this.get("_data_src")); 541 | } 542 | const blob = new Blob(this.chunks, { type: 
mimeType }); 543 | this.set("_data_src", window.URL.createObjectURL(blob)); 544 | this.save_changes(); 545 | 546 | const reader = new FileReader(); 547 | reader.readAsArrayBuffer(blob); 548 | reader.onloadend = () => { 549 | const bytes = new Uint8Array(reader.result); 550 | this.get(this.type).set("value", new DataView(bytes.buffer)); 551 | this.get(this.type).save_changes(); 552 | resolve(); 553 | }; 554 | }; 555 | }); 556 | this.stopping.then(() => { 557 | this.stopping = null; 558 | }); 559 | this.mediaRecorder.stop(); 560 | } 561 | } 562 | 563 | download() { 564 | if (this.chunks.length === 0) { 565 | if (this.stopping === null) { 566 | throw new Error("Nothing to download"); 567 | } 568 | // Re-trigger after stop completes 569 | this.stopping.then(() => { 570 | this.download(); 571 | }); 572 | return; 573 | } 574 | let blob = new Blob(this.chunks, { type: this.mimeType }); 575 | let filename = this.get("filename"); 576 | if (filename.indexOf(".") < 0) { 577 | filename = this.get("filename") + "." 
+ this.get("format"); 578 | } 579 | utils.downloadBlob(blob, filename); 580 | } 581 | 582 | close() { 583 | if (this.get("_data_src") !== "") { 584 | URL.revokeObjectURL(this.get("_data_src")); 585 | } 586 | return super.close.apply(this, arguments); 587 | } 588 | } 589 | 590 | RecorderModel.serializers = { 591 | ...widgets.DOMWidgetModel.serializers, 592 | stream: { deserialize: widgets.unpack_models }, 593 | }; 594 | 595 | class RecorderView extends widgets.DOMWidgetView { 596 | render() { 597 | super.render.apply(this, arguments); 598 | 599 | this.el.classList.add("jupyter-widgets"); 600 | 601 | this.buttons = document.createElement("div"); 602 | this.buttons.classList.add("widget-inline-hbox"); 603 | this.buttons.classList.add("widget-play"); 604 | 605 | this.recordButton = document.createElement("button"); 606 | this.downloadButton = document.createElement("button"); 607 | this.result = document.createElement(this.tag); 608 | this.result.controls = true; 609 | 610 | this.recordButton.className = "jupyter-button"; 611 | this.downloadButton.className = "jupyter-button"; 612 | 613 | this.buttons.appendChild(this.recordButton); 614 | this.buttons.appendChild(this.downloadButton); 615 | this.el.appendChild(this.buttons); 616 | this.el.appendChild(this.result); 617 | 618 | const recordIcon = document.createElement("i"); 619 | recordIcon.className = this.recordIconClass; 620 | this.recordButton.appendChild(recordIcon); 621 | const downloadIcon = document.createElement("i"); 622 | downloadIcon.className = "fa fa-download"; 623 | this.downloadButton.appendChild(downloadIcon); 624 | 625 | this.recordButton.onclick = () => { 626 | this.model.set("recording", !this.model.get("recording")); 627 | }; 628 | this.downloadButton.onclick = this.model.download.bind(this.model); 629 | 630 | this.listenTo(this.model, "change:recording", () => { 631 | if (this.model.get("recording")) { 632 | recordIcon.style.color = "darkred"; 633 | } else { 634 | recordIcon.style.color = ""; 635 | 
} 636 | }); 637 | 638 | this.listenTo(this.model, "change:_data_src", () => { 639 | this.result.src = this.model.get("_data_src"); 640 | if (this.result.play) { 641 | this.result.play(); 642 | } 643 | }); 644 | } 645 | } 646 | 647 | export class ImageRecorderModel extends RecorderModel { 648 | defaults() { 649 | return { 650 | ...super.defaults(), 651 | _model_name: "ImageRecorderModel", 652 | _view_name: "ImageRecorderView", 653 | image: null, 654 | _height: "", 655 | _width: "", 656 | }; 657 | } 658 | 659 | initialize() { 660 | super.initialize.apply(this, arguments); 661 | window.last_image_recorder = this; 662 | 663 | this.type = "image"; 664 | } 665 | 666 | async snapshot() { 667 | const mimeType = this.type + "/" + this.get("format"); 668 | const mediaStream = await captureStream(this.get("stream")); 669 | // turn the mediastream into a video element 670 | let video = document.createElement("video"); 671 | video.srcObject = mediaStream; 672 | video.play(); 673 | await utils.onCanPlay(video); 674 | await utils.onLoadedMetaData(video); 675 | // and the video element can be drawn onto a canvas 676 | let canvas = document.createElement("canvas"); 677 | let context = canvas.getContext("2d"); 678 | let height = video.videoHeight; 679 | let width = video.videoWidth; 680 | canvas.height = height; 681 | canvas.width = width; 682 | context.drawImage(video, 0, 0, canvas.width, canvas.height); 683 | 684 | // from the canvas we can get the underlying encoded data 685 | // TODO: check support for toBlob, or find a polyfill 686 | const blob = await utils.canvasToBlob(canvas, mimeType); 687 | this.set("_data_src", window.URL.createObjectURL(blob)); 688 | this._last_blob = blob; 689 | 690 | const bytes = await utils.blobToBytes(blob); 691 | 692 | this.get(this.type).set("value", new DataView(bytes.buffer)); 693 | this.get(this.type).save_changes(); 694 | this.set("_height", height.toString() + "px"); 695 | this.set("_width", width.toString() + "px"); 696 | 
this.set("recording", false); 697 | this.save_changes(); 698 | } 699 | 700 | updateRecord() { 701 | const source = this.get("stream"); 702 | if (!source) { 703 | throw new Error("No stream specified"); 704 | } 705 | 706 | if (this.get("_data_src") !== "") { 707 | URL.revokeObjectURL(this.get("_data_src")); 708 | } 709 | if (this.get("recording")) this.snapshot(); 710 | } 711 | 712 | download() { 713 | let filename = this.get("filename"); 714 | let format = this.get("format"); 715 | if (filename.indexOf(".") < 0) { 716 | filename = this.get("filename") + "." + format; 717 | } 718 | utils.downloadBlob(this._last_blob, filename); 719 | } 720 | } 721 | 722 | ImageRecorderModel.serializers = { 723 | ...RecorderModel.serializers, 724 | image: { deserialize: widgets.unpack_models }, 725 | }; 726 | 727 | export class ImageRecorderView extends RecorderView { 728 | initialize() { 729 | super.initialize.apply(this, arguments); 730 | this.tag = "img"; 731 | this.recordIconClass = "fa fa-camera"; 732 | } 733 | } 734 | 735 | export class VideoRecorderModel extends RecorderModel { 736 | defaults() { 737 | return { 738 | ...super.defaults(), 739 | _model_name: "VideoRecorderModel", 740 | _view_name: "VideoRecorderView", 741 | video: null, 742 | }; 743 | } 744 | 745 | initialize() { 746 | super.initialize.apply(this, arguments); 747 | window.last_video_recorder = this; 748 | 749 | this.type = "video"; 750 | } 751 | } 752 | 753 | VideoRecorderModel.serializers = { 754 | ...RecorderModel.serializers, 755 | video: { deserialize: widgets.unpack_models }, 756 | }; 757 | 758 | export class VideoRecorderView extends RecorderView { 759 | initialize() { 760 | super.initialize.apply(this, arguments); 761 | this.tag = "video"; 762 | this.recordIconClass = "fa fa-circle"; 763 | } 764 | } 765 | 766 | export class AudioRecorderModel extends RecorderModel { 767 | defaults() { 768 | return { 769 | ...super.defaults(), 770 | _model_name: "AudioRecorderModel", 771 | _view_name: "AudioRecorderView", 
772 | audio: null, 773 | }; 774 | } 775 | 776 | initialize() { 777 | super.initialize.apply(this, arguments); 778 | window.last_audio_recorder = this; 779 | 780 | this.type = "audio"; 781 | } 782 | } 783 | 784 | AudioRecorderModel.serializers = { 785 | ...RecorderModel.serializers, 786 | audio: { deserialize: widgets.unpack_models }, 787 | }; 788 | 789 | export class AudioRecorderView extends RecorderView { 790 | initialize() { 791 | super.initialize.apply(this, arguments); 792 | this.tag = "audio"; 793 | this.recordIconClass = "fa fa-circle"; 794 | } 795 | } 796 | 797 | export class WebRTCRoomModel extends widgets.DOMWidgetModel { 798 | defaults() { 799 | return { 800 | ...super.defaults(), 801 | _model_name: "WebRTCRoomModel", 802 | //_view_name: 'WebRTCRoomView', 803 | _model_module: "jupyter-webrtc", 804 | //_view_module: 'jupyter-webrtc', 805 | _model_module_version: semver_range, 806 | _view_module_version: semver_range, 807 | room: "room", 808 | stream: null, 809 | room_id: widgets.uuid(), 810 | nickname: "anonymous", 811 | peers: [], 812 | streams: [], 813 | }; 814 | } 815 | log() { 816 | let args = [this.get("nickname") + " " + this.get("room_id") + ": "]; 817 | args = args.concat(Array.prototype.slice.call(arguments)); 818 | console.log.apply(null, args); 819 | } 820 | initialize() { 821 | super.initialize.apply(this, arguments); 822 | this.set("room_id", widgets.uuid()); 823 | this.room_id = this.get("room_id"); 824 | this.room = this.get("room"); 825 | this.peers = {}; // room_id (string) to WebRTCPeerModel 826 | window["last_webrtc_room_" + this.room_id] = this; 827 | const stream = this.get("stream"); 828 | if (stream) { 829 | this.set("streams", [stream]); 830 | } 831 | this.save_changes(); 832 | this.on("msg:custom", this.custom_msg, this); 833 | } 834 | custom_msg(content) { 835 | if (content.msg === "close") { 836 | this.close(); 837 | } 838 | } 839 | close() { 840 | this.get("peers").forEach((peer) => peer.close()); 841 | } 842 | 
create_peer(from_id) { 843 | return this.widget_manager 844 | .new_widget( 845 | { 846 | model_name: "WebRTCPeerModel", 847 | model_module: "jupyter-webrtc", 848 | model_module_version: semver_range, 849 | view_name: "WebRTCPeerView", 850 | view_module: "jupyter-webrtc", 851 | view_module_version: semver_range, 852 | widget_class: "webrtc.WebRTCPeerModel", // ipywidgets6 853 | }, 854 | { 855 | stream_local: this.get("stream"), 856 | id_local: this.get("room_id"), 857 | id_remote: from_id, 858 | }, 859 | ) 860 | .then((peer) => { 861 | peer.peer_msg_send = (msg) => { 862 | msg.room_id = this.get("room_id"); 863 | msg.to = from_id; 864 | this.log("send to peer", msg); 865 | //console.log('sending to room', msg, from_id); 866 | peer.save_changes(); 867 | this.room_msg_send(msg); 868 | }; 869 | return peer; 870 | }); 871 | } 872 | listen_to_remote_stream(peer) { 873 | peer.on( 874 | "change:stream_remote", 875 | _.once(() => { 876 | this.log("add remote stream"); 877 | const streams = this.get("streams").slice(); 878 | const stream = peer.get("stream_remote"); 879 | streams.push(stream); 880 | this.set("streams", streams); 881 | this.save_changes(); 882 | }), 883 | ); 884 | peer.on("change:connected", () => { 885 | const connected = peer.get("connected"); 886 | this.log( 887 | "changed connected status for ", 888 | peer.get("id_remote"), 889 | "to", 890 | connected, 891 | ); 892 | if (!connected) { 893 | let streams = this.get("streams").slice(); 894 | const stream = peer.get("stream_remote"); 895 | streams = _.without(streams, stream); 896 | this.set("streams", streams); 897 | 898 | let peers = this.get("peers").slice(); 899 | peers = _.without(peers, peer); 900 | this.set("peers", peers); 901 | 902 | delete this.peers[peer.get("id_remote")]; 903 | this.save_changes(); 904 | } 905 | }); 906 | } 907 | on_room_msg(msg) { 908 | const from_id = msg.room_id; 909 | if (msg.room_id === this.room_id) return; // skip my own msg'es 910 | if (msg.type === "join") { 911 | 
this.log("join from", msg.room_id); 912 | this.peers[from_id] = this.create_peer(from_id).then((peer) => { 913 | this.listen_to_remote_stream(peer); 914 | peer.join().then(() => { 915 | const peers = this.get("peers").slice(); 916 | peers.push(peer); 917 | this.set("peers", peers); 918 | this.save_changes(); 919 | }); 920 | return peer; 921 | }); 922 | this.log(": added peer", from_id); 923 | } else if (msg.room_id) { 924 | if (msg.to !== this.room_id) { 925 | return; 926 | } 927 | if (!this.peers[msg.room_id]) { 928 | this.peers[from_id] = this.create_peer(from_id).then((peer) => { 929 | this.listen_to_remote_stream(peer); 930 | const peers = this.get("peers").slice(); 931 | peers.push(peer); 932 | this.set("peers", peers); 933 | this.save_changes(); 934 | return peer; 935 | }); 936 | this.log("added peer", from_id); 937 | } 938 | const peer = this.peers[msg.room_id]; 939 | if (peer) { 940 | //console.log(this.room_id, ': peer', msg.room_id, peer, this, this.cid) 941 | peer.then((peer) => { 942 | this.log("sending from", msg.room_id, " to", msg.to, msg); 943 | peer.on_peer_msg(msg); 944 | }); 945 | } else { 946 | console.error("sending to unknown peer", msg.room_id); 947 | } 948 | } else { 949 | console.error("expected a to room_id to be present"); 950 | } 951 | } 952 | } 953 | 954 | WebRTCRoomModel.serializers = { 955 | ...widgets.DOMWidgetModel.serializers, 956 | stream: { deserialize: widgets.unpack_models }, 957 | peers: { deserialize: widgets.unpack_models }, 958 | }; 959 | 960 | const global_rooms = {}; 961 | 962 | export class WebRTCRoomLocalModel extends WebRTCRoomModel { 963 | defaults() { 964 | return { ...super.defaults(), _model_name: "WebRTCRoomLocalModel" }; 965 | } 966 | initialize() { 967 | super.initialize.apply(this, arguments); 968 | this.join(); 969 | } 970 | join() { 971 | const room = this.get("room"); 972 | console.log("joining room", room); 973 | const callbacks = global_rooms[room] || []; 974 | callbacks.push((msg) => 
this.on_room_msg(msg)); 975 | global_rooms[room] = callbacks; 976 | this.room_msg_send({ type: "join", room_id: this.get("room_id") }); 977 | } 978 | room_msg_send(msg) { 979 | const room = this.get("room"); 980 | console.log("send to room", room, msg, global_rooms[room]); 981 | _.each(global_rooms[room], function (callback) { 982 | callback(msg); 983 | }); 984 | } 985 | } 986 | 987 | export class WebRTCRoomMqttModel extends WebRTCRoomModel { 988 | defaults() { 989 | return { 990 | ...super.defaults(), 991 | _model_name: "WebRTCRoomMqttModel", 992 | server: "wss://iot.eclipse.org:443/ws", 993 | }; 994 | } 995 | initialize() { 996 | super.initialize.apply(this, arguments); 997 | console.log("connecting to", this.get("server")); 998 | this.mqtt_client = mqtt.connect(this.get("server")); 999 | const client = this.mqtt_client; 1000 | this.topic_join = "jupyter-webrtc/" + this.get("room") + "/join"; 1001 | //this.topic_present = 'jupyter-webrtc/' +this.room +'/present' 1002 | this.mqtt_client.on("connect", () => { 1003 | client.subscribe(this.topic_join); 1004 | //client.subscribe(this.topic_present); 1005 | //client.publish('jupyter-webrtc/room-a/present', 'you|me', {retain:true}); 1006 | //client.publish('jupyter-webrtc/room-a/join', 'Hello mqtt'); 1007 | }); 1008 | client.on("message", (topic, message) => { 1009 | const msg = JSON.parse(message); 1010 | console.log("msg received", message, msg); 1011 | if (topic === this.topic_join) { 1012 | this.on_room_msg(msg); 1013 | } 1014 | }); 1015 | this.join(); 1016 | } 1017 | join() { 1018 | this.room_msg_send({ type: "join", room_id: this.get("room_id") }); 1019 | } 1020 | room_msg_send(msg) { 1021 | const text = JSON.stringify(msg); 1022 | console.log("send to mqtt channel", msg); 1023 | this.mqtt_client.publish(this.topic_join, text); 1024 | } 1025 | } 1026 | 1027 | export class WebRTCPeerModel extends widgets.DOMWidgetModel { 1028 | defaults() { 1029 | return { 1030 | ...super.defaults(), 1031 | _model_name: 
"WebRTCPeerModel", 1032 | _view_name: "WebRTCPeerView", 1033 | _model_module: "jupyter-webrtc", 1034 | _view_module: "jupyter-webrtc", 1035 | _model_module_version: semver_range, 1036 | _view_module_version: semver_range, 1037 | }; 1038 | } 1039 | log() { 1040 | let args = [this.get("room_id") + ": "]; 1041 | args = args.concat(Array.prototype.slice.call(arguments)); 1042 | console.log.apply(null, args); 1043 | } 1044 | on_peer_msg(info) { 1045 | this.log("peer msg", info); 1046 | if (info.sdp) { 1047 | // the other party send us the sdp 1048 | this.log(name, "got sdp"); 1049 | const sdp_remote = new RTCSessionDescription(info.sdp); 1050 | const remote_description_set = this.pc.setRemoteDescription(sdp_remote); 1051 | if (!this.initiator) { 1052 | console.log( 1053 | this.get("id_local"), 1054 | "did not initiate, reply with answer", 1055 | ); 1056 | // if we didn't initiate, we should respond with an answer 1057 | // now we create an answer, and send a sdp back 1058 | Promise.all([remote_description_set, this.tracks_added]) 1059 | .then(() => this.pc.createAnswer()) 1060 | .then((sdp) => { 1061 | console.log("sending sdp", this.room_id); 1062 | this.send_sdp(sdp); 1063 | this.pc.setLocalDescription(sdp); 1064 | }); 1065 | } 1066 | } else if (info.candidate) { 1067 | const c = new RTCIceCandidate(info.candidate); 1068 | this.pc.addIceCandidate(c); 1069 | } 1070 | } 1071 | initialize() { 1072 | super.initialize.apply(this, arguments); 1073 | 1074 | const room_id = (this.room_id = this.get("id_local")); 1075 | this.initiator = false; 1076 | 1077 | const pc_config = { 1078 | iceServers: [ 1079 | { 1080 | urls: [ 1081 | "stun:stun.l.google.com:19302", 1082 | "stun:stun1.l.google.com:19302", 1083 | "stun:stun2.l.google.com:19302", 1084 | ], 1085 | }, 1086 | ], 1087 | }; 1088 | //const pc_config = null; 1089 | this.pc = new RTCPeerConnection(pc_config); 1090 | 1091 | window["last_webrtc_" + room_id] = this; 1092 | //this.other = null 1093 | 1094 | if 
(this.get("stream_local")) { 1095 | this.tracks_added = new Promise((resolve, reject) => { 1096 | this.get("stream_local").stream.then((stream) => { 1097 | console.log("add stream", stream); 1098 | //this.pc.addStream(stream) (this crashes/hangs chrome) 1099 | // so we use the addTrack api 1100 | stream.getTracks().forEach((track) => { 1101 | this.pc.addTrack(track, stream); 1102 | }); 1103 | resolve(); 1104 | }); // TODO: catch/reject? 1105 | }); 1106 | } else { 1107 | console.log("no stream"); 1108 | this.tracks_added = Promise.resolve(); 1109 | } 1110 | this.tracks_added.then(() => console.log("tracks added")); 1111 | this.pc.onicecandidate = (event) => { 1112 | console.log(this.room_id, "onicecandidate", event.candidate); 1113 | this.event_candidate = event; 1114 | this.send_ice_candidate(event.candidate); 1115 | }; 1116 | this.pc.onopen = () => { 1117 | console.log("onopen", name); 1118 | }; 1119 | this.pc.onaddstream = (evt) => { 1120 | console.log("onaddstream", name); 1121 | this.widget_manager 1122 | .new_widget({ 1123 | model_name: "MediaStreamModel", 1124 | model_module: "jupyter-webrtc", 1125 | model_module_version: semver_range, 1126 | view_name: "MediaStreamView", 1127 | view_module: "jupyter-webrtc", 1128 | view_module_version: semver_range, 1129 | widget_class: "webrtc.MediaStreamModel", // ipywidgets6 1130 | }) 1131 | .then((model) => { 1132 | model.captureStream = () => { 1133 | return new Promise((resolve, reject) => { 1134 | resolve(evt.stream); 1135 | }); 1136 | }; // TODO: not nice to just set the method... 
1137 | this.set("stream_remote", model); 1138 | //mo 1139 | this.save_changes(); 1140 | console.log(this.room_id, ": added stream_remote"); 1141 | return model; 1142 | }); 1143 | }; 1144 | this.pc.onconnecting = () => { 1145 | console.log("onconnecting", name); 1146 | }; 1147 | this.pc.oniceconnectionstatechange = () => { 1148 | console.log( 1149 | this.room_id, 1150 | "ICE connection state", 1151 | this.pc.iceConnectionState, 1152 | ); 1153 | if (this.pc.iceConnectionState === "disconnected") { 1154 | this.set("connected", false); 1155 | this.save_changes(); 1156 | } 1157 | if (this.pc.iceConnectionState === "connected") { 1158 | this.set("connected", true); 1159 | this.save_changes(); 1160 | } 1161 | // TODO: can we recover from this? 1162 | if (this.pc.iceConnectionState === "failed") { 1163 | this.set("connected", false); 1164 | this.save_changes(); 1165 | } 1166 | }; 1167 | /* 1168 | this doesn't seem to exist in chrome at least, lets rely on ice state change above 1169 | this.pc.onconnectionstatechange = () => { 1170 | console.log(this.room_id, 'connection state', this.pc.connectionState); 1171 | if (this.pc.connectionState === 'disconnected') { 1172 | this.set('connected', false) 1173 | this.save_changes() 1174 | } 1175 | if (this.pc.connectionState === 'connected') { 1176 | this.set('connected', true) 1177 | this.save_changes() 1178 | } 1179 | }, this) 1180 | */ 1181 | this.on("msg:custom", this.custom_msg, this); 1182 | //this.disconnect = _.once(this.disconnect, this)); 1183 | window.addEventListener("beforeunload", () => { 1184 | this.close(); 1185 | }); 1186 | } 1187 | custom_msg(content) { 1188 | console.log("custom msg", content); 1189 | if (content.msg === "connect") { 1190 | this.connect(); 1191 | } else if (content.msg === "close") { 1192 | this.close(); 1193 | } else { 1194 | this.disconnect(); 1195 | } 1196 | } 1197 | close() { 1198 | //console.log('disconnect') 1199 | this.pc.close(); // does not trigger ice conncection status changes 1200 | 
this.set("connected", false); 1201 | this.save_changes(); 1202 | } 1203 | join() { 1204 | this.initiator = true; 1205 | return this.tracks_added.then(() => { 1206 | return new Promise((resolve, reject) => { 1207 | const room_id = this.get("room_id"); 1208 | const offer = { 1209 | offerToReceiveAudio: 1, 1210 | offerToReceiveVideo: 1, 1211 | }; 1212 | 1213 | this.pc 1214 | .createOffer(offer) 1215 | .then((sdp) => { 1216 | console.log("set local desc"); 1217 | this.pc.setLocalDescription(sdp); 1218 | console.log(room_id, "send sdp"); 1219 | this.send_sdp(sdp); 1220 | resolve(); 1221 | }) 1222 | .catch((e) => { 1223 | console.error(e); 1224 | reject(e); 1225 | }); 1226 | return this; 1227 | }); 1228 | }); 1229 | } 1230 | send_sdp(sdp) { 1231 | this.broadcast({ sdp: sdp }); 1232 | } 1233 | send_ice_candidate(candidate) { 1234 | this.broadcast({ candidate: candidate }); 1235 | } 1236 | broadcast(msg) { 1237 | this.peer_msg_send(msg); 1238 | } 1239 | } 1240 | 1241 | WebRTCPeerModel.serializers = { 1242 | ...widgets.DOMWidgetModel.serializers, 1243 | stream: { deserialize: widgets.unpack_models }, 1244 | peers: { deserialize: widgets.unpack_models }, 1245 | }; 1246 | 1247 | export class WebRTCPeerView extends widgets.DOMWidgetView { 1248 | initialize() { 1249 | const el = document.createElement("video"); 1250 | window.last_media_view = this; 1251 | this.setElement(el); 1252 | super.initialize.apply(this, arguments); 1253 | } 1254 | 1255 | render() { 1256 | this.model.stream.then((stream) => { 1257 | this.el.srcObject = stream; 1258 | this.el.play(); 1259 | }); 1260 | } 1261 | } 1262 | -------------------------------------------------------------------------------- /js/test/dummy-manager.ts: -------------------------------------------------------------------------------- 1 | // Copyright (c) Jupyter Development Team. 2 | // Distributed under the terms of the Modified BSD License. 

//import * as widgets from '../../lib';
import * as base from "@jupyter-widgets/base";
import * as widgets from "@jupyter-widgets/controls";
import * as services from "@jupyterlab/services";
import * as Backbone from "backbone";

// Monotonic counter so every MockComm gets a unique comm_id.
let numComms = 0;

// Minimal stand-in for a Jupyter Comm: records the registered callbacks and
// lets tests drive messages through _process_msg without a live kernel.
export class MockComm {
  constructor() {
    this.comm_id = `mock-comm-id-${numComms}`;
    numComms += 1;
  }
  on_open(fn) {
    this._on_open = fn;
  }
  on_close(fn) {
    this._on_close = fn;
  }
  on_msg(fn) {
    this._on_msg = fn;
  }
  // Dispatch a fake kernel message to the registered on_msg handler, if any.
  _process_msg(msg) {
    if (this._on_msg) {
      return this._on_msg(msg);
    } else {
      return Promise.resolve();
    }
  }
  open() {
    if (this._on_open) {
      this._on_open();
    }
    return "";
  }
  close() {
    if (this._on_close) {
      this._on_close();
    }
    return "";
  }
  send() {
    return "";
  }
  comm_id: string;
  target_name: string;
  _on_msg: Function = null;
  _on_open: Function = null;
  _on_close: Function = null;
}

// Widget manager for tests: resolves widget classes from @jupyter-widgets/*
// or from the `library` map supplied by the test, and displays views into a
// div attached to the document body.
export class DummyManager extends base.ManagerBase {
  constructor(library: any) {
    super();
    this.el = window.document.createElement("div");
    window.document.body.appendChild(this.el);
    this.library = library;
  }

  // @ts-ignore
  display_view(
    msg: services.KernelMessage.IMessage,
    view: Backbone.View,
    options: any,
  ) {
    // TODO: make this a spy
    // TODO: return an html element
    return Promise.resolve(view).then((view) => {
      this.el.appendChild(view.el);
      view.on("remove", () => console.log("view removed", view));
      (window).last_view = view;
      view.trigger("displayed");
      return view.el;
    });
  }

  protected loadClass(
    className: string,
    moduleName: string,
    moduleVersion: string,
  ): Promise {
    if (moduleName === "@jupyter-widgets/base") {
      if (base[className]) {
        return Promise.resolve(base[className]);
      } else {
        return Promise.reject(`Cannot find class ${className}`);
      }
    } else if (moduleName === "@jupyter-widgets/controls") {
      if (widgets[className]) {
        return Promise.resolve(widgets[className]);
      } else {
        return Promise.reject(`Cannot find class ${className}`);
      }
    } else if (moduleName in this.library) {
      return Promise.resolve(this.library[moduleName][className]);
    } else {
      return Promise.reject(`Cannot find module ${moduleName}`);
    }
  }

  _get_comm_info() {
    return Promise.resolve({});
  }

  _create_comm() {
    return Promise.resolve(new MockComm());
  }

  el: HTMLElement;
  library: any;
}
--------------------------------------------------------------------------------
/js/test/image-recorder.ts:
--------------------------------------------------------------------------------
import * as jupyter_webrtc from "../src";
import { DummyManager } from "./dummy-manager";
import {
  create_image_stream,
  create_model,
  create_model_webrtc,
} from "./widget-utils";

// declare function require(string): string;
// let image_data_src = require("base64-image-loader!./jupyter.jpg");

describe("image recorder >", () => {
  beforeEach(async function () {
    this.manager = new DummyManager({ "jupyter-webrtc": jupyter_webrtc });
  });

  it("create", async function () {
    // let image_data = (new TextEncoder().encode(image_data_src)).buffer;
    // let imageModel = await create_model(this.manager, '@jupyter-widgets/controls', 'ImageModel', 'ImageView', 'im1', {value: {buffer: image_data}, format: 'url'});
    // let imageStreamModel = await create_model_webrtc(this.manager, 'ImageStream', 'is1', {image: 'IPY_MODEL_im1'});
    let image_target = await create_model(
      this.manager,
      "@jupyter-widgets/controls",
      "ImageModel",
      // NOTE(review): the controls package exports "imageView" vs "ImageView"
      // casing matters to DummyManager.loadClass — presumably "ImageView" was
      // intended; harmless here only because no view of this model is created.
      "imageView",
      `image1`,
    );
    let image = await create_image_stream(this.manager, "is1");
    let imageRecorder = await create_model_webrtc(
      this.manager,
      "ImageRecorder",
      "mir1",
      { stream: "IPY_MODEL_is1", image: "IPY_MODEL_image1" },
    );
    let view = await this.manager.create_view(imageRecorder);
    await imageRecorder.snapshot();
    // console.log(imageRecorder.get('data'))
    // let bytes =
    // expect()
    // view.recordButton.click()
    // why does this not compile?
    // expect(mediaImageRecorder.get('data')).to.have.lengthOf(0);
  });
});
--------------------------------------------------------------------------------
/js/test/index.ts:
--------------------------------------------------------------------------------
// import all tests here, otherwise if we include them in karma.conf.js it will all be separate bundles
import "./image-recorder";
import "./mediastream";
--------------------------------------------------------------------------------
/js/test/jupyter.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maartenbreddels/ipywebrtc/568595763af55bc93fdc424be3899242eebbda0c/js/test/jupyter.jpg
--------------------------------------------------------------------------------
/js/test/mediastream.ts:
--------------------------------------------------------------------------------
import * as jupyter_webrtc from "../src";
import { DummyManager } from "./dummy-manager";
import { create_model_webrtc } from "./widget-utils";

describe("VideoStream >", () => {
  beforeEach(async function () {
    this.manager = new DummyManager({ "jupyter-webrtc": jupyter_webrtc });
  });

  // captureStream() on a VideoStream with no media widget must reject.
  it("captureStream no widget", async function () {
    let videoModel = await create_model_webrtc(
      this.manager,
      "VideoStream",
      "video1",
    );
    try {
      await videoModel.captureStream();
      throw new Error("should thrown error");
    } catch (e) {
      if (e.message != "no media widget passed") {
        // NOTE(review): this Error is constructed but never thrown, so a
        // wrong rejection message passes silently — looks like a missing
        // `throw`; confirm and fix.
        new Error("wrong msg: " + e.message);
      }
    }
  });
  // it("captureStream creates 1 view", async function() {
  //     let videoModel: any = await create_video_stream(this.manager, 'video1');
  //     await videoModel.captureStream()
  //     let widget1: any = videoModel.media_wid;
  //     await videoModel.captureStream()
  //     let widget2: any = videoModel.media_wid;
  //     expect(widget1).to.equal(widget2);
  // });
});
--------------------------------------------------------------------------------
/js/test/widget-utils.ts:
--------------------------------------------------------------------------------
// some helper functions to quickly create widgets

// Create a jupyter-webrtc widget model; by convention the model class is
// `${name}Model` and the view class is `name`.
export async function create_model_webrtc(
  manager,
  name: string,
  id: string,
  args = {},
) {
  return create_model(
    manager,
    "jupyter-webrtc",
    `${name}Model`,
    name,
    id,
    args,
  );
}

// Create a widget model of the given module/model/view with a fixed model_id.
export async function create_model(
  manager,
  module: string,
  model: string,
  view: string,
  id: string,
  args = {},
) {
  let model_widget = await manager.new_widget(
    {
      model_module: module,
      model_name: model,
      model_module_version: "*",
      view_module: module,
      view_name: view,
      view_module_version: "*",
      model_id: id,
    },
    args,
  );
  return model_widget;
}

export async function create_view(manager, model, options = {}) {
  let view = await manager.create_view(model, options);
  return view;
}

declare function require(string): string;

// Build an ipywidgets VideoModel from the bundled test video, then wrap it in
// a jupyter-webrtc VideoStream referencing it by IPY_MODEL_ id.
export async function create_video_stream(manager, id: string, options = {}) {
  let video_data: any = require("arraybuffer-loader!../../../docs/source/Big.Buck.Bunny.mp4");
  // let ivideoModel = await create_model(manager, '@jupyter-widgets/controls', 'VideoModel', 'VideoView', id, {value: {buffer: new DataView((new Uint8Array(video_data)).buffer)}, format: 'mp4'});
  // let ivideoModel = await create_model(manager, '@jupyter-widgets/controls', 'VideoModel', 'VideoView', id, {value: new Uint8Array(video_data), format: 'mp4'});
  let videoModel = await create_model(
    manager,
    "@jupyter-widgets/controls",
    "VideoModel",
    "VideoView",
    id,
    { value: new DataView(new Uint8Array(video_data).buffer), format: "mp4" },
  );
  return await create_model_webrtc(manager, "VideoStream", "vs1", {
    video: `IPY_MODEL_${id}`,
  });
}

let image_data: any = require("arraybuffer-loader!../../test/jupyter.jpg");
export { image_data };

// Build an ipywidgets ImageModel from the bundled jpg, then wrap it in a
// jupyter-webrtc ImageStream referencing it by IPY_MODEL_ id.
export async function create_image_stream(manager, id: string, options = {}) {
  let imageModel = await create_model(
    manager,
    "@jupyter-widgets/controls",
    "ImageModel",
    // NOTE(review): "imageView" casing — controls exports "ImageView"; confirm.
    "imageView",
    `im_${id}`,
    { value: new DataView(new Uint8Array(image_data).buffer), format: "png" },
  );
  return await create_model_webrtc(manager, "ImageStream", id, {
    image: `IPY_MODEL_im_${id}`,
  });
}

/*
export
async function create_widget(manager, name: string, id: string, args: Object) {
    let model = await create_model_bqplot(manager, name, id, args)
    let view = await manager.create_view(model);
    await manager.display_view(undefined, view);
    return {model: model, view:view};
}

export
async function create_figure_scatter(manager, x, y) {
    let layout = await create_model(manager, '@jupyter-widgets/base', 'LayoutModel', 'LayoutView', 'layout_figure1', {_dom_classes: '', width: '400px', height: '500px'})
    let scale_x = await create_model_bqplot(manager, 'LinearScale', 'scale_x', {min:0, max:1, allow_padding: false})
    let scale_y = await create_model_bqplot(manager, 'LinearScale', 'scale_y', {min:2, max:3, allow_padding: false})
    let scales = {x: 'IPY_MODEL_scale_x', y: 'IPY_MODEL_scale_y'}
    let color = null;
    let size = {type: null, values: null};
    let opacity = {type: null, values: null};
    let rotation = {type: null, values: null};
    let skew = {type: null, values: null};

    let scatterModel = await create_model_bqplot(manager, 'Scatter', 'scatter1', {scales: scales,
        x: x, y: y, color: color, size: size, opacity: opacity, rotation: rotation, skew: skew,
        visible: true, default_size: 64,
        preserve_domain: {}, _view_module_version: '*', _view_module: 'bqplot'})
    let figureModel;
    try {
        figureModel = await create_model_bqplot(manager, 'Figure', 'figure1', {scale_x: scales['x'], scale_y: scales['y'],
            layout: 'IPY_MODEL_layout_figure1', _dom_classes: [],
            figure_padding_y: 0, fig_margin: {bottom: 0, left: 0, right: 0, top: 0},
            marks: ['IPY_MODEL_scatter1']})
    } catch(e) {
        console.error('error', e)
    }
    let figure = await create_view(manager, figureModel);
    await manager.display_view(undefined, figure);
    return {figure: figure, scatter: await figure.mark_views.views[0]}
}*/
--------------------------------------------------------------------------------
/js/tsconfig.json:
--------------------------------------------------------------------------------
{
  "compilerOptions": {
    // "noImplicitAny": true,
    "lib": ["dom", "es5", "es2015"],
    "noEmitOnError": true,
    // "strictNullChecks": true,
    "module": "commonjs",
    "moduleResolution": "node",
    "target": "es2015",
    "outDir": "test_js",
    "skipLibCheck": true,
    "sourceMap": true,
    "allowJs": true
  },
  "include": ["./test/*"],
  "exclude": ["node_modules", "**/*.spec.ts"]
}
--------------------------------------------------------------------------------
/js/tslint.json:
-------------------------------------------------------------------------------- 1 | { 2 | "jsRules": { 3 | "class-name": true, 4 | "comment-format": [false, "check-space"], 5 | "indent": [true, "spaces"], 6 | "no-duplicate-variable": true, 7 | "no-eval": true, 8 | "no-trailing-whitespace": true, 9 | "no-unsafe-finally": true, 10 | "no-var-keyword": true, 11 | "one-line": [true, "check-open-brace", "check-whitespace"], 12 | "quotemark": [true, "single"], 13 | "semicolon": [true, "always"], 14 | "triple-equals": [true, "allow-null-check"], 15 | "variable-name": [true, "ban-keywords"], 16 | "whitespace": [ 17 | true, 18 | "check-branch", 19 | "check-decl", 20 | "check-operator", 21 | "check-separator", 22 | "check-type" 23 | ] 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /js/webpack.config.js: -------------------------------------------------------------------------------- 1 | var version = require("./package.json").version; 2 | var webpack = require("webpack"); 3 | var crypto = require("crypto"); 4 | 5 | // // Custom webpack loaders are generally the same for all webpack bundles, hence 6 | // // stored in a separate local variable. 7 | // var loaders = [ 8 | // { test: /\.json$/, loader: 'json-loader' }, 9 | // ]; 10 | 11 | const path = require("path"); 12 | 13 | // Workaround for loaders using "md4" by default, which is not supported in FIPS-compliant OpenSSL 14 | var cryptoOrigCreateHash = crypto.createHash; 15 | crypto.createHash = (algorithm) => 16 | cryptoOrigCreateHash(algorithm == "md4" ? 
"sha256" : algorithm); 17 | 18 | var rules = [ 19 | // { test: /\.json$/, use: "json-loader" }, 20 | ]; 21 | var externals = [ 22 | "@jupyter-widgets/base", 23 | "@jupyter-widgets/controls", 24 | "jupyter-js-widgets", 25 | ]; 26 | var plugin_name = "jupyter-webrtc"; 27 | 28 | var resolve = { 29 | extensions: [".js"], 30 | fallback: { 31 | util: require.resolve("util/"), 32 | url: require.resolve("url/"), 33 | process: require.resolve("process/"), 34 | }, 35 | }; 36 | 37 | var plugins = [ 38 | new webpack.ProvidePlugin({ 39 | process: "process/browser", 40 | Buffer: ["buffer", "Buffer"], 41 | }), 42 | ]; 43 | 44 | module.exports = [ 45 | { 46 | entry: "./src/extension.js", 47 | output: { 48 | filename: "extension.js", 49 | path: path.resolve( 50 | __dirname, 51 | `../share/jupyter/nbextensions/${plugin_name}`, 52 | ), 53 | libraryTarget: "amd", 54 | publicPath: "", 55 | }, 56 | mode: "development", 57 | resolve: resolve, 58 | plugins: plugins, 59 | }, 60 | { 61 | entry: "./src/index.js", 62 | output: { 63 | filename: "index.js", 64 | path: path.resolve( 65 | __dirname, 66 | `../share/jupyter/nbextensions/${plugin_name}`, 67 | ), 68 | libraryTarget: "amd", 69 | publicPath: "", 70 | }, 71 | devtool: "source-map", 72 | module: { 73 | rules: rules, 74 | }, 75 | externals: externals, 76 | mode: "development", 77 | resolve: resolve, 78 | plugins: plugins, 79 | }, 80 | { 81 | // Embeddable jupyter-webrtc bundle 82 | entry: "./src/embed.js", 83 | output: { 84 | filename: "index.js", 85 | path: path.resolve(__dirname, "./dist/"), 86 | libraryTarget: "amd", 87 | publicPath: "https://unpkg.com/jupyter-webrtc@" + version + "/dist/", 88 | }, 89 | devtool: "source-map", 90 | module: { 91 | rules: rules, 92 | }, 93 | externals: externals, 94 | mode: "development", 95 | resolve: resolve, 96 | plugins: plugins, 97 | }, 98 | ]; 99 | -------------------------------------------------------------------------------- /js/webpack.config.lab3.js: 
-------------------------------------------------------------------------------- 1 | var path = require("path"); 2 | var webpack = require("webpack"); 3 | const crypto = require("crypto"); 4 | 5 | var plugins = [ 6 | new webpack.ProvidePlugin({ 7 | Buffer: ["buffer", "Buffer"], 8 | }), 9 | ]; 10 | 11 | module.exports = { 12 | plugins: plugins, 13 | }; 14 | 15 | // Workaround for loaders using "md4" by default, which is not supported in FIPS-compliant OpenSSL 16 | const cryptoOrigCreateHash = crypto.createHash; 17 | crypto.createHash = (algorithm) => 18 | cryptoOrigCreateHash(algorithm == "md4" ? "sha256" : algorithm); 19 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["jupyter_packaging~=0.7.9", "jupyterlab~=3.0", "setuptools>=40.8.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.ruff] 6 | fix = true 7 | exclude = [ 8 | '.git', 9 | 'dist', 10 | '.eggs', 11 | '.releash.py' 12 | ] 13 | line-length = 100 14 | select = ["E", "W", "F", "Q", "I"] 15 | extend-include = ["*.ipynb"] 16 | 17 | [tool.ruff.per-file-ignores] 18 | "__init__.py" = ["F401", "E501"] 19 | "webrtc.py" = ["E501"] 20 | "docs/source/conf.py" = ["E402"] -------------------------------------------------------------------------------- /readthedocs.yml: -------------------------------------------------------------------------------- 1 | conda: 2 | file: docs/environment.yml 3 | python: 4 | version: 3 5 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 3 | 4 | [metadata] 5 | license_file = LICENSE 6 | -------------------------------------------------------------------------------- /setup.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from os.path import join as pjoin 5 | 6 | from jupyter_packaging import ( 7 | combine_commands, 8 | create_cmdclass, 9 | ensure_targets, 10 | get_version, 11 | install_npm, 12 | skip_if_exists, 13 | ) 14 | from setuptools import find_packages, setup 15 | 16 | name = "ipywebrtc" 17 | here = os.path.dirname(os.path.abspath(__file__)) 18 | node_root = os.path.join(here, "js") 19 | is_repo = os.path.exists(os.path.join(here, ".git")) 20 | 21 | npm_path = os.pathsep.join( 22 | [ 23 | os.path.join(node_root, "node_modules", ".bin"), 24 | os.environ.get("PATH", os.defpath), 25 | ] 26 | ) 27 | 28 | 29 | LONG_DESCRIPTION = "WebRTC for Jupyter notebook/lab" 30 | version = get_version(pjoin(name, "_version.py")) 31 | 32 | js_dir = pjoin(here, "js") 33 | 34 | # Representative files that should exist after a successful build 35 | jstargets = [ 36 | pjoin("share", "jupyter", "nbextensions", "jupyter-webrtc", "index.js"), 37 | pjoin("share", "jupyter", "labextensions", "jupyter-webrtc", "package.json"), 38 | ] 39 | 40 | data_files_spec = [ 41 | ( 42 | "share/jupyter/nbextensions/jupyter-webrtc", 43 | "share/jupyter/nbextensions/jupyter-webrtc", 44 | "*.js", 45 | ), 46 | ( 47 | "share/jupyter/labextensions/jupyter-webrtc/", 48 | "share/jupyter/labextensions/jupyter-webrtc/", 49 | "**", 50 | ), 51 | ( 52 | "etc/jupyter/nbconfig/notebook.d", 53 | "etc/jupyter/nbconfig/notebook.d", 54 | "jupyter-webrtc.json", 55 | ), 56 | ] 57 | 58 | js_command = combine_commands( 59 | install_npm(js_dir, build_dir="share/jupyter/", source_dir="js/src", build_cmd="build"), 60 | ensure_targets(jstargets), 61 | ) 62 | 63 | cmdclass = create_cmdclass("jsdeps", data_files_spec=data_files_spec) 64 | is_repo = os.path.exists(os.path.join(here, ".git")) 65 | if is_repo: 66 | cmdclass["jsdeps"] = js_command 67 | else: 68 | cmdclass["jsdeps"] = 
skip_if_exists(jstargets, js_command) 69 | 70 | setup( 71 | name="ipywebrtc", 72 | version=version, 73 | description="WebRTC for Jupyter notebook/lab", 74 | long_description=LONG_DESCRIPTION, 75 | license="MIT", 76 | include_package_data=True, 77 | install_require=[ 78 | "ipywidgets>=7.4.0", 79 | ], 80 | extras_require={"dev": ["pre-commit"]}, 81 | packages=find_packages(), 82 | zip_safe=False, 83 | cmdclass=cmdclass, 84 | author="Maarten Breddels", 85 | author_email="maartenbreddels@gmail.com", 86 | url="https://github.com/maartenbreddels/ipywebrtc", 87 | keywords=[ 88 | "ipython", 89 | "jupyter", 90 | "widgets", 91 | ], 92 | classifiers=[ 93 | "Development Status :: 4 - Beta", 94 | "Framework :: IPython", 95 | "Intended Audience :: Developers", 96 | "Intended Audience :: Science/Research", 97 | "Topic :: Multimedia :: Graphics", 98 | "Programming Language :: Python :: 2", 99 | "Programming Language :: Python :: 2.7", 100 | "Programming Language :: Python :: 3", 101 | "Programming Language :: Python :: 3.3", 102 | "Programming Language :: Python :: 3.4", 103 | "Programming Language :: Python :: 3.5", 104 | ], 105 | ) 106 | --------------------------------------------------------------------------------