├── .flake8
├── .github
│   ├── dependabot.yml
│   ├── release.yml
│   └── workflows
│       └── main.yml
├── .gitignore
├── .nvmrc
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── DEVELOPMENT.md
├── LICENSE
├── Makefile
├── README.md
├── app_deepspeech.py
├── app_videochat.py
├── docs
│   └── images
│       ├── streamlit_webrtc_basic.gif
│       └── streamlit_webrtc_flipped.gif
├── home.py
├── mypy.ini
├── pages
│   ├── 10_sendonly_audio.py
│   ├── 11_programatic_control_playing.py
│   ├── 12_media_constraints_configs.py
│   ├── 13_ui_texts_customization.py
│   ├── 14_programmable_source.py
│   ├── 1_object_detection.py
│   ├── 2_opencv_filters.py
│   ├── 3_audio_filter.py
│   ├── 4_delayed_echo.py
│   ├── 5_fork_multi_outputs.py
│   ├── 6_mix_multi_inputs.py
│   ├── 7_record.py
│   ├── 8_media_files_streaming.py
│   ├── 9_sendonly_video.py
│   └── pyproject.toml
├── pyproject.toml
├── sample_utils
│   ├── __init__.py
│   └── download.py
├── scripts
│   └── release_check.py
├── streamlit_webrtc
│   ├── __init__.py
│   ├── _compat.py
│   ├── component.py
│   ├── components_callbacks.py
│   ├── config.py
│   ├── credentials.py
│   ├── eventloop.py
│   ├── factory.py
│   ├── frontend
│   │   ├── .gitignore
│   │   ├── .npmrc
│   │   ├── .prettierignore
│   │   ├── .prettierrc.json
│   │   ├── eslint.config.js
│   │   ├── index.html
│   │   ├── package.json
│   │   ├── pnpm-lock.yaml
│   │   ├── src
│   │   │   ├── DeviceSelect
│   │   │   │   ├── DeviceSelect.stories.tsx
│   │   │   │   ├── DeviceSelect.tsx
│   │   │   │   ├── DeviceSelectForm.tsx
│   │   │   │   ├── VideoPreview.tsx
│   │   │   │   ├── components
│   │   │   │   │   ├── Defer.stories.tsx
│   │   │   │   │   ├── Defer.tsx
│   │   │   │   │   ├── DeviceSelectContainer.tsx
│   │   │   │   │   ├── VideoPreview.tsx
│   │   │   │   │   ├── VideoPreviewContainer.tsx
│   │   │   │   │   ├── VoidVideoPreview.stories.tsx
│   │   │   │   │   ├── VoidVideoPreview.tsx
│   │   │   │   │   └── messages
│   │   │   │   │       ├── AccessDeniedMessage.stories.tsx
│   │   │   │   │       ├── AccessDeniedMessage.tsx
│   │   │   │   │       ├── AskPermissionMessage.stories.tsx
│   │   │   │   │       ├── AskPermissionMessage.tsx
│   │   │   │   │       ├── DeviceNotAvailableMessage.stories.tsx
│   │   │   │   │       ├── DeviceNotAvailableMessage.tsx
│   │   │   │   │       ├── MediaApiNotAvailableMessage.stories.tsx
│   │   │   │   │       ├── MediaApiNotAvailableMessage.tsx
│   │   │   │   │       ├── Message.stories.tsx
│   │   │   │   │       └── Message.tsx
│   │   │   │   └── utils.ts
│   │   │   ├── InfoHeader.stories.tsx
│   │   │   ├── InfoHeader.tsx
│   │   │   ├── MediaStreamPlayer.tsx
│   │   │   ├── Placeholder.test.tsx
│   │   │   ├── Placeholder.tsx
│   │   │   ├── ThemeProvider.tsx
│   │   │   ├── WebRtcStreamer.tsx
│   │   │   ├── component-value.ts
│   │   │   ├── index.tsx
│   │   │   ├── media-constraint.ts
│   │   │   ├── media-constraints.test.ts
│   │   │   ├── react-app-env.d.ts
│   │   │   ├── translation
│   │   │   │   ├── TranslationProvider.tsx
│   │   │   │   ├── components
│   │   │   │   │   └── TranslatedButton.tsx
│   │   │   │   ├── types.ts
│   │   │   │   └── useTranslation.ts
│   │   │   ├── use-timeout.ts
│   │   │   └── webrtc
│   │   │       ├── actions.ts
│   │   │       ├── index.ts
│   │   │       ├── reducer.ts
│   │   │       └── use-unique-id.ts
│   │   ├── tsconfig.app.json
│   │   ├── tsconfig.json
│   │   ├── tsconfig.node.json
│   │   └── vite.config.ts
│   ├── mix.py
│   ├── models.py
│   ├── process.py
│   ├── py.typed
│   ├── receive.py
│   ├── relay.py
│   ├── server.py
│   ├── session_info.py
│   ├── shutdown.py
│   ├── source.py
│   └── webrtc.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── import_test.py
│   └── session_info_test.py
└── uv.lock

/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 |     .git,
4 |     .venv,
5 |     __pycache__,
6 |     build,
7 |     dist,
8 |     frontend,
9 |     data,
10 |     models
11 | # Black compatible configs
12 | max-line-length = 88
13 | extend-ignore = E203
14 | 
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |
- package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | ignore: 8 | - dependency-name: "cryptography" 9 | versions: ["39.0.0"] # https://github.com/whitphx/streamlit-webrtc/issues/1164 10 | open-pull-requests-limit: 10 11 | - package-ecosystem: npm 12 | directory: "/streamlit_webrtc/frontend" 13 | schedule: 14 | interval: daily 15 | open-pull-requests-limit: 10 16 | - package-ecosystem: github-actions 17 | directory: "/" 18 | schedule: 19 | interval: "daily" 20 | -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1 | changelog: 2 | categories: 3 | - title: 🏕 Updates 4 | labels: 5 | - "*" 6 | exclude: 7 | labels: 8 | - dependencies 9 | - title: 👒 Dependencies 10 | labels: 11 | - dependencies 12 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Test, Build, and Publish 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - develop/** 8 | tags: 9 | - 'v*' 10 | pull_request: 11 | branches: 12 | - main 13 | - develop/** 14 | 15 | permissions: {} 16 | 17 | jobs: 18 | test-python: 19 | runs-on: ubuntu-latest 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] 24 | streamlit-version: [null] 25 | include: 26 | # Test with streamlit <1.4.0 and >=1.4.0. See https://github.com/whitphx/streamlit-webrtc/issues/598 27 | - python-version: 3.9 28 | streamlit-version: 1.0.0 29 | - python-version: 3.9 30 | streamlit-version: 1.4.0 31 | # Test with streamlit >=1.6.0. See https://github.com/whitphx/streamlit-webrtc/issues/709 32 | - python-version: 3.9 33 | streamlit-version: 1.6.0 34 | # Test with streamlit >=1.8.0. See https://github.com/whitphx/streamlit-webrtc/issues/759 35 | - python-version: 3.9 36 | streamlit-version: 1.8.0 37 | # Test with streamlit >=1.12.0. See https://github.com/whitphx/streamlit-webrtc/issues/1004 38 | - python-version: 3.9 39 | streamlit-version: 1.12.0 40 | # Test with streamlit >=1.12.1. See https://github.com/whitphx/streamlit-webrtc/issues/1024 41 | - python-version: 3.9 42 | streamlit-version: 1.12.1 43 | # Test with streamlit >=1.14.0. See https://github.com/whitphx/streamlit-webrtc/pull/1189 44 | - python-version: 3.9 45 | streamlit-version: 1.14.0 46 | # Test with streamlit >=1.18.0. See https://github.com/whitphx/streamlit-webrtc/issues/1187 47 | - python-version: 3.9 48 | streamlit-version: 1.18.0 49 | # Test with streamlit >=1.27.0. See https://github.com/whitphx/streamlit-webrtc/pull/1393 50 | - python-version: 3.9 51 | streamlit-version: 1.27.0 52 | # Test with streamlit >=1.34.0. 
See https://github.com/whitphx/streamlit-webrtc/pull/1627 53 | - python-version: 3.9 54 | streamlit-version: 1.34.0 55 | 56 | steps: 57 | - uses: actions/checkout@v4 58 | with: 59 | persist-credentials: false 60 | 61 | - name: Install uv 62 | uses: astral-sh/setup-uv@v5 63 | 64 | - name: Set up Python ${{ matrix.python-version }} 65 | uses: actions/setup-python@v5 66 | with: 67 | python-version: ${{ matrix.python-version }} 68 | 69 | - name: Install dependencies 70 | run: uv sync --all-extras --dev 71 | 72 | - name: Install a specific version of Streamlit 73 | if: ${{ matrix.streamlit-version }} 74 | run: uv add --dev streamlit=="${STREAMLIT_VERSION}" setuptools 75 | env: 76 | STREAMLIT_VERSION: ${{ matrix.streamlit-version }} 77 | 78 | - name: Run Ruff 79 | run: | 80 | uv run ruff check --output-format=github . 81 | uv run ruff format . --check 82 | 83 | - name: Type checking with mypy 84 | run: uv run mypy . 85 | 86 | - name: Release check 87 | run: uv run scripts/release_check.py streamlit_webrtc/component.py 88 | 89 | # The frontend build directory is necessary to import the package 90 | # avoiding an error from `components.declare_component`. 91 | - name: Create a mock frontend build directory 92 | run: mkdir streamlit_webrtc/frontend/dist 93 | 94 | - name: Test with pytest 95 | run: uv run pytest 96 | 97 | test-frontend: 98 | runs-on: ubuntu-latest 99 | 100 | defaults: 101 | run: 102 | working-directory: streamlit_webrtc/frontend 103 | 104 | steps: 105 | - uses: actions/checkout@v4 106 | with: 107 | persist-credentials: false 108 | 109 | - uses: pnpm/action-setup@v4 110 | name: Install pnpm 111 | with: 112 | version: 10 113 | run_install: false 114 | 115 | - name: Install Node.js 116 | uses: actions/setup-node@v4 117 | with: 118 | cache: 'pnpm' 119 | node-version-file: .nvmrc 120 | cache-dependency-path: streamlit_webrtc/frontend/pnpm-lock.yaml 121 | 122 | - name: Install dependencies 123 | run: pnpm install 124 | 125 | - run: pnpm run lint 126 | 127 | - run: pnpm run build 128 | 129 | - run: pnpm test 130 | 131 | build: 132 | needs: [test-python, test-frontend] 133 | 134 | runs-on: ubuntu-latest 135 | 136 | steps: 137 | - uses: actions/checkout@v4 138 | with: 139 | persist-credentials: false 140 | fetch-depth: 0 # Fetch all history for hatch-vcs to get the correct version 141 | 142 | - name: Install uv 143 | uses: astral-sh/setup-uv@v5 144 | 145 | - name: Set up Python 146 | uses: actions/setup-python@v5 147 | with: 148 | python-version-file: "pyproject.toml" 149 | 150 | - name: Install Python dependencies 151 | run: uv sync --all-extras --dev 152 | 153 | # Set up frontend dependencies 154 | - uses: pnpm/action-setup@v4 155 | name: Install pnpm 156 | with: 157 | version: 10 158 | run_install: false 159 | 160 | - name: Install Node.js 161 | uses: actions/setup-node@v4 162 | with: 163 | cache: 'pnpm' 164 | node-version-file: .nvmrc 165 | cache-dependency-path: streamlit_webrtc/frontend/pnpm-lock.yaml 166 | 167 | - name: Install dependencies 168 | run: pnpm install 169 | working-directory: streamlit_webrtc/frontend 170 | 171 | - name: Build 172 | run: make build 173 | 174 | - name: Upload the built artifacts 175 | uses: actions/upload-artifact@v4 176 | with: 177 | name: streamlit-webrtc-${{ startsWith(github.ref, 'refs/tags/v') && github.ref_name || github.sha }} 178 | path: dist 179 | 180 | deploy-preview-wheel: 181 | runs-on: ubuntu-latest 182 | needs: [build] 183 | if: github.event_name == 'pull_request' 184 | permissions: 185 | pull-requests: write 186 | name: Deploy wheel file to 
Cloudflare Pages 187 | outputs: 188 | url: ${{ steps.deploy.outputs.deployment-url }} 189 | steps: 190 | - run: mkdir -p ${{ runner.temp }}/artifacts/ 191 | 192 | - name: Download all the dists 193 | uses: actions/download-artifact@v4 194 | with: 195 | name: streamlit-webrtc-${{ startsWith(github.ref, 'refs/tags/v') && github.ref_name || github.sha }} 196 | path: ${{ runner.temp }}/artifacts/streamlit-webrtc 197 | 198 | - name: Deploy 199 | uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65 200 | id: deploy 201 | with: 202 | apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} 203 | accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} 204 | command: >- 205 | pages deploy ${{ runner.temp }}/artifacts/streamlit-webrtc --project-name=streamlit-webrtc-preview --branch=${{ github.head_ref || github.ref_name }} --commit-hash=${{ github.sha }} 206 | 207 | - name: Comment on the PR to inform the deployment file URLs 208 | uses: actions/github-script@v7 209 | with: 210 | github-token: ${{ secrets.GITHUB_TOKEN }} 211 | script: | 212 | const fs = require('fs'); 213 | const path = require('path'); 214 | const deploymentUrl = '${{ steps.deploy.outputs.deployment-url }}'; 215 | 216 | // Get all files in the artifacts directory 217 | const artifactsDir = '${{ runner.temp }}/artifacts/streamlit-webrtc'; 218 | const allFiles = fs.readdirSync(artifactsDir); 219 | // Filter to include only wheel (.whl) and source distribution (.tar.gz) files 220 | const files = allFiles.filter(file => file.endsWith('.whl') || file.endsWith('.tar.gz')); 221 | 222 | // Create message with links to each file 223 | let fileLinks = files.map(file => { 224 | const installCommand = `pip install ${deploymentUrl}/${file}`; 225 | return `- [${file}](${deploymentUrl}/${file})\n \`\`\`bash\n ${installCommand}\n \`\`\``; 226 | }).join('\n'); 227 | 228 | const message = `📦 Wheel files have been deployed to Cloudflare Pages: 229 | 230 | ${fileLinks}`; 231 | 232 | github.rest.issues.createComment({ 233 | issue_number: context.issue.number, 234 | owner: context.repo.owner, 235 | repo: context.repo.repo, 236 | body: message 237 | }); 238 | 239 | publish-to-pypi: 240 | name: Publish Python 🐍 distribution 📦 to PyPI 241 | if: startsWith(github.ref, 'refs/tags/v') 242 | needs: [build] 243 | runs-on: ubuntu-latest 244 | environment: 245 | name: pypi 246 | url: https://pypi.org/p/streamlit-webrtc 247 | permissions: 248 | id-token: write # IMPORTANT: mandatory for trusted publishing 249 | 250 | steps: 251 | - name: Download all the dists 252 | uses: actions/download-artifact@v4 253 | with: 254 | name: streamlit-webrtc-${{ github.ref_name }} 255 | path: dist/ 256 | - name: Publish distribution 📦 to PyPI 257 | uses: pypa/gh-action-pypi-publish@release/v1 258 | 259 | github-release: 260 | name: >- 261 | Sign the Python 🐍 distribution 📦 with Sigstore 262 | and upload them to GitHub Release 263 | needs: 264 | - publish-to-pypi 265 | runs-on: ubuntu-latest 266 | 267 | permissions: 268 | contents: write # IMPORTANT: mandatory for making GitHub Releases 269 | id-token: write # IMPORTANT: mandatory for sigstore 270 | 271 | steps: 272 | - name: Download all the dists 273 | uses: actions/download-artifact@v4 274 | with: 275 | name: streamlit-webrtc-${{ github.ref_name }} 276 | path: dist/ 277 | - name: Sign the dists with Sigstore 278 | uses: sigstore/gh-action-sigstore-python@v3.0.0 279 | with: 280 | inputs: >- 281 | ./dist/*.tar.gz 282 | ./dist/*.whl 283 | - name: Create GitHub Release 284 | env: 285 | GITHUB_TOKEN: ${{ github.token }} 286 | run: 
>- 287 | gh release create 288 | "$GITHUB_REF_NAME" 289 | --repo "$GITHUB_REPOSITORY" 290 | --notes "" 291 | - name: Upload artifact signatures to GitHub Release 292 | env: 293 | GITHUB_TOKEN: ${{ github.token }} 294 | # Upload to GitHub Release using the `gh` CLI. 295 | # `dist/` contains the built packages, and the 296 | # sigstore-produced signatures and certificates. 297 | run: >- 298 | gh release upload 299 | "$GITHUB_REF_NAME" dist/** 300 | --repo "$GITHUB_REPOSITORY" 301 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### https://raw.github.com/github/gitignore/218a941be92679ce67d0484547e3e142b2f5f6f0/Python.gitignore 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 100 | __pypackages__/ 101 | 102 | # Celery stuff 103 | celerybeat-schedule 104 | celerybeat.pid 105 | 106 | # SageMath parsed files 107 | *.sage.py 108 | 109 | # Environments 110 | #.env # Not ignore this file because it contains Streamlit specific global settings. 
111 | .venv
112 | env/
113 | venv/
114 | ENV/
115 | env.bak/
116 | venv.bak/
117 | 
118 | # Spyder project settings
119 | .spyderproject
120 | .spyproject
121 | 
122 | # Rope project settings
123 | .ropeproject
124 | 
125 | # mkdocs documentation
126 | /site
127 | 
128 | # mypy
129 | .mypy_cache/
130 | .dmypy.json
131 | dmypy.json
132 | 
133 | # Pyre type checker
134 | .pyre/
135 | 
136 | # pytype static type analyzer
137 | .pytype/
138 | 
139 | # Cython debug symbols
140 | cython_debug/
141 | 
142 | # Streamlit secrets
143 | .streamlit/secrets.toml
144 | 
145 | # Demo data
146 | models/
147 | data/
148 | *.flv
149 | 
--------------------------------------------------------------------------------
/.nvmrc:
--------------------------------------------------------------------------------
1 | lts/*
2 | 
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 |   - repo: https://github.com/astral-sh/ruff-pre-commit
3 |     rev: v0.9.10
4 |     hooks:
5 |       - id: ruff
6 |         args: [ --fix ]
7 |       - id: ruff-format
8 |   - repo: https://github.com/pre-commit/mirrors-mypy
9 |     rev: v1.11.2
10 |     hooks:
11 |       - id: mypy
12 | 
--------------------------------------------------------------------------------
/DEVELOPMENT.md:
--------------------------------------------------------------------------------
1 | # Development of `streamlit-webrtc`
2 | 
3 | ## Set up
4 | * Install `uv`
5 | * Install dependencies
6 |   ```shell
7 |   $ uv sync
8 |   ```
9 | * Install pre-commit
10 |   ```shell
11 |   $ pre-commit install
12 |   ```
13 | 
14 | ## Development
15 | * Edit `streamlit_webrtc/component.py` to set `_RELEASE = False` so that the frontend view is served from the development server started below, instead of from the production build.
16 |   * Do not commit this change. This setting is only for development.
17 |   * If `_RELEASE = False` is set, the build command described in the next section fails. See the `build` rule in `Makefile` and `release_check.py` for the details.
18 | * Run the frontend dev server
19 |   ```shell
20 |   $ cd streamlit_webrtc/frontend
21 |   $ pnpm dev
22 |   ```
23 | * In another shell, run `home.py`
24 |   ```shell
25 |   $ streamlit run home.py
26 |   ```
27 | 
28 | ## Release
29 | 1. Edit `CHANGELOG.md` and commit it.
30 | 2. Set the next version with the following command, which creates a new Git tag representing this release.
31 |    ```
32 |    $ uv run bump-my-version bump <version> --tag --commit --commit-args='--allow-empty' --verbose
33 |    ```
34 |    NOTE: `patch`, `minor`, or `major` can be used as `<version>`.
35 | 3. Push the commit with the tag to GitHub. After pushing the tag, CI/CD automatically deploys the release.
36 |    ```
37 |    $ git push
38 |    $ git push --tags
39 |    ```
40 | 
41 | ## Build
42 | The following command is run to build the package during the automated release process in CI/CD described above.
43 | When you want to run the build locally for development or testing, you can use this command directly.
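Under the hood, the `build` rule runs `scripts/release_check.py` (which fails the build if `_RELEASE = False` is left set), builds the frontend with pnpm, and then packages the distribution with `uv build`; see the `Makefile`.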
44 | ``` 45 | $ make build 46 | ``` 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Yuichiro Tachibana (Tsuchiya) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | python scripts/release_check.py streamlit_webrtc/component.py 3 | cd streamlit_webrtc/frontend && pnpm run build 4 | uv build 5 | 6 | format/backend: 7 | uv run ruff format . 8 | uv run ruff check . --fix 9 | 10 | format/frontend: 11 | cd streamlit_webrtc/frontend && pnpm format 12 | 13 | format: 14 | $(MAKE) format/backend 15 | $(MAKE) format/frontend 16 | 17 | release/patch: 18 | $(MAKE) version=patch release 19 | 20 | release/minor: 21 | $(MAKE) version=minor release 22 | 23 | release/major: 24 | $(MAKE) version=major release 25 | 26 | release: 27 | uv run bump-my-version bump $(version) --tag --commit --commit-args='--allow-empty' --verbose 28 | git push 29 | git push --tags 30 | -------------------------------------------------------------------------------- /app_deepspeech.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import logging.handlers 3 | import queue 4 | import threading 5 | import time 6 | import urllib.request 7 | from collections import deque 8 | from pathlib import Path 9 | from typing import List 10 | 11 | import av 12 | import numpy as np 13 | import pydub 14 | import streamlit as st 15 | 16 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 17 | 18 | HERE = Path(__file__).parent 19 | 20 | logger = logging.getLogger(__name__) 21 | 22 | 23 | # This code is based on https://github.com/streamlit/demo-self-driving/blob/230245391f2dda0cb464008195a470751c01770b/streamlit_app.py#L48 # noqa: E501 24 | def download_file(url, download_to: Path, expected_size=None): 25 | # Don't download the file twice. 26 | # (If possible, verify the download using the file length.) 
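# NOTE: When `expected_size` is given, a size mismatch falls through to a fresh
# download; without it, the user is asked before downloading again.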
27 | if download_to.exists(): 28 | if expected_size: 29 | if download_to.stat().st_size == expected_size: 30 | return 31 | else: 32 | st.info(f"{url} is already downloaded.") 33 | if not st.button("Download again?"): 34 | return 35 | 36 | download_to.parent.mkdir(parents=True, exist_ok=True) 37 | 38 | # These are handles to two visual elements to animate. 39 | weights_warning, progress_bar = None, None 40 | try: 41 | weights_warning = st.warning("Downloading %s..." % url) 42 | progress_bar = st.progress(0) 43 | with open(download_to, "wb") as output_file: 44 | with urllib.request.urlopen(url) as response: 45 | length = int(response.info()["Content-Length"]) 46 | counter = 0.0 47 | MEGABYTES = 2.0**20.0 48 | while True: 49 | data = response.read(8192) 50 | if not data: 51 | break 52 | counter += len(data) 53 | output_file.write(data) 54 | 55 | # We perform animation by overwriting the elements. 56 | weights_warning.warning( 57 | "Downloading %s... (%6.2f/%6.2f MB)" 58 | % (url, counter / MEGABYTES, length / MEGABYTES) 59 | ) 60 | progress_bar.progress(min(counter / length, 1.0)) 61 | # Finally, we remove these visual elements by calling .empty(). 62 | finally: 63 | if weights_warning is not None: 64 | weights_warning.empty() 65 | if progress_bar is not None: 66 | progress_bar.empty() 67 | 68 | 69 | def main(): 70 | st.header("Real Time Speech-to-Text") 71 | st.markdown( 72 | """ 73 | This demo app is using [DeepSpeech](https://github.com/mozilla/DeepSpeech), 74 | an open speech-to-text engine. 75 | 76 | A pre-trained model released with 77 | [v0.9.3](https://github.com/mozilla/DeepSpeech/releases/tag/v0.9.3), 78 | trained on American English is being served. 79 | """ 80 | ) 81 | 82 | # https://github.com/mozilla/DeepSpeech/releases/tag/v0.9.3 83 | MODEL_URL = "https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm" # noqa 84 | LANG_MODEL_URL = "https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer" # noqa 85 | MODEL_LOCAL_PATH = HERE / "models/deepspeech-0.9.3-models.pbmm" 86 | LANG_MODEL_LOCAL_PATH = HERE / "models/deepspeech-0.9.3-models.scorer" 87 | 88 | download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=188915987) 89 | download_file(LANG_MODEL_URL, LANG_MODEL_LOCAL_PATH, expected_size=953363776) 90 | 91 | lm_alpha = 0.931289039105002 92 | lm_beta = 1.1834137581510284 93 | beam = 100 94 | 95 | sound_only_page = "Sound only (sendonly)" 96 | with_video_page = "With video (sendrecv)" 97 | app_mode = st.selectbox("Choose the app mode", [sound_only_page, with_video_page]) 98 | 99 | if app_mode == sound_only_page: 100 | app_sst( 101 | str(MODEL_LOCAL_PATH), str(LANG_MODEL_LOCAL_PATH), lm_alpha, lm_beta, beam 102 | ) 103 | elif app_mode == with_video_page: 104 | app_sst_with_video( 105 | str(MODEL_LOCAL_PATH), str(LANG_MODEL_LOCAL_PATH), lm_alpha, lm_beta, beam 106 | ) 107 | 108 | 109 | def app_sst(model_path: str, lm_path: str, lm_alpha: float, lm_beta: float, beam: int): 110 | webrtc_ctx = webrtc_streamer( 111 | key="speech-to-text", 112 | mode=WebRtcMode.SENDONLY, 113 | audio_receiver_size=1024, 114 | media_stream_constraints={"video": False, "audio": True}, 115 | ) 116 | 117 | status_indicator = st.empty() 118 | 119 | if not webrtc_ctx.state.playing: 120 | return 121 | 122 | status_indicator.write("Loading...") 123 | text_output = st.empty() 124 | stream = None 125 | 126 | while True: 127 | if webrtc_ctx.audio_receiver: 128 | if stream is None: 129 | from deepspeech import Model 130 | 131 | model = 
Model(model_path)
132 |                 model.enableExternalScorer(lm_path)
133 |                 model.setScorerAlphaBeta(lm_alpha, lm_beta)
134 |                 model.setBeamWidth(beam)
135 | 
136 |                 stream = model.createStream()
137 | 
138 |                 status_indicator.write("Model loaded.")
139 | 
140 |             sound_chunk = pydub.AudioSegment.empty()
141 |             try:
142 |                 audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
143 |             except queue.Empty:
144 |                 time.sleep(0.1)
145 |                 status_indicator.write("No frame arrived.")
146 |                 continue
147 | 
148 |             status_indicator.write("Running. Say something!")
149 | 
150 |             for audio_frame in audio_frames:
151 |                 sound = pydub.AudioSegment(
152 |                     data=audio_frame.to_ndarray().tobytes(),
153 |                     sample_width=audio_frame.format.bytes,
154 |                     frame_rate=audio_frame.sample_rate,
155 |                     channels=len(audio_frame.layout.channels),
156 |                 )
157 |                 sound_chunk += sound
158 | 
159 |             if len(sound_chunk) > 0:
160 |                 sound_chunk = sound_chunk.set_channels(1).set_frame_rate(
161 |                     model.sampleRate()
162 |                 )
163 |                 buffer = np.array(sound_chunk.get_array_of_samples())
164 |                 stream.feedAudioContent(buffer)
165 |                 text = stream.intermediateDecode()
166 |                 text_output.markdown(f"**Text:** {text}")
167 |         else:
168 |             status_indicator.write("AudioReceiver is not set. Abort.")
169 |             break
170 | 
171 | 
172 | def app_sst_with_video(
173 |     model_path: str, lm_path: str, lm_alpha: float, lm_beta: float, beam: int
174 | ):
175 |     frames_deque_lock = threading.Lock()
176 |     frames_deque: deque = deque([])
177 | 
178 |     async def queued_audio_frames_callback(
179 |         frames: List[av.AudioFrame],
180 |     ) -> List[av.AudioFrame]:
181 |         with frames_deque_lock:
182 |             frames_deque.extend(frames)
183 | 
184 |         # Return empty frames to be silent.
185 |         new_frames = []
186 |         for frame in frames:
187 |             input_array = frame.to_ndarray()
188 |             new_frame = av.AudioFrame.from_ndarray(
189 |                 np.zeros(input_array.shape, dtype=input_array.dtype),  # type: ignore
190 |                 layout=frame.layout.name,
191 |             )
192 |             new_frame.sample_rate = frame.sample_rate
193 |             new_frames.append(new_frame)
194 | 
195 |         return new_frames
196 | 
197 |     webrtc_ctx = webrtc_streamer(
198 |         key="speech-to-text-w-video",
199 |         mode=WebRtcMode.SENDRECV,
200 |         queued_audio_frames_callback=queued_audio_frames_callback,
201 |         media_stream_constraints={"video": True, "audio": True},
202 |     )
203 | 
204 |     status_indicator = st.empty()
205 | 
206 |     if not webrtc_ctx.state.playing:
207 |         return
208 | 
209 |     status_indicator.write("Loading...")
210 |     text_output = st.empty()
211 |     stream = None
212 | 
213 |     while True:
214 |         if webrtc_ctx.state.playing:
215 |             if stream is None:
216 |                 from deepspeech import Model
217 | 
218 |                 model = Model(model_path)
219 |                 model.enableExternalScorer(lm_path)
220 |                 model.setScorerAlphaBeta(lm_alpha, lm_beta)
221 |                 model.setBeamWidth(beam)
222 | 
223 |                 stream = model.createStream()
224 | 
225 |                 status_indicator.write("Model loaded.")
226 | 
227 |             sound_chunk = pydub.AudioSegment.empty()
228 | 
229 |             audio_frames = []
230 |             with frames_deque_lock:
231 |                 while len(frames_deque) > 0:
232 |                     frame = frames_deque.popleft()
233 |                     audio_frames.append(frame)
234 | 
235 |             if len(audio_frames) == 0:
236 |                 time.sleep(0.1)
237 |                 status_indicator.write("No frame arrived.")
238 |                 continue
239 | 
240 |             status_indicator.write("Running.
Say something!") 241 | 242 | for audio_frame in audio_frames: 243 | sound = pydub.AudioSegment( 244 | data=audio_frame.to_ndarray().tobytes(), 245 | sample_width=audio_frame.format.bytes, 246 | frame_rate=audio_frame.sample_rate, 247 | channels=len(audio_frame.layout.channels), 248 | ) 249 | sound_chunk += sound 250 | 251 | if len(sound_chunk) > 0: 252 | sound_chunk = sound_chunk.set_channels(1).set_frame_rate( 253 | model.sampleRate() 254 | ) 255 | buffer = np.array(sound_chunk.get_array_of_samples()) 256 | stream.feedAudioContent(buffer) 257 | text = stream.intermediateDecode() 258 | text_output.markdown(f"**Text:** {text}") 259 | else: 260 | status_indicator.write("Stopped.") 261 | break 262 | 263 | 264 | if __name__ == "__main__": 265 | import os 266 | 267 | DEBUG = os.environ.get("DEBUG", "false").lower() not in ["false", "no", "0"] 268 | 269 | logging.basicConfig( 270 | format="[%(asctime)s] %(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: " 271 | "%(message)s", 272 | force=True, 273 | ) 274 | 275 | logger.setLevel(level=logging.DEBUG if DEBUG else logging.INFO) 276 | 277 | st_webrtc_logger = logging.getLogger("streamlit_webrtc") 278 | st_webrtc_logger.setLevel(logging.DEBUG) 279 | 280 | fsevents_logger = logging.getLogger("fsevents") 281 | fsevents_logger.setLevel(logging.WARNING) 282 | 283 | main() 284 | -------------------------------------------------------------------------------- /app_videochat.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import math 3 | from typing import List 4 | 5 | try: 6 | from typing import Literal 7 | except ImportError: 8 | from typing_extensions import Literal # type: ignore 9 | 10 | import av 11 | import cv2 12 | import numpy as np 13 | import streamlit as st 14 | from streamlit_server_state import server_state, server_state_lock 15 | 16 | from streamlit_webrtc import ( 17 | VideoProcessorBase, 18 | WebRtcMode, 19 | WebRtcStreamerContext, 20 | create_mix_track, 21 | create_process_track, 22 | webrtc_streamer, 23 | ) 24 | 25 | logger = logging.getLogger(__name__) 26 | 27 | 28 | class OpenCVVideoProcessor(VideoProcessorBase): 29 | type: Literal["noop", "cartoon", "edges", "rotate"] 30 | 31 | def __init__(self) -> None: 32 | self.type = "noop" 33 | 34 | def recv(self, frame: av.VideoFrame) -> av.VideoFrame: 35 | img = frame.to_ndarray(format="bgr24") 36 | 37 | if self.type == "noop": 38 | pass 39 | elif self.type == "cartoon": 40 | # prepare color 41 | img_color = cv2.pyrDown(cv2.pyrDown(img)) 42 | for _ in range(6): 43 | img_color = cv2.bilateralFilter(img_color, 9, 9, 7) 44 | img_color = cv2.pyrUp(cv2.pyrUp(img_color)) 45 | 46 | # prepare edges 47 | img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 48 | img_edges = cv2.adaptiveThreshold( 49 | cv2.medianBlur(img_edges, 7), 50 | 255, 51 | cv2.ADAPTIVE_THRESH_MEAN_C, 52 | cv2.THRESH_BINARY, 53 | 9, 54 | 2, 55 | ) 56 | img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB) 57 | 58 | # combine color and edges 59 | img = cv2.bitwise_and(img_color, img_edges) 60 | elif self.type == "edges": 61 | # perform edge detection 62 | img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR) 63 | elif self.type == "rotate": 64 | # rotate image 65 | rows, cols, _ = img.shape 66 | M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1) 67 | img = cv2.warpAffine(img, M, (cols, rows)) 68 | 69 | return av.VideoFrame.from_ndarray(img, format="bgr24") 70 | 71 | 72 | def mixer_callback(frames: List[av.VideoFrame]) -> av.VideoFrame: 73 
| buf_w = 640 74 | buf_h = 480 75 | buffer = np.zeros((buf_h, buf_w, 3), dtype=np.uint8) 76 | 77 | n_inputs = len(frames) 78 | 79 | n_cols = math.ceil(math.sqrt(n_inputs)) 80 | n_rows = math.ceil(n_inputs / n_cols) 81 | grid_w = buf_w // n_cols 82 | grid_h = buf_h // n_rows 83 | 84 | for i in range(n_inputs): 85 | frame = frames[i] 86 | if frame is None: 87 | continue 88 | 89 | grid_x = (i % n_cols) * grid_w 90 | grid_y = (i // n_cols) * grid_h 91 | 92 | img = frame.to_ndarray(format="bgr24") 93 | src_h, src_w = img.shape[0:2] 94 | 95 | aspect_ratio = src_w / src_h 96 | 97 | window_w = min(grid_w, int(grid_h * aspect_ratio)) 98 | window_h = min(grid_h, int(window_w / aspect_ratio)) 99 | 100 | window_offset_x = (grid_w - window_w) // 2 101 | window_offset_y = (grid_h - window_h) // 2 102 | 103 | window_x0 = grid_x + window_offset_x 104 | window_y0 = grid_y + window_offset_y 105 | window_x1 = window_x0 + window_w 106 | window_y1 = window_y0 + window_h 107 | 108 | buffer[window_y0:window_y1, window_x0:window_x1, :] = cv2.resize( 109 | img, (window_w, window_h) 110 | ) 111 | 112 | new_frame = av.VideoFrame.from_ndarray(buffer, format="bgr24") 113 | 114 | return new_frame 115 | 116 | 117 | def main() -> None: 118 | with server_state_lock["webrtc_contexts"]: 119 | if "webrtc_contexts" not in server_state: 120 | server_state["webrtc_contexts"] = [] 121 | 122 | with server_state_lock["mix_track"]: 123 | if "mix_track" not in server_state: 124 | server_state["mix_track"] = create_mix_track( 125 | kind="video", mixer_callback=mixer_callback, key="mix" 126 | ) 127 | 128 | mix_track = server_state["mix_track"] 129 | 130 | self_ctx = webrtc_streamer( 131 | key="self", 132 | mode=WebRtcMode.SENDRECV, 133 | media_stream_constraints={"video": True, "audio": True}, 134 | source_video_track=mix_track, 135 | sendback_audio=False, 136 | ) 137 | 138 | self_process_track = None 139 | if self_ctx.input_video_track: 140 | self_process_track = create_process_track( 141 | input_track=self_ctx.input_video_track, 142 | processor_factory=OpenCVVideoProcessor, 143 | ) 144 | mix_track.add_input_track(self_process_track) 145 | 146 | self_process_track.processor.type = ( 147 | st.radio( 148 | "Select transform type", 149 | ("noop", "cartoon", "edges", "rotate"), 150 | key="filter1-type", 151 | ) 152 | or "noop" 153 | ) 154 | 155 | with server_state_lock["webrtc_contexts"]: 156 | webrtc_contexts: List[WebRtcStreamerContext] = server_state["webrtc_contexts"] 157 | self_is_playing = self_ctx.state.playing and self_process_track 158 | if self_is_playing and self_ctx not in webrtc_contexts: 159 | webrtc_contexts.append(self_ctx) 160 | server_state["webrtc_contexts"] = webrtc_contexts 161 | elif not self_is_playing and self_ctx in webrtc_contexts: 162 | webrtc_contexts.remove(self_ctx) 163 | server_state["webrtc_contexts"] = webrtc_contexts 164 | 165 | if self_ctx.state.playing: 166 | # Audio streams are transferred in SFU manner 167 | # TODO: Create MCU to mix audio streams 168 | for ctx in webrtc_contexts: 169 | if ctx == self_ctx or not ctx.state.playing: 170 | continue 171 | webrtc_streamer( 172 | key=f"sound-{id(ctx)}", 173 | mode=WebRtcMode.RECVONLY, 174 | media_stream_constraints={"video": False, "audio": True}, 175 | source_audio_track=ctx.input_audio_track, 176 | desired_playing_state=ctx.state.playing, 177 | ) 178 | 179 | 180 | if __name__ == "__main__": 181 | import os 182 | 183 | DEBUG = os.environ.get("DEBUG", "false").lower() not in ["false", "no", "0"] 184 | 185 | logging.basicConfig( 186 | format="[%(asctime)s] 
%(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: " 187 | "%(message)s", 188 | force=True, 189 | ) 190 | 191 | logger.setLevel(level=logging.DEBUG if DEBUG else logging.INFO) 192 | 193 | st_webrtc_logger = logging.getLogger("streamlit_webrtc") 194 | st_webrtc_logger.setLevel(logging.DEBUG if DEBUG else logging.INFO) 195 | 196 | aioice_logger = logging.getLogger("aioice") 197 | aioice_logger.setLevel(logging.WARNING) 198 | 199 | fsevents_logger = logging.getLogger("fsevents") 200 | fsevents_logger.setLevel(logging.WARNING) 201 | 202 | main() 203 | -------------------------------------------------------------------------------- /docs/images/streamlit_webrtc_basic.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whitphx/streamlit-webrtc/e129c13f8f16034a2cd0d19b89da3496a2893983/docs/images/streamlit_webrtc_basic.gif -------------------------------------------------------------------------------- /docs/images/streamlit_webrtc_flipped.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whitphx/streamlit-webrtc/e129c13f8f16034a2cd0d19b89da3496a2893983/docs/images/streamlit_webrtc_flipped.gif -------------------------------------------------------------------------------- /home.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import streamlit as st 4 | 5 | logger = logging.getLogger() 6 | 7 | st.title("streamlit-webrtc demo!") 8 | st.info( 9 | """👈 Select the demo 10 | """ 11 | ) 12 | 13 | 14 | if __name__ == "__main__": 15 | import os 16 | 17 | DEBUG = os.environ.get("DEBUG", "false").lower() not in ["false", "no", "0"] 18 | 19 | logging.basicConfig( 20 | format="[%(asctime)s] %(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: " 21 | "%(message)s", 22 | level=logging.DEBUG if DEBUG else logging.INFO, 23 | force=True, 24 | ) 25 | 26 | fsevents_logger = logging.getLogger("fsevents") 27 | fsevents_logger.setLevel(logging.WARNING) 28 | 29 | aiortc_logger = logging.getLogger("aiortc") 30 | aiortc_logger.setLevel(logging.INFO) 31 | 32 | aioice_logger = logging.getLogger("aioice") 33 | aioice_logger.setLevel(logging.INFO) 34 | 35 | if DEBUG: 36 | os.environ["PYTHONASYNCIODEBUG"] = "1" 37 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | python_version = 3.9 3 | ignore_missing_imports = True 4 | -------------------------------------------------------------------------------- /pages/10_sendonly_audio.py: -------------------------------------------------------------------------------- 1 | """A sample to use WebRTC in sendonly mode to transfer audio frames 2 | from the browser to the server and visualize them with matplotlib 3 | and `st.pyplot`.""" 4 | 5 | import logging 6 | import queue 7 | 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import pydub 11 | import streamlit as st 12 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | webrtc_ctx = webrtc_streamer( 18 | key="sendonly-audio", 19 | mode=WebRtcMode.SENDONLY, 20 | audio_receiver_size=256, 21 | media_stream_constraints={"audio": True}, 22 | ) 23 | 24 | fig_place = st.empty() 25 | 26 | fig, [ax_time, ax_freq] = plt.subplots(2, 1, gridspec_kw={"top": 1.5, "bottom": 0.2}) 27 | 28 | sound_window_len = 5000 # 5s 29 | 
sound_window_buffer = None
30 | while True:
31 |     if webrtc_ctx.audio_receiver:
32 |         try:
33 |             audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
34 |         except queue.Empty:
35 |             logger.warning("Queue is empty. Abort.")
36 |             break
37 | 
38 |         sound_chunk = pydub.AudioSegment.empty()
39 |         for audio_frame in audio_frames:
40 |             sound = pydub.AudioSegment(
41 |                 data=audio_frame.to_ndarray().tobytes(),
42 |                 sample_width=audio_frame.format.bytes,
43 |                 frame_rate=audio_frame.sample_rate,
44 |                 channels=len(audio_frame.layout.channels),
45 |             )
46 |             sound_chunk += sound
47 | 
48 |         if len(sound_chunk) > 0:
49 |             if sound_window_buffer is None:
50 |                 sound_window_buffer = pydub.AudioSegment.silent(
51 |                     duration=sound_window_len
52 |                 )
53 | 
54 |             sound_window_buffer += sound_chunk
55 |             if len(sound_window_buffer) > sound_window_len:
56 |                 sound_window_buffer = sound_window_buffer[-sound_window_len:]
57 | 
58 |         if sound_window_buffer:
59 |             # Ref: https://own-search-and-study.xyz/2017/10/27/python%E3%82%92%E4%BD%BF%E3%81%A3%E3%81%A6%E9%9F%B3%E5%A3%B0%E3%83%87%E3%83%BC%E3%82%BF%E3%81%8B%E3%82%89%E3%82%B9%E3%83%9A%E3%82%AF%E3%83%88%E3%83%AD%E3%82%B0%E3%83%A9%E3%83%A0%E3%82%92%E4%BD%9C/  # noqa
60 |             sound_window_buffer = sound_window_buffer.set_channels(1)  # Stereo to mono
61 |             sample = np.array(sound_window_buffer.get_array_of_samples())
62 | 
63 |             ax_time.cla()
64 |             times = (np.arange(-len(sample), 0)) / sound_window_buffer.frame_rate
65 |             ax_time.plot(times, sample)
66 |             ax_time.set_xlabel("Time")
67 |             ax_time.set_ylabel("Magnitude")
68 | 
69 |             spec = np.fft.fft(sample)
70 |             freq = np.fft.fftfreq(sample.shape[0], 1.0 / sound_chunk.frame_rate)
71 |             freq = freq[: int(freq.shape[0] / 2)]
72 |             spec = spec[: int(spec.shape[0] / 2)]
73 |             spec[0] = spec[0] / 2
74 | 
75 |             ax_freq.cla()
76 |             ax_freq.plot(freq, np.abs(spec))
77 |             ax_freq.set_xlabel("Frequency")
78 |             ax_freq.set_yscale("log")
79 |             ax_freq.set_ylabel("Magnitude")
80 | 
81 |             fig_place.pyplot(fig)
82 |     else:
83 |         logger.warning("AudioReceiver is not set. Abort.")
84 |         break
85 | 
--------------------------------------------------------------------------------
/pages/11_programatic_control_playing.py:
--------------------------------------------------------------------------------
1 | """A sample of controlling the playing state from Python."""
2 | 
3 | import streamlit as st
4 | from streamlit_webrtc import WebRtcMode, webrtc_streamer
5 | 
6 | playing = st.checkbox("Playing", value=True)
7 | 
8 | webrtc_streamer(
9 |     key="programatic_control",
10 |     desired_playing_state=playing,
11 |     mode=WebRtcMode.SENDRECV,
12 | )
13 | 
--------------------------------------------------------------------------------
/pages/12_media_constraints_configs.py:
--------------------------------------------------------------------------------
1 | """A sample to configure the MediaStreamConstraints object."""
2 | 
3 | import streamlit as st
4 | from streamlit_webrtc import WebRtcMode, webrtc_streamer
5 | 
6 | frame_rate = 5
7 | webrtc_streamer(
8 |     key="media-constraints",
9 |     mode=WebRtcMode.SENDRECV,
10 |     media_stream_constraints={
11 |         "video": {"frameRate": {"ideal": frame_rate}},
12 |     },
13 |     video_html_attrs={
14 |         "style": {"width": "50%", "margin": "0 auto", "border": "5px yellow solid"},
15 |         "controls": False,
16 |         "autoPlay": True,
17 |     },
18 | )
19 | st.write(f"The frame rate is set to {frame_rate}.
Video style is changed.") 20 | -------------------------------------------------------------------------------- /pages/13_ui_texts_customization.py: -------------------------------------------------------------------------------- 1 | from streamlit_webrtc import webrtc_streamer 2 | 3 | webrtc_streamer( 4 | key="custom_ui_texts", 5 | translations={ 6 | "start": "開始", 7 | "stop": "停止", 8 | "select_device": "デバイス選択", 9 | "media_api_not_available": "Media APIが利用できない環境です", 10 | "device_ask_permission": "メディアデバイスへのアクセスを許可してください", 11 | "device_not_available": "メディアデバイスを利用できません", 12 | "device_access_denied": "メディアデバイスへのアクセスが拒否されました", 13 | }, 14 | ) 15 | -------------------------------------------------------------------------------- /pages/14_programmable_source.py: -------------------------------------------------------------------------------- 1 | import fractions 2 | import time 3 | 4 | import av 5 | import cv2 6 | import numpy as np 7 | import streamlit as st 8 | from streamlit_webrtc import WebRtcMode, create_video_source_track, webrtc_streamer 9 | 10 | thickness = st.slider("thickness", 1, 10, 3, 1) 11 | 12 | 13 | def video_source_callback(pts: int, time_base: fractions.Fraction) -> av.VideoFrame: 14 | pts_sec = pts * time_base 15 | 16 | buffer = np.zeros((480, 640, 3), dtype=np.uint8) 17 | buffer = cv2.putText( 18 | buffer, 19 | text=f"time: {time.time():.2f}", 20 | org=(0, 32), 21 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, 22 | fontScale=1.0, 23 | color=(255, 255, 0), 24 | thickness=thickness, 25 | lineType=cv2.LINE_4, 26 | ) 27 | buffer = cv2.putText( 28 | buffer, 29 | text=f"pts: {pts} ({float(pts_sec):.2f} sec)", 30 | org=(0, 64), 31 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, 32 | fontScale=1.0, 33 | color=(255, 255, 0), 34 | thickness=thickness, 35 | lineType=cv2.LINE_4, 36 | ) 37 | return av.VideoFrame.from_ndarray(buffer, format="bgr24") 38 | 39 | 40 | fps = st.slider("fps", 1, 30, 30, 1) 41 | 42 | 43 | video_source_track = create_video_source_track( 44 | video_source_callback, key="video_source_track", fps=fps 45 | ) 46 | 47 | 48 | def on_change(): 49 | ctx = st.session_state["player"] 50 | stopped = not ctx.state.playing and not ctx.state.signalling 51 | if stopped: 52 | video_source_track.stop() # Manually stop the track. 53 | 54 | 55 | webrtc_streamer( 56 | key="player", 57 | mode=WebRtcMode.RECVONLY, 58 | source_video_track=video_source_track, 59 | media_stream_constraints={"video": True, "audio": False}, 60 | on_change=on_change, 61 | ) 62 | -------------------------------------------------------------------------------- /pages/1_object_detection.py: -------------------------------------------------------------------------------- 1 | """Object detection demo with MobileNet SSD. 
2 | This model and code are based on 3 | https://github.com/robmarkcole/object-detection-app 4 | """ 5 | 6 | import logging 7 | import queue 8 | from pathlib import Path 9 | from typing import List, NamedTuple 10 | 11 | import av 12 | import cv2 13 | import numpy as np 14 | import streamlit as st 15 | from streamlit_session_memo import st_session_memo 16 | from streamlit_webrtc import ( 17 | WebRtcMode, 18 | webrtc_streamer, 19 | __version__ as st_webrtc_version, 20 | ) 21 | import aiortc 22 | 23 | from sample_utils.download import download_file 24 | 25 | HERE = Path(__file__).parent 26 | ROOT = HERE.parent 27 | 28 | logger = logging.getLogger(__name__) 29 | 30 | 31 | MODEL_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel" # noqa: E501 32 | MODEL_LOCAL_PATH = ROOT / "./models/MobileNetSSD_deploy.caffemodel" 33 | PROTOTXT_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt" # noqa: E501 34 | PROTOTXT_LOCAL_PATH = ROOT / "./models/MobileNetSSD_deploy.prototxt.txt" 35 | 36 | CLASSES = [ 37 | "background", 38 | "aeroplane", 39 | "bicycle", 40 | "bird", 41 | "boat", 42 | "bottle", 43 | "bus", 44 | "car", 45 | "cat", 46 | "chair", 47 | "cow", 48 | "diningtable", 49 | "dog", 50 | "horse", 51 | "motorbike", 52 | "person", 53 | "pottedplant", 54 | "sheep", 55 | "sofa", 56 | "train", 57 | "tvmonitor", 58 | ] 59 | 60 | 61 | class Detection(NamedTuple): 62 | class_id: int 63 | label: str 64 | score: float 65 | box: np.ndarray 66 | 67 | 68 | @st.cache_resource # type: ignore 69 | def generate_label_colors(): 70 | return np.random.uniform(0, 255, size=(len(CLASSES), 3)) 71 | 72 | 73 | COLORS = generate_label_colors() 74 | 75 | download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=23147564) 76 | download_file(PROTOTXT_URL, PROTOTXT_LOCAL_PATH, expected_size=29353) 77 | 78 | 79 | @st_session_memo 80 | def get_model(): 81 | return cv2.dnn.readNetFromCaffe(str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)) 82 | 83 | 84 | net = get_model() 85 | 86 | score_threshold = st.slider("Score threshold", 0.0, 1.0, 0.5, 0.05) 87 | 88 | # NOTE: The callback will be called in another thread, 89 | # so use a queue here for thread-safety to pass the data 90 | # from inside to outside the callback. 91 | # TODO: A general-purpose shared state object may be more useful. 92 | result_queue: "queue.Queue[List[Detection]]" = queue.Queue() 93 | 94 | 95 | def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame: 96 | image = frame.to_ndarray(format="bgr24") 97 | 98 | # Run inference 99 | blob = cv2.dnn.blobFromImage( 100 | image=cv2.resize(image, (300, 300)), 101 | scalefactor=0.007843, 102 | size=(300, 300), 103 | mean=(127.5, 127.5, 127.5), 104 | ) 105 | net.setInput(blob) 106 | output = net.forward() 107 | 108 | h, w = image.shape[:2] 109 | 110 | # Convert the output array into a structured form. 
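    # Each row of the network output is [image_id, class_id, score, x0, y0, x1, y1],
    # with the box coordinates normalized to [0, 1] and scaled to pixels below.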
111 | output = output.squeeze() # (1, 1, N, 7) -> (N, 7) 112 | output = output[output[:, 2] >= score_threshold] 113 | detections = [ 114 | Detection( 115 | class_id=int(detection[1]), 116 | label=CLASSES[int(detection[1])], 117 | score=float(detection[2]), 118 | box=(detection[3:7] * np.array([w, h, w, h])), 119 | ) 120 | for detection in output 121 | ] 122 | 123 | # Render bounding boxes and captions 124 | for detection in detections: 125 | caption = f"{detection.label}: {round(detection.score * 100, 2)}%" 126 | color = COLORS[detection.class_id] 127 | xmin, ymin, xmax, ymax = detection.box.astype("int") 128 | 129 | cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2) 130 | cv2.putText( 131 | image, 132 | caption, 133 | (xmin, ymin - 15 if ymin - 15 > 15 else ymin + 15), 134 | cv2.FONT_HERSHEY_SIMPLEX, 135 | 0.5, 136 | color, 137 | 2, 138 | ) 139 | 140 | result_queue.put(detections) 141 | 142 | return av.VideoFrame.from_ndarray(image, format="bgr24") 143 | 144 | 145 | webrtc_ctx = webrtc_streamer( 146 | key="object-detection", 147 | mode=WebRtcMode.SENDRECV, 148 | video_frame_callback=video_frame_callback, 149 | media_stream_constraints={"video": True, "audio": False}, 150 | async_processing=True, 151 | ) 152 | 153 | if st.checkbox("Show the detected labels", value=True): 154 | if webrtc_ctx.state.playing: 155 | labels_placeholder = st.empty() 156 | # NOTE: The video transformation with object detection and 157 | # this loop displaying the result labels are running 158 | # in different threads asynchronously. 159 | # Then the rendered video frames and the labels displayed here 160 | # are not strictly synchronized. 161 | while True: 162 | result = result_queue.get() 163 | labels_placeholder.table(result) 164 | 165 | st.markdown( 166 | "This demo uses a model and code from " 167 | "https://github.com/robmarkcole/object-detection-app. " 168 | "Many thanks to the project." 
169 | ) 170 | 171 | st.markdown( 172 | f"Streamlit version: {st.__version__} \n" 173 | f"Streamlit-WebRTC version: {st_webrtc_version} \n" 174 | f"aiortc version: {aiortc.__version__} \n" 175 | ) 176 | -------------------------------------------------------------------------------- /pages/2_opencv_filters.py: -------------------------------------------------------------------------------- 1 | """Video transforms with OpenCV""" 2 | 3 | import av 4 | import cv2 5 | import streamlit as st 6 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 7 | 8 | _type = st.radio("Select transform type", ("noop", "cartoon", "edges", "rotate")) 9 | 10 | 11 | def callback(frame: av.VideoFrame) -> av.VideoFrame: 12 | img = frame.to_ndarray(format="bgr24") 13 | 14 | if _type == "noop": 15 | pass 16 | elif _type == "cartoon": 17 | # prepare color 18 | img_color = cv2.pyrDown(cv2.pyrDown(img)) 19 | for _ in range(6): 20 | img_color = cv2.bilateralFilter(img_color, 9, 9, 7) 21 | img_color = cv2.pyrUp(cv2.pyrUp(img_color)) 22 | 23 | # prepare edges 24 | img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 25 | img_edges = cv2.adaptiveThreshold( 26 | cv2.medianBlur(img_edges, 7), 27 | 255, 28 | cv2.ADAPTIVE_THRESH_MEAN_C, 29 | cv2.THRESH_BINARY, 30 | 9, 31 | 2, 32 | ) 33 | img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB) 34 | 35 | # combine color and edges 36 | img = cv2.bitwise_and(img_color, img_edges) 37 | elif _type == "edges": 38 | # perform edge detection 39 | img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR) 40 | elif _type == "rotate": 41 | # rotate image 42 | rows, cols, _ = img.shape 43 | M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1) 44 | img = cv2.warpAffine(img, M, (cols, rows)) 45 | 46 | return av.VideoFrame.from_ndarray(img, format="bgr24") 47 | 48 | 49 | webrtc_streamer( 50 | key="opencv-filter", 51 | mode=WebRtcMode.SENDRECV, 52 | video_frame_callback=callback, 53 | media_stream_constraints={"video": True, "audio": False}, 54 | async_processing=True, 55 | ) 56 | 57 | st.markdown( 58 | "This demo is based on " 59 | "https://github.com/aiortc/aiortc/blob/2362e6d1f0c730a0f8c387bbea76546775ad2fe8/examples/server/server.py#L34. " # noqa: E501 60 | "Many thanks to the project." 
61 | ) 62 | -------------------------------------------------------------------------------- /pages/3_audio_filter.py: -------------------------------------------------------------------------------- 1 | import av 2 | import numpy as np 3 | import pydub 4 | import streamlit as st 5 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 6 | 7 | gain = st.slider("Gain", -10.0, +20.0, 1.0, 0.05) 8 | 9 | 10 | def process_audio(frame: av.AudioFrame) -> av.AudioFrame: 11 | raw_samples = frame.to_ndarray() 12 | sound = pydub.AudioSegment( 13 | data=raw_samples.tobytes(), 14 | sample_width=frame.format.bytes, 15 | frame_rate=frame.sample_rate, 16 | channels=len(frame.layout.channels), 17 | ) 18 | 19 | sound = sound.apply_gain(gain) 20 | 21 | # Ref: https://github.com/jiaaro/pydub/blob/master/API.markdown#audiosegmentget_array_of_samples # noqa 22 | channel_sounds = sound.split_to_mono() 23 | channel_samples = [s.get_array_of_samples() for s in channel_sounds] 24 | new_samples: np.ndarray = np.array(channel_samples).T 25 | new_samples = new_samples.reshape(raw_samples.shape) 26 | 27 | new_frame = av.AudioFrame.from_ndarray(new_samples, layout=frame.layout.name) 28 | new_frame.sample_rate = frame.sample_rate 29 | return new_frame 30 | 31 | 32 | webrtc_streamer( 33 | key="audio-filter", 34 | mode=WebRtcMode.SENDRECV, 35 | audio_frame_callback=process_audio, 36 | async_processing=True, 37 | ) 38 | -------------------------------------------------------------------------------- /pages/4_delayed_echo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from typing import List 4 | 5 | import av 6 | import streamlit as st 7 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | delay = st.slider("Delay", 0.0, 5.0, 1.0, 0.05) 13 | 14 | 15 | async def queued_video_frames_callback( 16 | frames: List[av.VideoFrame], 17 | ) -> List[av.VideoFrame]: 18 | logger.debug("Delay: %f", delay) 19 | # A standalone `await ...` is interpreted as an expression and 20 | # the Streamlit magic's target, which leads implicit calls of `st.write`. 21 | # To prevent it, fix it as `_ = await ...`, a statement. 22 | # See https://discuss.streamlit.io/t/issue-with-asyncio-run-in-streamlit/7745/15 23 | await asyncio.sleep(delay) 24 | return frames 25 | 26 | 27 | async def queued_audio_frames_callback( 28 | frames: List[av.AudioFrame], 29 | ) -> List[av.AudioFrame]: 30 | await asyncio.sleep(delay) 31 | return frames 32 | 33 | 34 | webrtc_streamer( 35 | key="delay", 36 | mode=WebRtcMode.SENDRECV, 37 | queued_video_frames_callback=queued_video_frames_callback, 38 | queued_audio_frames_callback=queued_audio_frames_callback, 39 | async_processing=True, 40 | ) 41 | -------------------------------------------------------------------------------- /pages/5_fork_multi_outputs.py: -------------------------------------------------------------------------------- 1 | try: 2 | from typing import Literal 3 | except ImportError: 4 | from typing_extensions import Literal # type: ignore 5 | 6 | from typing import cast 7 | 8 | import av 9 | import cv2 10 | import streamlit as st 11 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 12 | 13 | st.markdown( 14 | """ 15 | Fork one input to multiple outputs with different video filters. 
16 | """ 17 | ) 18 | 19 | VideoFilterType = Literal["noop", "cartoon", "edges", "rotate"] 20 | 21 | 22 | def make_video_frame_callback(_type: VideoFilterType): 23 | def callback(frame: av.VideoFrame) -> av.VideoFrame: 24 | img = frame.to_ndarray(format="bgr24") 25 | 26 | if _type == "noop": 27 | pass 28 | elif _type == "cartoon": 29 | # prepare color 30 | img_color = cv2.pyrDown(cv2.pyrDown(img)) 31 | for _ in range(6): 32 | img_color = cv2.bilateralFilter(img_color, 9, 9, 7) 33 | img_color = cv2.pyrUp(cv2.pyrUp(img_color)) 34 | 35 | # prepare edges 36 | img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 37 | img_edges = cv2.adaptiveThreshold( 38 | cv2.medianBlur(img_edges, 7), 39 | 255, 40 | cv2.ADAPTIVE_THRESH_MEAN_C, 41 | cv2.THRESH_BINARY, 42 | 9, 43 | 2, 44 | ) 45 | img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB) 46 | 47 | # combine color and edges 48 | img = cv2.bitwise_and(img_color, img_edges) 49 | elif _type == "edges": 50 | # perform edge detection 51 | img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR) 52 | elif _type == "rotate": 53 | # rotate image 54 | rows, cols, _ = img.shape 55 | M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1) 56 | img = cv2.warpAffine(img, M, (cols, rows)) 57 | 58 | return av.VideoFrame.from_ndarray(img, format="bgr24") 59 | 60 | return callback 61 | 62 | 63 | st.header("Input") 64 | ctx = webrtc_streamer( 65 | key="loopback", 66 | mode=WebRtcMode.SENDRECV, 67 | media_stream_constraints={"video": True, "audio": False}, 68 | ) 69 | 70 | st.header("Forked output 1") 71 | filter1_type = st.radio( 72 | "Select transform type", 73 | ("noop", "cartoon", "edges", "rotate"), 74 | key="fork-filter1-type", 75 | ) 76 | callback = make_video_frame_callback(cast(VideoFilterType, filter1_type)) 77 | webrtc_streamer( 78 | key="filter1", 79 | mode=WebRtcMode.RECVONLY, 80 | video_frame_callback=callback, 81 | source_video_track=ctx.output_video_track, 82 | desired_playing_state=ctx.state.playing, 83 | media_stream_constraints={"video": True, "audio": False}, 84 | ) 85 | 86 | st.header("Forked output 2") 87 | filter2_type = st.radio( 88 | "Select transform type", 89 | ("noop", "cartoon", "edges", "rotate"), 90 | key="fork-filter2-type", 91 | ) 92 | callback = make_video_frame_callback(cast(VideoFilterType, filter2_type)) 93 | webrtc_streamer( 94 | key="filter2", 95 | mode=WebRtcMode.RECVONLY, 96 | video_frame_callback=callback, 97 | source_video_track=ctx.output_video_track, 98 | desired_playing_state=ctx.state.playing, 99 | media_stream_constraints={"video": True, "audio": False}, 100 | ) 101 | -------------------------------------------------------------------------------- /pages/6_mix_multi_inputs.py: -------------------------------------------------------------------------------- 1 | import math 2 | from typing import List 3 | 4 | try: 5 | from typing import Literal, cast 6 | except ImportError: 7 | from typing_extensions import Literal # type: ignore 8 | 9 | import av 10 | import cv2 11 | import numpy as np 12 | import streamlit as st 13 | from streamlit_webrtc import ( 14 | MediaStreamMixTrack, 15 | WebRtcMode, 16 | create_mix_track, 17 | create_process_track, 18 | webrtc_streamer, 19 | ) 20 | 21 | st.markdown( 22 | """ 23 | Mix multiple inputs with different video filters into one stream. 
24 | """ 25 | ) 26 | 27 | VideoFilterType = Literal["noop", "cartoon", "edges", "rotate"] 28 | 29 | 30 | def make_video_frame_callback(_type: VideoFilterType): 31 | def callback(frame: av.VideoFrame) -> av.VideoFrame: 32 | img = frame.to_ndarray(format="bgr24") 33 | 34 | if _type == "noop": 35 | pass 36 | elif _type == "cartoon": 37 | # prepare color 38 | img_color = cv2.pyrDown(cv2.pyrDown(img)) 39 | for _ in range(6): 40 | img_color = cv2.bilateralFilter(img_color, 9, 9, 7) 41 | img_color = cv2.pyrUp(cv2.pyrUp(img_color)) 42 | 43 | # prepare edges 44 | img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 45 | img_edges = cv2.adaptiveThreshold( 46 | cv2.medianBlur(img_edges, 7), 47 | 255, 48 | cv2.ADAPTIVE_THRESH_MEAN_C, 49 | cv2.THRESH_BINARY, 50 | 9, 51 | 2, 52 | ) 53 | img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB) 54 | 55 | # combine color and edges 56 | img = cv2.bitwise_and(img_color, img_edges) 57 | elif _type == "edges": 58 | # perform edge detection 59 | img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR) 60 | elif _type == "rotate": 61 | # rotate image 62 | rows, cols, _ = img.shape 63 | M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1) 64 | img = cv2.warpAffine(img, M, (cols, rows)) 65 | 66 | return av.VideoFrame.from_ndarray(img, format="bgr24") 67 | 68 | return callback 69 | 70 | 71 | def mixer_callback(frames: List[av.VideoFrame]) -> av.VideoFrame: 72 | buf_w = 640 73 | buf_h = 480 74 | buffer = np.zeros((buf_h, buf_w, 3), dtype=np.uint8) 75 | 76 | n_inputs = len(frames) 77 | 78 | n_cols = math.ceil(math.sqrt(n_inputs)) 79 | n_rows = math.ceil(n_inputs / n_cols) 80 | grid_w = buf_w // n_cols 81 | grid_h = buf_h // n_rows 82 | 83 | for i in range(n_inputs): 84 | frame = frames[i] 85 | if frame is None: 86 | continue 87 | 88 | grid_x = (i % n_cols) * grid_w 89 | grid_y = (i // n_cols) * grid_h 90 | 91 | img = frame.to_ndarray(format="bgr24") 92 | src_h, src_w = img.shape[0:2] 93 | 94 | aspect_ratio = src_w / src_h 95 | 96 | window_w = min(grid_w, int(grid_h * aspect_ratio)) 97 | window_h = min(grid_h, int(window_w / aspect_ratio)) 98 | 99 | window_offset_x = (grid_w - window_w) // 2 100 | window_offset_y = (grid_h - window_h) // 2 101 | 102 | window_x0 = grid_x + window_offset_x 103 | window_y0 = grid_y + window_offset_y 104 | window_x1 = window_x0 + window_w 105 | window_y1 = window_y0 + window_h 106 | 107 | buffer[window_y0:window_y1, window_x0:window_x1, :] = cv2.resize( 108 | img, (window_w, window_h) 109 | ) 110 | 111 | new_frame = av.VideoFrame.from_ndarray(buffer, format="bgr24") 112 | 113 | return new_frame 114 | 115 | 116 | st.header("Input 1") 117 | input1_ctx = webrtc_streamer( 118 | key="input1_ctx", 119 | mode=WebRtcMode.SENDRECV, 120 | media_stream_constraints={"video": True, "audio": False}, 121 | ) 122 | filter1_type = st.radio( 123 | "Select transform type", 124 | ("noop", "cartoon", "edges", "rotate"), 125 | key="mix-filter1-type", 126 | ) 127 | callback = make_video_frame_callback(cast(VideoFilterType, filter1_type)) 128 | input1_video_process_track = None 129 | if input1_ctx.output_video_track: 130 | input1_video_process_track = create_process_track( 131 | input_track=input1_ctx.output_video_track, 132 | frame_callback=callback, 133 | ) 134 | 135 | st.header("Input 2") 136 | input2_ctx = webrtc_streamer( 137 | key="input2_ctx", 138 | mode=WebRtcMode.SENDRECV, 139 | media_stream_constraints={"video": True, "audio": False}, 140 | ) 141 | filter2_type = st.radio( 142 | "Select transform type", 143 | ("noop", "cartoon", 
"edges", "rotate"), 144 | key="mix-filter2-type", 145 | ) 146 | callback = make_video_frame_callback(cast(VideoFilterType, filter2_type)) 147 | input2_video_process_track = None 148 | if input2_ctx.output_video_track: 149 | input2_video_process_track = create_process_track( 150 | input_track=input2_ctx.output_video_track, frame_callback=callback 151 | ) 152 | 153 | st.header("Input 3 (no filter)") 154 | input3_ctx = webrtc_streamer( 155 | key="input3_ctx", 156 | mode=WebRtcMode.SENDRECV, 157 | media_stream_constraints={"video": True, "audio": False}, 158 | ) 159 | 160 | st.header("Mixed output") 161 | mix_track = create_mix_track(kind="video", mixer_callback=mixer_callback, key="mix") 162 | mix_ctx = webrtc_streamer( 163 | key="mix", 164 | mode=WebRtcMode.RECVONLY, 165 | source_video_track=mix_track, 166 | desired_playing_state=input1_ctx.state.playing 167 | or input2_ctx.state.playing 168 | or input3_ctx.state.playing, 169 | ) 170 | 171 | if mix_ctx.source_video_track and input1_video_process_track: 172 | cast(MediaStreamMixTrack, mix_ctx.source_video_track).add_input_track( 173 | input1_video_process_track 174 | ) 175 | if mix_ctx.source_video_track and input2_video_process_track: 176 | cast(MediaStreamMixTrack, mix_ctx.source_video_track).add_input_track( 177 | input2_video_process_track 178 | ) 179 | if mix_ctx.source_video_track and input3_ctx.output_video_track: 180 | # Input3 is sourced without any filter. 181 | cast(MediaStreamMixTrack, mix_ctx.source_video_track).add_input_track( 182 | input3_ctx.output_video_track 183 | ) 184 | -------------------------------------------------------------------------------- /pages/7_record.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from pathlib import Path 3 | 4 | import av 5 | import cv2 6 | import streamlit as st 7 | from aiortc.contrib.media import MediaRecorder 8 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 9 | 10 | 11 | def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame: 12 | img = frame.to_ndarray(format="bgr24") 13 | 14 | # perform edge detection 15 | img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR) 16 | 17 | return av.VideoFrame.from_ndarray(img, format="bgr24") 18 | 19 | 20 | RECORD_DIR = Path("./records") 21 | RECORD_DIR.mkdir(exist_ok=True) 22 | 23 | 24 | def app(): 25 | if "prefix" not in st.session_state: 26 | st.session_state["prefix"] = str(uuid.uuid4()) 27 | prefix = st.session_state["prefix"] 28 | in_file = RECORD_DIR / f"{prefix}_input.flv" 29 | out_file = RECORD_DIR / f"{prefix}_output.flv" 30 | 31 | def in_recorder_factory() -> MediaRecorder: 32 | return MediaRecorder( 33 | str(in_file), format="flv" 34 | ) # HLS does not work. 
See https://github.com/aiortc/aiortc/issues/331 35 | 36 | def out_recorder_factory() -> MediaRecorder: 37 | return MediaRecorder(str(out_file), format="flv") 38 | 39 | webrtc_streamer( 40 | key="record", 41 | mode=WebRtcMode.SENDRECV, 42 | media_stream_constraints={ 43 | "video": True, 44 | "audio": True, 45 | }, 46 | video_frame_callback=video_frame_callback, 47 | in_recorder_factory=in_recorder_factory, 48 | out_recorder_factory=out_recorder_factory, 49 | ) 50 | 51 | if in_file.exists(): 52 | with in_file.open("rb") as f: 53 | st.download_button( 54 | "Download the recorded video without video filter", f, "input.flv" 55 | ) 56 | if out_file.exists(): 57 | with out_file.open("rb") as f: 58 | st.download_button( 59 | "Download the recorded video with video filter", f, "output.flv" 60 | ) 61 | 62 | 63 | if __name__ == "__main__": 64 | app() 65 | -------------------------------------------------------------------------------- /pages/8_media_files_streaming.py: -------------------------------------------------------------------------------- 1 | """Media streamings""" 2 | 3 | import logging 4 | from pathlib import Path 5 | from typing import Dict, Optional, cast 6 | 7 | import av 8 | import cv2 9 | import streamlit as st 10 | from aiortc.contrib.media import MediaPlayer 11 | from streamlit_webrtc import WebRtcMode, WebRtcStreamerContext, webrtc_streamer 12 | 13 | from sample_utils.download import download_file 14 | 15 | HERE = Path(__file__).parent 16 | ROOT = HERE.parent 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | MEDIAFILES: Dict[str, Dict] = { 22 | "big_buck_bunny_720p_2mb.mp4 (local)": { 23 | "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_2mb.mp4", # noqa: E501 24 | "local_file_path": ROOT / "data/big_buck_bunny_720p_2mb.mp4", 25 | "type": "video", 26 | }, 27 | "big_buck_bunny_720p_10mb.mp4 (local)": { 28 | "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_10mb.mp4", # noqa: E501 29 | "local_file_path": ROOT / "data/big_buck_bunny_720p_10mb.mp4", 30 | "type": "video", 31 | }, 32 | "file_example_MP3_700KB.mp3 (local)": { 33 | "url": "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3", # noqa: E501 34 | "local_file_path": ROOT / "data/file_example_MP3_700KB.mp3", 35 | "type": "audio", 36 | }, 37 | "file_example_MP3_5MG.mp3 (local)": { 38 | "url": "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_5MG.mp3", # noqa: E501 39 | "local_file_path": ROOT / "data/file_example_MP3_5MG.mp3", 40 | "type": "audio", 41 | }, 42 | "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov": { 43 | "url": "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov", 44 | "type": "video", 45 | }, 46 | } 47 | media_file_label = st.radio("Select a media source to stream", tuple(MEDIAFILES.keys())) 48 | media_file_info = MEDIAFILES[cast(str, media_file_label)] 49 | if "local_file_path" in media_file_info: 50 | download_file(media_file_info["url"], media_file_info["local_file_path"]) 51 | 52 | 53 | def create_player(): 54 | if "local_file_path" in media_file_info: 55 | return MediaPlayer(str(media_file_info["local_file_path"])) 56 | else: 57 | return MediaPlayer(media_file_info["url"]) 58 | 59 | # NOTE: To stream the video from webcam, use the code below. 
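# (The commented snippet below uses macOS's avfoundation backend; on Linux, a rough equivalent — untested here — would be `MediaPlayer("/dev/video0", format="v4l2", options={"framerate": "30", "video_size": "1280x720"})`.)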
60 | # return MediaPlayer( 61 | # "1:none", 62 | # format="avfoundation", 63 | # options={"framerate": "30", "video_size": "1280x720"}, 64 | # ) 65 | 66 | 67 | key = f"media-streaming-{media_file_label}" 68 | ctx: Optional[WebRtcStreamerContext] = st.session_state.get(key) 69 | if media_file_info["type"] == "video" and ctx and ctx.state.playing: 70 | _type = st.radio("Select transform type", ("noop", "cartoon", "edges", "rotate")) 71 | else: 72 | _type = "noop" 73 | 74 | 75 | def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame: 76 | img = frame.to_ndarray(format="bgr24") 77 | 78 | if _type == "noop": 79 | pass 80 | elif _type == "cartoon": 81 | # prepare color 82 | img_color = cv2.pyrDown(cv2.pyrDown(img)) 83 | for _ in range(6): 84 | img_color = cv2.bilateralFilter(img_color, 9, 9, 7) 85 | img_color = cv2.pyrUp(cv2.pyrUp(img_color)) 86 | 87 | # prepare edges 88 | img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 89 | img_edges = cv2.adaptiveThreshold( 90 | cv2.medianBlur(img_edges, 7), 91 | 255, 92 | cv2.ADAPTIVE_THRESH_MEAN_C, 93 | cv2.THRESH_BINARY, 94 | 9, 95 | 2, 96 | ) 97 | img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB) 98 | 99 | # combine color and edges 100 | img = cv2.bitwise_and(img_color, img_edges) 101 | elif _type == "edges": 102 | # perform edge detection 103 | img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR) 104 | elif _type == "rotate": 105 | # rotate image 106 | rows, cols, _ = img.shape 107 | M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1) 108 | img = cv2.warpAffine(img, M, (cols, rows)) 109 | 110 | return av.VideoFrame.from_ndarray(img, format="bgr24") 111 | 112 | 113 | webrtc_streamer( 114 | key=key, 115 | mode=WebRtcMode.RECVONLY, 116 | media_stream_constraints={ 117 | "video": media_file_info["type"] == "video", 118 | "audio": media_file_info["type"] == "audio", 119 | }, 120 | player_factory=create_player, 121 | video_frame_callback=video_frame_callback, 122 | ) 123 | 124 | st.markdown( 125 | "The video filter in this demo is based on " 126 | "https://github.com/aiortc/aiortc/blob/2362e6d1f0c730a0f8c387bbea76546775ad2fe8/examples/server/server.py#L34. " # noqa: E501 127 | "Many thanks to the project." 128 | ) 129 | -------------------------------------------------------------------------------- /pages/9_sendonly_video.py: -------------------------------------------------------------------------------- 1 | """A sample to use WebRTC in sendonly mode to transfer frames 2 | from the browser to the server and to render frames via `st.image`.""" 3 | 4 | import logging 5 | import queue 6 | 7 | import streamlit as st 8 | from streamlit_webrtc import WebRtcMode, webrtc_streamer 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | webrtc_ctx = webrtc_streamer( 14 | key="video-sendonly", 15 | mode=WebRtcMode.SENDONLY, 16 | media_stream_constraints={"video": True}, 17 | ) 18 | 19 | image_place = st.empty() 20 | 21 | while True: 22 | if webrtc_ctx.video_receiver: 23 | try: 24 | video_frame = webrtc_ctx.video_receiver.get_frame(timeout=1) 25 | except queue.Empty: 26 | logger.warning("Queue is empty. Abort.") 27 | break 28 | 29 | img_rgb = video_frame.to_ndarray(format="rgb24") 30 | image_place.image(img_rgb) 31 | else: 32 | logger.warning("VideoReceiver is not set.
Abort.") 33 | break 34 | -------------------------------------------------------------------------------- /pages/pyproject.toml: -------------------------------------------------------------------------------- 1 | 2 | [tool.ruff.lint.isort] 3 | known-third-party = ["streamlit_webrtc"] 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "streamlit-webrtc" 3 | description = "Real-time video and audio processing on Streamlit" 4 | authors = [{ name = "Yuichiro Tachibana (Tsuchiya)", email = "t.yic.yt@gmail.com" }] 5 | requires-python = ">=3.9,!=3.9.7" # 3.9.7 is excluded due to https://github.com/streamlit/streamlit/pull/5168 6 | readme = "README.md" 7 | license = "MIT" 8 | dynamic = ["version"] 9 | dependencies = [ 10 | # For allow-same-origin, >=0.73.0 is required. See https://blog.streamlit.io/streamlit-components-security-and-a-five-month-quest-to-ship-a-single-line-of-code/ 11 | # 0.84.0 has an error at marshalling component values. 12 | # For cache_data or experimental_memo, >=0.89.0 is required. See https://docs.streamlit.io/develop/quick-reference/release-notes/2021#version-0890, https://docs.streamlit.io/develop/quick-reference/release-notes/2023#version-1180 13 | "streamlit>=0.89.0", 14 | "aiortc>=1.11.0", # aiortc<1.4.0 causes an error with cryptography>=39.0.0. See https://github.com/whitphx/streamlit-webrtc/issues/1164. The fix was introduced into aiortc in https://github.com/aiortc/aiortc/commit/08b0a7e9f5030a9f7e5617382e92560d4ae763a2 that 1.4.0 included. 15 | "packaging>=20.0", 16 | "aioice>=0.10.0", 17 | ] 18 | 19 | [project.urls] 20 | Repository = "https://github.com/whitphx/streamlit-webrtc" 21 | 22 | [dependency-groups] 23 | dev = [ 24 | "ruff>=0.9.10", 25 | "pytest>=7.1.2", 26 | "mypy[faster-cache]>=1.15.0", 27 | "bump-my-version>=1.0.2", 28 | "pre-commit>=4.2.0", 29 | "pydub>=0.25.1", 30 | "matplotlib>=3.5.1", 31 | "streamlit-server-state>=0.17.1", 32 | "twilio>=8.1", 33 | "opencv-python-headless>=4.5.4.58", 34 | "streamlit-session-memo>=0.3.2", 35 | # For testing older versions of Streamlit: https://discuss.streamlit.io/t/modulenotfounderror-no-module-named-altair-vegalite-v4/42921 36 | "altair<5", 37 | # For testing older versions of Streamlit: https://discuss.streamlit.io/t/streamlit-run-with-protobuf-error/25632/3 38 | "protobuf<=3.20", 39 | ] 40 | 41 | [tool.hatch.version] 42 | source = "vcs" 43 | 44 | [tool.hatch.build.targets.sdist] 45 | include = ["/streamlit_webrtc"] 46 | exclude = ["/streamlit_webrtc/frontend", "!/streamlit_webrtc/frontend/dist"] 47 | 48 | [tool.hatch.build.targets.wheel] 49 | include = ["/streamlit_webrtc"] 50 | exclude = ["/streamlit_webrtc/frontend", "!/streamlit_webrtc/frontend/dist"] 51 | 52 | [build-system] 53 | requires = ["hatchling", "hatch-vcs"] 54 | build-backend = "hatchling.build" 55 | 56 | [tool.ruff.lint] 57 | extend-select = ["I"] 58 | -------------------------------------------------------------------------------- /sample_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whitphx/streamlit-webrtc/e129c13f8f16034a2cd0d19b89da3496a2893983/sample_utils/__init__.py -------------------------------------------------------------------------------- /sample_utils/download.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | from pathlib 
import Path 3 | 4 | import streamlit as st 5 | 6 | 7 | # This code is based on https://github.com/streamlit/demo-self-driving/blob/230245391f2dda0cb464008195a470751c01770b/streamlit_app.py#L48 # noqa: E501 8 | def download_file(url, download_to: Path, expected_size=None): 9 | # Don't download the file twice. 10 | # (If possible, verify the download using the file length.) 11 | if download_to.exists(): 12 | if expected_size: 13 | if download_to.stat().st_size == expected_size: 14 | return 15 | else: 16 | st.info(f"{url} is already downloaded.") 17 | if not st.button("Download again?"): 18 | return 19 | 20 | download_to.parent.mkdir(parents=True, exist_ok=True) 21 | 22 | # These are handles to two visual elements to animate. 23 | weights_warning, progress_bar = None, None 24 | try: 25 | weights_warning = st.warning("Downloading %s..." % url) 26 | progress_bar = st.progress(0) 27 | with open(download_to, "wb") as output_file: 28 | with urllib.request.urlopen(url) as response: 29 | length = int(response.info()["Content-Length"]) 30 | counter = 0.0 31 | MEGABYTES = 2.0**20.0 32 | while True: 33 | data = response.read(8192) 34 | if not data: 35 | break 36 | counter += len(data) 37 | output_file.write(data) 38 | 39 | # We perform animation by overwriting the elements. 40 | weights_warning.warning( 41 | "Downloading %s... (%6.2f/%6.2f MB)" 42 | % (url, counter / MEGABYTES, length / MEGABYTES) 43 | ) 44 | progress_bar.progress(min(counter / length, 1.0)) 45 | # Finally, we remove these visual elements by calling .empty(). 46 | finally: 47 | if weights_warning is not None: 48 | weights_warning.empty() 49 | if progress_bar is not None: 50 | progress_bar.empty() 51 | -------------------------------------------------------------------------------- /scripts/release_check.py: -------------------------------------------------------------------------------- 1 | """A script to check whether a variable `_RELEASE` is set as True.""" 2 | 3 | import argparse 4 | import ast 5 | import sys 6 | from pathlib import Path 7 | from typing import cast 8 | 9 | 10 | def get_release_flag_value(filepath: Path): 11 | with open(filepath) as f: 12 | fbody = f.read() 13 | 14 | parsed_ast = ast.parse(fbody) 15 | 16 | toplevel_assignments = [ 17 | node for node in parsed_ast.body if isinstance(node, ast.Assign) 18 | ] 19 | 20 | release_val = None 21 | for node in toplevel_assignments: 22 | if len(node.targets) != 1: 23 | continue 24 | if not isinstance(node.targets[0], ast.Name): 25 | continue 26 | single_target = cast(ast.Name, node.targets[0]) 27 | 28 | assigned_value = node.value 29 | 30 | if single_target.id == "_RELEASE": 31 | ExpectedConstantClass = ( 32 | ast.Constant if sys.version_info.minor >= 8 else ast.NameConstant 33 | ) 34 | if not isinstance(assigned_value, ExpectedConstantClass): 35 | raise Exception( 36 | f"Not a constant value {assigned_value} is " 37 | f"assigned to {single_target}" 38 | ) 39 | release_val = assigned_value.value 40 | 41 | return release_val 42 | 43 | 44 | if __name__ == "__main__": 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument("filename", type=Path) 47 | 48 | args = parser.parse_args() 49 | 50 | is_release = get_release_flag_value(args.filename) 51 | 52 | if not is_release: 53 | print("_RELEASE flag is not set as True") 54 | sys.exit(-1) 55 | 56 | sys.exit(0) 57 | -------------------------------------------------------------------------------- /streamlit_webrtc/__init__.py: -------------------------------------------------------------------------------- 1 | 
"""streamlit-webrtc""" 2 | 3 | import importlib.metadata 4 | 5 | from .component import ( 6 | WebRtcStreamerContext, 7 | WebRtcStreamerState, 8 | webrtc_streamer, 9 | ) 10 | from .config import ( 11 | DEFAULT_AUDIO_HTML_ATTRS, 12 | DEFAULT_MEDIA_STREAM_CONSTRAINTS, 13 | DEFAULT_VIDEO_HTML_ATTRS, 14 | AudioHTMLAttributes, 15 | MediaStreamConstraints, 16 | RTCConfiguration, 17 | Translations, 18 | VideoHTMLAttributes, 19 | ) 20 | from .credentials import ( 21 | get_hf_ice_servers, 22 | get_twilio_ice_servers, 23 | ) 24 | from .factory import create_mix_track, create_process_track, create_video_source_track 25 | from .mix import MediaStreamMixTrack, MixerCallback 26 | from .source import VideoSourceCallback, VideoSourceTrack 27 | from .webrtc import ( 28 | AudioProcessorBase, 29 | AudioProcessorFactory, 30 | AudioReceiver, 31 | MediaPlayerFactory, 32 | MediaRecorderFactory, 33 | VideoProcessorBase, 34 | VideoProcessorFactory, 35 | VideoReceiver, 36 | VideoTransformerBase, 37 | WebRtcMode, 38 | WebRtcWorker, 39 | ) 40 | 41 | # Set __version__ dynamically base on metadata. 42 | # https://github.com/python-poetry/poetry/issues/1036#issuecomment-489880822 43 | # https://github.com/python-poetry/poetry/issues/144#issuecomment-623927302 44 | # https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094 45 | try: 46 | __version__ = importlib.metadata.version(__name__) 47 | except importlib.metadata.PackageNotFoundError: 48 | pass 49 | 50 | # For backward compatibility 51 | VideoTransformerFactory = VideoProcessorFactory 52 | 53 | 54 | __all__ = [ 55 | "webrtc_streamer", 56 | "AudioProcessorBase", 57 | "AudioProcessorFactory", 58 | "AudioReceiver", 59 | "MediaPlayerFactory", 60 | "MediaRecorderFactory", 61 | "VideoProcessorBase", 62 | "VideoProcessorFactory", 63 | "VideoTransformerBase", # XXX: Deprecated 64 | "VideoReceiver", 65 | "VideoSourceTrack", 66 | "VideoSourceCallback", 67 | "create_video_source_track", 68 | "WebRtcMode", 69 | "WebRtcWorker", 70 | "MediaStreamConstraints", 71 | "RTCConfiguration", 72 | "Translations", 73 | "VideoHTMLAttributes", 74 | "AudioHTMLAttributes", 75 | "create_process_track", 76 | "create_mix_track", 77 | "MixerCallback", 78 | "MediaStreamMixTrack", 79 | "WebRtcStreamerContext", 80 | "WebRtcStreamerState", 81 | "DEFAULT_AUDIO_HTML_ATTRS", 82 | "DEFAULT_MEDIA_STREAM_CONSTRAINTS", 83 | "DEFAULT_VIDEO_HTML_ATTRS", 84 | "get_hf_ice_servers", 85 | "get_twilio_ice_servers", 86 | ] 87 | -------------------------------------------------------------------------------- /streamlit_webrtc/_compat.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from packaging import version 3 | 4 | ST_VERSION = version.parse(st.__version__) 5 | 6 | VER_GTE_1_12_0 = ST_VERSION >= version.parse("1.12.0") 7 | """ Since 1.12.0, Streamlit has changed its internal architecture 8 | creating new `web` and `runtime` submodules to which some files have been moved 9 | decoupling the web server-related files and the core runtime, 10 | e.g. https://github.com/streamlit/streamlit/pull/4956. 11 | 12 | During this a huge refactoring, `Server._singleton` and 13 | its accessor `Server.get_current()` have been removed 14 | (https://github.com/streamlit/streamlit/pull/4966) 15 | that we have been using as a server-wide global object, 16 | so we have to change the way to access it. 
17 | """ 18 | 19 | VER_GTE_1_12_1 = ST_VERSION >= version.parse("1.12.1") 20 | """ Since 1.12.1, as a part of the decoupling of the runtime and the web server, 21 | a large part of the `Server` class attributes including the session states 22 | has moved to the `runtime` submodule, and the `Server` class has a `_runtime` attribute. 23 | 24 | Ref: https://github.com/streamlit/streamlit/pull/5136 25 | """ 26 | 27 | VER_GTE_1_14_0 = ST_VERSION >= version.parse("1.14.0") 28 | """ Since 1.14.0, Runtime is a singleton. 29 | So we can access it via `Runtime.instance()` directly without the server object, 30 | and use it as a global object to attach some our original attributes 31 | instead of the server object. 32 | 33 | Ref: https://github.com/streamlit/streamlit/pull/5432 34 | """ 35 | 36 | VER_GTE_1_18_0 = ST_VERSION >= version.parse("1.18.0") 37 | """ Since 1.18.0, Streamlit introduced `SessionManager` protocol 38 | to abstract and improve the session behavior. 39 | 40 | Ref: https://github.com/streamlit/streamlit/pull/5856 41 | """ 42 | 43 | VER_GTE_1_36_0 = ST_VERSION >= version.parse("1.36.0") 44 | """ Since 1.36.0, the `on_change` handler on a custom component is supported. 45 | Ref: https://github.com/streamlit/streamlit/issues/3977 46 | Also, the `.components_callbacks.register_callback` hack no longer works since 1.39.0 47 | where the registered callback via this hack is not called, 48 | so we must use the new `on_change` handler. 49 | """ 50 | 51 | try: 52 | from streamlit.runtime.app_session import AppSession, AppSessionState 53 | except ModuleNotFoundError: 54 | # streamlit < 1.12.0 55 | try: 56 | from streamlit.app_session import AppSession, AppSessionState # type: ignore 57 | except ModuleNotFoundError: 58 | # streamlit < 1.4 59 | from streamlit.report_session import ( # type: ignore 60 | ReportSession as AppSession, 61 | ) 62 | from streamlit.report_session import ( # type: ignore 63 | ReportSessionState as AppSessionState, 64 | ) 65 | 66 | try: 67 | # `SessionManager.get_active_session_info()`, which plays the same role 68 | # as the old `get_session_info()` returns an instance of `ActiveSessionInfo`, 69 | # not `SessionInfo` since 1.18.0. 
70 | from streamlit.runtime.session_manager import ActiveSessionInfo as SessionInfo 71 | except ModuleNotFoundError: 72 | # streamlit < 1.18.0 73 | try: 74 | from streamlit.runtime.runtime import SessionInfo # type: ignore 75 | except ModuleNotFoundError: 76 | # streamlit < 1.12.1 77 | try: 78 | from streamlit.web.server.server import SessionInfo # type: ignore 79 | except ModuleNotFoundError: 80 | # streamlit < 1.12.0 81 | from streamlit.server.server import SessionInfo # type: ignore 82 | 83 | try: 84 | from streamlit.runtime.scriptrunner import get_script_run_ctx 85 | except ModuleNotFoundError: 86 | # streamlit < 1.12.0 87 | try: 88 | from streamlit.scriptrunner import get_script_run_ctx # type: ignore 89 | except ModuleNotFoundError: 90 | # streamlit < 1.8 91 | try: 92 | from streamlit.script_run_context import get_script_run_ctx # type: ignore 93 | except ModuleNotFoundError: 94 | # streamlit < 1.4 95 | from streamlit.report_thread import ( # type: ignore # isort:skip 96 | get_report_ctx as get_script_run_ctx, 97 | ) 98 | 99 | 100 | try: 101 | from streamlit import rerun # type: ignore 102 | except ImportError: 103 | # streamlit < 1.27.0 104 | from streamlit import experimental_rerun as rerun # type: ignore 105 | 106 | try: 107 | from streamlit import cache_data # type: ignore 108 | except ImportError: 109 | # streamlit < 1.18.0 110 | from streamlit import experimental_memo as cache_data # type: ignore 111 | 112 | 113 | __all__ = [ 114 | "VER_GTE_1_12_0", 115 | "VER_GTE_1_12_1", 116 | "VER_GTE_1_14_0", 117 | "VER_GTE_1_18_0", 118 | "AppSession", 119 | "AppSessionState", 120 | "SessionInfo", 121 | "get_script_run_ctx", 122 | "rerun", 123 | "cache_data", 124 | ] 125 | -------------------------------------------------------------------------------- /streamlit_webrtc/components_callbacks.py: -------------------------------------------------------------------------------- 1 | """Patch to use callbacks with Streamlit custom components. 2 | 3 | Usage 4 | ----- 5 | 6 | >>> import streamlit.components.v1 as components 7 | >>> from components_callbacks import register_callback 8 | >>> 9 | >>> print("Script begins...") 10 | >>> 11 | >>> def my_callback(arg1, arg2): 12 | >>> print("New component value:", st.session_state.my_key) 13 | >>> print("Args:", arg1, arg2) 14 | >>> 15 | >>> register_callback("my_key", my_callback, "hello", arg2="world") 16 | >>> 17 | >>> my_component = components.declare_component(...) 18 | >>> my_component(..., key="my_key") 19 | 20 | 21 | Here's the result when you call Streamlit.setComponentValue(): 22 | 23 | New component value: 24 | Args: hello world 25 | Script begins... 26 | 27 | """ 28 | 29 | from streamlit import session_state as _state 30 | 31 | try: 32 | # Streamlit >= 1.34.0 (Ref: https://github.com/streamlit/streamlit/pull/8457) 33 | from streamlit.components.v1 import custom_component as _components # type: ignore 34 | except ImportError: 35 | from streamlit.components.v1 import components as _components # type: ignore 36 | 37 | 38 | def _patch_register_widget(register_widget): 39 | def wrapper_register_widget(*args, **kwargs): 40 | user_key = kwargs.get("user_key", None) 41 | callbacks = _state.get("_components_callbacks", None) 42 | 43 | # Check if a callback was registered for that user_key. 44 | if user_key and callbacks and user_key in callbacks: 45 | callback = callbacks[user_key] 46 | 47 | # Add callback-specific args for the real register_widget function. 
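# NOTE: These kwarg names ("on_change_handler", "args", "kwargs") target Streamlit's private register_widget signature and may break across Streamlit versions; see the 1.36.0/1.39.0 notes in _compat.py.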
48 | kwargs["on_change_handler"] = callback[0] 49 | kwargs["args"] = callback[1] 50 | kwargs["kwargs"] = callback[2] 51 | 52 | # Call the original function with updated kwargs. 53 | return register_widget(*args, **kwargs) 54 | 55 | return wrapper_register_widget 56 | 57 | 58 | # Patch function only once. 59 | if not hasattr(_components.register_widget, "__callbacks_patched__"): 60 | setattr(_components.register_widget, "__callbacks_patched__", True) 61 | _components.register_widget = _patch_register_widget(_components.register_widget) 62 | 63 | 64 | def register_callback(element_key, callback, *callback_args, **callback_kwargs): 65 | # Initialize callbacks store. 66 | if "_components_callbacks" not in _state: 67 | _state._components_callbacks = {} 68 | 69 | # Register a callback for a given element_key. 70 | _state._components_callbacks[element_key] = ( 71 | callback, 72 | callback_args, 73 | callback_kwargs, 74 | ) 75 | -------------------------------------------------------------------------------- /streamlit_webrtc/config.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional, TypedDict, Union 2 | 3 | from aiortc import ( 4 | RTCConfiguration as AiortcRTCConfiguration, 5 | ) 6 | from aiortc import ( 7 | RTCIceServer as AiortcRTCIceServer, 8 | ) 9 | 10 | RTCIceServer = TypedDict( 11 | "RTCIceServer", 12 | { 13 | "urls": Union[str, List[str]], 14 | "username": Optional[str], 15 | "credential": Optional[str], 16 | }, 17 | total=False, 18 | ) 19 | 20 | 21 | class RTCConfiguration(TypedDict, total=False): 22 | iceServers: Optional[List[RTCIceServer]] 23 | 24 | 25 | def compile_rtc_ice_server( 26 | ice_server: Union[RTCIceServer, dict[str, Any]], 27 | ) -> AiortcRTCIceServer: 28 | if not isinstance(ice_server, dict): 29 | raise ValueError("ice_server must be a dict") 30 | if "urls" not in ice_server: 31 | raise ValueError("ice_server must have a urls key") 32 | 33 | return AiortcRTCIceServer( 34 | urls=ice_server["urls"], # type: ignore # aiortc's type def is incorrect 35 | username=ice_server.get("username"), 36 | credential=ice_server.get("credential"), 37 | ) 38 | 39 | 40 | def compile_ice_servers( 41 | ice_servers: Union[List[RTCIceServer], List[dict[str, Any]]], 42 | ) -> List[AiortcRTCIceServer]: 43 | return [ 44 | compile_rtc_ice_server(server) 45 | for server in ice_servers 46 | if isinstance(server, dict) and "urls" in server 47 | ] 48 | 49 | 50 | def compile_rtc_configuration( 51 | rtc_configuration: Union[RTCConfiguration, dict[str, Any]], 52 | ) -> AiortcRTCConfiguration: 53 | if not isinstance(rtc_configuration, dict): 54 | raise ValueError("rtc_configuration must be a dict") 55 | ice_servers = rtc_configuration.get("iceServers", []) 56 | if not isinstance(ice_servers, list): 57 | raise ValueError("iceServers must be a list") 58 | return AiortcRTCConfiguration( 59 | iceServers=compile_ice_servers(ice_servers), 60 | ) 61 | 62 | 63 | Number = Union[int, float] 64 | 65 | 66 | class DoubleRange(TypedDict, total=False): 67 | max: Number 68 | min: Number 69 | 70 | 71 | class ConstrainDoubleRange(DoubleRange, total=False): 72 | exact: Number 73 | ideal: Number 74 | 75 | 76 | class ConstrainBooleanParameters(TypedDict, total=False): 77 | exact: bool 78 | ideal: bool 79 | 80 | 81 | class ULongRange(TypedDict, total=False): 82 | max: Number 83 | min: Number 84 | 85 | 86 | class ConstrainULongRange(ULongRange, total=False): 87 | exact: Number 88 | ideal: Number 89 | 90 | 91 | class 
ConstrainDOMStringParameters(TypedDict, total=False): 92 | exact: Union[str, List[str]] 93 | ideal: Union[str, List[str]] 94 | 95 | 96 | ConstrainDouble = Union[Number, ConstrainDoubleRange] 97 | ConstrainBoolean = Union[bool, ConstrainBooleanParameters] 98 | ConstrainULong = Union[Number, ConstrainULongRange] 99 | ConstrainDOMString = Union[str, List[str], ConstrainDOMStringParameters] 100 | 101 | 102 | class MediaTrackConstraintSet(TypedDict, total=False): 103 | aspectRatio: ConstrainDouble 104 | autoGainControl: ConstrainBoolean 105 | channelCount: ConstrainULong 106 | # deviceId: ConstrainDOMString 107 | echoCancellation: ConstrainBoolean 108 | facingMode: ConstrainDOMString 109 | frameRate: ConstrainDouble 110 | groupId: ConstrainDOMString 111 | height: ConstrainULong 112 | latency: ConstrainDouble 113 | noiseSuppression: ConstrainBoolean 114 | resizeMode: ConstrainDOMString 115 | sampleRate: ConstrainULong 116 | sampleSize: ConstrainULong 117 | width: ConstrainULong 118 | 119 | 120 | class MediaTrackConstraints(MediaTrackConstraintSet, total=False): 121 | advanced: List[MediaTrackConstraintSet] 122 | 123 | 124 | # Ref: https://github.com/microsoft/TypeScript/blob/971133d5d0a56cf362571d21ac971888f8a66820/lib/lib.dom.d.ts#L719 # noqa 125 | class MediaStreamConstraints(TypedDict, total=False): 126 | audio: Union[bool, MediaTrackConstraints] 127 | video: Union[bool, MediaTrackConstraints] 128 | peerIdentity: str 129 | 130 | 131 | CSSProperties = Dict[str, Union[str, int, float]] 132 | 133 | 134 | # Ref: https://github.com/DefinitelyTyped/DefinitelyTyped/blob/2563cecd0398fd9337b2806059446fb9d29abec2/types/react/index.d.ts#L1815 # noqa: E501 135 | class HTMLAttributes(TypedDict, total=False): 136 | # Only necessary attributes are defined here 137 | hidden: bool 138 | style: CSSProperties 139 | 140 | 141 | # Ref: https://github.com/DefinitelyTyped/DefinitelyTyped/blob/2563cecd0398fd9337b2806059446fb9d29abec2/types/react/index.d.ts#L2235 # noqa: E501 142 | class MediaHTMLAttributes(HTMLAttributes, total=False): 143 | autoPlay: bool 144 | controls: bool 145 | controlsList: str 146 | crossOrigin: str 147 | loop: bool 148 | mediaGroup: str 149 | muted: bool 150 | playsInline: bool 151 | preload: str 152 | # src: str # src is controlled by streamlit-webrtc 153 | 154 | 155 | # Ref: https://github.com/DefinitelyTyped/DefinitelyTyped/blob/2563cecd0398fd9337b2806059446fb9d29abec2/types/react/index.d.ts#L2421 # noqa: E501 156 | class VideoHTMLAttributes(MediaHTMLAttributes, total=False): 157 | height: Union[Number, str] 158 | # playsInline: bool # This field already exists in MediaHTMLAttributes and overwriting it when extending is not allowed though it is in the original TypeScript code. 
# noqa: E501 159 | poster: str 160 | width: Union[Number, str] 161 | disablePictureInPicture: bool 162 | disableRemotePlayback: bool 163 | 164 | 165 | # Ref: https://github.com/DefinitelyTyped/DefinitelyTyped/blob/2563cecd0398fd9337b2806059446fb9d29abec2/types/react/index.d.ts#L2016 # noqa: E501 166 | class AudioHTMLAttributes(MediaHTMLAttributes, total=False): 167 | pass 168 | 169 | 170 | class Translations(TypedDict, total=False): 171 | start: str 172 | stop: str 173 | select_device: str 174 | media_api_not_available: str 175 | device_ask_permission: str 176 | device_not_available: str 177 | device_access_denied: str 178 | 179 | 180 | DEFAULT_MEDIA_STREAM_CONSTRAINTS = MediaStreamConstraints(audio=True, video=True) 181 | DEFAULT_VIDEO_HTML_ATTRS = VideoHTMLAttributes( 182 | autoPlay=True, controls=True, style={"width": "100%"} 183 | ) 184 | DEFAULT_AUDIO_HTML_ATTRS = AudioHTMLAttributes(autoPlay=True, controls=True) 185 | -------------------------------------------------------------------------------- /streamlit_webrtc/credentials.py: -------------------------------------------------------------------------------- 1 | """ 2 | MIT License 3 | 4 | Copyright (c) 2024 Freddy Boulton 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | """ 24 | # Original: https://github.com/freddyaboulton/fastrtc/blob/66f0a81b76684c5d58761464fb67642891066f93/LICENSE 25 | 26 | import json 27 | import logging 28 | import os 29 | import urllib.error 30 | import urllib.request 31 | from typing import List, Optional 32 | 33 | from ._compat import cache_data 34 | from .config import RTCIceServer 35 | 36 | LOGGER = logging.getLogger(__name__) 37 | 38 | 39 | HF_ICE_SERVER_TTL = 3600 # 1 hour. Not sure if this is the best value. 
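# Usage sketch (illustrative only, not part of this module's API surface): #   from streamlit_webrtc import webrtc_streamer, get_hf_ice_servers #   webrtc_streamer( #       key="demo", #       rtc_configuration={"iceServers": get_hf_ice_servers()}, #   )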
40 | 41 | 42 | @cache_data(ttl=HF_ICE_SERVER_TTL) 43 | def get_hf_ice_servers(token: Optional[str] = None) -> List[RTCIceServer]: 44 | if token is None: 45 | token = os.getenv("HF_TOKEN") 46 | 47 | if token is None: 48 | raise ValueError("HF_TOKEN is not set") 49 | 50 | req = urllib.request.Request( 51 | "https://fastrtc-turn-server-login.hf.space/credentials", 52 | headers={"X-HF-Access-Token": token}, 53 | ) 54 | try: 55 | with urllib.request.urlopen(req) as response: 56 | if response.status != 200: 57 | raise ValueError("Failed to get credentials from HF turn server") 58 | credentials = json.loads(response.read()) 59 | return [ 60 | { 61 | "urls": "turn:gradio-turn.com:80", 62 | **credentials, 63 | }, 64 | ] 65 | except urllib.error.URLError: 66 | raise ValueError("Failed to get credentials from HF turn server") 67 | 68 | 69 | TWILIO_CRED_TTL = 3600 # 1 hour. Twilio's default is 1 day. Shorter TTL should be ok for this library's use case. 70 | 71 | 72 | @cache_data(ttl=TWILIO_CRED_TTL) 73 | def get_twilio_ice_servers( 74 | twilio_sid: Optional[str] = None, twilio_token: Optional[str] = None 75 | ) -> List[RTCIceServer]: 76 | try: 77 | from twilio.rest import Client 78 | except ImportError: 79 | raise ImportError("Please install twilio with `pip install twilio`") 80 | 81 | if not twilio_sid and not twilio_token: 82 | twilio_sid = os.getenv("TWILIO_ACCOUNT_SID") 83 | twilio_token = os.getenv("TWILIO_AUTH_TOKEN") 84 | 85 | if twilio_sid is None or twilio_token is None: 86 | raise ValueError("TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN must be set") 87 | 88 | client = Client(twilio_sid, twilio_token) 89 | 90 | token = client.tokens.create(ttl=TWILIO_CRED_TTL) 91 | 92 | return token.ice_servers 93 | 94 | 95 | @cache_data(ttl=min(HF_ICE_SERVER_TTL, TWILIO_CRED_TTL)) 96 | def get_available_ice_servers() -> List[RTCIceServer]: 97 | try: 98 | LOGGER.info("Try to use TURN server from Hugging Face.") 99 | hf_turn_servers = get_hf_ice_servers() 100 | LOGGER.info("Successfully got TURN credentials from Hugging Face.") 101 | LOGGER.info("Using TURN server from Hugging Face and STUN server from Google.") 102 | ice_servers = hf_turn_servers + [ 103 | RTCIceServer(urls="stun:stun.l.google.com:19302"), 104 | ] 105 | return ice_servers 106 | except Exception as e: 107 | LOGGER.info("Failed to get TURN credentials from Hugging Face: %s", e) 108 | 109 | try: 110 | LOGGER.info("Try to use STUN/TURN server from Twilio.") 111 | ice_servers = get_twilio_ice_servers() 112 | LOGGER.info("Successfully got STUN/TURN credentials from Twilio.") 113 | return ice_servers 114 | except Exception as e: 115 | LOGGER.info("Failed to get TURN credentials from Twilio: %s", e) 116 | 117 | # NOTE: aiortc anyway uses this STUN server by default if the ICE server config is not set. 118 | # Ref: https://github.com/aiortc/aiortc/blob/3ff9bdd03f22bf511a8d304df30f29392338a070/src/aiortc/rtcicetransport.py#L204-L209 119 | # We set the STUN server here as this will be used on the browser side as well. 
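# (Fallback of last resort: STUN only, no TURN, so connections may still fail behind symmetric NATs.)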
120 | LOGGER.info("Use STUN server from Google.") 121 | return [RTCIceServer(urls="stun:stun.l.google.com:19302")] 122 | -------------------------------------------------------------------------------- /streamlit_webrtc/eventloop.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import contextlib 3 | from typing import Union 4 | 5 | from tornado.platform.asyncio import BaseAsyncIOLoop 6 | 7 | from ._compat import VER_GTE_1_12_0, VER_GTE_1_12_1, VER_GTE_1_14_0 8 | from .server import get_current_server 9 | 10 | 11 | def get_global_event_loop() -> asyncio.AbstractEventLoop: 12 | if VER_GTE_1_14_0: 13 | from streamlit.runtime.runtime import Runtime 14 | 15 | async_objs = Runtime.instance()._get_async_objs() # type: ignore 16 | return async_objs.eventloop 17 | 18 | current_server = get_current_server() 19 | 20 | if VER_GTE_1_12_1: 21 | async_objs = current_server._runtime._get_async_objs() 22 | return async_objs.eventloop 23 | 24 | if VER_GTE_1_12_0: 25 | return current_server._eventloop 26 | 27 | ioloop = current_server._ioloop 28 | 29 | # `ioloop` is expected to be of type `BaseAsyncIOLoop`, 30 | # which has the `asyncio_loop` attribute. 31 | if not isinstance(ioloop, BaseAsyncIOLoop): 32 | raise Exception("Unexpectedly failed to access the asyncio event loop.") 33 | 34 | return ioloop.asyncio_loop 35 | 36 | 37 | @contextlib.contextmanager 38 | def loop_context(loop: asyncio.AbstractEventLoop): 39 | cur_ev_loop: Union[asyncio.AbstractEventLoop, None] 40 | try: 41 | cur_ev_loop = asyncio.get_event_loop() 42 | except RuntimeError: 43 | cur_ev_loop = None 44 | asyncio.set_event_loop(loop) 45 | 46 | yield 47 | 48 | asyncio.set_event_loop(cur_ev_loop) 49 | -------------------------------------------------------------------------------- /streamlit_webrtc/factory.py: -------------------------------------------------------------------------------- 1 | from typing import Literal, Optional, Type, Union, overload 2 | 3 | import streamlit as st 4 | 5 | from .eventloop import get_global_event_loop, loop_context 6 | from .mix import MediaStreamMixTrack, MixerCallback 7 | from .models import ( 8 | AudioProcessorFactory, 9 | AudioProcessorT, 10 | CallbackAttachableProcessor, 11 | FrameCallback, 12 | FrameT, 13 | MediaEndedCallback, 14 | ProcessorFactory, 15 | QueuedVideoFramesCallback, 16 | VideoProcessorFactory, 17 | VideoProcessorT, 18 | ) 19 | from .process import ( 20 | AsyncAudioProcessTrack, 21 | AsyncMediaProcessTrack, 22 | AsyncVideoProcessTrack, 23 | AudioProcessTrack, 24 | MediaProcessTrack, 25 | VideoProcessTrack, 26 | ) 27 | from .relay import get_global_relay 28 | from .source import VideoSourceCallback, VideoSourceTrack 29 | 30 | _PROCESSOR_TRACK_CACHE_KEY_PREFIX = "__PROCESSOR_TRACK_CACHE__" 31 | 32 | 33 | def _get_track_class( 34 | kind: Literal["video", "audio"], async_processing: bool 35 | ) -> Union[Type[MediaProcessTrack], Type[AsyncMediaProcessTrack]]: 36 | if kind == "video": 37 | if async_processing: 38 | return AsyncVideoProcessTrack 39 | else: 40 | return VideoProcessTrack 41 | elif kind == "audio": 42 | if async_processing: 43 | return AsyncAudioProcessTrack 44 | else: 45 | return AudioProcessTrack 46 | else: 47 | raise ValueError(f"Unsupported track type: {kind}") 48 | 49 | 50 | # Overloads for the cases where the processor_factory is specified 51 | @overload 52 | def create_process_track( 53 | input_track, 54 | *, 55 | processor_factory: AudioProcessorFactory[AudioProcessorT], 56 | async_processing: Literal[False], 57 | 
frame_callback: Optional[FrameCallback] = None, 58 | queued_frames_callback: Optional[QueuedVideoFramesCallback] = None, 59 | on_ended: Optional[MediaEndedCallback] = None, 60 | ) -> AudioProcessTrack[AudioProcessorT]: ... 61 | 62 | 63 | @overload 64 | def create_process_track( 65 | input_track, 66 | *, 67 | processor_factory: AudioProcessorFactory[AudioProcessorT], 68 | async_processing: Literal[True] = True, 69 | frame_callback: Optional[FrameCallback] = None, 70 | queued_frames_callback: Optional[QueuedVideoFramesCallback] = None, 71 | on_ended: Optional[MediaEndedCallback] = None, 72 | ) -> AsyncAudioProcessTrack[AudioProcessorT]: ... 73 | 74 | 75 | @overload 76 | def create_process_track( 77 | input_track, 78 | *, 79 | processor_factory: VideoProcessorFactory[VideoProcessorT], 80 | async_processing: Literal[False], 81 | frame_callback: Optional[FrameCallback] = None, 82 | queued_frames_callback: Optional[QueuedVideoFramesCallback] = None, 83 | on_ended: Optional[MediaEndedCallback] = None, 84 | ) -> VideoProcessTrack[VideoProcessorT]: ... 85 | 86 | 87 | @overload 88 | def create_process_track( 89 | input_track, 90 | *, 91 | processor_factory: VideoProcessorFactory[VideoProcessorT], 92 | async_processing: Literal[True] = True, 93 | frame_callback: Optional[FrameCallback] = None, 94 | queued_frames_callback: Optional[QueuedVideoFramesCallback] = None, 95 | on_ended: Optional[MediaEndedCallback] = None, 96 | ) -> AsyncVideoProcessTrack[VideoProcessorT]: ... 97 | 98 | 99 | # Overloads for the cases where the processor_factory is NOT specified 100 | @overload 101 | def create_process_track( 102 | input_track, 103 | *, 104 | frame_callback: FrameCallback[FrameT], 105 | async_processing: Literal[False], 106 | processor_factory: Literal[None] = None, 107 | queued_frames_callback: Optional[QueuedVideoFramesCallback] = None, 108 | on_ended: Optional[MediaEndedCallback] = None, 109 | ) -> MediaProcessTrack[CallbackAttachableProcessor[FrameT], FrameT]: ... 110 | 111 | 112 | @overload 113 | def create_process_track( 114 | input_track, 115 | *, 116 | frame_callback: FrameCallback[FrameT], 117 | processor_factory: Literal[None] = None, 118 | async_processing: Literal[True] = True, 119 | queued_frames_callback: Optional[QueuedVideoFramesCallback] = None, 120 | on_ended: Optional[MediaEndedCallback] = None, 121 | ) -> AsyncMediaProcessTrack[CallbackAttachableProcessor[FrameT], FrameT]: ... 
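# Usage sketch (mirrors pages/5_fork_multi_outputs.py and pages/6_mix_multi_inputs.py; `my_frame_callback` is a hypothetical callback): #   track = create_process_track( #       input_track=ctx.output_video_track, #       frame_callback=my_frame_callback, #   ) # By default this returns an async-processing track (async_processing=True).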
122 | 123 | 124 | def create_process_track( 125 | input_track, 126 | frame_callback: Optional[FrameCallback] = None, 127 | queued_frames_callback: Optional[QueuedVideoFramesCallback] = None, 128 | on_ended: Optional[MediaEndedCallback] = None, 129 | processor_factory: Optional[ProcessorFactory] = None, # Old API 130 | async_processing=True, 131 | ) -> Union[MediaProcessTrack, AsyncMediaProcessTrack]: 132 | cache_key = _PROCESSOR_TRACK_CACHE_KEY_PREFIX + str(input_track.id) 133 | 134 | if cache_key in st.session_state: 135 | processor_track = st.session_state[cache_key] 136 | if not processor_factory: 137 | processor: CallbackAttachableProcessor = processor_track.processor 138 | processor.update_callbacks( 139 | frame_callback=frame_callback, 140 | queued_frames_callback=queued_frames_callback, 141 | ended_callback=on_ended, 142 | ) 143 | else: 144 | if processor_factory: 145 | processor = processor_factory() 146 | else: 147 | processor = CallbackAttachableProcessor( 148 | frame_callback=frame_callback, 149 | queued_frames_callback=queued_frames_callback, 150 | ended_callback=on_ended, 151 | ) 152 | Track = _get_track_class(input_track.kind, async_processing) 153 | loop = get_global_event_loop() 154 | relay = get_global_relay() 155 | with loop_context(loop): 156 | processor_track = Track(relay.subscribe(input_track), processor) 157 | st.session_state[cache_key] = processor_track 158 | 159 | return processor_track 160 | 161 | 162 | _MIXER_TRACK_CACHE_KEY_PREFIX = "__MIXER_TRACK_CACHE__" 163 | 164 | 165 | def create_mix_track( 166 | kind: str, 167 | mixer_callback: MixerCallback[FrameT], 168 | key: str, 169 | mixer_output_interval: float = 1 / 30, 170 | ) -> MediaStreamMixTrack[FrameT]: 171 | cache_key = _MIXER_TRACK_CACHE_KEY_PREFIX + key 172 | if cache_key in st.session_state: 173 | mixer_track: MediaStreamMixTrack = st.session_state[cache_key] 174 | mixer_track._update_mixer_callback(mixer_callback) 175 | else: 176 | mixer_track = MediaStreamMixTrack( 177 | kind=kind, 178 | mixer_callback=mixer_callback, 179 | mixer_output_interval=mixer_output_interval, 180 | ) 181 | st.session_state[cache_key] = mixer_track 182 | return mixer_track 183 | 184 | 185 | _VIDEO_SOURCE_TRACK_CACHE_KEY_PREFIX = "__VIDEO_SOURCE_TRACK_CACHE__" 186 | 187 | 188 | def create_video_source_track( 189 | callback: VideoSourceCallback, 190 | key: str, 191 | fps=30, 192 | ) -> VideoSourceTrack: 193 | cache_key = _VIDEO_SOURCE_TRACK_CACHE_KEY_PREFIX + key 194 | if ( 195 | cache_key in st.session_state 196 | and isinstance(st.session_state[cache_key], VideoSourceTrack) 197 | and st.session_state[cache_key].kind == "video" 198 | and st.session_state[cache_key].readyState == "live" 199 | ): 200 | video_source_track: VideoSourceTrack = st.session_state[cache_key] 201 | video_source_track._callback = callback 202 | video_source_track._fps = fps 203 | else: 204 | video_source_track = VideoSourceTrack(callback=callback, fps=fps) 205 | st.session_state[cache_key] = video_source_track 206 | return video_source_track 207 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | 25 | *storybook.log -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/.npmrc: -------------------------------------------------------------------------------- 1 | public-hoist-pattern[]=streamlit-component-lib 2 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/.prettierignore: -------------------------------------------------------------------------------- 1 | dist 2 | coverage 3 | 4 | pnpm-lock.yaml 5 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/.prettierrc.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/eslint.config.js: -------------------------------------------------------------------------------- 1 | import globals from "globals"; 2 | import { defineConfig } from "eslint/config"; 3 | import pluginJs from "@eslint/js"; 4 | import tseslint from "typescript-eslint"; 5 | import pluginReact from "eslint-plugin-react"; 6 | import reactHooks from "eslint-plugin-react-hooks"; 7 | import reactRefresh from "eslint-plugin-react-refresh"; 8 | import pluginReactJSXRuntime from "eslint-plugin-react/configs/jsx-runtime.js"; 9 | 10 | /** @type {import('eslint').Linter.Config[]} */ 11 | export default defineConfig([ 12 | { ignores: ["dist"] }, 13 | { files: ["**/*.{js,mjs,cjs,ts,jsx,tsx}"] }, 14 | { languageOptions: { globals: globals.browser } }, 15 | pluginJs.configs.recommended, 16 | ...tseslint.configs.recommended, 17 | { settings: { react: { version: "detect" } } }, 18 | pluginReact.configs.flat.recommended, 19 | reactHooks.configs["recommended-latest"], 20 | reactRefresh.configs.recommended, 21 | pluginReactJSXRuntime, 22 | ]); 23 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Streamlit WebRTC Component 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "streamlit_webrtc", 3 | "version": "0.1.0", 4 | "private": true, 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "tsc -b && vite build", 9 | "test": "vitest", 10 | "format": "run-p format:*", 11 | "format:eslint": "eslint --fix 'src/**/*.{ts,tsx}'", 12 | "format:prettier": "prettier --write .", 13 | "lint": "run-p lint:*", 14 | "lint:eslint": "eslint .", 15 | "lint:prettier": "prettier --check .", 16 | "ladle": "ladle serve" 17 | }, 18 | "dependencies": { 19 | "@emotion/react": "^11.14.0", 20 | "@emotion/styled": "^11.14.0", 21 | "@mui/icons-material": "^6.4.8", 22 | "@mui/material": "^6.4.8", 23 | "chroma-js": "^3.1.2", 24 | "react": "^18.3.1", 25 | "react-dom": "^18.3.1", 26 | "streamlit-component-lib-react-hooks": "^2.1.0", 27 | "webrtc-adapter": "^9.0.1" 28 | }, 29 | "devDependencies": { 30 | "@eslint/js": "^9.24.0", 31 | "@ladle/react": "^5.0.2", 32 | "@testing-library/react": "^16.3.0", 33 | "@testing-library/user-event": "^14.6.1", 34 | "@types/chroma-js": "^3.1.1", 35 | "@types/node": "^22.14.0", 36 | "@types/react": "^18.3.12", 37 | "@types/react-dom": "^18.3.0", 38 | "@vitejs/plugin-react": "^4.3.4", 39 | "eslint": "^9.24.0", 40 | "eslint-plugin-react": "^7.37.5", 41 | "eslint-plugin-react-hooks": "^5.2.0", 42 | "eslint-plugin-react-refresh": "^0.4.19", 43 | "globals": "^16.0.0", 44 | "jsdom": "^26.0.0", 45 | "npm-run-all2": "^7.0.2", 46 | "prettier": "^3.5.3", 47 | "typescript": "^5.8.3", 48 | "typescript-eslint": "^8.29.1", 49 | "vite": "^6.2.6", 50 | "vitest": "^3.1.1" 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/DeviceSelect.stories.tsx: -------------------------------------------------------------------------------- 1 | import type { Story } from "@ladle/react"; 2 | 3 | import DeviceSelect, { DeviceSelectProps } from "./DeviceSelect"; 4 | 5 | const Base: Story = (props: DeviceSelectProps) => ( 6 | 7 | ); 8 | Base.argTypes = { 9 | onSelect: { action: "selected" }, 10 | }; 11 | 12 | export const Both = Base.bind({}); 13 | Both.args = { 14 | video: true, 15 | audio: true, 16 | defaultVideoDeviceId: undefined, 17 | defaultAudioDeviceId: undefined, 18 | onSelect: () => {}, 19 | }; 20 | 21 | export const VideoOnly = Base.bind({}); 22 | VideoOnly.args = { 23 | video: true, 24 | audio: false, 25 | defaultVideoDeviceId: undefined, 26 | defaultAudioDeviceId: undefined, 27 | onSelect: () => {}, 28 | }; 29 | 30 | export const AudioOnly = Base.bind({}); 31 | AudioOnly.args = { 32 | video: false, 33 | audio: true, 34 | defaultVideoDeviceId: undefined, 35 | defaultAudioDeviceId: undefined, 36 | onSelect: () => {}, 37 | }; 38 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/DeviceSelectForm.tsx: -------------------------------------------------------------------------------- 1 | import Box from "@mui/material/Box"; 2 | import Button from "@mui/material/Button"; 3 | import Stack from "@mui/material/Stack"; 4 | import DeviceSelect, { DeviceSelectProps } from "./DeviceSelect"; 5 | 6 | export interface DeviceSelectFormProps extends DeviceSelectProps { 7 | onClose: () => void; 8 | } 9 | function DeviceSelectForm({ 10 | onClose, 11 | ...deviceSelectProps 12 | }: 
35 | 
36 | export default DeviceSelectForm;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/VideoPreview.tsx:
--------------------------------------------------------------------------------
1 | import React, { useEffect, useRef } from "react";
2 | import VideoPreviewComponent from "./components/VideoPreview";
3 | import { stopAllTracks } from "./utils";
4 | 
5 | export interface VideoPreviewProps {
6 |   deviceId: MediaDeviceInfo["deviceId"];
7 | }
8 | function VideoPreview(props: VideoPreviewProps) {
9 |   const videoRef = useRef<HTMLVideoElement>(null);
10 | 
11 |   useEffect(() => {
12 |     if (props.deviceId == null) {
13 |       return;
14 |     }
15 | 
16 |     let stream: MediaStream | null = null;
17 |     let unmounted = false;
18 |     navigator.mediaDevices
19 |       .getUserMedia({ video: { deviceId: props.deviceId }, audio: false })
20 |       .then((_stream) => {
21 |         stream = _stream;
22 | 
23 |         // getUserMedia() may resolve after the effect has already been
24 |         // cleaned up; in that case, stop the acquired tracks immediately.
25 |         if (unmounted) {
26 |           stopAllTracks(stream);
27 |           return;
28 |         }
29 | 
30 |         if (videoRef.current) {
31 |           videoRef.current.srcObject = stream;
32 |         }
33 |       });
34 | 
35 |     return () => {
36 |       unmounted = true;
37 |       if (stream) {
38 |         stopAllTracks(stream);
39 |       }
40 |     };
41 |   }, [props.deviceId]);
42 | 
43 |   return <VideoPreviewComponent ref={videoRef} autoPlay muted />;
44 | }
45 | 
46 | export default React.memo(VideoPreview);
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/Defer.stories.tsx:
--------------------------------------------------------------------------------
1 | import Paper from "@mui/material/Paper";
2 | import type { Story } from "@ladle/react";
3 | 
4 | import Defer, { DeferProps } from "./Defer";
5 | 
6 | const InnerComponent = () => <Paper>Lorem ipsum</Paper>;
7 | 
8 | export const Default: Story<DeferProps> = (props: DeferProps) => (
9 |   <Defer {...props} />
10 | );
11 | Default.args = {
12 |   time: 1000,
13 |   children: <InnerComponent />,
14 | };
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/Defer.tsx:
--------------------------------------------------------------------------------
1 | import React, { useEffect, useState } from "react";
2 | import Box from "@mui/material/Box";
3 | import { styled } from "@mui/material/styles";
4 | 
5 | interface OverlayBoxProps {
6 |   $transparent: boolean;
7 | }
8 | const OverlayBox = styled(Box, {
9 |   // Prevent the custom prop from being passed through to the inner HTML tag.
10 |   shouldForwardProp: (prop) => prop !== "$transparent",
11 | })<OverlayBoxProps>(({ theme, $transparent }) => ({
12 |   margin: 0,
13 |   padding: 0,
14 |   position: "relative",
15 |   "&:before": {
16 |     position: "absolute",
17 |     content: '""',
18 |     width: "100%",
19 |     height: "100%",
20 |     opacity: $transparent ? 0 : 1,
21 |     backgroundColor: theme.palette.background.default,
22 |     transition: "opacity 0.3s",
23 |   },
24 | }));
25 | 
26 | export interface DeferProps {
27 |   time: number;
28 |   children: React.ReactElement;
29 | }
30 | function Defer(props: DeferProps) {
31 |   const [elapsed, setElapsed] = useState(false);
32 | 
33 |   useEffect(() => {
34 |     const timer = setTimeout(() => {
35 |       setElapsed(true);
36 |     }, props.time);
37 | 
38 |     return () => clearTimeout(timer);
39 |   }, [props.time]);
40 | 
41 |   return <OverlayBox $transparent={elapsed}>{props.children}</OverlayBox>;
42 | }
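43 | 
44 | // Usage sketch: keep `children` covered by the theme-colored overlay for the
45 | // first 500 ms, e.g. to avoid flashing a transient message (the wrapped
46 | // component here is an assumption for illustration):
47 | //   <Defer time={500}>
48 | //     <AskPermissionMessage />
49 | //   </Defer>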
50 | 
51 | export default Defer;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/DeviceSelectContainer.tsx:
--------------------------------------------------------------------------------
1 | import { Streamlit } from "streamlit-component-lib";
2 | import React, { useEffect } from "react";
3 | import Stack from "@mui/material/Stack";
4 | import { useTheme } from "@mui/material/styles";
5 | import useMediaQuery from "@mui/material/useMediaQuery";
6 | 
7 | interface DeviceSelectContainerProps {
8 |   children: React.ReactNode;
9 | }
10 | function DeviceSelectContainer(props: DeviceSelectContainerProps) {
11 |   const theme = useTheme();
12 |   const isSmallViewport = useMediaQuery(theme.breakpoints.down("sm"));
13 | 
14 |   // The row/column switch below changes the content height, so the Streamlit
15 |   // iframe has to be re-measured whenever the breakpoint flips.
16 |   useEffect(() => {
17 |     Streamlit.setFrameHeight();
18 |   }, [isSmallViewport]);
19 | 
20 |   return (
21 |     <Stack direction={isSmallViewport ? "column" : "row"} spacing={2}>
22 |       {props.children}
23 |     </Stack>
24 |   );
25 | }
26 | 
27 | export default DeviceSelectContainer;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/VideoPreview.tsx:
--------------------------------------------------------------------------------
1 | import { styled } from "@mui/material/styles";
2 | 
3 | const StyledVideo = styled("video")({
4 |   maxWidth: "100%",
5 |   maxHeight: "100%",
6 | });
7 | 
8 | const VideoPreview = StyledVideo;
9 | 
10 | export default VideoPreview;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/VideoPreviewContainer.tsx:
--------------------------------------------------------------------------------
1 | import Box from "@mui/material/Box";
2 | import { styled } from "@mui/material/styles";
3 | 
4 | const StyledBox = styled(Box)(({ theme }) => ({
5 |   position: "relative",
6 |   [theme.breakpoints.down("sm")]: {
7 |     width: "100%",
8 |   },
9 |   width: theme.spacing(24),
10 |   height: theme.spacing(16),
11 |   maxHeight: theme.spacing(16),
12 |   display: "flex",
13 |   justifyContent: "center",
14 |   alignItems: "center",
15 | }));
16 | 
17 | const VideoPreviewContainer = StyledBox;
18 | 
19 | export default VideoPreviewContainer;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/VoidVideoPreview.stories.tsx:
--------------------------------------------------------------------------------
1 | import type { Story } from "@ladle/react";
2 | 
3 | import VoidVideoPreview from "./VoidVideoPreview";
4 | 
5 | export const Default: Story = () => <VoidVideoPreview />;
"@mui/icons-material/VideocamOff"; 4 | import { styled } from "@mui/material/styles"; 5 | 6 | const StyledPaper = styled(Paper)({ 7 | display: "flex", 8 | justifyContent: "center", 9 | alignItems: "center", 10 | width: "100%", 11 | height: "100%", 12 | }); 13 | 14 | function VoidVideoPreview() { 15 | return ( 16 | 17 | 18 | 19 | ); 20 | } 21 | 22 | export default React.memo(VoidVideoPreview); 23 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/components/messages/AccessDeniedMessage.stories.tsx: -------------------------------------------------------------------------------- 1 | import type { Story } from "@ladle/react"; 2 | 3 | import AccessDeniedMessage, { 4 | AccessDeniedMessageProps, 5 | } from "./AccessDeniedMessage"; 6 | 7 | export const Default: Story = ( 8 | props: AccessDeniedMessageProps, 9 | ) => ; 10 | Default.args = { 11 | error: new Error("This is an error"), 12 | }; 13 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/components/messages/AccessDeniedMessage.tsx: -------------------------------------------------------------------------------- 1 | import Message from "./Message"; 2 | import { useTranslation } from "../../../translation/useTranslation"; 3 | 4 | export interface AccessDeniedMessageProps { 5 | error: Error; 6 | } 7 | function AccessDeniedMessage(props: AccessDeniedMessageProps) { 8 | return ( 9 | 10 | {useTranslation("device_access_denied") || "Access denied"} ( 11 | {props.error.message}) 12 | 13 | ); 14 | } 15 | 16 | export default AccessDeniedMessage; 17 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/components/messages/AskPermissionMessage.stories.tsx: -------------------------------------------------------------------------------- 1 | import type { Story } from "@ladle/react"; 2 | 3 | import AskPermissionMessage from "./AskPermissionMessage"; 4 | 5 | export const Default: Story = () => ; 6 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/components/messages/AskPermissionMessage.tsx: -------------------------------------------------------------------------------- 1 | import Message from "./Message"; 2 | import { useTranslation } from "../../../translation/useTranslation"; 3 | 4 | function AskPermissionMessage() { 5 | return ( 6 | 7 | {useTranslation("device_ask_permission") || 8 | "Please allow the app to use your media devices"} 9 | 10 | ); 11 | } 12 | 13 | export default AskPermissionMessage; 14 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/components/messages/DeviceNotAvailableMessage.stories.tsx: -------------------------------------------------------------------------------- 1 | import type { Story } from "@ladle/react"; 2 | 3 | import DeviceNotAvailableMessage, { 4 | DeviceNotAvailableMessageProps, 5 | } from "./DeviceNotAvailableMessage"; 6 | 7 | export const Default: Story = ( 8 | props: DeviceNotAvailableMessageProps, 9 | ) => ; 10 | Default.args = { 11 | error: new Error("This is an error"), 12 | }; 13 | -------------------------------------------------------------------------------- /streamlit_webrtc/frontend/src/DeviceSelect/components/messages/DeviceNotAvailableMessage.tsx: -------------------------------------------------------------------------------- 1 | 
19 | 
20 | export default AskPermissionMessage;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/messages/DeviceNotAvailableMessage.stories.tsx:
--------------------------------------------------------------------------------
1 | import type { Story } from "@ladle/react";
2 | 
3 | import DeviceNotAvailableMessage, {
4 |   DeviceNotAvailableMessageProps,
5 | } from "./DeviceNotAvailableMessage";
6 | 
7 | export const Default: Story<DeviceNotAvailableMessageProps> = (
8 |   props: DeviceNotAvailableMessageProps,
9 | ) => <DeviceNotAvailableMessage {...props} />;
10 | Default.args = {
11 |   error: new Error("This is an error"),
12 | };
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/messages/DeviceNotAvailableMessage.tsx:
--------------------------------------------------------------------------------
1 | import Message from "./Message";
2 | import { useTranslation } from "../../../translation/useTranslation";
3 | 
4 | export interface DeviceNotAvailableMessageProps {
5 |   error: Error;
6 | }
7 | function DeviceNotAvailableMessage(props: DeviceNotAvailableMessageProps) {
8 |   return (
9 |     <Message>
10 |       {useTranslation("device_not_available") || "Device not available"} (
11 |       {props.error.message})
12 |     </Message>
13 |   );
14 | }
15 | 
16 | export default DeviceNotAvailableMessage;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/messages/MediaApiNotAvailableMessage.stories.tsx:
--------------------------------------------------------------------------------
1 | import type { Story } from "@ladle/react";
2 | 
3 | import MediaApiNotAvailableMessage from "./MediaApiNotAvailableMessage";
4 | 
5 | export const Default: Story = () => <MediaApiNotAvailableMessage />;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/messages/MediaApiNotAvailableMessage.tsx:
--------------------------------------------------------------------------------
1 | import Message from "./Message";
2 | import { useTranslation } from "../../../translation/useTranslation";
3 | 
4 | function MediaApiNotAvailableMessage() {
5 |   return (
6 |     <Message>
7 |       {useTranslation("media_api_not_available") || "Media API not available"}
8 |     </Message>
9 |   );
10 | }
11 | 
12 | export default MediaApiNotAvailableMessage;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/messages/Message.stories.tsx:
--------------------------------------------------------------------------------
1 | import type { Story } from "@ladle/react";
2 | 
3 | import Message from "./Message";
4 | 
5 | export const Default: Story = () => <Message>Lorem ipsum</Message>;
6 | Default.args = {
7 |   children: "Lorem ipsum",
8 | };
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/components/messages/Message.tsx:
--------------------------------------------------------------------------------
1 | import Paper from "@mui/material/Paper";
2 | import { styled } from "@mui/material/styles";
3 | 
4 | const StyledPaper = styled(Paper)(({ theme }) => ({
5 |   display: "flex",
6 |   justifyContent: "center",
7 |   alignItems: "center",
8 |   width: "100%",
9 |   height: "100%",
10 |   padding: theme.spacing(2),
11 |   boxSizing: "border-box",
12 | }));
13 | 
14 | const Message = StyledPaper;
15 | 
16 | export default Message;
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/DeviceSelect/utils.ts:
--------------------------------------------------------------------------------
1 | export function stopAllTracks(stream: MediaStream) {
2 |   stream.getVideoTracks().forEach((track) => track.stop());
3 |   stream.getAudioTracks().forEach((track) => track.stop());
4 | }
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/InfoHeader.stories.tsx:
--------------------------------------------------------------------------------
1 | import type { Story } from "@ladle/react";
2 | 
3 | import InfoHeader, { InfoHeaderProps } from "./InfoHeader";
4 | 
5 | const Base: Story<InfoHeaderProps> = (props: InfoHeaderProps) => (
6 |   <InfoHeader {...props} />
7 | );
8 | 
9 | export const ErrorCase = Base.bind({});
10 | ErrorCase.args = {
11 |   error: new Error("Some error"),
12 |   shouldShowTakingTooLongWarning: false,
13 | };
14 | 
15 | export const TakingTooLongWarningCase = Base.bind({});
16 | TakingTooLongWarningCase.args = {
17 |   error: null,
18 |   shouldShowTakingTooLongWarning: true,
19 | };
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/InfoHeader.tsx:
--------------------------------------------------------------------------------
1 | import React, { useEffect } from "react";
2 | import Alert from "@mui/material/Alert";
3 | import Fade from "@mui/material/Fade";
4 | import { Streamlit } from "streamlit-component-lib";
5 | 
6 | export interface InfoHeaderProps {
7 |   error: Error | undefined | null;
8 |   shouldShowTakingTooLongWarning: boolean;
9 | }
10 | function InfoHeader(props: InfoHeaderProps) {
11 |   // The rendered alerts change the component's height, so ask Streamlit to
12 |   // re-measure the iframe on every render.
13 |   useEffect(() => {
14 |     Streamlit.setFrameHeight();
15 |   });
16 | 
17 |   return (
18 |     <>
19 |       {props.error ? (
20 |         <Alert severity="error">
21 |           {props.error.name}: {props.error.message}
22 |         </Alert>
23 |       ) : (
24 |         props.shouldShowTakingTooLongWarning && (
25 |           <Fade in>
26 |             <Alert severity="warning">
27 |               Connection is taking longer than expected. Check your network or
28 |               ask the developer for STUN/TURN settings if the problem persists.
29 |             </Alert>
30 |           </Fade>
31 |         )
32 |       )}
33 |     </>
34 |   );
35 | }
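36 | 
37 | // Usage sketch (the prop wiring is an assumption; the real call site is
38 | // presumably in WebRtcStreamer.tsx, which is not shown here):
39 | //   <InfoHeader
40 | //     error={error}
41 | //     shouldShowTakingTooLongWarning={state === "connecting" && timedOut}
42 | //   />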
43 | 
44 | export default React.memo(InfoHeader);
--------------------------------------------------------------------------------
/streamlit_webrtc/frontend/src/MediaStreamPlayer.tsx:
--------------------------------------------------------------------------------
1 | import { Streamlit } from "streamlit-component-lib";
2 | import React, {
3 |   useEffect,
4 |   useCallback,
5 |   VideoHTMLAttributes,
6 |   AudioHTMLAttributes,
7 |   HTMLAttributes,
8 | } from "react";
9 | 
10 | type UserDefinedHTMLVideoAttributes = Partial<
11 |   Omit<
12 |     VideoHTMLAttributes<HTMLVideoElement>,
13 |     keyof Omit<HTMLAttributes<HTMLElement>, "hidden" | "style"> | "src"
14 |   >
15 | >;
16 | type UserDefinedHTMLAudioAttributes = Partial<
17 |   Omit<
18 |     AudioHTMLAttributes<HTMLAudioElement>,
19 |     keyof Omit<HTMLAttributes<HTMLElement>, "hidden" | "style"> | "src"
20 |   >
21 | >;
22 | 
23 | interface MediaStreamPlayerProps {
24 |   stream: MediaStream;
25 |   userDefinedVideoAttrs: UserDefinedHTMLVideoAttributes | undefined;
26 |   userDefinedAudioAttrs: UserDefinedHTMLAudioAttributes | undefined;
27 | }
28 | function MediaStreamPlayer(props: MediaStreamPlayerProps) {
29 |   useEffect(() => {
30 |     Streamlit.setFrameHeight();
31 |   });
32 | 
33 |   const hasVideo = props.stream.getVideoTracks().length > 0;
34 | 
35 |   const refCallback = useCallback(
36 |     (node: HTMLVideoElement | HTMLAudioElement | null) => {
37 |       if (node) {
38 |         node.srcObject = props.stream;
39 |       }
40 |     },
41 |     [props.stream],
42 |   );
43 | 
44 |   const refreshFrameHeight = useCallback(() => Streamlit.setFrameHeight(), []);
45 | 
46 |   if (hasVideo) {
47 |     // NOTE: Enumerate all allowed props instead of simply spreading all the
48 |     // fields in props.userDefinedVideoAttrs, in order to block unexpected
49 |     // fields, especially dangerouslySetInnerHTML.
50 |     const videoProps: VideoHTMLAttributes<HTMLVideoElement> = {
51 |       hidden: props.userDefinedVideoAttrs?.hidden,
52 |       style: props.userDefinedVideoAttrs?.style,
53 |       autoPlay: props.userDefinedVideoAttrs?.autoPlay,
54 |       controls: props.userDefinedVideoAttrs?.controls,
55 |       controlsList: props.userDefinedVideoAttrs?.controlsList,
56 |       crossOrigin: props.userDefinedVideoAttrs?.crossOrigin,
57 |       loop: props.userDefinedVideoAttrs?.loop,
58 |       mediaGroup: props.userDefinedVideoAttrs?.mediaGroup,
59 |       muted: props.userDefinedVideoAttrs?.muted,
60 |       playsInline: props.userDefinedVideoAttrs?.playsInline,
61 |       preload: props.userDefinedVideoAttrs?.preload,
62 |       height: props.userDefinedVideoAttrs?.height,
63 |       poster: props.userDefinedVideoAttrs?.poster,
64 |       width: props.userDefinedVideoAttrs?.width,
65 |       disablePictureInPicture:
66 |         props.userDefinedVideoAttrs?.disablePictureInPicture,
67 |       disableRemotePlayback: props.userDefinedVideoAttrs?.disableRemotePlayback,
68 |     };
69 | 
70 |     return (
71 | 