├── .github
│   └── workflows
│       ├── docs.yml
│       ├── tests-frontend.yml
│       └── tests.yml
├── .gitignore
├── CNAME
├── LICENSE
├── README.md
├── backend
│   └── fastrtc
│       ├── __init__.py
│       ├── credentials.py
│       ├── pause_detection
│       │   ├── __init__.py
│       │   ├── protocol.py
│       │   └── silero.py
│       ├── py.typed
│       ├── reply_on_pause.py
│       ├── reply_on_stopwords.py
│       ├── speech_to_text
│       │   ├── __init__.py
│       │   ├── stt_.py
│       │   └── test_file.wav
│       ├── stream.py
│       ├── templates
│       │   ├── component
│       │   │   ├── _basePickBy-BWyW5hVA.js
│       │   │   ├── _baseUniq-BfI_PfuI.js
│       │   │   ├── arc-BHDr2DIN.js
│       │   │   ├── architectureDiagram-IEHRJDOE-SNjtt7L7.js
│       │   │   ├── assets
│       │   │   │   └── worker-lPYB70QI.js
│       │   │   ├── blockDiagram-JOT3LUYC-DoVxwmWs.js
│       │   │   ├── c4Diagram-VJAJSXHY-BFLwyIU9.js
│       │   │   ├── channel-BLI8LD7T.js
│       │   │   ├── chunk-4BMEZGHF-4N88GRXN.js
│       │   │   ├── chunk-A2AXSNBT-C4qvwI5K.js
│       │   │   ├── chunk-AEK57VVT-OrsXQu-U.js
│       │   │   ├── chunk-D6G4REZN-BSQJOIKu.js
│       │   │   ├── chunk-RZ5BOZE2-BQm8zocb.js
│       │   │   ├── chunk-XZIHB7SX-DIHERCaT.js
│       │   │   ├── classDiagram-GIVACNV2-KrkkVrlR.js
│       │   │   ├── classDiagram-v2-COTLJTTW-KrkkVrlR.js
│       │   │   ├── clone-D_f12Uao.js
│       │   │   ├── cytoscape.esm-C2cgT2B2.js
│       │   │   ├── dagre-OKDRZEBW-BpZfNC14.js
│       │   │   ├── diagram-SSKATNLV-OTX44Aig.js
│       │   │   ├── diagram-VNBRO52H-CxKrAEhh.js
│       │   │   ├── erDiagram-Q7BY3M3F-CO2pasYc.js
│       │   │   ├── flowDiagram-4HSFHLVR-DiHdHcaJ.js
│       │   │   ├── ganttDiagram-APWFNJXF-BzPYWX9W.js
│       │   │   ├── gitGraphDiagram-7IBYFJ6S-DFMHUBmV.js
│       │   │   ├── graph-as_7zmXK.js
│       │   │   ├── index-xxHpJ_RR.js
│       │   │   ├── index.js
│       │   │   ├── infoDiagram-PH2N3AL5-fhMlkv6w.js
│       │   │   ├── init-DjUOC4st.js
│       │   │   ├── journeyDiagram-U35MCT3I-BI3B5NA4.js
│       │   │   ├── kanban-definition-NDS4AKOZ-BdftdmWH.js
│       │   │   ├── layout-BG95tefZ.js
│       │   │   ├── linear-CRa8eD4r.js
│       │   │   ├── mermaid.core-C0Blj36u.js
│       │   │   ├── mindmap-definition-ALO5MXBD-BO2Uu9ee.js
│       │   │   ├── ordinal-DfAQgscy.js
│       │   │   ├── pieDiagram-IB7DONF6-DDe9KgBF.js
│       │   │   ├── quadrantDiagram-7GDLP6J5-BGl9qPho.js
│       │   │   ├── radar-MK3ICKWK-Uwn-jZp4.js
│       │   │   ├── requirementDiagram-KVF5MWMF-BvKrRVax.js
│       │   │   ├── sankeyDiagram-QLVOVGJD-B_m0WTk6.js
│       │   │   ├── sequenceDiagram-X6HHIX6F-Bfni-YW_.js
│       │   │   ├── stateDiagram-DGXRK772-CpehDlzW.js
│       │   │   ├── stateDiagram-v2-YXO3MK2T-CFM2lJF8.js
│       │   │   ├── style.css
│       │   │   ├── timeline-definition-BDJGKUSR-C6DrPqLg.js
│       │   │   └── xychartDiagram-VJFVF3MP-BKtEAN5R.js
│       │   └── example
│       │       ├── assets
│       │       │   └── worker-lPYB70QI.js
│       │       ├── index.js
│       │       └── style.css
│       ├── text_to_speech
│       │   ├── __init__.py
│       │   ├── test_tts.py
│       │   └── tts.py
│       ├── tracks.py
│       ├── utils.py
│       ├── webrtc.py
│       ├── webrtc_connection_mixin.py
│       └── websocket.py
├── demo
│   ├── __init__.py
│   ├── echo_audio
│   │   ├── README.md
│   │   ├── app.py
│   │   └── requirements.txt
│   ├── gemini_audio_video
│   │   ├── README.md
│   │   ├── app.py
│   │   └── requirements.txt
│   ├── gemini_conversation
│   │   ├── README.md
│   │   └── app.py
│   ├── hello_computer
│   │   ├── README.md
│   │   ├── README_gradio.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   ├── llama_code_editor
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── assets
│   │   │   ├── sandbox.html
│   │   │   └── spinner.html
│   │   ├── handler.py
│   │   ├── requirements.in
│   │   ├── requirements.txt
│   │   └── ui.py
│   ├── llm_voice_chat
│   │   ├── README.md
│   │   ├── README_gradio.md
│   │   ├── app.py
│   │   └── requirements.txt
│   ├── moonshine_live
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── default-favicon.ico
│   │   └── requirements.txt
│   ├── nextjs_voice_chat
│   │   ├── README.md
│   │   ├── backend
│   │   │   ├── env.py
│   │   │   └── server.py
│   │   ├── frontend
│   │   │   └── fastrtc-demo
│   │   │       ├── .gitignore
│   │   │       ├── README.md
│   │   │       ├── app
│   │   │       │   ├── favicon.ico
│   │   │       │   ├── globals.css
│   │   │       │   ├── layout.tsx
│   │   │       │   └── page.tsx
│   │   │       ├── components.json
│   │   │       ├── components
│   │   │       │   ├── background-circle-provider.tsx
│   │   │       │   ├── theme-provider.tsx
│   │   │       │   └── ui
│   │   │       │       ├── ai-voice-input.tsx
│   │   │       │       ├── background-circles.tsx
│   │   │       │       ├── reset-chat.tsx
│   │   │       │       ├── theme-toggle.tsx
│   │   │       │       └── theme-transition.tsx
│   │   │       ├── eslint.config.mjs
│   │   │       ├── lib
│   │   │       │   ├── utils.ts
│   │   │       │   └── webrtc-client.ts
│   │   │       ├── next.config.ts
│   │   │       ├── package.json
│   │   │       ├── postcss.config.mjs
│   │   │       ├── public
│   │   │       │   ├── file.svg
│   │   │       │   ├── globe.svg
│   │   │       │   ├── next.svg
│   │   │       │   ├── vercel.svg
│   │   │       │   └── window.svg
│   │   │       └── tsconfig.json
│   │   ├── requirements.txt
│   │   └── run.sh
│   ├── object_detection
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── index.html
│   │   ├── inference.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── patient_intake
│   │   └── app.py
│   ├── phonic_chat
│   │   ├── README.md
│   │   ├── app.py
│   │   └── requirements.txt
│   ├── qwen_phone_chat
│   │   ├── README.md
│   │   ├── app.py
│   │   └── requirements.txt
│   ├── send_text_or_audio
│   │   ├── app.py
│   │   └── index.html
│   ├── talk_to_azure_openai
│   │   ├── README.md
│   │   ├── README_gradio.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   ├── talk_to_claude
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   ├── talk_to_gemini
│   │   ├── README.md
│   │   ├── README_gradio.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   ├── talk_to_llama4
│   │   ├── AV_Huggy.png
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   ├── talk_to_openai
│   │   ├── README.md
│   │   ├── README_gradio.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   ├── talk_to_sambanova
│   │   ├── README.md
│   │   ├── README_gradio.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   ├── talk_to_smolagents
│   │   ├── README.md
│   │   ├── app.py
│   │   └── requirements.txt
│   ├── text_mode
│   │   └── app.py
│   ├── voice_text_editor
│   │   ├── README.md
│   │   └── app.py
│   ├── voice_text_editor_local
│   │   └── app.py
│   ├── webrtc_vs_websocket
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── index.html
│   │   └── requirements.txt
│   └── whisper_realtime
│       ├── README.md
│       ├── README_gradio.md
│       ├── app.py
│       ├── index.html
│       └── requirements.txt
├── docs
│   ├── CNAME
│   ├── Discord-Symbol-White.svg
│   ├── advanced-configuration.md
│   ├── cookbook.md
│   ├── deployment.md
│   ├── faq.md
│   ├── fastrtc_logo.png
│   ├── fastrtc_logo_small.png
│   ├── gradio-logo-with-title.svg
│   ├── gradio-logo.svg
│   ├── hf-logo-with-title.svg
│   ├── hf-logo.svg
│   ├── index.md
│   ├── reference
│   │   ├── credentials.md
│   │   ├── reply_on_pause.md
│   │   ├── stream.md
│   │   ├── stream_handlers.md
│   │   └── utils.md
│   ├── speech_to_text_gallery.md
│   ├── stylesheets
│   │   └── extra.css
│   ├── text_to_speech_gallery.md
│   ├── turn_taking_gallery.md
│   ├── userguide
│   │   ├── api.md
│   │   ├── audio-video.md
│   │   ├── audio.md
│   │   ├── gradio.md
│   │   ├── streams.md
│   │   ├── video.md
│   │   ├── webrtc_docs.md
│   │   └── websocket_docs.md
│   └── utils.md
├── frontend
│   ├── .prettierrc
│   ├── Example.svelte
│   ├── Index.svelte
│   ├── gradio.config.js
│   ├── index.ts
│   ├── package-lock.json
│   ├── package.json
│   └── shared
│       ├── AudioWave.svelte
│       ├── InteractiveAudio.svelte
│       ├── InteractiveVideo.svelte
│       ├── MicrophoneMuted.svelte
│       ├── PulsingIcon.svelte
│       ├── StaticAudio.svelte
│       ├── StaticVideo.svelte
│       ├── TextboxWithMic.svelte
│       ├── Webcam.svelte
│       ├── WebcamPermissions.svelte
│       ├── index.ts
│       ├── stream_utils.ts
│       ├── utils.ts
│       └── webrtc_utils.ts
├── justfile
├── mkdocs.yml
├── overrides
│   └── partials
│       └── header.html
├── pyproject.toml
├── test
│   ├── __init__.py
│   ├── test_tts.py
│   ├── test_utils.py
│   └── test_webrtc_connection_mixin.py
└── upload_space.py
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: docs
2 | on:
3 |   push:
4 |     branches:
5 |       - main
6 |   pull_request:
7 |     branches:
8 |       - main
9 | 
10 | permissions:
11 |   contents: write
12 |   pull-requests: write
13 |   deployments: write
14 |   pages: write
15 | 
16 | jobs:
17 |   deploy:
18 |     runs-on: ubuntu-latest
19 |     if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == false)
20 |     steps:
21 |       - uses: actions/checkout@v4
22 |       - name: Configure Git Credentials
23 |         run: |
24 |           git config user.name github-actions[bot]
25 |           git config user.email 41898282+github-actions[bot]@users.noreply.github.com
26 |       - uses: actions/setup-python@v5
27 |         with:
28 |           python-version: 3.x
29 |       - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
30 |       - uses: actions/cache@v4
31 |         with:
32 |           key: mkdocs-material-${{ env.cache_id }}
33 |           path: .cache
34 |           restore-keys: |
35 |             mkdocs-material-
36 |       - run: pip install mkdocs-material mkdocs-llmstxt==0.1.0
37 |       - name: Build docs
38 |         run: mkdocs build
39 | 
40 |       - name: Deploy to GH Pages (main)
41 |         if: github.event_name == 'push'
42 |         run: mkdocs gh-deploy --force
43 | 
44 |       - name: Deploy PR Preview
45 |         if: github.event_name == 'pull_request'
46 |         uses: rossjrw/pr-preview-action@v1
47 |         with:
48 |           source-dir: ./site
49 |           preview-branch: gh-pages
50 |           umbrella-dir: pr-preview
51 |           action: auto
--------------------------------------------------------------------------------
/.github/workflows/tests-frontend.yml:
--------------------------------------------------------------------------------
1 | name: tests-frontend
2 | 
3 | on: [push, pull_request]
4 | 
5 | jobs:
6 |   prettier:
7 |     runs-on: ubuntu-latest
8 |     steps:
9 |       - uses: actions/checkout@v4
10 |       - uses: actions/setup-node@v4
11 |         with:
12 |           node-version: 18
13 |       - name: Run prettier
14 |         run: |
15 |           cd frontend
16 |           npm install
17 |           npx prettier --check .
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: tests
2 | 
3 | on: [push, pull_request]
4 | 
5 | jobs:
6 |   lint:
7 |     runs-on: ubuntu-latest
8 |     steps:
9 |       - uses: actions/checkout@v4
10 |       - uses: actions/setup-python@v5
11 |         with:
12 |           python-version: '3.10'
13 |       - name: Run linters
14 |         run: |
15 |           pip install ruff pyright
16 |           pip install -e .[dev]
17 |           ruff check .
18 |           ruff format --check --diff .
19 |           pyright
20 |   test:
21 |     runs-on: ${{ matrix.os }}
22 |     strategy:
23 |       fail-fast: false
24 |       matrix:
25 |         os: [ubuntu-latest]
26 |         python:
27 |           - '3.10'
28 |           - '3.13'
29 |     steps:
30 |       - uses: actions/checkout@v4
31 |       - uses: actions/setup-python@v5
32 |         with:
33 |           python-version: ${{ matrix.python }}
34 |       - name: Run tests
35 |         run: |
36 |           python -m pip install -U pip
37 |           pip install '.[dev, tts]'
38 |           python -m pytest --capture=no
39 |         shell: bash
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .eggs/
2 | dist/
3 | *.pyc
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 | __tmp/*
8 | *.pyi
9 | .mypy_cache
10 | .ruff_cache
11 | node_modules
12 | demo/MobileNetSSD_deploy.caffemodel
13 | demo/MobileNetSSD_deploy.prototxt.txt
14 | demo/scratch
15 | .gradio
16 | .vscode
17 | .DS_Store
18 | .venv*
19 | .env
20 | 
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | fastrtc.org
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2024 Freddy Boulton
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/backend/fastrtc/__init__.py:
--------------------------------------------------------------------------------
1 | from .credentials import (
2 |     get_cloudflare_turn_credentials,
3 |     get_cloudflare_turn_credentials_async,
4 |     get_hf_turn_credentials,
5 |     get_hf_turn_credentials_async,
6 |     get_turn_credentials,
7 |     get_turn_credentials_async,
8 |     get_twilio_turn_credentials,
9 | )
10 | from .pause_detection import (
11 |     ModelOptions,
12 |     PauseDetectionModel,
13 |     SileroVadOptions,
14 |     get_silero_model,
15 | )
16 | from .reply_on_pause import AlgoOptions, ReplyOnPause
17 | from .reply_on_stopwords import ReplyOnStopWords
18 | from .speech_to_text import MoonshineSTT, get_stt_model
19 | from .stream import Stream, UIArgs
20 | from .text_to_speech import (
21 |     CartesiaTTSOptions,
22 |     KokoroTTSOptions,
23 |     get_tts_model,
24 | )
25 | from .tracks import (
26 |     AsyncAudioVideoStreamHandler,
27 |     AsyncStreamHandler,
28 |     AudioEmitType,
29 |     AudioVideoStreamHandler,
30 |     StreamHandler,
31 |     VideoEmitType,
32 |     VideoStreamHandler,
33 | )
34 | from .utils import (
35 |     AdditionalOutputs,
36 |     CloseStream,
37 |     Warning,
38 |     WebRTCData,
39 |     WebRTCError,
40 |     aggregate_bytes_to_16bit,
41 |     async_aggregate_bytes_to_16bit,
42 |     audio_to_bytes,
43 |     audio_to_file,
44 |     audio_to_float32,
45 |     audio_to_int16,
46 |     get_current_context,
47 |     wait_for_item,
48 | )
49 | from .webrtc import (
50 |     WebRTC,
51 | )
52 | 
53 | __all__ = [
54 |     "AsyncStreamHandler",
55 |     "AudioVideoStreamHandler",
56 |     "AudioEmitType",
57 |     "AsyncAudioVideoStreamHandler",
58 |     "AlgoOptions",
59 |     "AdditionalOutputs",
60 |     "aggregate_bytes_to_16bit",
61 |     "async_aggregate_bytes_to_16bit",
62 |     "audio_to_bytes",
63 |     "audio_to_file",
64 |     "audio_to_float32",
65 |     "audio_to_int16",
66 |     "get_hf_turn_credentials",
67 |     "get_twilio_turn_credentials",
68 |     "get_turn_credentials",
69 |     "ReplyOnPause",
70 |     "ReplyOnStopWords",
71 |     "SileroVadOptions",
72 |     "get_stt_model",
73 |     "MoonshineSTT",
74 |     "StreamHandler",
75 |     "Stream",
76 |     "VideoEmitType",
77 |     "WebRTC",
78 |     "WebRTCError",
79 |     "Warning",
80 |     "get_tts_model",
81 |     "KokoroTTSOptions",
82 |     "get_cloudflare_turn_credentials_async",
83 |     "get_hf_turn_credentials_async",
84 |     "get_turn_credentials_async",
85 |     "get_cloudflare_turn_credentials",
86 |     "wait_for_item",
87 |     "UIArgs",
88 |     "ModelOptions",
89 |     "PauseDetectionModel",
90 |     "get_silero_model",
91 |     "VideoStreamHandler",
92 |     "CloseStream",
93 |     "get_current_context",
94 |     "CartesiaTTSOptions",
95 |     "WebRTCData",
96 | ]
97 | 
--------------------------------------------------------------------------------
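The export list above is the package's entire public surface. As a rough sketch of how the core pieces compose (assuming `fastrtc` and its `vad` extra are installed; the `echo` handler below is illustrative, not part of the package):

```python
import numpy as np
from fastrtc import ReplyOnPause, Stream


def echo(audio: tuple[int, np.ndarray]):
    # Any generator that yields (sample_rate, ndarray) tuples works here.
    yield audio


stream = Stream(handler=ReplyOnPause(echo), modality="audio", mode="send-receive")
stream.ui.launch()  # or stream.mount(fastapi_app); the demos below use both styles
```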
/backend/fastrtc/pause_detection/__init__.py:
--------------------------------------------------------------------------------
1 | from .protocol import ModelOptions, PauseDetectionModel
2 | from .silero import SileroVADModel, SileroVadOptions, get_silero_model
3 | 
4 | __all__ = [
5 |     "SileroVADModel",
6 |     "SileroVadOptions",
7 |     "PauseDetectionModel",
8 |     "ModelOptions",
9 |     "get_silero_model",
10 | ]
11 | 
--------------------------------------------------------------------------------
/backend/fastrtc/pause_detection/protocol.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Protocol, TypeAlias
2 | 
3 | import numpy as np
4 | from numpy.typing import NDArray
5 | 
6 | from ..utils import AudioChunk
7 | 
8 | ModelOptions: TypeAlias = Any
9 | 
10 | 
11 | class PauseDetectionModel(Protocol):
12 |     def vad(
13 |         self,
14 |         audio: tuple[int, NDArray[np.int16] | NDArray[np.float32]],
15 |         options: ModelOptions,
16 |     ) -> tuple[float, list[AudioChunk]]: ...
17 | 
18 |     def warmup(
19 |         self,
20 |     ) -> None: ...
21 | 
--------------------------------------------------------------------------------
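`PauseDetectionModel` is a structural protocol, so any object with matching `vad` and `warmup` methods can stand in for the bundled Silero model. A hypothetical energy-threshold implementation, purely as a sketch (the 0.02 threshold and 20 ms framing are illustrative assumptions, not values used anywhere in the package):

```python
import numpy as np
from numpy.typing import NDArray

from fastrtc.utils import AudioChunk


class EnergyVAD:
    """Toy PauseDetectionModel: flags 20 ms frames above an energy threshold."""

    def vad(
        self,
        audio: tuple[int, NDArray[np.int16] | NDArray[np.float32]],
        options: None,
    ) -> tuple[float, list[AudioChunk]]:
        sr, arr = audio
        arr = arr.astype(np.float32).flatten()
        frame = max(sr // 50, 1)  # 20 ms frames
        loud = [
            i * frame
            for i in range(len(arr) // frame)
            if np.abs(arr[i * frame : (i + 1) * frame]).mean() > 0.02
        ]
        chunks: list[AudioChunk] = (
            [{"start": loud[0], "end": loud[-1] + frame}] if loud else []
        )
        speech_duration = sum(c["end"] - c["start"] for c in chunks) / sr
        return speech_duration, chunks

    def warmup(self) -> None:
        pass  # nothing to pre-load for a simple threshold detector
```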
/backend/fastrtc/py.typed:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/backend/fastrtc/speech_to_text/__init__.py:
--------------------------------------------------------------------------------
1 | from .stt_ import MoonshineSTT, get_stt_model, stt_for_chunks
2 | 
3 | __all__ = ["get_stt_model", "MoonshineSTT", "stt_for_chunks"]
4 | 
--------------------------------------------------------------------------------
/backend/fastrtc/speech_to_text/stt_.py:
--------------------------------------------------------------------------------
1 | from functools import lru_cache
2 | from pathlib import Path
3 | from typing import Literal, Protocol
4 | 
5 | import click
6 | import librosa
7 | import numpy as np
8 | from numpy.typing import NDArray
9 | 
10 | from ..utils import AudioChunk, audio_to_float32
11 | 
12 | curr_dir = Path(__file__).parent
13 | 
14 | 
15 | class STTModel(Protocol):
16 |     def stt(self, audio: tuple[int, NDArray[np.int16 | np.float32]]) -> str: ...
17 | 
18 | 
19 | class MoonshineSTT(STTModel):
20 |     def __init__(
21 |         self, model: Literal["moonshine/base", "moonshine/tiny"] = "moonshine/base"
22 |     ):
23 |         try:
24 |             from moonshine_onnx import MoonshineOnnxModel, load_tokenizer
25 |         except (ImportError, ModuleNotFoundError):
26 |             raise ImportError(
27 |                 "Install fastrtc[stt] for speech-to-text and stopword detection support."
28 |             )
29 | 
30 |         self.model = MoonshineOnnxModel(model_name=model)
31 |         self.tokenizer = load_tokenizer()
32 | 
33 |     def stt(self, audio: tuple[int, NDArray[np.int16 | np.float32]]) -> str:
34 |         sr, audio_np = audio  # type: ignore
35 |         audio_np = audio_to_float32(audio_np)
36 |         if sr != 16000:
37 |             audio_np: NDArray[np.float32] = librosa.resample(
38 |                 audio_np, orig_sr=sr, target_sr=16000
39 |             )
40 |         if audio_np.ndim == 1:
41 |             audio_np = audio_np.reshape(1, -1)
42 |         tokens = self.model.generate(audio_np)
43 |         return self.tokenizer.decode_batch(tokens)[0]
44 | 
45 | 
46 | @lru_cache
47 | def get_stt_model(
48 |     model: Literal["moonshine/base", "moonshine/tiny"] = "moonshine/base",
49 | ) -> STTModel:
50 |     import os
51 | 
52 |     os.environ["TOKENIZERS_PARALLELISM"] = "false"
53 |     m = MoonshineSTT(model)
54 |     from moonshine_onnx import load_audio
55 | 
56 |     audio = load_audio(str(curr_dir / "test_file.wav"))
57 |     print(click.style("INFO", fg="green") + ":\t Warming up STT model.")
58 | 
59 |     m.stt((16000, audio))
60 |     print(click.style("INFO", fg="green") + ":\t STT model warmed up.")
61 |     return m
62 | 
63 | 
64 | def stt_for_chunks(
65 |     stt_model: STTModel,
66 |     audio: tuple[int, NDArray[np.int16 | np.float32]],
67 |     chunks: list[AudioChunk],
68 | ) -> str:
69 |     sr, audio_np = audio
70 |     return " ".join(
71 |         [
72 |             stt_model.stt((sr, audio_np[chunk["start"] : chunk["end"]]))
73 |             for chunk in chunks
74 |         ]
75 |     )
76 | 
--------------------------------------------------------------------------------
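A minimal usage sketch for the module above, mirroring what the warmup call already does internally (the silent one-second buffer is a stand-in for real microphone audio):

```python
import numpy as np
from fastrtc import get_stt_model

model = get_stt_model("moonshine/base")  # downloads and warms up on first call
sr = 16000
audio = np.zeros(sr, dtype=np.int16)  # placeholder for one second of speech
print(model.stt((sr, audio)))  # near-empty transcript for silence
```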
/backend/fastrtc/speech_to_text/test_file.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gradio-app/fastrtc/c97b1885c059bb9446f80a542ee589676021eae9/backend/fastrtc/speech_to_text/test_file.wav
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/_basePickBy-BWyW5hVA.js:
--------------------------------------------------------------------------------
1 | import { e as x, c as b, g as m, k as P, h as p, j as w, l as N, m as c, n as I, t as A, o as M } from "./_baseUniq-BfI_PfuI.js";
2 | import { aJ as g, az as E, aK as F, aL as _, aM as $, aN as l, aO as B, aP as T, aQ as y, aR as L } from "./mermaid.core-C0Blj36u.js";
3 | var R = /\s/;
4 | function S(n) {
5 |   for (var r = n.length; r-- && R.test(n.charAt(r)); )
6 |     ;
7 |   return r;
8 | }
9 | var z = /^\s+/;
10 | function G(n) {
11 |   return n && n.slice(0, S(n) + 1).replace(z, "");
12 | }
13 | var o = NaN, H = /^[-+]0x[0-9a-f]+$/i, K = /^0b[01]+$/i, q = /^0o[0-7]+$/i, C = parseInt;
14 | function J(n) {
15 |   if (typeof n == "number")
16 |     return n;
17 |   if (x(n))
18 |     return o;
19 |   if (g(n)) {
20 |     var r = typeof n.valueOf == "function" ? n.valueOf() : n;
21 |     n = g(r) ? r + "" : r;
22 |   }
23 |   if (typeof n != "string")
24 |     return n === 0 ? n : +n;
25 |   n = G(n);
26 |   var t = K.test(n);
27 |   return t || q.test(n) ? C(n.slice(2), t ? 2 : 8) : H.test(n) ? o : +n;
28 | }
29 | var v = 1 / 0, Q = 17976931348623157e292;
30 | function W(n) {
31 |   if (!n)
32 |     return n === 0 ? n : 0;
33 |   if (n = J(n), n === v || n === -v) {
34 |     var r = n < 0 ? -1 : 1;
35 |     return r * Q;
36 |   }
37 |   return n === n ? n : 0;
38 | }
39 | function X(n) {
40 |   var r = W(n), t = r % 1;
41 |   return r === r ? t ? r - t : r : 0;
42 | }
43 | function fn(n) {
44 |   var r = n == null ? 0 : n.length;
45 |   return r ? b(n) : [];
46 | }
47 | var O = Object.prototype, Y = O.hasOwnProperty, dn = E(function(n, r) {
48 |   n = Object(n);
49 |   var t = -1, i = r.length, a = i > 2 ? r[2] : void 0;
50 |   for (a && F(r[0], r[1], a) && (i = 1); ++t < i; )
51 |     for (var f = r[t], e = _(f), s = -1, d = e.length; ++s < d; ) {
52 |       var u = e[s], h = n[u];
53 |       (h === void 0 || $(h, O[u]) && !Y.call(n, u)) && (n[u] = f[u]);
54 |     }
55 |   return n;
56 | });
57 | function un(n) {
58 |   var r = n == null ? 0 : n.length;
59 |   return r ? n[r - 1] : void 0;
60 | }
61 | function D(n) {
62 |   return function(r, t, i) {
63 |     var a = Object(r);
64 |     if (!l(r)) {
65 |       var f = m(t);
66 |       r = P(r), t = function(s) {
67 |         return f(a[s], s, a);
68 |       };
69 |     }
70 |     var e = n(r, t, i);
71 |     return e > -1 ? a[f ? r[e] : e] : void 0;
72 |   };
73 | }
74 | var U = Math.max;
75 | function Z(n, r, t) {
76 |   var i = n == null ? 0 : n.length;
77 |   if (!i)
78 |     return -1;
79 |   var a = t == null ? 0 : X(t);
80 |   return a < 0 && (a = U(i + a, 0)), p(n, m(r), a);
81 | }
82 | var hn = D(Z);
83 | function V(n, r) {
84 |   var t = -1, i = l(n) ? Array(n.length) : [];
85 |   return w(n, function(a, f, e) {
86 |     i[++t] = r(a, f, e);
87 |   }), i;
88 | }
89 | function gn(n, r) {
90 |   var t = B(n) ? N : V;
91 |   return t(n, m(r));
92 | }
93 | var j = Object.prototype, k = j.hasOwnProperty;
94 | function nn(n, r) {
95 |   return n != null && k.call(n, r);
96 | }
97 | function mn(n, r) {
98 |   return n != null && c(n, r, nn);
99 | }
100 | function rn(n, r) {
101 |   return n < r;
102 | }
103 | function tn(n, r, t) {
104 |   for (var i = -1, a = n.length; ++i < a; ) {
105 |     var f = n[i], e = r(f);
106 |     if (e != null && (s === void 0 ? e === e && !x(e) : t(e, s)))
107 |       var s = e, d = f;
108 |   }
109 |   return d;
110 | }
111 | function on(n) {
112 |   return n && n.length ? tn(n, T, rn) : void 0;
113 | }
114 | function an(n, r, t, i) {
115 |   if (!g(n))
116 |     return n;
117 |   r = I(r, n);
118 |   for (var a = -1, f = r.length, e = f - 1, s = n; s != null && ++a < f; ) {
119 |     var d = A(r[a]), u = t;
120 |     if (d === "__proto__" || d === "constructor" || d === "prototype")
121 |       return n;
122 |     if (a != e) {
123 |       var h = s[d];
124 |       u = void 0, u === void 0 && (u = g(h) ? h : y(r[a + 1]) ? [] : {});
125 |     }
126 |     L(s, d, u), s = s[d];
127 |   }
128 |   return n;
129 | }
130 | function vn(n, r, t) {
131 |   for (var i = -1, a = r.length, f = {}; ++i < a; ) {
132 |     var e = r[i], s = M(n, e);
133 |     t(s, e) && an(f, I(e, n), s);
134 |   }
135 |   return f;
136 | }
137 | export {
138 |   rn as a,
139 |   tn as b,
140 |   V as c,
141 |   vn as d,
142 |   on as e,
143 |   fn as f,
144 |   hn as g,
145 |   mn as h,
146 |   dn as i,
147 |   X as j,
148 |   un as l,
149 |   gn as m,
150 |   W as t
151 | };
152 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/assets/worker-lPYB70QI.js:
--------------------------------------------------------------------------------
1 | (function(){"use strict";const R="https://unpkg.com/@ffmpeg/core@0.12.6/dist/umd/ffmpeg-core.js";var E;(function(t){t.LOAD="LOAD",t.EXEC="EXEC",t.WRITE_FILE="WRITE_FILE",t.READ_FILE="READ_FILE",t.DELETE_FILE="DELETE_FILE",t.RENAME="RENAME",t.CREATE_DIR="CREATE_DIR",t.LIST_DIR="LIST_DIR",t.DELETE_DIR="DELETE_DIR",t.ERROR="ERROR",t.DOWNLOAD="DOWNLOAD",t.PROGRESS="PROGRESS",t.LOG="LOG",t.MOUNT="MOUNT",t.UNMOUNT="UNMOUNT"})(E||(E={}));const a=new Error("unknown message type"),f=new Error("ffmpeg is not loaded, call `await ffmpeg.load()` first"),u=new Error("failed to import ffmpeg-core.js");let r;const O=async({coreURL:t,wasmURL:n,workerURL:e})=>{const o=!r;try{t||(t=R),importScripts(t)}catch{if(t||(t=R.replace("/umd/","/esm/")),self.createFFmpegCore=(await import(t)).default,!self.createFFmpegCore)throw u}const s=t,c=n||t.replace(/.js$/g,".wasm"),b=e||t.replace(/.js$/g,".worker.js");return r=await self.createFFmpegCore({mainScriptUrlOrBlob:`${s}#${btoa(JSON.stringify({wasmURL:c,workerURL:b}))}`}),r.setLogger(i=>self.postMessage({type:E.LOG,data:i})),r.setProgress(i=>self.postMessage({type:E.PROGRESS,data:i})),o},l=({args:t,timeout:n=-1})=>{r.setTimeout(n),r.exec(...t);const e=r.ret;return r.reset(),e},m=({path:t,data:n})=>(r.FS.writeFile(t,n),!0),D=({path:t,encoding:n})=>r.FS.readFile(t,{encoding:n}),S=({path:t})=>(r.FS.unlink(t),!0),I=({oldPath:t,newPath:n})=>(r.FS.rename(t,n),!0),L=({path:t})=>(r.FS.mkdir(t),!0),N=({path:t})=>{const n=r.FS.readdir(t),e=[];for(const o of n){const s=r.FS.stat(`${t}/${o}`),c=r.FS.isDir(s.mode);e.push({name:o,isDir:c})}return e},A=({path:t})=>(r.FS.rmdir(t),!0),w=({fsType:t,options:n,mountPoint:e})=>{const o=t,s=r.FS.filesystems[o];return s?(r.FS.mount(s,n,e),!0):!1},k=({mountPoint:t})=>(r.FS.unmount(t),!0);self.onmessage=async({data:{id:t,type:n,data:e}})=>{const o=[];let s;try{if(n!==E.LOAD&&!r)throw f;switch(n){case E.LOAD:s=await O(e);break;case E.EXEC:s=l(e);break;case E.WRITE_FILE:s=m(e);break;case E.READ_FILE:s=D(e);break;case E.DELETE_FILE:s=S(e);break;case E.RENAME:s=I(e);break;case E.CREATE_DIR:s=L(e);break;case E.LIST_DIR:s=N(e);break;case E.DELETE_DIR:s=A(e);break;case E.MOUNT:s=w(e);break;case E.UNMOUNT:s=k(e);break;default:throw a}}catch(c){self.postMessage({id:t,type:E.ERROR,data:c.toString()});return}s instanceof Uint8Array&&o.push(s.buffer),self.postMessage({id:t,type:n,data:s},o)}})();
2 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/channel-BLI8LD7T.js:
--------------------------------------------------------------------------------
1 | import { ao as r, ap as n } from "./mermaid.core-C0Blj36u.js";
2 | const t = (a, o) => r.lang.round(n.parse(a)[o]);
3 | export {
4 |   t as c
5 | };
6 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/chunk-4BMEZGHF-4N88GRXN.js:
--------------------------------------------------------------------------------
1 | import { _ as l } from "./mermaid.core-C0Blj36u.js";
2 | function m(e, c) {
3 |   var i, t, o;
4 |   e.accDescr && ((i = c.setAccDescription) == null || i.call(c, e.accDescr)), e.accTitle && ((t = c.setAccTitle) == null || t.call(c, e.accTitle)), e.title && ((o = c.setDiagramTitle) == null || o.call(c, e.title));
5 | }
6 | l(m, "populateCommonDb");
7 | export {
8 |   m as p
9 | };
10 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/chunk-D6G4REZN-BSQJOIKu.js:
--------------------------------------------------------------------------------
1 | import { _ as n, T as c, m as l } from "./mermaid.core-C0Blj36u.js";
2 | var o = /* @__PURE__ */ n((a, t) => {
3 |   const e = a.append("rect");
4 |   if (e.attr("x", t.x), e.attr("y", t.y), e.attr("fill", t.fill), e.attr("stroke", t.stroke), e.attr("width", t.width), e.attr("height", t.height), t.name && e.attr("name", t.name), t.rx && e.attr("rx", t.rx), t.ry && e.attr("ry", t.ry), t.attrs !== void 0)
5 |     for (const r in t.attrs)
6 |       e.attr(r, t.attrs[r]);
7 |   return t.class && e.attr("class", t.class), e;
8 | }, "drawRect"), d = /* @__PURE__ */ n((a, t) => {
9 |   const e = {
10 |     x: t.startx,
11 |     y: t.starty,
12 |     width: t.stopx - t.startx,
13 |     height: t.stopy - t.starty,
14 |     fill: t.fill,
15 |     stroke: t.stroke,
16 |     class: "rect"
17 |   };
18 |   o(a, e).lower();
19 | }, "drawBackgroundRect"), g = /* @__PURE__ */ n((a, t) => {
20 |   const e = t.text.replace(c, " "), r = a.append("text");
21 |   r.attr("x", t.x), r.attr("y", t.y), r.attr("class", "legend"), r.style("text-anchor", t.anchor), t.class && r.attr("class", t.class);
22 |   const s = r.append("tspan");
23 |   return s.attr("x", t.x + t.textMargin * 2), s.text(e), r;
24 | }, "drawText"), m = /* @__PURE__ */ n((a, t, e, r) => {
25 |   const s = a.append("image");
26 |   s.attr("x", t), s.attr("y", e);
27 |   const i = l(r);
28 |   s.attr("xlink:href", i);
29 | }, "drawImage"), h = /* @__PURE__ */ n((a, t, e, r) => {
30 |   const s = a.append("use");
31 |   s.attr("x", t), s.attr("y", e);
32 |   const i = l(r);
33 |   s.attr("xlink:href", `#${i}`);
34 | }, "drawEmbeddedImage"), y = /* @__PURE__ */ n(() => ({
35 |   x: 0,
36 |   y: 0,
37 |   width: 100,
38 |   height: 100,
39 |   fill: "#EDF2AE",
40 |   stroke: "#666",
41 |   anchor: "start",
42 |   rx: 0,
43 |   ry: 0
44 | }), "getNoteRect"), p = /* @__PURE__ */ n(() => ({
45 |   x: 0,
46 |   y: 0,
47 |   width: 100,
48 |   height: 100,
49 |   "text-anchor": "start",
50 |   style: "#666",
51 |   textMargin: 0,
52 |   rx: 0,
53 |   ry: 0,
54 |   tspan: !0
55 | }), "getTextObj");
56 | export {
57 |   p as a,
58 |   d as b,
59 |   h as c,
60 |   o as d,
61 |   m as e,
62 |   g as f,
63 |   y as g
64 | };
65 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/chunk-RZ5BOZE2-BQm8zocb.js:
--------------------------------------------------------------------------------
1 | import { _ as n, j as r, k as g, l as d } from "./mermaid.core-C0Blj36u.js";
2 | var u = /* @__PURE__ */ n((t, e) => {
3 |   let o;
4 |   return e === "sandbox" && (o = r("#i" + t)), (e === "sandbox" ? r(o.nodes()[0].contentDocument.body) : r("body")).select(`[id="${t}"]`);
5 | }, "getDiagramElement"), b = /* @__PURE__ */ n((t, e, o, i) => {
6 |   t.attr("class", o);
7 |   const { width: a, height: s, x: h, y: x } = l(t, e);
8 |   g(t, s, a, i);
9 |   const c = w(h, x, a, s, e);
10 |   t.attr("viewBox", c), d.debug(`viewBox configured: ${c} with padding: ${e}`);
11 | }, "setupViewPortForSVG"), l = /* @__PURE__ */ n((t, e) => {
12 |   var i;
13 |   const o = ((i = t.node()) == null ? void 0 : i.getBBox()) || { width: 0, height: 0, x: 0, y: 0 };
14 |   return {
15 |     width: o.width + e * 2,
16 |     height: o.height + e * 2,
17 |     x: o.x,
18 |     y: o.y
19 |   };
20 | }, "calculateDimensionsWithPadding"), w = /* @__PURE__ */ n((t, e, o, i, a) => `${t - a} ${e - a} ${o} ${i}`, "createViewBox");
21 | export {
22 |   u as g,
23 |   b as s
24 | };
25 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/chunk-XZIHB7SX-DIHERCaT.js:
--------------------------------------------------------------------------------
1 | import { _ as s } from "./mermaid.core-C0Blj36u.js";
2 | var t, e = (t = class {
3 |   /**
4 |    * @param init - Function that creates the default state.
5 |    */
6 |   constructor(i) {
7 |     this.init = i, this.records = this.init();
8 |   }
9 |   reset() {
10 |     this.records = this.init();
11 |   }
12 | }, s(t, "ImperativeState"), t);
13 | export {
14 |   e as I
15 | };
16 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/classDiagram-GIVACNV2-KrkkVrlR.js:
--------------------------------------------------------------------------------
1 | import { c as r, C as s, a as e, s as t } from "./chunk-A2AXSNBT-C4qvwI5K.js";
2 | import { _ as l } from "./mermaid.core-C0Blj36u.js";
3 | var d = {
4 |   parser: r,
5 |   get db() {
6 |     return new s();
7 |   },
8 |   renderer: e,
9 |   styles: t,
10 |   init: /* @__PURE__ */ l((a) => {
11 |     a.class || (a.class = {}), a.class.arrowMarkerAbsolute = a.arrowMarkerAbsolute;
12 |   }, "init")
13 | };
14 | export {
15 |   d as diagram
16 | };
17 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/classDiagram-v2-COTLJTTW-KrkkVrlR.js:
--------------------------------------------------------------------------------
1 | import { c as r, C as s, a as e, s as t } from "./chunk-A2AXSNBT-C4qvwI5K.js";
2 | import { _ as l } from "./mermaid.core-C0Blj36u.js";
3 | var d = {
4 |   parser: r,
5 |   get db() {
6 |     return new s();
7 |   },
8 |   renderer: e,
9 |   styles: t,
10 |   init: /* @__PURE__ */ l((a) => {
11 |     a.class || (a.class = {}), a.class.arrowMarkerAbsolute = a.arrowMarkerAbsolute;
12 |   }, "init")
13 | };
14 | export {
15 |   d as diagram
16 | };
17 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/clone-D_f12Uao.js:
--------------------------------------------------------------------------------
1 | import { b as r } from "./_baseUniq-BfI_PfuI.js";
2 | var e = 4;
3 | function a(o) {
4 |   return r(o, e);
5 | }
6 | export {
7 |   a as c
8 | };
9 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/index.js:
--------------------------------------------------------------------------------
1 | import { E as s, a as t, I as l, l as d, d as o, b as p } from "./index-xxHpJ_RR.js";
2 | export {
3 |   s as BaseExample,
4 |   t as BaseInteractiveVideo,
5 |   l as default,
6 |   d as loaded,
7 |   o as playable,
8 |   p as prettyBytes
9 | };
10 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/infoDiagram-PH2N3AL5-fhMlkv6w.js:
--------------------------------------------------------------------------------
1 | import { _ as e, l as s, H as o, k as i, I as g } from "./mermaid.core-C0Blj36u.js";
2 | import { p } from "./radar-MK3ICKWK-Uwn-jZp4.js";
3 | var v = {
4 |   parse: /* @__PURE__ */ e(async (r) => {
5 |     const a = await p("info", r);
6 |     s.debug(a);
7 |   }, "parse")
8 | }, d = { version: g.version }, c = /* @__PURE__ */ e(() => d.version, "getVersion"), m = {
9 |   getVersion: c
10 | }, l = /* @__PURE__ */ e((r, a, n) => {
11 |   s.debug(`rendering info diagram
12 | ` + r);
13 |   const t = o(a);
14 |   i(t, 100, 400, !0), t.append("g").append("text").attr("x", 100).attr("y", 40).attr("class", "version").attr("font-size", 32).style("text-anchor", "middle").text(`v${n}`);
15 | }, "draw"), f = { draw: l }, b = {
16 |   parser: v,
17 |   db: m,
18 |   renderer: f
19 | };
20 | export {
21 |   b as diagram
22 | };
23 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/init-DjUOC4st.js:
--------------------------------------------------------------------------------
1 | function t(e, a) {
2 |   switch (arguments.length) {
3 |     case 0:
4 |       break;
5 |     case 1:
6 |       this.range(e);
7 |       break;
8 |     default:
9 |       this.range(a).domain(e);
10 |       break;
11 |   }
12 |   return this;
13 | }
14 | export {
15 |   t as i
16 | };
17 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/ordinal-DfAQgscy.js:
--------------------------------------------------------------------------------
1 | import { i as a } from "./init-DjUOC4st.js";
2 | class o extends Map {
3 |   constructor(n, t = g) {
4 |     if (super(), Object.defineProperties(this, { _intern: { value: /* @__PURE__ */ new Map() }, _key: { value: t } }), n != null) for (const [r, s] of n) this.set(r, s);
5 |   }
6 |   get(n) {
7 |     return super.get(c(this, n));
8 |   }
9 |   has(n) {
10 |     return super.has(c(this, n));
11 |   }
12 |   set(n, t) {
13 |     return super.set(l(this, n), t);
14 |   }
15 |   delete(n) {
16 |     return super.delete(p(this, n));
17 |   }
18 | }
19 | function c({ _intern: e, _key: n }, t) {
20 |   const r = n(t);
21 |   return e.has(r) ? e.get(r) : t;
22 | }
23 | function l({ _intern: e, _key: n }, t) {
24 |   const r = n(t);
25 |   return e.has(r) ? e.get(r) : (e.set(r, t), t);
26 | }
27 | function p({ _intern: e, _key: n }, t) {
28 |   const r = n(t);
29 |   return e.has(r) && (t = e.get(r), e.delete(r)), t;
30 | }
31 | function g(e) {
32 |   return e !== null && typeof e == "object" ? e.valueOf() : e;
33 | }
34 | const f = Symbol("implicit");
35 | function h() {
36 |   var e = new o(), n = [], t = [], r = f;
37 |   function s(u) {
38 |     let i = e.get(u);
39 |     if (i === void 0) {
40 |       if (r !== f) return r;
41 |       e.set(u, i = n.push(u) - 1);
42 |     }
43 |     return t[i % t.length];
44 |   }
45 |   return s.domain = function(u) {
46 |     if (!arguments.length) return n.slice();
47 |     n = [], e = new o();
48 |     for (const i of u)
49 |       e.has(i) || e.set(i, n.push(i) - 1);
50 |     return s;
51 |   }, s.range = function(u) {
52 |     return arguments.length ? (t = Array.from(u), s) : t.slice();
53 |   }, s.unknown = function(u) {
54 |     return arguments.length ? (r = u, s) : r;
55 |   }, s.copy = function() {
56 |     return h(n, t).unknown(r);
57 |   }, a.apply(s, arguments), s;
58 | }
59 | export {
60 |   h as o
61 | };
62 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/component/stateDiagram-v2-YXO3MK2T-CFM2lJF8.js:
--------------------------------------------------------------------------------
1 | import { s as a, S as t, b as r, a as s } from "./chunk-AEK57VVT-OrsXQu-U.js";
2 | import { _ as i } from "./mermaid.core-C0Blj36u.js";
3 | var _ = {
4 |   parser: a,
5 |   get db() {
6 |     return new t(2);
7 |   },
8 |   renderer: r,
9 |   styles: s,
10 |   init: /* @__PURE__ */ i((e) => {
11 |     e.state || (e.state = {}), e.state.arrowMarkerAbsolute = e.arrowMarkerAbsolute;
12 |   }, "init")
13 | };
14 | export {
15 |   _ as diagram
16 | };
17 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/example/assets/worker-lPYB70QI.js:
--------------------------------------------------------------------------------
1 | (function(){"use strict";const R="https://unpkg.com/@ffmpeg/core@0.12.6/dist/umd/ffmpeg-core.js";var E;(function(t){t.LOAD="LOAD",t.EXEC="EXEC",t.WRITE_FILE="WRITE_FILE",t.READ_FILE="READ_FILE",t.DELETE_FILE="DELETE_FILE",t.RENAME="RENAME",t.CREATE_DIR="CREATE_DIR",t.LIST_DIR="LIST_DIR",t.DELETE_DIR="DELETE_DIR",t.ERROR="ERROR",t.DOWNLOAD="DOWNLOAD",t.PROGRESS="PROGRESS",t.LOG="LOG",t.MOUNT="MOUNT",t.UNMOUNT="UNMOUNT"})(E||(E={}));const a=new Error("unknown message type"),f=new Error("ffmpeg is not loaded, call `await ffmpeg.load()` first"),u=new Error("failed to import ffmpeg-core.js");let r;const O=async({coreURL:t,wasmURL:n,workerURL:e})=>{const o=!r;try{t||(t=R),importScripts(t)}catch{if(t||(t=R.replace("/umd/","/esm/")),self.createFFmpegCore=(await import(t)).default,!self.createFFmpegCore)throw u}const s=t,c=n||t.replace(/.js$/g,".wasm"),b=e||t.replace(/.js$/g,".worker.js");return r=await self.createFFmpegCore({mainScriptUrlOrBlob:`${s}#${btoa(JSON.stringify({wasmURL:c,workerURL:b}))}`}),r.setLogger(i=>self.postMessage({type:E.LOG,data:i})),r.setProgress(i=>self.postMessage({type:E.PROGRESS,data:i})),o},l=({args:t,timeout:n=-1})=>{r.setTimeout(n),r.exec(...t);const e=r.ret;return r.reset(),e},m=({path:t,data:n})=>(r.FS.writeFile(t,n),!0),D=({path:t,encoding:n})=>r.FS.readFile(t,{encoding:n}),S=({path:t})=>(r.FS.unlink(t),!0),I=({oldPath:t,newPath:n})=>(r.FS.rename(t,n),!0),L=({path:t})=>(r.FS.mkdir(t),!0),N=({path:t})=>{const n=r.FS.readdir(t),e=[];for(const o of n){const s=r.FS.stat(`${t}/${o}`),c=r.FS.isDir(s.mode);e.push({name:o,isDir:c})}return e},A=({path:t})=>(r.FS.rmdir(t),!0),w=({fsType:t,options:n,mountPoint:e})=>{const o=t,s=r.FS.filesystems[o];return s?(r.FS.mount(s,n,e),!0):!1},k=({mountPoint:t})=>(r.FS.unmount(t),!0);self.onmessage=async({data:{id:t,type:n,data:e}})=>{const o=[];let s;try{if(n!==E.LOAD&&!r)throw f;switch(n){case E.LOAD:s=await O(e);break;case E.EXEC:s=l(e);break;case E.WRITE_FILE:s=m(e);break;case E.READ_FILE:s=D(e);break;case E.DELETE_FILE:s=S(e);break;case E.RENAME:s=I(e);break;case E.CREATE_DIR:s=L(e);break;case E.LIST_DIR:s=N(e);break;case E.DELETE_DIR:s=A(e);break;case E.MOUNT:s=w(e);break;case E.UNMOUNT:s=k(e);break;default:throw a}}catch(c){self.postMessage({id:t,type:E.ERROR,data:c.toString()});return}s instanceof Uint8Array&&o.push(s.buffer),self.postMessage({id:t,type:n,data:s},o)}})();
2 | 
--------------------------------------------------------------------------------
/backend/fastrtc/templates/example/style.css:
--------------------------------------------------------------------------------
1 | .container.svelte-1uoo7dd{flex:none;max-width:none}.container.svelte-1uoo7dd video{width:var(--size-full);height:var(--size-full);object-fit:cover}.container.svelte-1uoo7dd:hover,.container.selected.svelte-1uoo7dd{border-color:var(--border-color-accent)}.container.table.svelte-1uoo7dd{margin:0 auto;border:2px solid var(--border-color-primary);border-radius:var(--radius-lg);overflow:hidden;width:var(--size-20);height:var(--size-20);object-fit:cover}.container.gallery.svelte-1uoo7dd{height:var(--size-20);max-height:var(--size-20);object-fit:cover}
2 | 
--------------------------------------------------------------------------------
/backend/fastrtc/text_to_speech/__init__.py:
--------------------------------------------------------------------------------
1 | from .tts import (
2 |     CartesiaTTSOptions,
3 |     KokoroTTSOptions,
4 |     get_tts_model,
5 | )
6 | 
7 | __all__ = ["get_tts_model", "KokoroTTSOptions", "CartesiaTTSOptions"]
8 | 
--------------------------------------------------------------------------------
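A brief usage sketch for these TTS exports. The streaming-iterator shape follows the package's documented pattern; that Kokoro is the default backend of `get_tts_model` is an assumption here, since `tts.py` itself is not shown in this extract:

```python
from fastrtc import get_tts_model

tts = get_tts_model()  # assumed to default to the Kokoro backend
for sample_rate, chunk in tts.stream_tts_sync("Hello from FastRTC!"):
    ...  # feed each (sample_rate, ndarray) chunk to your audio sink
```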
/backend/fastrtc/text_to_speech/test_tts.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gradio-app/fastrtc/c97b1885c059bb9446f80a542ee589676021eae9/backend/fastrtc/text_to_speech/test_tts.py
--------------------------------------------------------------------------------
/demo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gradio-app/fastrtc/c97b1885c059bb9446f80a542ee589676021eae9/demo/__init__.py
--------------------------------------------------------------------------------
/demo/echo_audio/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Echo Audio
3 | emoji: 🪩
4 | colorFrom: purple
5 | colorTo: red
6 | sdk: gradio
7 | sdk_version: 5.16.0
8 | app_file: app.py
9 | pinned: false
10 | license: mit
11 | short_description: Simple echo stream - simplest FastRTC demo
12 | tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN]
13 | ---
14 | 
15 | Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
--------------------------------------------------------------------------------
/demo/echo_audio/app.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from fastapi import FastAPI
3 | from fastapi.responses import RedirectResponse
4 | from fastrtc import ReplyOnPause, Stream, get_twilio_turn_credentials
5 | from gradio.utils import get_space
6 | 
7 | 
8 | def detection(audio: tuple[int, np.ndarray]):
9 |     # Implement any iterator that yields audio
10 |     # See "LLM Voice Chat" for a more complete example
11 |     yield audio
12 | 
13 | 
14 | stream = Stream(
15 |     handler=ReplyOnPause(detection),
16 |     modality="audio",
17 |     mode="send-receive",
18 |     rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
19 |     concurrency_limit=5 if get_space() else None,
20 |     time_limit=90 if get_space() else None,
21 | )
22 | 
23 | app = FastAPI()
24 | 
25 | stream.mount(app)
26 | 
27 | 
28 | @app.get("/")
29 | async def index():
30 |     return RedirectResponse(
31 |         url="/ui" if not get_space() else "https://fastrtc-echo-audio.hf.space/ui/"
32 |     )
33 | 
34 | 
35 | if __name__ == "__main__":
36 |     import os
37 | 
38 |     if (mode := os.getenv("MODE")) == "UI":
39 |         stream.ui.launch(server_port=7860)
40 |     elif mode == "PHONE":
41 |         stream.fastphone(port=7860)
42 |     else:
43 |         import uvicorn
44 | 
45 |         uvicorn.run(app, host="0.0.0.0", port=7860)
46 | 
--------------------------------------------------------------------------------
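The `detection` handler above echoes its input unchanged. Any generator with the same shape can transform the audio before yielding it; an illustrative variant (not part of the demo) that halves the playback volume:

```python
import numpy as np


def quieter_echo(audio: tuple[int, np.ndarray]):
    sample_rate, array = audio
    # Same contract as `detection`: yield (sample_rate, ndarray) tuples.
    yield sample_rate, (array.astype(np.float32) * 0.5).astype(array.dtype)
```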
/demo/echo_audio/requirements.txt:
--------------------------------------------------------------------------------
1 | fastrtc[vad]
2 | twilio
3 | python-dotenv
4 | 
--------------------------------------------------------------------------------
/demo/gemini_audio_video/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Gemini Audio Video
3 | emoji: ♊️
4 | colorFrom: purple
5 | colorTo: red
6 | sdk: gradio
7 | sdk_version: 5.25.2
8 | app_file: app.py
9 | pinned: false
10 | license: mit
11 | short_description: Gemini understands audio and video!
12 | tags: [webrtc, websocket, gradio, secret|HF_TOKEN, secret|GEMINI_API_KEY]
13 | ---
14 | 
15 | Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
--------------------------------------------------------------------------------
/demo/gemini_audio_video/requirements.txt:
--------------------------------------------------------------------------------
1 | fastrtc==0.0.23.rc1
2 | python-dotenv
3 | google-genai
4 | twilio
5 | 
--------------------------------------------------------------------------------
/demo/gemini_conversation/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Gemini Talking to Gemini
3 | emoji: ♊️
4 | colorFrom: purple
5 | colorTo: red
6 | sdk: gradio
7 | sdk_version: 5.17.0
8 | app_file: app.py
9 | pinned: false
10 | license: mit
11 | short_description: Have two Gemini agents talk to each other
12 | tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GEMINI_API_KEY]
13 | ---
14 | 
15 | Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
--------------------------------------------------------------------------------
/demo/hello_computer/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Hello Computer
3 | emoji: 💻
4 | colorFrom: purple
5 | colorTo: red
6 | sdk: gradio
7 | sdk_version: 5.16.0
8 | app_file: app.py
9 | pinned: false
10 | license: mit
11 | short_description: Say computer before asking your question
12 | tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|SAMBANOVA_API_KEY]
13 | ---
14 | 
15 | Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
--------------------------------------------------------------------------------
/demo/hello_computer/README_gradio.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Hello Computer (Gradio)
3 | emoji: 💻
4 | colorFrom: purple
5 | colorTo: red
6 | sdk: gradio
7 | sdk_version: 5.16.0
8 | app_file: app.py
9 | pinned: false
10 | license: mit
11 | short_description: Say computer (Gradio)
12 | tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|SAMBANOVA_API_KEY]
13 | ---
14 | 
15 | Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
--------------------------------------------------------------------------------
/demo/hello_computer/app.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import json
3 | import os
4 | from pathlib import Path
5 | 
6 | import gradio as gr
7 | import huggingface_hub
8 | import numpy as np
9 | from dotenv import load_dotenv
10 | from fastapi import FastAPI
11 | from fastapi.responses import HTMLResponse, StreamingResponse
12 | from fastrtc import (
13 |     AdditionalOutputs,
14 |     ReplyOnStopWords,
15 |     Stream,
16 |     get_stt_model,
17 |     get_twilio_turn_credentials,
18 | )
19 | from gradio.utils import get_space
20 | from pydantic import BaseModel
21 | 
22 | load_dotenv()
23 | 
24 | curr_dir = Path(__file__).parent
25 | 
26 | 
27 | client = huggingface_hub.InferenceClient(
28 |     api_key=os.environ.get("SAMBANOVA_API_KEY"),
29 |     provider="sambanova",
30 | )
31 | model = get_stt_model()
32 | 
33 | 
34 | def response(
35 |     audio: tuple[int, np.ndarray],
36 |     gradio_chatbot: list[dict] | None = None,
37 |     conversation_state: list[dict] | None = None,
38 | ):
39 |     gradio_chatbot = gradio_chatbot or []
40 |     conversation_state = conversation_state or []
41 |     text = model.stt(audio)
42 |     print("STT in handler", text)
43 |     sample_rate, array = audio
44 |     gradio_chatbot.append(
45 |         {"role": "user", "content": gr.Audio((sample_rate, array.squeeze()))}
46 |     )
47 |     yield AdditionalOutputs(gradio_chatbot, conversation_state)
48 | 
49 |     conversation_state.append({"role": "user", "content": text})
50 | 
51 |     request = client.chat.completions.create(
52 |         model="meta-llama/Llama-3.2-3B-Instruct",
53 |         messages=conversation_state,  # type: ignore
54 |         temperature=0.1,
55 |         top_p=0.1,
56 |     )
57 |     response = {"role": "assistant", "content": request.choices[0].message.content}
58 | 
59 |     conversation_state.append(response)
60 |     gradio_chatbot.append(response)
61 | 
62 |     yield AdditionalOutputs(gradio_chatbot, conversation_state)
63 | 
64 | 
65 | chatbot = gr.Chatbot(type="messages", value=[])
66 | state = gr.State(value=[])
67 | stream = Stream(
68 |     ReplyOnStopWords(
69 |         response,  # type: ignore
70 |         stop_words=["computer"],
71 |         input_sample_rate=16000,
72 |     ),
73 |     mode="send",
74 |     modality="audio",
75 |     additional_inputs=[chatbot, state],
76 |     additional_outputs=[chatbot, state],
77 |     additional_outputs_handler=lambda *a: (a[2], a[3]),
78 |     concurrency_limit=5 if get_space() else None,
79 |     time_limit=90 if get_space() else None,
80 |     rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
81 | )
82 | app = FastAPI()
83 | stream.mount(app)
84 | 
85 | 
86 | class Message(BaseModel):
87 |     role: str
88 |     content: str
89 | 
90 | 
91 | class InputData(BaseModel):
92 |     webrtc_id: str
93 |     chatbot: list[Message]
94 |     state: list[Message]
95 | 
96 | 
97 | @app.get("/")
98 | async def _():
99 |     rtc_config = get_twilio_turn_credentials() if get_space() else None
100 |     html_content = (curr_dir / "index.html").read_text()
101 |     html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
102 |     return HTMLResponse(content=html_content)
103 | 
104 | 
105 | @app.post("/input_hook")
106 | async def _(data: InputData):
107 |     body = data.model_dump()
108 |     stream.set_input(data.webrtc_id, body["chatbot"], body["state"])
109 | 
110 | 
111 | def audio_to_base64(file_path):
112 |     audio_format = "wav"
113 |     with open(file_path, "rb") as audio_file:
114 |         encoded_audio = base64.b64encode(audio_file.read()).decode("utf-8")
115 |     return f"data:audio/{audio_format};base64,{encoded_audio}"
116 | 
117 | 
118 | @app.get("/outputs")
119 | async def _(webrtc_id: str):
120 |     async def output_stream():
121 |         async for output in stream.output_stream(webrtc_id):
122 |             chatbot = output.args[0]
123 |             state = output.args[1]
124 |             data = {
125 |                 "message": state[-1],
126 |                 "audio": audio_to_base64(chatbot[-1]["content"].value["path"])
127 |                 if chatbot[-1]["role"] == "user"
128 |                 else None,
129 |             }
130 |             yield f"event: output\ndata: {json.dumps(data)}\n\n"
131 | 
132 |     return StreamingResponse(output_stream(), media_type="text/event-stream")
133 | 
134 | 
135 | if __name__ == "__main__":
136 |     import os
137 | 
138 |     if (mode := os.getenv("MODE")) == "UI":
139 |         stream.ui.launch(server_port=7860)
140 |     elif mode == "PHONE":
141 |         raise ValueError("Phone mode not supported")
142 |     else:
143 |         import uvicorn
144 | 
145 |         uvicorn.run(app, host="0.0.0.0", port=7860)
146 | 
--------------------------------------------------------------------------------
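For reference, a hypothetical Python client for the two custom endpoints defined above. In the real demo, `index.html` plays this role and the `webrtc_id` comes from the browser's WebRTC offer; the id and port below are placeholders:

```python
import json

import httpx

base = "http://localhost:7860"
webrtc_id = "placeholder-id"  # supplied by the browser in the real flow

# Register the chatbot/state inputs for an already-connected stream.
httpx.post(
    f"{base}/input_hook",
    json={"webrtc_id": webrtc_id, "chatbot": [], "state": []},
)

# Read the server-sent events emitted by /outputs.
with httpx.stream(
    "GET", f"{base}/outputs", params={"webrtc_id": webrtc_id}, timeout=None
) as resp:
    for line in resp.iter_lines():
        if line.startswith("data: "):
            print(json.loads(line.removeprefix("data: "))["message"])
```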
/demo/hello_computer/requirements.txt:
--------------------------------------------------------------------------------
1 | fastrtc[stopword]
2 | python-dotenv
3 | huggingface_hub>=0.29.0
4 | twilio
--------------------------------------------------------------------------------
/demo/llama_code_editor/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Llama Code Editor
3 | emoji: 🦙
4 | colorFrom: indigo
5 | colorTo: pink
6 | sdk: gradio
7 | sdk_version: 5.16.0
8 | app_file: app.py
9 | pinned: false
10 | license: mit
11 | short_description: Create interactive HTML web pages with your voice
12 | tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN,
13 |   secret|SAMBANOVA_API_KEY, secret|GROQ_API_KEY]
14 | ---
15 | 
16 | Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
17 | 
--------------------------------------------------------------------------------
/demo/llama_code_editor/app.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI
2 | from fastapi.responses import RedirectResponse
3 | from fastrtc import Stream
4 | from gradio.utils import get_space
5 | 
6 | try:
7 |     from demo.llama_code_editor.handler import (
8 |         CodeHandler,
9 |     )
10 |     from demo.llama_code_editor.ui import demo as ui
11 | except (ImportError, ModuleNotFoundError):
12 |     from handler import CodeHandler
13 |     from ui import demo as ui
14 | 
15 | 
16 | stream = Stream(
17 |     handler=CodeHandler,
18 |     modality="audio",
19 |     mode="send-receive",
20 |     concurrency_limit=10 if get_space() else None,
21 |     time_limit=90 if get_space() else None,
22 | )
23 | 
24 | stream.ui = ui
25 | 
26 | app = FastAPI()
27 | 
28 | 
29 | @app.get("/")
30 | async def _():
31 |     url = "/ui" if not get_space() else "https://fastrtc-llama-code-editor.hf.space/ui/"
32 |     return RedirectResponse(url)
33 | 
34 | 
35 | if __name__ == "__main__":
36 |     import os
37 | 
38 |     if (mode := os.getenv("MODE")) == "UI":
39 |         stream.ui.launch(server_port=7860, server_name="0.0.0.0")
40 |     elif mode == "PHONE":
41 |         stream.fastphone(host="0.0.0.0", port=7860)
42 |     else:
43 |         import uvicorn
44 | 
45 |         uvicorn.run(app, host="0.0.0.0", port=7860)
46 | 
--------------------------------------------------------------------------------
/demo/llama_code_editor/assets/sandbox.html:
--------------------------------------------------------------------------------
This may take a few moments