├── .gitignore ├── Dockerfile ├── README.md ├── bot.py ├── env.example ├── local_development_runner.py ├── requirements.txt └── runner.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | # Ruff stuff: 171 | .ruff_cache/ 172 | 173 | # PyPI configuration file 174 | .pypirc 175 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM dailyco/pipecat-base:latest 2 | 3 | COPY ./requirements.txt requirements.txt 4 | 5 | RUN pip install --no-cache-dir --upgrade -r requirements.txt 6 | 7 | COPY ./runner.py runner.py 8 | COPY ./bot.py bot.py 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Groq Voice AI Web and Phone Starter Kit 2 | 3 | ## Groq + Llama 4 + Pipecat + function calling + (optionally) Twilio 4 | 5 | A conversational agent built with Pipecat, powered by Groq's APIs and Llama 4. Ask it about the weather! 6 | 7 | You can deploy this bot to Pipecat Cloud and optionally connect it to Twilio to make it available by phone. 8 | 9 | ## Configuration 10 | 11 | Rename the `env.example` file to `.env` and set the following: 12 | 13 | - `GROQ_API_KEY` 14 | 15 | You'll need a Docker Hub account to deploy. You'll also need a Twilio account if you want to call your bot. 16 | 17 | ## Set up a local development environment 18 | 19 | Install dependencies. 20 | 21 | ```bash 22 | python3 -m venv .venv 23 | source .venv/bin/activate 24 | pip install -r requirements.txt 25 | ``` 26 | 27 | ``` 28 | ## Run the bot 29 | python bot.py 30 | ``` 31 | 32 | To talk to the bot, copy the URL that prints out in the console and open it in your browser. This URL will be `http://localhost:7860` unless you change it in the code for the local development server: 33 | 34 | ``` 35 | % python bot.py 36 | 2025-04-13 21:09:21.484 | INFO | pipecat::13 - ᓚᘏᗢ Pipecat 0.0.63 ᓚᘏᗢ 37 | Looking for dist directory at: /Users/khkramer/src/pcc-groq-twilio/venv/lib/python3.11/site-packages/pipecat_ai_small_webrtc_prebuilt/client/dist 38 | 2025-04-13 21:09:22.194 | INFO | __main__::218 - Starting local development mode 39 | INFO:pipecat-server:Successfully loaded bot from /Users/khkramer/src/pcc-groq-twilio/bot.py, starting web server... 40 | INFO: Started server process [48084] 41 | INFO: Waiting for application startup. 42 | INFO: Application startup complete. 
43 | INFO: Uvicorn running on http://localhost:7860 (Press CTRL+C to quit) 44 | ``` 45 | 46 | To write your own web, iOS, Android, or C++ clients that connect to this bot, see the [Pipecat Client SDK documentation](https://docs.pipecat.ai/client/introduction). 47 | 48 | When you test the bot locally, you are talking to the bot using the Pipecat [serverless WebRTC transport](https://docs.pipecat.ai/server/services/transport/small-webrtc). 49 | 50 | You can use this transport in production, but we generally recommend using WebRTC cloud infrastructure like [Daily](https://docs.pipecat.ai/server/services/transport/daily) if you are running voice AI agents in production at scale. 51 | 52 | See below for both deploying to Daily's Pipecat Cloud voice agent hosting service and using the Daily WebRTC transport. 53 | 54 | ## Optionally deploy to Pipecat Cloud 55 | 56 | You can host Pipecat agents on any infrastructure that can run Python code and that supports your preferred network transport (WebSockets, WebRTC, etc). See the [Deployment Guide](https://docs.pipecat.ai/guides/deployment/overview) in the Pipecat docs for information and deployment examples. 57 | 58 | Pipecat Cloud is a voice agent hosting service built on Daily's [global realtime infrastructure](https://www.daily.co/blog/global-mesh-network/). Pipecat Cloud provides enterprise-grade scalability and management for voice AI agents. 59 | 60 | When you use Pipecat Cloud, Daily WebRTC transport for 1:1 audio sessions is [included at no extra cost](https://docs.pipecat.daily.co/pipecat-in-production/daily-webrtc). The code for using Daily WebRTC is in the [bot.py](bot.py) file. So when you deploy this code to Pipecat Cloud your bot will automatically use Daily WebRTC instead of the serverless WebRTC transport. 61 | 62 | Here are instructions for deploying to Pipecat Cloud, taken from the [Pipecat Cloud Quickstart](https://docs.pipecat.io/guides/pipecat-cloud/quickstart/). 63 | 64 | Build the docker image: 65 | 66 | ```bash 67 | docker build --platform=linux/arm64 -t groq-llama:latest . 68 | docker tag groq-llama:latest your-username/groq-llama:0.1 69 | docker push your-username/groq-llama:0.1 70 | ``` 71 | 72 | Deploy it to Pipecat Cloud: 73 | 74 | You will either need to set your Docker repository to be public, or [provide credentials](https://docs.pipecat.daily.co/agents/deploy#using-pcc-deploy-toml) so Pipecat Cloud can pull from your private repository. 75 | 76 | ``` 77 | pcc auth login # to authenticate 78 | pcc secrets set groq-llama-secrets --file .env # to store your environment variables 79 | pcc deploy groq-llama your-username/groq-llama:0.1 --secrets groq-llama-secrets 80 | ``` 81 | 82 | After you've deployed your bot, click on the `groq-llama` agent in the Pipecat Cloud console and then navigate to the `Sandbox` tab to try out your bot. 83 | 84 | To learn more about scaling and managing agents using the Pipecat Cloud APIs, see the [Pipecat Cloud documentation](https://docs.pipecat.daily.co/introduction). 85 | 86 | ## Configuring Twilio support 87 | 88 | To connect this agent to Twilio: 89 | 90 | 1. [Purchase a number from Twilio](https://help.twilio.com/articles/223135247-How-to-Search-for-and-Buy-a-Twilio-Phone-Number-from-Console), if you haven't already 91 | 92 | 2. Collect your Pipecat Cloud organization name: 93 | 94 | ```bash 95 | pcc organizations list 96 | ``` 97 | 98 | You'll use this information in the next step. 99 | 100 | 3. 
Create a [TwiML Bin](https://help.twilio.com/articles/360043489573-Getting-started-with-TwiML-Bins): 101 | 102 | ```xml 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | ``` 112 | 113 | where: 114 | 115 | - AGENT_NAME is your agent's name (the name you used when deploying) 116 | - ORGANIZATION_NAME is the value returned in the previous step 117 | 118 | In this case, it will look something like `value="groq-llama.level-gorilla-gray-123"`. 119 | 120 | 4. Assign the TwiML Bin to your phone number: 121 | 122 | - Select your number from the Twilio dashboard 123 | - In the `Configure` tab, set `A call comes in` to `TwiML Bin` 124 | - Set `TwiML Bin` to the Bin you created in the previous step 125 | - Save your configuration 126 | 127 | Now call your Twilio number, and you should be connected to your bot! 128 | 129 | ## Customizing the Bot 130 | 131 | ### Changing the Bot Personality 132 | 133 | Modify the system prompt in `bot.py`: 134 | 135 | ```python 136 | instructions="""You are a helpful and friendly AI... 137 | ``` 138 | 139 | ### Adding more function calls 140 | 141 | Search for `get_current_weather` in the codebase to find where the existing function calls are registered. Learn all about Pipecat function calling [here](https://docs.pipecat.io/guides/function-calling/). 142 | -------------------------------------------------------------------------------- /bot.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2024–2025, Daily 3 | # 4 | # SPDX-License-Identifier: BSD 2-Clause License 5 | # 6 | 7 | import json 8 | import os 9 | import sys 10 | 11 | from dotenv import load_dotenv 12 | from loguru import logger 13 | from pipecat.adapters.schemas.function_schema import FunctionSchema 14 | from pipecat.adapters.schemas.tools_schema import ToolsSchema 15 | from pipecat.audio.vad.silero import SileroVADAnalyzer 16 | from pipecat.audio.vad.vad_analyzer import VADParams 17 | from pipecat.pipeline.pipeline import Pipeline 18 | from pipecat.pipeline.runner import PipelineRunner 19 | from pipecat.pipeline.task import PipelineParams, PipelineTask 20 | from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext 21 | from pipecat.serializers.twilio import TwilioFrameSerializer 22 | from pipecat.transports.network.fastapi_websocket import ( 23 | FastAPIWebsocketParams, 24 | FastAPIWebsocketTransport, 25 | ) 26 | from pipecat.transports.services.daily import DailyParams, DailyTransport 27 | from pipecatcloud.agent import ( 28 | DailySessionArguments, 29 | SessionArguments, 30 | WebSocketSessionArguments, 31 | ) 32 | 33 | from pipecat.services.groq.llm import GroqLLMService 34 | from pipecat.services.groq.stt import GroqSTTService 35 | from pipecat.services.groq.tts import GroqTTSService 36 | from pipecat.processors.aggregators.llm_response import LLMUserAggregatorParams 37 | 38 | 39 | load_dotenv(override=True) 40 | 41 | instructions = """ 42 | You are a helpful assistant in a voice conversation. Your goal is to respond in a friendly, creative, and succinct way to the user's statements and questions. Your output will be converted to audio so don't include special characters in your answers. 43 | 44 | Keep your answers short unless asked to perform a task that requires a long answer, or asked to provide detail. 45 | 46 | You have access to a function get_current_weather that you can use to look up the current weather in a location. 
get_current_weather is a demonstration function that always returns the same information for every location. If the user expresses confusion about the weather information you provide, tell them that everything is working as expected, and the get_current_weather function is a demonstration function that always returns hard-coded data. 47 | 48 | If the user asks what you can do, you can respond that you can have a conversation with them and that you have access to current weather information anywhere in the world. 49 | 50 | Now say "Hi, nice to be talking to you!" and then wait for the user to respond. 51 | """ 52 | 53 | 54 | async def fetch_weather_from_api(function_name, tool_call_id, args, llm, context, result_callback): 55 | await result_callback({"conditions": "nice", "temperature": "75"}) 56 | 57 | 58 | async def main(args: SessionArguments): 59 | if isinstance(args, WebSocketSessionArguments): 60 | logger.debug("Starting WebSocket bot") 61 | 62 | start_data = args.websocket.iter_text() 63 | await start_data.__anext__() 64 | call_data = json.loads(await start_data.__anext__()) 65 | stream_sid = call_data["start"]["streamSid"] 66 | transport = FastAPIWebsocketTransport( 67 | websocket=args.websocket, 68 | params=FastAPIWebsocketParams( 69 | audio_in_enabled=True, 70 | audio_out_enabled=True, 71 | add_wav_header=False, 72 | vad_enabled=True, 73 | vad_analyzer=SileroVADAnalyzer(params=VADParams(stop_secs=0.4)), 74 | vad_audio_passthrough=True, 75 | serializer=TwilioFrameSerializer(stream_sid), 76 | ), 77 | ) 78 | elif isinstance(args, DailySessionArguments): 79 | logger.debug("Starting Daily bot") 80 | transport = DailyTransport( 81 | args.room_url, 82 | args.token, 83 | "Respond bot", 84 | DailyParams( 85 | audio_in_enabled=True, 86 | audio_out_enabled=True, 87 | transcription_enabled=False, 88 | vad_enabled=True, 89 | vad_analyzer=SileroVADAnalyzer(params=VADParams(stop_secs=0.5)), 90 | vad_audio_passthrough=True, 91 | ), 92 | ) 93 | else: 94 | from pipecat.transports.network.webrtc_connection import SmallWebRTCConnection 95 | from pipecat.transports.base_transport import TransportParams 96 | from pipecat.transports.network.small_webrtc import SmallWebRTCTransport 97 | 98 | if isinstance(args, SmallWebRTCConnection): 99 | logger.debug("Starting SmallWebRTC bot") 100 | transport = SmallWebRTCTransport( 101 | webrtc_connection=args, 102 | params=TransportParams( 103 | audio_in_enabled=True, 104 | audio_out_enabled=True, 105 | vad_enabled=True, 106 | vad_analyzer=SileroVADAnalyzer(), 107 | vad_audio_passthrough=True, 108 | ), 109 | ) 110 | else: 111 | raise ValueError(f"Unsupported session arguments type: {type(args)}") 112 | 113 | stt = GroqSTTService(api_key=os.getenv("GROQ_API_KEY"), model="distil-whisper-large-v3-en") 114 | 115 | tts = GroqTTSService(api_key=os.getenv("GROQ_API_KEY")) 116 | 117 | llm = GroqLLMService( 118 | api_key=os.getenv("GROQ_API_KEY"), model="meta-llama/llama-4-maverick-17b-128e-instruct" 119 | ) 120 | # You can also register a function_name of None to get all functions 121 | # sent to the same callback with an additional function_name parameter. 122 | llm.register_function("get_current_weather", fetch_weather_from_api) 123 | 124 | weather_function = FunctionSchema( 125 | name="get_current_weather", 126 | description="Get the current weather", 127 | properties={ 128 | "location": { 129 | "type": "string", 130 | "description": "The city and state, e.g. 
San Francisco, CA", 131 | }, 132 | "format": { 133 | "type": "string", 134 | "enum": ["celsius", "fahrenheit"], 135 | "description": "The temperature unit to use. Infer this from the user's location.", 136 | }, 137 | }, 138 | required=["location"], 139 | ) 140 | tools = ToolsSchema(standard_tools=[weather_function]) 141 | messages = [ 142 | { 143 | "role": "system", 144 | "content": instructions, 145 | }, 146 | ] 147 | 148 | context = OpenAILLMContext(messages, tools) 149 | context_aggregator = llm.create_context_aggregator( 150 | context, user_params=LLMUserAggregatorParams(aggregation_timeout=0.05) 151 | ) 152 | 153 | pipeline = Pipeline( 154 | [ 155 | transport.input(), 156 | stt, 157 | context_aggregator.user(), 158 | llm, 159 | tts, 160 | transport.output(), 161 | context_aggregator.assistant(), 162 | ] 163 | ) 164 | 165 | task = PipelineTask( 166 | pipeline, 167 | params=PipelineParams( 168 | allow_interruptions=True, 169 | enable_metrics=True, 170 | enable_usage_metrics=True, 171 | ), 172 | ) 173 | 174 | if isinstance(args, DailySessionArguments): 175 | 176 | @transport.event_handler("on_first_participant_joined") 177 | async def on_first_participant_joined(transport, participant): 178 | await task.queue_frames([context_aggregator.user().get_context_frame()]) 179 | 180 | @transport.event_handler("on_participant_left") 181 | async def on_participant_left(transport, participant, reason): 182 | await task.cancel() 183 | else: 184 | 185 | @transport.event_handler("on_client_connected") 186 | async def on_client_connected(transport, client): 187 | logger.info("Client connected") 188 | # Kick off the conversation. 189 | await task.queue_frames([context_aggregator.user().get_context_frame()]) 190 | 191 | @transport.event_handler("on_client_disconnected") 192 | async def on_client_disconnected(transport, client): 193 | logger.info("Client disconnected") 194 | await task.cancel() 195 | 196 | @transport.event_handler("on_client_closed") 197 | async def on_client_closed(transport, client): 198 | logger.info("Client closed connection") 199 | await task.cancel() 200 | 201 | runner = PipelineRunner(handle_sigint=False, force_gc=True) 202 | 203 | await runner.run(task) 204 | 205 | 206 | async def bot(args: SessionArguments): 207 | try: 208 | await main(args) 209 | logger.info("Bot process completed") 210 | except Exception as e: 211 | logger.exception(f"Error in bot process: {str(e)}") 212 | raise 213 | 214 | 215 | if __name__ == "__main__": 216 | from local_development_runner import local_development_main 217 | 218 | logger.remove() 219 | logger.add(sys.stderr, level="DEBUG") 220 | 221 | logger.info("Starting local development mode") 222 | local_development_main() 223 | -------------------------------------------------------------------------------- /env.example: -------------------------------------------------------------------------------- 1 | GROQ_API_KEY=get it from the Groq console -------------------------------------------------------------------------------- /local_development_runner.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2024–2025, Daily 3 | # 4 | # SPDX-License-Identifier: BSD 2-Clause License 5 | # 6 | 7 | import asyncio 8 | import importlib.util 9 | import logging 10 | import os 11 | import sys 12 | from contextlib import asynccontextmanager 13 | from inspect import iscoroutinefunction 14 | from typing import Any, Callable, Dict, Optional 15 | 16 | import uvicorn 17 | from dotenv import load_dotenv 18 | from 
fastapi import BackgroundTasks, FastAPI 19 | from fastapi.responses import RedirectResponse 20 | from pipecat_ai_small_webrtc_prebuilt.frontend import SmallWebRTCPrebuiltUI 21 | 22 | from pipecat.transports.network.webrtc_connection import SmallWebRTCConnection 23 | 24 | # Load environment variables 25 | load_dotenv(override=True) 26 | 27 | # Configure logger 28 | logging.basicConfig( 29 | level=logging.INFO, 30 | format="%(message)s", 31 | handlers=[logging.StreamHandler()], 32 | ) 33 | logger = logging.getLogger("pipecat-server") 34 | # We have to explicitly set the new logger level to INFO 35 | logger.setLevel(logging.INFO) 36 | 37 | app = FastAPI() 38 | 39 | # Store connections by pc_id 40 | pcs_map: Dict[str, SmallWebRTCConnection] = {} 41 | 42 | ice_servers = ["stun:stun.l.google.com:19302"] 43 | 44 | # Mount the frontend at / 45 | app.mount("/client", SmallWebRTCPrebuiltUI) 46 | 47 | # Store the bot module and function info 48 | bot_module: Any = None 49 | run_bot_func: Optional[Callable] = None 50 | 51 | 52 | def import_bot_file(file_path: str) -> Any: 53 | """Dynamically import the bot file and determine how to run it. 54 | 55 | Returns: 56 | module: The imported module 57 | """ 58 | if not os.path.exists(file_path): 59 | raise FileNotFoundError(f"Bot file not found: {file_path}") 60 | 61 | # Extract module name without extension 62 | module_name = os.path.splitext(os.path.basename(file_path))[0] 63 | 64 | # Load the module 65 | spec = importlib.util.spec_from_file_location(module_name, file_path) 66 | if not spec or not spec.loader: 67 | raise ImportError(f"Could not load spec for {file_path}") 68 | 69 | module = importlib.util.module_from_spec(spec) 70 | sys.modules[module_name] = module 71 | spec.loader.exec_module(module) 72 | 73 | # Fall back to main function 74 | if hasattr(module, "main") and iscoroutinefunction(module.main): 75 | return module 76 | 77 | raise AttributeError(f"No run_bot or async main function found in {file_path}") 78 | 79 | 80 | @app.get("/", include_in_schema=False) 81 | async def root_redirect(): 82 | return RedirectResponse(url="/client/") 83 | 84 | 85 | @app.post("/api/offer") 86 | async def offer(request: dict, background_tasks: BackgroundTasks): 87 | if not run_bot_func: 88 | return {"error": "No bot function available to run"} 89 | 90 | pc_id = request.get("pc_id") 91 | 92 | if pc_id and pc_id in pcs_map: 93 | pipecat_connection = pcs_map[pc_id] 94 | logger.info(f"Reusing existing connection for pc_id: {pc_id}") 95 | await pipecat_connection.renegotiate( 96 | sdp=request["sdp"], type=request["type"], restart_pc=request.get("restart_pc", False) 97 | ) 98 | else: 99 | pipecat_connection = SmallWebRTCConnection(ice_servers) 100 | await pipecat_connection.initialize(sdp=request["sdp"], type=request["type"]) 101 | 102 | @pipecat_connection.event_handler("closed") 103 | async def handle_disconnected(webrtc_connection: SmallWebRTCConnection): 104 | logger.info(f"Discarding peer connection for pc_id: {webrtc_connection.pc_id}") 105 | pcs_map.pop(webrtc_connection.pc_id, None) 106 | 107 | # We've already checked that run_bot_func exists 108 | assert run_bot_func is not None 109 | background_tasks.add_task(run_bot_func, pipecat_connection) 110 | 111 | answer = pipecat_connection.get_answer() 112 | # Updating the peer connection inside the map 113 | pcs_map[answer["pc_id"]] = pipecat_connection 114 | 115 | return answer 116 | 117 | 118 | @asynccontextmanager 119 | async def lifespan(app: FastAPI): 120 | yield # Run app 121 | coros = [pc.close() for pc in 
pcs_map.values()] 122 | await asyncio.gather(*coros) 123 | pcs_map.clear() 124 | 125 | 126 | async def run_standalone_bot() -> None: 127 | """Run a standalone bot that doesn't require WebRTC""" 128 | global run_bot_func 129 | if run_bot_func is not None: 130 | await run_bot_func() 131 | else: 132 | raise RuntimeError("No bot function available to run") 133 | 134 | 135 | def local_development_main(): 136 | host = "localhost" 137 | port = 7860 138 | # Get the __file__ of the script that called main() 139 | import inspect 140 | 141 | caller_frame = inspect.stack()[1] 142 | caller_globals = caller_frame.frame.f_globals 143 | bot_file = caller_globals.get("__file__") 144 | 145 | if not bot_file: 146 | print("❌ Could not determine the bot file. Pass it explicitly to main().") 147 | sys.exit(1) 148 | 149 | # Import the bot file 150 | try: 151 | global run_bot_func, bot_module 152 | bot_module = import_bot_file(bot_file) 153 | run_bot_func = bot_module.main 154 | logger.info(f"Successfully loaded bot from {bot_file}, starting web server...") 155 | uvicorn.run(app, host=host, port=port) 156 | 157 | except Exception as e: 158 | logger.error(f"Error loading bot file: {e}") 159 | sys.exit(1) 160 | 161 | 162 | if __name__ == "__main__": 163 | local_development_main() 164 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pipecatcloud 2 | pipecat-ai[groq,daily,webrtc,silero]==0.0.65 3 | pipecat-ai-small-webrtc-prebuilt 4 | python-dotenv 5 | fastapi 6 | uvicorn 7 | aiohttp 8 | websockets 9 | -------------------------------------------------------------------------------- /runner.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2024–2025, Daily 3 | # 4 | # SPDX-License-Identifier: BSD 2-Clause License 5 | # 6 | 7 | import argparse 8 | import os 9 | import time 10 | from typing import Optional 11 | 12 | import aiohttp 13 | from loguru import logger 14 | from pipecat.transports.services.helpers.daily_rest import ( 15 | DailyRESTHelper, 16 | DailyRoomParams, 17 | DailyRoomProperties, 18 | ) 19 | 20 | 21 | async def configure(aiohttp_session: aiohttp.ClientSession): 22 | (url, token, _) = await configure_with_args(aiohttp_session) 23 | return (url, token) 24 | 25 | 26 | async def configure_with_args( 27 | aiohttp_session: aiohttp.ClientSession, parser: Optional[argparse.ArgumentParser] = None 28 | ): 29 | if not parser: 30 | parser = argparse.ArgumentParser(description="Daily AI SDK Bot Sample") 31 | parser.add_argument( 32 | "-u", "--url", type=str, required=False, help="URL of the Daily room to join" 33 | ) 34 | parser.add_argument( 35 | "-k", 36 | "--apikey", 37 | type=str, 38 | required=False, 39 | help="Daily API Key (needed to create an owner token for the room)", 40 | ) 41 | 42 | args, unknown = parser.parse_known_args() 43 | key = args.apikey or os.getenv("DAILY_API_KEY") 44 | daily_rest_helper = DailyRESTHelper( 45 | daily_api_key=key, 46 | daily_api_url=os.getenv("DAILY_API_URL", "https://api.daily.co/v1"), 47 | aiohttp_session=aiohttp_session, 48 | ) 49 | url = ( 50 | args.url 51 | or ( 52 | # Create a Daily room with a 10 minute expiration and no prejoin UI 53 | await daily_rest_helper.create_room( 54 | DailyRoomParams( 55 | properties=DailyRoomProperties( 56 | exp=time.time() + 600, start_video_off=True, enable_prejoin_ui=False 57 | ) 58 | ) 59 | ) 60 | ).url 61 | ) 62 | 63 | if not url: 64 | raise 
Exception( 65 | "No Daily room specified. Use the -u/--url option from the command line, or set DAILY_SAMPLE_ROOM_URL in your environment to specify a Daily room URL." 66 | ) 67 | 68 | if not key: 69 | raise Exception( 70 | "No Daily API key specified. Use the -k/--apikey option from the command line, or set DAILY_API_KEY in your environment to specify a Daily API key, available from https://pipecat.daily.co/settings/daily." 71 | ) 72 | 73 | # Create a meeting token for the given room with an expiration 1 hour in 74 | # the future. 75 | expiry_time: float = 60 * 60 76 | 77 | token = await daily_rest_helper.get_token(url, expiry_time) 78 | logger.info(f"Daily room URL: {url}") 79 | return (url, token, args) 80 | --------------------------------------------------------------------------------
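The README's "Adding more function calls" section points at the `get_current_weather` registration in `bot.py`. As a minimal illustration of that pattern, the sketch below defines a second, hypothetical tool — a `get_current_time` function with a `fetch_time` handler — using the same `FunctionSchema` shape and handler signature that `bot.py` already uses. The tool name, its schema, and the handler are assumptions for demonstration only, not part of this repository.

```python
# Hypothetical second tool, mirroring the get_current_weather pattern in bot.py.
from datetime import datetime
from zoneinfo import ZoneInfo

from pipecat.adapters.schemas.function_schema import FunctionSchema


async def fetch_time(function_name, tool_call_id, args, llm, context, result_callback):
    # Same handler signature as fetch_weather_from_api in bot.py: read the
    # arguments the LLM supplied and hand the result back via result_callback.
    tz = args.get("timezone", "UTC")
    now = datetime.now(ZoneInfo(tz))
    await result_callback({"timezone": tz, "time": now.strftime("%H:%M")})


time_function = FunctionSchema(
    name="get_current_time",
    description="Get the current time in a timezone",
    properties={
        "timezone": {
            "type": "string",
            "description": "An IANA timezone name, e.g. America/New_York",
        },
    },
    required=["timezone"],
)

# Inside main() in bot.py, next to the existing weather function:
#   llm.register_function("get_current_time", fetch_time)
#   tools = ToolsSchema(standard_tools=[weather_function, time_function])
```

If you add a tool like this, it also helps to mention the new capability in the `instructions` system prompt so the model knows when to call it.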
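`runner.py` exposes a small `configure()` helper that creates a temporary Daily room (10 minute expiration, no prejoin UI) and returns a room URL plus a one-hour meeting token. The sketch below shows one way it might be driven from a standalone script when testing against Daily directly; the script is not part of the repository and assumes `DAILY_API_KEY` is set in the environment (or passed with `-k/--apikey`).

```python
# Minimal sketch: fetch a Daily room URL and token via runner.configure().
# Assumes DAILY_API_KEY is set so runner.py can create a room over the Daily REST API.
import asyncio

import aiohttp

from runner import configure


async def main():
    async with aiohttp.ClientSession() as session:
        room_url, token = await configure(session)
        print(f"Room URL: {room_url}")
        print(f"Token (truncated): {token[:16]}...")


if __name__ == "__main__":
    asyncio.run(main())
```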