├── pdf_ocr ├── src │ ├── __init__.py │ ├── functions │ │ ├── __init__.py │ │ └── openai_chat.py │ ├── workflows │ │ ├── __init__.py │ │ ├── files.py │ │ └── pdf.py │ ├── client.py │ └── services.py ├── .gitignore ├── .env.Example ├── screenshot-run.png ├── Dockerfile ├── pyproject.toml └── README.md ├── agent_apis ├── src │ ├── __init__.py │ ├── functions │ │ ├── __init__.py │ │ ├── weather.py │ │ └── llm.py │ ├── workflows │ │ ├── __init__.py │ │ └── multistep.py │ ├── client.py │ └── services.py ├── .gitignore ├── .env.Example ├── workflow_get.png ├── workflow_run.png ├── schedule_workflow.py ├── pyproject.toml ├── README.md └── requirements.txt ├── agent_chat ├── src │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ └── agent.py │ ├── functions │ │ ├── __init__.py │ │ └── llm_chat.py │ ├── client.py │ └── services.py ├── .env.Example ├── .gitignore ├── chat_put.png ├── chat_run.png ├── chat_post.png ├── schedule_agent.py ├── event_agent.py ├── pyproject.toml └── requirements.txt ├── agent_rag ├── src │ ├── __init__.py │ ├── agents │ │ └── __init__.py │ ├── functions │ │ ├── __init__.py │ │ └── llm_chat.py │ ├── client.py │ └── services.py ├── .gitignore ├── .env.Example ├── chat_post.png ├── chat_put.png ├── chat_run.png ├── event-send-again.png └── pyproject.toml ├── agent_stream ├── src │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ └── agent.py │ ├── functions │ │ ├── __init__.py │ │ └── llm_chat.py │ ├── client.py │ └── services.py ├── .env.Example ├── .gitignore ├── chat_post.png ├── chat_put.png ├── chat_run.png ├── schedule_agent.py ├── event_agent.py └── pyproject.toml ├── agent_todo ├── src │ ├── __init__.py │ ├── agents │ │ └── __init__.py │ ├── functions │ │ ├── __init__.py │ │ ├── get_random.py │ │ ├── get_result.py │ │ ├── todo_create.py │ │ └── llm_chat.py │ ├── workflows │ │ └── __init__.py │ ├── client.py │ └── services.py ├── .env.Example ├── .gitignore ├── todo_put.png ├── todo_llm_answer.png ├── todo_first_message.png ├── 
todo_child_workflow.png ├── todo_second_message.png ├── schedule.py ├── pyproject.toml └── requirements.txt ├── agent_tool ├── src │ ├── __init__.py │ ├── agents │ │ └── __init__.py │ ├── functions │ │ ├── __init__.py │ │ └── llm_chat.py │ ├── client.py │ └── services.py ├── .gitignore ├── .env.Example ├── chat_put.png ├── chat_run.png ├── chat_post.png ├── event-send-again.png └── pyproject.toml ├── agent_video ├── src │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ └── agent.py │ ├── functions │ │ ├── __init__.py │ │ ├── context_docs.py │ │ └── llm_chat.py │ ├── workflows │ │ ├── __init__.py │ │ └── room.py │ ├── client.py │ └── services.py ├── .python-version ├── room_url.png ├── agent_messages.png ├── tavus_replica.png ├── .env.example └── pyproject.toml ├── encryption ├── src │ ├── __init__.py │ ├── functions │ │ ├── __init__.py │ │ └── function.py │ ├── workflows │ │ ├── __init__.py │ │ └── workflow.py │ ├── services.py │ ├── client.py │ ├── codec_server.py │ └── codec.py ├── .gitignore ├── .env.example ├── schedule_workflow.py ├── pyproject.toml ├── requirements.txt └── README.md ├── agent_humanloop ├── src │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ └── agent.py │ ├── functions │ │ ├── __init__.py │ │ └── function.py │ ├── client.py │ └── services.py ├── .env.Example ├── .gitignore ├── pyproject.toml ├── schedule_agent.py ├── requirements.txt └── README.md ├── audio_transcript ├── src │ ├── __init__.py │ ├── functions │ │ ├── __init__.py │ │ ├── transcribe_audio.py │ │ └── translate_text.py │ ├── workflows │ │ ├── __init__.py │ │ └── transcribe_translate.py │ ├── client.py │ └── services.py ├── .env.example ├── test.mp3 ├── ui-screenshot.png ├── pyproject.toml ├── schedule_workflow.py ├── README.md └── requirements.txt ├── child_workflows ├── src │ ├── __init__.py │ ├── functions │ │ ├── __init__.py │ │ └── function.py │ ├── workflows │ │ ├── __init__.py │ │ ├── child.py │ │ └── parent.py │ ├── client.py │ └── services.py ├── .gitignore 
├── Dockerfile ├── schedule_workflow.py ├── pyproject.toml ├── requirements.txt └── README.md ├── production_demo ├── src │ ├── __init__.py │ ├── functions │ │ ├── __init__.py │ │ ├── function.py │ │ ├── generate.py │ │ └── evaluate.py │ ├── workflows │ │ ├── __init__.py │ │ ├── child.py │ │ └── workflow.py │ ├── client.py │ └── services.py ├── .env.Example ├── .gitignore ├── ui-child.png ├── ui-parent.png ├── ui-endpoints.png ├── schedule_scale.py ├── schedule_workflow.py ├── schedule_interval.py ├── pyproject.toml └── requirements.txt ├── agent_voice ├── livekit │ ├── agent │ │ ├── src │ │ │ ├── __init__.py │ │ │ ├── agents │ │ │ │ ├── __init__.py │ │ │ │ └── agent.py │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ ├── livekit_dispatch.py │ │ │ │ └── llm_chat.py │ │ │ ├── client.py │ │ │ └── services.py │ │ ├── README.md │ │ ├── .gitignore │ │ ├── .env.example │ │ ├── schedule_agent.py │ │ ├── event_agent.py │ │ └── pyproject.toml │ ├── livekit_pipeline │ │ ├── .gitignore │ │ ├── README.md │ │ ├── .dockerignore │ │ ├── .env.example │ │ ├── entrypoint.sh │ │ ├── Dockerfile │ │ └── pyproject.toml │ ├── agent_voice_post.png │ ├── agent_voice_livekit.png │ └── agent_voice_replay.png └── pipecat │ └── pipecat_pipeline │ ├── .python-version │ ├── .env.example │ ├── README.md │ └── pyproject.toml ├── agent_telephony ├── vapi │ └── agent_vapi │ │ ├── src │ │ ├── __init__.py │ │ ├── agents │ │ │ └── __init__.py │ │ ├── functions │ │ │ ├── __init__.py │ │ │ ├── vapi_call.py │ │ │ └── llm_chat.py │ │ ├── client.py │ │ └── services.py │ │ ├── .env.Example │ │ ├── .gitignore │ │ ├── agent_call.png │ │ ├── agent_replay.png │ │ ├── event_agent.py │ │ ├── schedule_agent.py │ │ └── pyproject.toml └── twilio_livekit │ ├── agent_twilio │ ├── src │ │ ├── __init__.py │ │ ├── agents │ │ │ └── __init__.py │ │ ├── functions │ │ │ ├── __init__.py │ │ │ ├── send_agent_event.py │ │ │ ├── livekit_delete_room.py │ │ │ ├── context_docs.py │ │ │ ├── livekit_token.py │ │ │ ├── 
livekit_create_room.py │ │ │ ├── livekit_send_data.py │ │ │ ├── livekit_dispatch.py │ │ │ ├── livekit_call.py │ │ │ ├── livekit_start_recording.py │ │ │ ├── llm_logic.py │ │ │ └── livekit_outbound_trunk.py │ │ ├── client.py │ │ └── services.py │ ├── .gitignore │ ├── README.md │ ├── .env.Example │ ├── schedule_agent.py │ ├── event_agent.py │ └── pyproject.toml │ ├── livekit-trunk-setup │ ├── .python-version │ ├── .gitignore │ ├── main.py │ ├── .env.example │ ├── pyproject.toml │ └── README.md │ ├── livekit_pipeline │ ├── .gitignore │ ├── .dockerignore │ ├── README.md │ ├── .env.example │ ├── entrypoint.sh │ ├── Dockerfile │ ├── pyproject.toml │ └── src │ │ ├── restack │ │ ├── client.py │ │ └── utils.py │ │ ├── env_check.py │ │ └── pipeline.py │ ├── agent_call.png │ └── agent_replay.png ├── .gitignore ├── CONTRIBUTING.md ├── pyproject.toml └── readme.md /pdf_ocr/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_apis/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_chat/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_rag/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_stream/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_todo/src/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /agent_tool/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_video/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /encryption/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_chat/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_humanloop/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_rag/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_todo/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_tool/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /audio_transcript/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /child_workflows/src/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /pdf_ocr/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pdf_ocr/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /production_demo/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_apis/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_apis/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_chat/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_humanloop/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_rag/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_stream/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_stream/src/functions/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_todo/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_todo/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_tool/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_video/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /agent_video/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_video/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_video/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /encryption/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /encryption/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /agent_humanloop/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_voice/livekit/agent/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /audio_transcript/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /audio_transcript/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /child_workflows/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /child_workflows/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /production_demo/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /production_demo/src/workflows/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/agent_voice/livekit/agent/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pdf_ocr/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .env 3 | 4 | -------------------------------------------------------------------------------- /agent_apis/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .env 3 | 4 | -------------------------------------------------------------------------------- /agent_voice/livekit/agent/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /encryption/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .env 3 | 4 | -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pdf_ocr/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_apis/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_chat/.env.Example: 
-------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_stream/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_todo/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_voice/pipecat/pipecat_pipeline/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /encryption/.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_humanloop/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/src/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_voice/livekit/livekit_pipeline/.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | .env.local -------------------------------------------------------------------------------- /audio_transcript/.env.example: 
-------------------------------------------------------------------------------- 1 | OPENAI_API_KEY=your-openai-api-key 2 | -------------------------------------------------------------------------------- /production_demo/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/src/functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit-trunk-setup/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | .env.local -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | VAPI_TOKEN= -------------------------------------------------------------------------------- /agent_stream/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | -------------------------------------------------------------------------------- /agent_chat/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | 7 | -------------------------------------------------------------------------------- /agent_rag/.gitignore: 
-------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | 7 | -------------------------------------------------------------------------------- /agent_todo/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | 7 | -------------------------------------------------------------------------------- /agent_tool/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | 7 | -------------------------------------------------------------------------------- /agent_voice/livekit/agent/README.md: -------------------------------------------------------------------------------- 1 | 2 | See parent readme at /agent_voice/livekit/README.md -------------------------------------------------------------------------------- /agent_humanloop/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | 7 | -------------------------------------------------------------------------------- /child_workflows/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | 7 | -------------------------------------------------------------------------------- /production_demo/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | 7 | -------------------------------------------------------------------------------- /agent_rag/.env.Example: -------------------------------------------------------------------------------- 1 | # Restack API key for the llm call 2 | OPENAI_API_KEY= 
-------------------------------------------------------------------------------- /agent_tool/.env.Example: -------------------------------------------------------------------------------- 1 | # Restack API key for the llm call 2 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_voice/livekit/agent/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | -------------------------------------------------------------------------------- /agent_voice/livekit/livekit_pipeline/README.md: -------------------------------------------------------------------------------- 1 | 2 | See parent readme at /agent_voice/livekit/README.md -------------------------------------------------------------------------------- /agent_chat/chat_put.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_chat/chat_put.png -------------------------------------------------------------------------------- /agent_chat/chat_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_chat/chat_run.png -------------------------------------------------------------------------------- /agent_rag/chat_post.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_rag/chat_post.png -------------------------------------------------------------------------------- /agent_rag/chat_put.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_rag/chat_put.png -------------------------------------------------------------------------------- /agent_rag/chat_run.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_rag/chat_run.png -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | -------------------------------------------------------------------------------- /agent_todo/todo_put.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_todo/todo_put.png -------------------------------------------------------------------------------- /agent_tool/chat_put.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_tool/chat_put.png -------------------------------------------------------------------------------- /agent_tool/chat_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_tool/chat_run.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .venv 5 | .ruff_cache 6 | .env 7 | .vscode 8 | uv.lock 9 | -------------------------------------------------------------------------------- /agent_chat/chat_post.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_chat/chat_post.png -------------------------------------------------------------------------------- /agent_stream/chat_post.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_stream/chat_post.png -------------------------------------------------------------------------------- /agent_stream/chat_put.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_stream/chat_put.png -------------------------------------------------------------------------------- /agent_stream/chat_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_stream/chat_run.png -------------------------------------------------------------------------------- /agent_tool/chat_post.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_tool/chat_post.png -------------------------------------------------------------------------------- /agent_video/room_url.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_video/room_url.png -------------------------------------------------------------------------------- /audio_transcript/test.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/audio_transcript/test.mp3 -------------------------------------------------------------------------------- /pdf_ocr/screenshot-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/pdf_ocr/screenshot-run.png -------------------------------------------------------------------------------- /agent_apis/workflow_get.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_apis/workflow_get.png -------------------------------------------------------------------------------- /agent_apis/workflow_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_apis/workflow_run.png -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .pytest_cache 3 | venv 4 | .env 5 | .vscode 6 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit-trunk-setup/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | .venv 3 | .env 4 | uv.lock 5 | inbound_trunk.json -------------------------------------------------------------------------------- /production_demo/ui-child.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/production_demo/ui-child.png -------------------------------------------------------------------------------- /agent_rag/event-send-again.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_rag/event-send-again.png -------------------------------------------------------------------------------- /agent_todo/todo_llm_answer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_todo/todo_llm_answer.png -------------------------------------------------------------------------------- 
/agent_tool/event-send-again.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_tool/event-send-again.png -------------------------------------------------------------------------------- /agent_video/agent_messages.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_video/agent_messages.png -------------------------------------------------------------------------------- /agent_video/tavus_replica.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_video/tavus_replica.png -------------------------------------------------------------------------------- /agent_voice/livekit/livekit_pipeline/.dockerignore: -------------------------------------------------------------------------------- 1 | .venv 2 | .env.local 3 | .gitignore 4 | .dockerignore 5 | __pycache__ -------------------------------------------------------------------------------- /production_demo/ui-parent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/production_demo/ui-parent.png -------------------------------------------------------------------------------- /agent_todo/todo_first_message.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_todo/todo_first_message.png -------------------------------------------------------------------------------- /production_demo/ui-endpoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/production_demo/ui-endpoints.png 
-------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/.dockerignore: -------------------------------------------------------------------------------- 1 | .venv 2 | .env.local 3 | .gitignore 4 | .dockerignore 5 | __pycache__ -------------------------------------------------------------------------------- /agent_todo/todo_child_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_todo/todo_child_workflow.png -------------------------------------------------------------------------------- /agent_todo/todo_second_message.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_todo/todo_second_message.png -------------------------------------------------------------------------------- /audio_transcript/ui-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/audio_transcript/ui-screenshot.png -------------------------------------------------------------------------------- /agent_voice/livekit/agent_voice_post.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_voice/livekit/agent_voice_post.png -------------------------------------------------------------------------------- /agent_voice/livekit/agent_voice_livekit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_voice/livekit/agent_voice_livekit.png -------------------------------------------------------------------------------- /agent_voice/livekit/agent_voice_replay.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_voice/livekit/agent_voice_replay.png -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_call.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_telephony/twilio_livekit/agent_call.png -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/agent_call.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_telephony/vapi/agent_vapi/agent_call.png -------------------------------------------------------------------------------- /agent_voice/pipecat/pipecat_pipeline/.env.example: -------------------------------------------------------------------------------- 1 | DEEPGRAM_API_KEY= 2 | OPENAI_API_KEY= 3 | ELEVENLABS_API_KEY= 4 | 5 | DAILY_API_URL= 6 | DAILY_API_KEY= -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_replay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_telephony/twilio_livekit/agent_replay.png -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/README.md: -------------------------------------------------------------------------------- 1 | 2 | See parent README.md at /agent_telephony/twilio/readme.md for instructions on how to run the agent. 
-------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/agent_replay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/restackio/examples-python/HEAD/agent_telephony/vapi/agent_vapi/agent_replay.png -------------------------------------------------------------------------------- /agent_video/.env.example: -------------------------------------------------------------------------------- 1 | DEEPGRAM_API_KEY= 2 | CARTESIA_API_KEY= 3 | CARTESIA_VOICE_ID= 4 | ELEVENLABS_API_KEY= 5 | 6 | TAVUS_API_KEY= 7 | TAVUS_REPLICA_ID= -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/README.md: -------------------------------------------------------------------------------- 1 | 2 | See parent README.md at /agent_telephony/twilio/readme.md for instructions on how to run the agent. -------------------------------------------------------------------------------- /agent_voice/pipecat/pipecat_pipeline/README.md: -------------------------------------------------------------------------------- 1 | # Restack AI - Agent with voice 2 | 3 | Build an AI agent that users can interact with in realtime with voice. 
-------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit-trunk-setup/main.py: -------------------------------------------------------------------------------- 1 | def main(): 2 | print("Hello from twilio!") 3 | 4 | 5 | if __name__ == "__main__": 6 | main() 7 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/.env.example: -------------------------------------------------------------------------------- 1 | LIVEKIT_URL= 2 | LIVEKIT_API_KEY= 3 | LIVEKIT_API_SECRET= 4 | 5 | DEEPGRAM_API_KEY= 6 | ELEVEN_API_KEY= -------------------------------------------------------------------------------- /agent_voice/livekit/livekit_pipeline/.env.example: -------------------------------------------------------------------------------- 1 | LIVEKIT_URL="wss://XXXXX.livekit.cloud" 2 | LIVEKIT_API_KEY= 3 | LIVEKIT_API_SECRET= 4 | 5 | OPENAI_API_KEY= 6 | DEEPGRAM_API_KEY= 7 | CARTESIA_API_KEY= -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit-trunk-setup/.env.example: -------------------------------------------------------------------------------- 1 | TWILIO_ACCOUNT_SID= 2 | TWILIO_AUTH_TOKEN= 3 | TWILIO_PHONE_NUMBER="+000000000000" 4 | LIVEKIT_SIP_URI="sip:XXXXX.sip.livekit.cloud;transport=tcp" 5 | TRUNK_NAME="LiveKit Trunk" -------------------------------------------------------------------------------- /agent_voice/livekit/livekit_pipeline/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | echo "Running download-files..." 5 | uv run python src/pipeline.py download-files 6 | 7 | echo "Starting pipeline..." 
8 | exec uv run python src/pipeline.py start -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | echo "Running download-files..." 5 | uv run python src/worker.py download-files 6 | 7 | echo "Starting livekit worker..." 8 | exec uv run python src/worker.py start -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Development 4 | 5 | ### Prerequisites 6 | 7 | - Python 3.12+ 8 | - Docker 9 | - Uv 10 | 11 | ### Uv setup 12 | 13 | In each folder, to switch environment: 14 | 15 | ```bash 16 | uv venv && source .venv/bin/activate 17 | ``` 18 | -------------------------------------------------------------------------------- /child_workflows/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/astral-sh/uv:python3.10-bookworm-slim AS builder 2 | 3 | WORKDIR /app 4 | 5 | COPY . . 6 | 7 | WORKDIR /app 8 | 9 | #Install dependencies 10 | RUN uv sync --no-dev 11 | 12 | # Expose port 80 13 | EXPOSE 80 14 | 15 | CMD ["uv", "run", "services"] -------------------------------------------------------------------------------- /pdf_ocr/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/restackio/uv-torch:main 2 | 3 | WORKDIR /app 4 | 5 | COPY pyproject.toml requirements.txt ./ 6 | 7 | COPY . . 
8 | 9 | # Install dependencies 10 | RUN uv sync --no-dev 11 | 12 | # Expose port 80 13 | EXPOSE 80 14 | 15 | CMD ["uv", "run", "services"] 16 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit-trunk-setup/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_telephony_livekit_trunk_setup" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "dotenv>=0.9.9", 9 | "twilio>=9.4.6", 10 | ] -------------------------------------------------------------------------------- /agent_voice/livekit/agent/.env.example: -------------------------------------------------------------------------------- 1 | LIVEKIT_API_KEY= 2 | LIVEKIT_API_SECRET= 3 | LIVEKIT_URL="wss://XXXXlivekit.cloud" 4 | 5 | LIVEKIT_SIP_ADDRESS="XXXX.pstn.twilio.com" 6 | TWILIO_PHONE_NUMBER= 7 | TWILIO_TRUNK_AUTH_USERNAME= 8 | TWILIO_TRUNK_AUTH_PASSWORD= 9 | 10 | ELEVEN_API_KEY= 11 | DEEPGRAM_API_KEY= 12 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/.env.Example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | 3 | LIVEKIT_API_KEY= 4 | LIVEKIT_API_SECRET= 5 | LIVEKIT_URL= 6 | 7 | TWILIO_PHONE_NUMBER= 8 | TWILIO_TRUNK_AUTH_USERNAME= 9 | TWILIO_TRUNK_AUTH_PASSWORD= 10 | TWILIO_TRUNK_TERMINATION_SIP_URL= 11 | 12 | ELEVEN_API_KEY= 13 | DEEPGRAM_API_KEY= 14 | OPENAI_API_KEY= 15 | 16 | GCP_CREDENTIALS= -------------------------------------------------------------------------------- /encryption/src/functions/function.py: -------------------------------------------------------------------------------- 1 | from restack_ai.function import function, log 2 | 3 | @function.defn() 4 | async def welcome(input: str) -> str: 5 | try: 6 | 
log.info("welcome function started", input=input) 7 | return f"Hello, {input}!" 8 | except Exception as e: 9 | log.error("welcome function failed", error=e) 10 | raise e 11 | -------------------------------------------------------------------------------- /encryption/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from src.functions.function import welcome 3 | from src.client import client 4 | from src.workflows.workflow import EncryptedWorkflow 5 | 6 | async def main(): 7 | 8 | await client.start_service( 9 | workflows= [EncryptedWorkflow], 10 | functions= [welcome] 11 | ) 12 | 13 | def run_services(): 14 | asyncio.run(main()) 15 | 16 | if __name__ == "__main__": 17 | run_services() -------------------------------------------------------------------------------- /agent_voice/livekit/livekit_pipeline/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY . . 6 | 7 | RUN apt-get update && \ 8 | apt-get install -y gcc && \ 9 | rm -rf /var/lib/apt/lists/* 10 | 11 | # Install dependencies 12 | RUN uv sync --no-dev 13 | 14 | # Copy and set the entrypoint script 15 | COPY entrypoint.sh /entrypoint.sh 16 | RUN chmod +x /entrypoint.sh 17 | 18 | ENTRYPOINT ["/entrypoint.sh"] -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY . . 
6 | 7 | RUN apt-get update && \ 8 | apt-get install -y gcc && \ 9 | rm -rf /var/lib/apt/lists/* 10 | 11 | # Install dependencies 12 | RUN uv sync --no-dev 13 | 14 | # Copy and set the entrypoint script 15 | COPY entrypoint.sh /entrypoint.sh 16 | RUN chmod +x /entrypoint.sh 17 | 18 | ENTRYPOINT ["/entrypoint.sh"] -------------------------------------------------------------------------------- /agent_chat/schedule_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | import time 4 | 5 | from restack_ai import Restack 6 | 7 | 8 | async def main() -> None: 9 | client = Restack() 10 | 11 | agent_id = f"{int(time.time() * 1000)}-AgentChat" 12 | await client.schedule_agent(agent_name="AgentChat", agent_id=agent_id) 13 | 14 | sys.exit(0) 15 | 16 | 17 | def run_schedule_agent() -> None: 18 | asyncio.run(main()) 19 | 20 | 21 | if __name__ == "__main__": 22 | run_schedule_agent() 23 | -------------------------------------------------------------------------------- /child_workflows/src/functions/function.py: -------------------------------------------------------------------------------- 1 | from restack_ai.function import NonRetryableError, function, log 2 | 3 | 4 | @function.defn(name="welcome") 5 | async def welcome(function_input: str) -> str: 6 | try: 7 | log.info("welcome function started", function_input=function_input) 8 | return f"Hello, {function_input}!" 
9 | except Exception as e: 10 | log.error("welcome function failed", error=e) 11 | raise NonRetryableError(f"Welcome function failed: {e}") from e 12 | -------------------------------------------------------------------------------- /agent_humanloop/src/functions/function.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from restack_ai.function import function, log 4 | 5 | 6 | @dataclass 7 | class InputFeedback: 8 | feedback: str 9 | 10 | @function.defn() 11 | async def goodbye() -> str: 12 | log.info("goodbye function started") 13 | return "Goodbye!" 14 | 15 | @function.defn() 16 | async def feedback(input: InputFeedback) -> str: 17 | log.info("feedback function started", input=input) 18 | return f"Received feedback: {input.feedback}" 19 | -------------------------------------------------------------------------------- /encryption/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | from restack_ai import Restack 3 | from restack_ai.restack import CloudConnectionOptions 4 | from restack_ai.security import converter 5 | import dataclasses 6 | from .codec import EncryptionCodec 7 | 8 | connection_options = CloudConnectionOptions( 9 | engine_id=os.getenv("RESTACK_ENGINE_ID"), 10 | api_key=os.getenv("RESTACK_ENGINE_API_KEY"), 11 | address=os.getenv("RESTACK_ENGINE_ADDRESS"), 12 | data_converter=dataclasses.replace(converter.default(), payload_codec=EncryptionCodec()) 13 | ) 14 | client = Restack(connection_options) 15 | -------------------------------------------------------------------------------- /encryption/src/workflows/workflow.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | from restack_ai.workflow import workflow, import_functions, log 3 | with import_functions(): 4 | from src.functions.function import welcome 5 | 6 | @workflow.defn() 7 | class 
EncryptedWorkflow: 8 | @workflow.run 9 | async def run(self): 10 | log.info("EncryptedWorkflow started") 11 | result = await workflow.step(function=welcome, function_input="world", start_to_close_timeout=timedelta(seconds=120)) 12 | log.info("EncryptedWorkflow completed", result=result) 13 | return result 14 | 15 | 16 | -------------------------------------------------------------------------------- /encryption/schedule_workflow.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | from src.client import client 4 | async def main(): 5 | 6 | workflow_id = f"{int(time.time() * 1000)}-EncryptedWorkflow" 7 | run_id = await client.schedule_workflow( 8 | workflow_name="EncryptedWorkflow", 9 | workflow_id=workflow_id 10 | ) 11 | 12 | await client.get_workflow_result( 13 | workflow_id=workflow_id, 14 | run_id=run_id 15 | ) 16 | 17 | exit(0) 18 | 19 | def run_schedule_workflow(): 20 | asyncio.run(main()) 21 | 22 | if __name__ == "__main__": 23 | run_schedule_workflow() -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "examples-python" 3 | version = "0.0.1" 4 | requires-python = ">=3.10,<3.14" 5 | 6 | [dependency-groups] 7 | dev = [ 8 | "ruff>=0.9.4", 9 | ] 10 | 11 | [tool.ruff] 12 | line-length = 66 13 | 14 | [tool.ruff.lint] 15 | extend-select = ["ALL"] 16 | ignore = ["ANN401", "E501", "D100", "D101", "D102", "D103", "D107", "TRY002", "D213", "D203", "COM812", "D104", "INP001"] 17 | 18 | [tool.ruff.format] 19 | quote-style = "double" 20 | indent-style = "space" 21 | docstring-code-format = true 22 | docstring-code-line-length = 66 23 | 24 | [tool.ruff.lint.pydocstyle] 25 | convention = "google" -------------------------------------------------------------------------------- /agent_todo/src/functions/get_random.py: 
-------------------------------------------------------------------------------- 1 | import secrets 2 | 3 | from pydantic import BaseModel 4 | from restack_ai.function import NonRetryableError, function 5 | 6 | 7 | class RandomParams(BaseModel): 8 | todo_title: str 9 | 10 | 11 | @function.defn() 12 | async def get_random(params: RandomParams) -> str: 13 | try: 14 | random_number = secrets.randbelow(100) 15 | except Exception as e: 16 | error_message = f"get_random function failed: {e}" 17 | raise NonRetryableError(error_message) from e 18 | else: 19 | return f"The random number for {params.todo_title} is {random_number}." 20 | -------------------------------------------------------------------------------- /production_demo/schedule_scale.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | from restack_ai import Restack 4 | 5 | from src.workflows.workflow import ExampleWorkflowInput 6 | 7 | async def main(): 8 | 9 | client = Restack() 10 | 11 | workflow_id = f"{int(time.time() * 1000)}-ExampleWorkflow" 12 | await client.schedule_workflow( 13 | workflow_name="ExampleWorkflow", 14 | workflow_id=workflow_id, 15 | input=ExampleWorkflowInput(amount=50) 16 | ) 17 | 18 | exit(0) 19 | 20 | def run_schedule_scale(): 21 | asyncio.run(main()) 22 | 23 | if __name__ == "__main__": 24 | run_schedule_scale() -------------------------------------------------------------------------------- /agent_apis/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = 
os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /agent_chat/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /agent_rag/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /agent_todo/src/client.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /agent_tool/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /agent_video/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | 
address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /agent_stream/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /production_demo/schedule_workflow.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | from restack_ai import Restack 4 | 5 | async def main(): 6 | 7 | client = Restack() 8 | 9 | workflow_id = f"{int(time.time() * 1000)}-ChildWorkflow" 10 | run_id = await client.schedule_workflow( 11 | workflow_name="ChildWorkflow", 12 | workflow_id=workflow_id 13 | ) 14 | 15 | await client.get_workflow_result( 16 | workflow_id=workflow_id, 17 | run_id=run_id 18 | ) 19 | 20 | exit(0) 21 | 22 | def run_schedule_workflow(): 23 | asyncio.run(main()) 24 | 25 | if __name__ == "__main__": 26 | run_schedule_workflow() 
-------------------------------------------------------------------------------- /agent_voice/livekit/agent/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /production_demo/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | from restack_ai import Restack 3 | from restack_ai.restack import CloudConnectionOptions 4 | from dotenv import load_dotenv 5 | # Load environment variables from a .env file 6 | load_dotenv() 7 | 8 | 9 | engine_id = os.getenv("RESTACK_ENGINE_ID") 10 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 11 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 12 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 13 | 14 | connection_options = CloudConnectionOptions( 15 | engine_id=engine_id, 16 | address=address, 17 | api_key=api_key, 18 | api_address=api_address 19 | ) 20 | client = Restack(connection_options) -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import 
CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, address=address, api_key=api_key, api_address=api_address 18 | ) 19 | client = Restack(connection_options) 20 | -------------------------------------------------------------------------------- /audio_transcript/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | from restack_ai import Restack 3 | from restack_ai.restack import CloudConnectionOptions 4 | from dotenv import load_dotenv 5 | # Load environment variables from a .env file 6 | load_dotenv() 7 | 8 | 9 | engine_id = os.getenv("RESTACK_ENGINE_ID") 10 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 11 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 12 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 13 | 14 | connection_options = CloudConnectionOptions( 15 | engine_id=engine_id, 16 | address=address, 17 | api_key=api_key, 18 | api_address=api_address 19 | ) 20 | client = Restack(connection_options) -------------------------------------------------------------------------------- /child_workflows/schedule_workflow.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | 4 | from restack_ai import Restack 5 | 6 | 7 | async def main(): 8 | 9 | client = Restack() 10 | 11 | workflow_id = f"{int(time.time() * 1000)}-ParentWorkflow" 12 | runId = await client.schedule_workflow( 13 | workflow_name="ParentWorkflow", 14 | workflow_id=workflow_id 15 | ) 16 | 17 | await client.get_workflow_result( 18 | workflow_id=workflow_id, 19 | run_id=runId 20 | ) 21 | 22 | exit(0) 23 | 24 | def 
run_schedule_workflow(): 25 | asyncio.run(main()) 26 | 27 | if __name__ == "__main__": 28 | run_schedule_workflow() 29 | -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/event_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | 4 | from restack_ai import Restack 5 | 6 | 7 | async def main(agent_id: str, run_id: str) -> None: 8 | client = Restack() 9 | 10 | await client.send_agent_event( 11 | agent_id=agent_id, 12 | run_id=run_id, 13 | event_name="call", 14 | event_input={"messages": [{"role": "user", "content": "Tell me another joke"}]}, 15 | ) 16 | 17 | sys.exit(0) 18 | 19 | 20 | def run_event_workflow() -> None: 21 | asyncio.run(main(agent_id="your-agent-id", run_id="your-run-id")) 22 | 23 | 24 | if __name__ == "__main__": 25 | run_event_workflow() 26 | -------------------------------------------------------------------------------- /pdf_ocr/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, 18 | address=address, 19 | api_key=api_key, 20 | api_address=api_address 21 | ) 22 | client = Restack(connection_options) 23 | -------------------------------------------------------------------------------- /agent_voice/pipecat/pipecat_pipeline/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pipecat-pipeline" 3 
| version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "loguru>=0.7.3", 9 | "openai>=1.59.9", 10 | "pipecat-ai[daily,deepgram,openai,silero,elevenlabs]>=0.0.58", 11 | "python-dotenv>=1.0.1", 12 | ] 13 | 14 | [project.scripts] 15 | dev = "src.pipeline:main" 16 | 17 | [tool.hatch.build.targets.sdist] 18 | include = ["src"] 19 | 20 | [tool.hatch.build.targets.wheel] 21 | include = ["src"] 22 | 23 | [build-system] 24 | requires = ["hatchling"] 25 | build-backend = "hatchling.build" -------------------------------------------------------------------------------- /agent_humanloop/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, 18 | address=address, 19 | api_key=api_key, 20 | api_address=api_address 21 | ) 22 | client = Restack(connection_options) 23 | -------------------------------------------------------------------------------- /agent_stream/schedule_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | import time 4 | 5 | from restack_ai import Restack 6 | from src.agents.agent import AgentStream 7 | 8 | 9 | async def main() -> None: 10 | client = Restack() 11 | 12 | agent_id = f"{int(time.time() * 1000)}-{AgentStream.__name__}" 13 | run_id = await client.schedule_agent( 14 | agent_name=AgentStream.__name__, agent_id=agent_id 15 | ) 16 | 17 | await 
client.get_agent_result(agent_id=agent_id, run_id=run_id) 18 | 19 | sys.exit(0) 20 | 21 | 22 | def run_schedule_agent() -> None: 23 | asyncio.run(main()) 24 | 25 | 26 | if __name__ == "__main__": 27 | run_schedule_agent() 28 | -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/schedule_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | import time 4 | 5 | from restack_ai import Restack 6 | from src.agents.agent import AgentStream 7 | 8 | 9 | async def main() -> None: 10 | client = Restack() 11 | 12 | agent_id = f"{int(time.time() * 1000)}-{AgentStream.__name__}" 13 | run_id = await client.schedule_agent( 14 | agent_name=AgentStream.__name__, agent_id=agent_id 15 | ) 16 | 17 | await client.get_agent_result(agent_id=agent_id, run_id=run_id) 18 | 19 | sys.exit(0) 20 | 21 | 22 | def run_schedule_agent() -> None: 23 | asyncio.run(main()) 24 | 25 | 26 | if __name__ == "__main__": 27 | run_schedule_agent() 28 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, 18 | address=address, 19 | api_key=api_key, 20 | api_address=api_address, 21 | ) 22 | client = Restack(connection_options) 23 | 
-------------------------------------------------------------------------------- /agent_todo/schedule.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | import time 4 | 5 | from restack_ai import Restack 6 | from src.agents.agent_todo import AgentTodo 7 | 8 | 9 | async def main() -> None: 10 | client = Restack() 11 | 12 | agent_id = f"{int(time.time() * 1000)}-{AgentTodo.__name__}" 13 | run_id = await client.schedule_agent( 14 | agent_name=AgentTodo.__name__, 15 | agent_id=agent_id, 16 | input=AgentTodo(), 17 | ) 18 | 19 | await client.get_agent_result(agent_id=agent_id, run_id=run_id) 20 | 21 | sys.exit(0) 22 | 23 | 24 | def run_schedule() -> None: 25 | asyncio.run(main()) 26 | 27 | 28 | if __name__ == "__main__": 29 | run_schedule() 30 | -------------------------------------------------------------------------------- /child_workflows/src/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from restack_ai import Restack 5 | from restack_ai.restack import CloudConnectionOptions 6 | 7 | # Load environment variables from a .env file 8 | load_dotenv() 9 | 10 | 11 | engine_id = os.getenv("RESTACK_ENGINE_ID") 12 | address = os.getenv("RESTACK_ENGINE_ADDRESS") 13 | api_key = os.getenv("RESTACK_ENGINE_API_KEY") 14 | api_address = os.getenv("RESTACK_ENGINE_API_ADDRESS") 15 | 16 | connection_options = CloudConnectionOptions( 17 | engine_id=engine_id, 18 | address=address, 19 | api_key=api_key, 20 | api_address=api_address 21 | ) 22 | print(connection_options) 23 | client = Restack(connection_options) 24 | -------------------------------------------------------------------------------- /production_demo/src/functions/function.py: -------------------------------------------------------------------------------- 1 | from restack_ai.function import function, log, RetryableError 2 | 3 | tries = 0 4 | 5 | from 
pydantic import BaseModel 6 | 7 | class ExampleFunctionInput(BaseModel): 8 | name: str 9 | 10 | @function.defn() 11 | async def example_function(input: ExampleFunctionInput) -> str: 12 | try: 13 | global tries 14 | 15 | if tries == 0: 16 | tries += 1 17 | raise RetryableError(message="Simulated failure") 18 | 19 | log.info("example function started", input=input) 20 | return f"Hello, {input.name}!" 21 | except Exception as e: 22 | log.error("example function failed", error=e) 23 | raise e 24 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/schedule_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | import time 4 | 5 | from restack_ai import Restack 6 | from src.agents.agent import AgentStream 7 | 8 | 9 | async def main() -> None: 10 | client = Restack() 11 | 12 | agent_id = f"{int(time.time() * 1000)}-{AgentStream.__name__}" 13 | run_id = await client.schedule_agent( 14 | agent_name=AgentStream.__name__, agent_id=agent_id 15 | ) 16 | 17 | await client.get_agent_result( 18 | agent_id=agent_id, run_id=run_id 19 | ) 20 | 21 | sys.exit(0) 22 | 23 | 24 | def run_schedule_agent() -> None: 25 | asyncio.run(main()) 26 | 27 | 28 | if __name__ == "__main__": 29 | run_schedule_agent() 30 | -------------------------------------------------------------------------------- /agent_todo/src/functions/get_result.py: -------------------------------------------------------------------------------- 1 | import secrets 2 | 3 | from pydantic import BaseModel 4 | from restack_ai.function import NonRetryableError, function 5 | 6 | 7 | class ResultParams(BaseModel): 8 | todo_title: str 9 | todo_id: str 10 | 11 | 12 | class ResultResponse(BaseModel): 13 | status: str 14 | todo_id: str 15 | 16 | 17 | @function.defn() 18 | async def get_result(params: ResultParams) -> ResultResponse: 19 | try: 20 | status = secrets.choice(["completed", 
"failed"]) 21 | return ResultResponse(todo_id=params.todo_id, status=status) 22 | except Exception as e: 23 | error_message = f"get_result function failed: {e}" 24 | raise NonRetryableError(error_message) from e 25 | -------------------------------------------------------------------------------- /agent_video/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_video" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "openai>=1.59.9", 9 | "pipecat-ai[daily,deepgram,openai,silero,cartesia]>=0.0.58", 10 | "python-dotenv>=1.0.1", 11 | "pydantic>=2.10.6", 12 | "watchfiles>=1.0.4", 13 | "restack-ai>=0.0.94",] 14 | 15 | [project.scripts] 16 | dev = "src.services:watch_services" 17 | services = "src.services:run_services" 18 | 19 | [tool.hatch.build.targets.sdist] 20 | include = ["src"] 21 | 22 | [tool.hatch.build.targets.wheel] 23 | include = ["src"] 24 | 25 | [build-system] 26 | requires = ["hatchling"] 27 | build-backend = "hatchling.build" 28 | -------------------------------------------------------------------------------- /agent_voice/livekit/agent/schedule_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | import time 4 | 5 | from restack_ai import Restack 6 | from src.agents.agent import AgentVoice 7 | 8 | 9 | async def main(room_id: str) -> None: 10 | client = Restack() 11 | 12 | agent_id = f"{int(time.time() * 1000)}-{AgentVoice.__name__}" 13 | run_id = await client.schedule_agent( 14 | agent_name=AgentVoice.__name__, 15 | agent_id=agent_id, 16 | agent_input={"room_id": room_id}, 17 | ) 18 | 19 | await client.get_agent_result(agent_id=agent_id, run_id=run_id) 20 | 21 | sys.exit(0) 22 | 23 | 24 | def run_schedule_agent() -> None: 25 | asyncio.run(main(room_id="room-id")) 26 | 27 | 28 | if __name__ == "__main__": 29 
| run_schedule_agent() 30 | -------------------------------------------------------------------------------- /production_demo/schedule_interval.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | from restack_ai import Restack 4 | from restack_ai.restack import ScheduleSpec, ScheduleIntervalSpec 5 | from datetime import timedelta 6 | 7 | async def main(): 8 | 9 | client = Restack() 10 | 11 | workflow_id = f"{int(time.time() * 1000)}-ChildWorkflow" 12 | await client.schedule_workflow( 13 | workflow_name="ChildWorkflow", 14 | workflow_id=workflow_id, 15 | schedule=ScheduleSpec( 16 | intervals=[ScheduleIntervalSpec( 17 | every=timedelta(seconds=1) 18 | )] 19 | ) 20 | ) 21 | 22 | exit(0) 23 | 24 | def run_schedule_scale(): 25 | asyncio.run(main()) 26 | 27 | if __name__ == "__main__": 28 | run_schedule_scale() -------------------------------------------------------------------------------- /agent_rag/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_rag" 3 | version = "0.0.1" 4 | description = "A an agent with RAG" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "openai>=1.61.0", 10 | "pydantic>=2.10.6", 11 | "watchfiles>=1.0.4", 12 | "requests==2.32.3", 13 | "python-dotenv==1.0.1", 14 | "restack-ai>=0.0.94",] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | 20 | [tool.hatch.build.targets.sdist] 21 | include = ["src"] 22 | 23 | [tool.hatch.build.targets.wheel] 24 | include = ["src"] 25 | 26 | [build-system] 27 | requires = ["hatchling"] 28 | build-backend = "hatchling.build" 29 | -------------------------------------------------------------------------------- /agent_todo/src/functions/todo_create.py: 
-------------------------------------------------------------------------------- 1 | import secrets 2 | 3 | from pydantic import BaseModel 4 | from restack_ai.function import NonRetryableError, function, log 5 | 6 | 7 | class TodoCreateParams(BaseModel): 8 | title: str 9 | 10 | 11 | @function.defn() 12 | async def todo_create(params: TodoCreateParams) -> str: 13 | try: 14 | log.info("todo_create function start", title=params.title) 15 | 16 | todo_id = f"todo-{secrets.randbelow(9000) + 1000}" 17 | except Exception as e: 18 | error_message = f"todo_create function failed: {e}" 19 | raise NonRetryableError(error_message) from e 20 | else: 21 | log.info("todo_create function completed", todo_id=todo_id) 22 | return f"Created the todo '{params.title}' with id: {todo_id}" 23 | -------------------------------------------------------------------------------- /agent_tool/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_tool" 3 | version = "0.0.1" 4 | description = "A tool for an AI agent" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "openai>=1.61.0", 10 | "pydantic>=2.10.6", 11 | "watchfiles>=1.0.4", 12 | "requests==2.32.3", 13 | "python-dotenv==1.0.1", 14 | "restack-ai>=0.0.94",] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | 20 | [tool.hatch.build.targets.sdist] 21 | include = ["src"] 22 | 23 | [tool.hatch.build.targets.wheel] 24 | include = ["src"] 25 | 26 | [build-system] 27 | requires = ["hatchling"] 28 | build-backend = "hatchling.build" 29 | -------------------------------------------------------------------------------- /agent_voice/livekit/livekit_pipeline/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = 
"agent_voice_livekit_pipeline" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "livekit-agents>=0.12.15", 9 | "livekit-plugins-deepgram>=0.6.19", 10 | "livekit-plugins-elevenlabs>=0.7.13", 11 | "livekit-plugins-openai>=0.11.0", 12 | "livekit-plugins-silero>=0.7.4", 13 | "livekit-plugins-turn-detector>=0.4.2", 14 | "python-dotenv==1.0.1", 15 | "restack-ai>=0.0.77", 16 | ] 17 | 18 | [tool.hatch.build.targets.sdist] 19 | include = ["src"] 20 | 21 | [tool.hatch.build.targets.wheel] 22 | include = ["src"] 23 | 24 | [build-system] 25 | requires = ["hatchling"] 26 | build-backend = "hatchling.build" 27 | -------------------------------------------------------------------------------- /agent_stream/event_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | 4 | from restack_ai import Restack 5 | 6 | 7 | async def main(agent_id: str, run_id: str) -> None: 8 | client = Restack() 9 | 10 | await client.send_agent_event( 11 | agent_id=agent_id, 12 | run_id=run_id, 13 | event_name="messages", 14 | event_input={"messages": [{"role": "user", "content": "Tell me another joke"}]}, 15 | ) 16 | 17 | await client.send_agent_event( 18 | agent_id=agent_id, 19 | run_id=run_id, 20 | event_name="end", 21 | ) 22 | 23 | sys.exit(0) 24 | 25 | 26 | def run_event_workflow() -> None: 27 | asyncio.run(main(agent_id="your-agent-id", run_id="your-run-id")) 28 | 29 | 30 | if __name__ == "__main__": 31 | run_event_workflow() 32 | -------------------------------------------------------------------------------- /agent_todo/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "quickstart" 3 | version = "0.0.1" 4 | description = "A quickstart for Restack" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 
| readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "restack-ai>=0.0.94",] 14 | 15 | [project.scripts] 16 | dev = "src.services:watch_services" 17 | services = "src.services:run_services" 18 | schedule = "schedule:run_schedule" 19 | 20 | [tool.hatch.build.targets.sdist] 21 | include = ["src"] 22 | 23 | [tool.hatch.build.targets.wheel] 24 | include = ["src"] 25 | 26 | [build-system] 27 | requires = ["hatchling"] 28 | build-backend = "hatchling.build" 29 | -------------------------------------------------------------------------------- /agent_voice/livekit/agent/event_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | 4 | from restack_ai import Restack 5 | 6 | 7 | async def main(agent_id: str, run_id: str) -> None: 8 | client = Restack() 9 | 10 | await client.send_agent_event( 11 | agent_id=agent_id, 12 | run_id=run_id, 13 | event_name="messages", 14 | event_input={"messages": [{"role": "user", "content": "Tell me another joke"}]}, 15 | ) 16 | 17 | await client.send_agent_event( 18 | agent_id=agent_id, 19 | run_id=run_id, 20 | event_name="end", 21 | ) 22 | 23 | sys.exit(0) 24 | 25 | 26 | def run_event_workflow() -> None: 27 | asyncio.run(main(agent_id="agent-id", run_id="run-id")) 28 | 29 | 30 | if __name__ == "__main__": 31 | run_event_workflow() 32 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_telephony_livekit_pipeline" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "livekit-agents>=0.12.15", 9 | "livekit-plugins-deepgram>=0.6.19", 10 | "livekit-plugins-elevenlabs>=0.7.13", 11 | 
"livekit-plugins-openai>=0.11.0", 12 | "livekit-plugins-silero>=0.7.4", 13 | "livekit-plugins-turn-detector>=0.4.2", 14 | "python-dotenv==1.0.1", 15 | "restack-ai>=0.0.94", 16 | ] 17 | 18 | [tool.hatch.build.targets.sdist] 19 | include = ["src"] 20 | 21 | [tool.hatch.build.targets.wheel] 22 | include = ["src"] 23 | 24 | [build-system] 25 | requires = ["hatchling"] 26 | build-backend = "hatchling.build" 27 | -------------------------------------------------------------------------------- /encryption/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "encryption" 3 | version = "0.0.1" 4 | description = "A simple example to get started with the restack-ai SDK" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "aiohttp>=3.11.10", 10 | "restack-ai==0.0.94", 11 | ] 12 | 13 | [project.scripts] 14 | services = "src.services:run_services" 15 | schedule = "schedule_workflow:run_schedule_workflow" 16 | codec = "src.codec_server:run_codec_server" 17 | 18 | [dependency-groups] 19 | dev = ["pytest==6.2"] 20 | 21 | [tool.hatch.build.targets.sdist] 22 | include = ["src"] 23 | 24 | [tool.hatch.build.targets.wheel] 25 | include = ["src"] 26 | 27 | [build-system] 28 | requires = ["hatchling"] 29 | build-backend = "hatchling.build" 30 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/event_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | 4 | from restack_ai import Restack 5 | 6 | 7 | async def main(agent_id: str, run_id: str) -> None: 8 | client = Restack() 9 | 10 | await client.send_agent_event( 11 | agent_id=agent_id, 12 | run_id=run_id, 13 | event_name="call", 14 | event_input={ 15 | "messages": [ 16 | { 17 | "role": "user", 18 | "content": "What is 
Restack framework?", 19 | } 20 | ] 21 | }, 22 | ) 23 | 24 | sys.exit(0) 25 | 26 | 27 | def run_event_workflow() -> None: 28 | asyncio.run( 29 | main(agent_id="agent-id", run_id="run-id") 30 | ) 31 | 32 | 33 | if __name__ == "__main__": 34 | run_event_workflow() 35 | -------------------------------------------------------------------------------- /agent_apis/schedule_workflow.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | import time 4 | from dataclasses import dataclass 5 | 6 | from restack_ai import Restack 7 | 8 | 9 | @dataclass 10 | class InputParams: 11 | name: str 12 | 13 | 14 | async def main() -> None: 15 | client = Restack() 16 | 17 | workflow_id = f"{int(time.time() * 1000)}-MultistepWorkflow" 18 | run_id = await client.schedule_workflow( 19 | workflow_name="MultistepWorkflow", 20 | workflow_id=workflow_id, 21 | workflow_input=InputParams(name="Restack AI SDK User"), 22 | ) 23 | 24 | await client.get_workflow_result(workflow_id=workflow_id, run_id=run_id) 25 | 26 | sys.exit(0) 27 | 28 | 29 | def run_schedule_workflow() -> None: 30 | asyncio.run(main()) 31 | 32 | 33 | if __name__ == "__main__": 34 | run_schedule_workflow() 35 | -------------------------------------------------------------------------------- /agent_chat/event_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | 4 | from restack_ai import Restack 5 | 6 | 7 | async def main(agent_id: str, run_id: str) -> None: 8 | client = Restack() 9 | 10 | await client.send_agent_event( 11 | agent_id=agent_id, 12 | run_id=run_id, 13 | event_name="message", 14 | event_input={"content": "Tell me another joke"}, 15 | ) 16 | 17 | await client.send_agent_event( 18 | agent_id=agent_id, 19 | run_id=run_id, 20 | event_name="end", 21 | ) 22 | 23 | sys.exit(0) 24 | 25 | 26 | def run_event_agent() -> None: 27 | asyncio.run( 28 | main( 29 | 
agent_id="1739788461173-AgentChat", 30 | run_id="c3937cc9-8d88-4e37-85e1-59e78cf1bf60", 31 | ) 32 | ) 33 | 34 | 35 | if __name__ == "__main__": 36 | run_event_agent() 37 | -------------------------------------------------------------------------------- /agent_chat/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_chat" 3 | version = "0.0.1" 4 | description = "An agent chat for Restack" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "restack-ai>=0.0.100", 14 | ] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | schedule = "schedule_agent:run_schedule_agent" 20 | event = "event_agent:run_event_agent" 21 | 22 | [tool.hatch.build.targets.sdist] 23 | include = ["src"] 24 | 25 | [tool.hatch.build.targets.wheel] 26 | include = ["src"] 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | -------------------------------------------------------------------------------- /agent_apis/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "openai_greet" 3 | version = "0.0.1" 4 | description = "A simple example to greet a person using OpenAI" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "aiohttp>=3.11.12", 14 | "restack-ai>=0.0.94",] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | schedule = "schedule_workflow:run_schedule_workflow" 
20 | 21 | [tool.hatch.build.targets.sdist] 22 | include = ["src"] 23 | 24 | [tool.hatch.build.targets.wheel] 25 | include = ["src"] 26 | 27 | [build-system] 28 | requires = ["hatchling"] 29 | build-backend = "hatchling.build" 30 | -------------------------------------------------------------------------------- /agent_stream/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_stream" 3 | version = "0.0.1" 4 | description = "An agent with streaming for Restack" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "livekit-api>=0.8.2", 14 | "restack-ai>=0.0.94", 15 | ] 16 | 17 | [project.scripts] 18 | dev = "src.services:watch_services" 19 | services = "src.services:run_services" 20 | schedule = "schedule_agent:run_schedule_agent" 21 | event = "event_agent:run_event_agent" 22 | 23 | [tool.hatch.build.targets.sdist] 24 | include = ["src"] 25 | 26 | [tool.hatch.build.targets.wheel] 27 | include = ["src"] 28 | 29 | 30 | [build-system] 31 | requires = ["hatchling"] 32 | build-backend = "hatchling.build" 33 | -------------------------------------------------------------------------------- /pdf_ocr/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pdf_ocr" 3 | version = "0.0.1" 4 | description = "A simple example to OCR a pdf" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.12,<3.13" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "python-multipart==0.0.19", 14 | "numpy==2.2.0", 15 | "pillow==11.0.0", 16 | "python-doctr[torch]==0.10.0", 17 | "requests==2.32.3", 18 | 
"restack-ai>=0.0.94",] 19 | 20 | [project.scripts] 21 | dev = "src.services:watch_services" 22 | services = "src.services:run_services" 23 | 24 | [tool.hatch.build.targets.sdist] 25 | include = ["src"] 26 | 27 | [tool.hatch.build.targets.wheel] 28 | include = ["src"] 29 | 30 | [build-system] 31 | requires = ["hatchling"] 32 | build-backend = "hatchling.build" 33 | 34 | -------------------------------------------------------------------------------- /agent_humanloop/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "human_loop" 3 | version = "0.0.1" 4 | description = "A simple example to get started with human-in-the-loop using the restack-ai python library" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "restack-ai==0.0.94", 11 | "watchfiles>=1.0.4", 12 | "python-dotenv==1.0.1", 13 | ] 14 | 15 | [project.scripts] 16 | dev = "src.services:watch_services" 17 | services = "src.services:run_services" 18 | schedule = "schedule_workflow:run_schedule_workflow" 19 | 20 | [dependency-groups] 21 | dev = ["pytest==6.2"] 22 | 23 | [tool.hatch.build.targets.sdist] 24 | include = ["src"] 25 | 26 | [tool.hatch.build.targets.wheel] 27 | include = ["src"] 28 | 29 | [build-system] 30 | requires = ["hatchling"] 31 | build-backend = "hatchling.build" 32 | -------------------------------------------------------------------------------- /agent_voice/livekit/agent/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_voice_livekit_agent" 3 | version = "0.0.1" 4 | description = "An agent with streaming for Restack" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | 
"watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "livekit-api>=0.8.2", 14 | "restack-ai>=0.0.94",] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | schedule = "schedule_agent:run_schedule_agent" 20 | event = "event_agent:run_event_agent" 21 | 22 | [tool.hatch.build.targets.sdist] 23 | include = ["src"] 24 | 25 | [tool.hatch.build.targets.wheel] 26 | include = ["src"] 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | -------------------------------------------------------------------------------- /agent_humanloop/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import webbrowser 4 | 5 | from watchfiles import run_process 6 | 7 | from src.agents.agent import AgentHumanLoop 8 | from src.client import client 9 | from src.functions.function import feedback, goodbye 10 | 11 | 12 | async def main(): 13 | 14 | await client.start_service( 15 | agents= [AgentHumanLoop], 16 | functions= [feedback, goodbye] 17 | ) 18 | 19 | def run_services(): 20 | try: 21 | asyncio.run(main()) 22 | except KeyboardInterrupt: 23 | print("Service interrupted by user. 
Exiting gracefully.") 24 | 25 | def watch_services(): 26 | watch_path = os.getcwd() 27 | print(f"Watching {watch_path} and its subdirectories for changes...") 28 | webbrowser.open("http://localhost:5233") 29 | run_process(watch_path, recursive=True, target=run_services) 30 | 31 | if __name__ == "__main__": 32 | run_services() 33 | -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_telephony_agent_vapi" 3 | version = "0.0.1" 4 | description = "An agent with streaming for Restack" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "restack-ai>=0.0.77", 14 | "vapi-server-sdk>=0.4.0", 15 | ] 16 | 17 | [project.scripts] 18 | dev = "src.services:watch_services" 19 | services = "src.services:run_services" 20 | schedule = "schedule_agent:run_schedule_agent" 21 | event = "event_agent:run_event_agent" 22 | 23 | [tool.hatch.build.targets.sdist] 24 | include = ["src"] 25 | 26 | [tool.hatch.build.targets.wheel] 27 | include = ["src"] 28 | 29 | [build-system] 30 | requires = ["hatchling"] 31 | build-backend = "hatchling.build" 32 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent_telephony_agent_twilio" 3 | version = "0.0.1" 4 | description = "An agent with streaming for Restack" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 
11 | "python-dotenv==1.0.1", 12 | "openai>=1.61.0", 13 | "livekit-api>=0.8.2", 14 | "restack-ai>=0.0.94",] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | schedule = "schedule_agent:run_schedule_agent" 20 | event = "event_agent:run_event_agent" 21 | 22 | [tool.hatch.build.targets.sdist] 23 | include = ["src"] 24 | 25 | [tool.hatch.build.targets.wheel] 26 | include = ["src"] 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | -------------------------------------------------------------------------------- /audio_transcript/src/functions/transcribe_audio.py: -------------------------------------------------------------------------------- 1 | from restack_ai.function import function, FunctionFailure, log 2 | from dataclasses import dataclass 3 | from openai import OpenAI 4 | import os 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | @dataclass 10 | class TranscribeAudioInput: 11 | file_path: str 12 | 13 | @function.defn() 14 | async def transcribe_audio(input: TranscribeAudioInput): 15 | if (os.environ.get("OPENAI_API_KEY") is None): 16 | raise FunctionFailure("OPENAI_API_KEY is not set", non_retryable=True) 17 | 18 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY")) 19 | 20 | try: 21 | response = client.audio.transcriptions.create( 22 | model="whisper-1", 23 | file=open(input.file_path, "rb") 24 | ) 25 | except Exception as error: 26 | log.error("An error occurred during transcription", error) 27 | 28 | return response.text 29 | 30 | -------------------------------------------------------------------------------- /agent_chat/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import webbrowser 4 | from pathlib import Path 5 | 6 | from watchfiles import run_process 7 | 8 | from src.agents.agent import AgentChat 9 | from src.client import 
client 10 | from src.functions.llm_chat import llm_chat 11 | 12 | 13 | async def main() -> None: 14 | await client.start_service(agents=[AgentChat], functions=[llm_chat]) 15 | 16 | 17 | def run_services() -> None: 18 | try: 19 | asyncio.run(main()) 20 | except KeyboardInterrupt: 21 | logging.info("Service interrupted by user. Exiting gracefully.") 22 | 23 | 24 | def watch_services() -> None: 25 | watch_path = Path.cwd() 26 | logging.info("Watching %s and its subdirectories for changes...", watch_path) 27 | webbrowser.open("http://localhost:5233") 28 | run_process(watch_path, recursive=True, target=run_services) 29 | 30 | 31 | if __name__ == "__main__": 32 | run_services() 33 | -------------------------------------------------------------------------------- /agent_stream/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import webbrowser 4 | from pathlib import Path 5 | 6 | from watchfiles import run_process 7 | 8 | from src.agents.agent import AgentStream 9 | from src.client import client 10 | from src.functions.llm_chat import llm_chat 11 | 12 | 13 | async def main() -> None: 14 | await client.start_service(agents=[AgentStream], functions=[llm_chat]) 15 | 16 | 17 | def run_services() -> None: 18 | try: 19 | asyncio.run(main()) 20 | except KeyboardInterrupt: 21 | logging.info("Service interrupted by user. 
Exiting gracefully.") 22 | 23 | 24 | def watch_services() -> None: 25 | watch_path = Path.cwd() 26 | logging.info("Watching %s and its subdirectories for changes...", watch_path) 27 | webbrowser.open("http://localhost:5233") 28 | run_process(watch_path, recursive=True, target=run_services) 29 | 30 | 31 | if __name__ == "__main__": 32 | run_services() 33 | -------------------------------------------------------------------------------- /agent_telephony/vapi/agent_vapi/src/functions/vapi_call.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dataclasses import dataclass 3 | 4 | from restack_ai.function import NonRetryableError, function, log 5 | from vapi import AsyncVapi, Call 6 | 7 | 8 | @dataclass 9 | class VapiCallInput: 10 | assistant_id: str 11 | phone_number: str 12 | 13 | 14 | @function.defn() 15 | async def vapi_call(function_input: VapiCallInput) -> Call: 16 | try: 17 | client = AsyncVapi( 18 | token=os.getenv("VAPI_TOKEN"), 19 | ) 20 | 21 | call = await client.calls.create( 22 | assistant_id=function_input.assistant_id, 23 | phone_number_id=function_input.phone_number, 24 | ) 25 | 26 | log.info("vapi_call: ", call=call) 27 | 28 | except Exception as e: 29 | error_message = f"vapi_call function failed: {e}" 30 | raise NonRetryableError(error_message) from e 31 | else: 32 | return call 33 | -------------------------------------------------------------------------------- /agent_video/src/functions/context_docs.py: -------------------------------------------------------------------------------- 1 | import aiohttp 2 | from restack_ai.function import NonRetryableError, function, log 3 | 4 | 5 | async def fetch_content_from_url(url: str) -> str: 6 | async with aiohttp.ClientSession() as session: 7 | async with session.get(url) as response: 8 | if response.status == 200: 9 | return await response.text() 10 | error_message = f"Failed to fetch content: {response.status}" 11 | raise 
from typing import Any

from pydantic import BaseModel
from restack_ai.function import NonRetryableError, function

from src.client import client


class SendAgentEventInput(BaseModel):
    # Name of the event to deliver to the agent.
    event_name: str
    # Target agent id; run_id narrows it to one specific run when given.
    agent_id: str
    run_id: str | None = None
    # Optional JSON-serializable payload forwarded with the event.
    event_input: dict[str, Any] | None = None


@function.defn()
async def send_agent_event(
    function_input: SendAgentEventInput,
) -> str:
    """Forward an event to a running agent via the Restack client.

    Raises:
        NonRetryableError: If the underlying client call fails.
    """
    try:
        result = await client.send_agent_event(
            event_name=function_input.event_name,
            agent_id=function_input.agent_id,
            run_id=function_input.run_id,
            event_input=function_input.event_input,
        )
    except Exception as exc:
        raise NonRetryableError(
            f"send_agent_event failed: {exc}"
        ) from exc
    return result
dependencies = [ 9 | "openai>=1.61.0", 10 | "pydantic>=2.10.6", 11 | "restack-ai==0.0.94", 12 | "watchfiles>=1.0.4", 13 | "python-dotenv==1.0.1", 14 | ] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | schedule = "schedule_workflow:run_schedule_workflow" 20 | schedule_failure = "schedule_workflow_failure:run_schedule_workflow_failure" 21 | 22 | [tool.hatch.build.targets.sdist] 23 | include = ["src"] 24 | 25 | [tool.hatch.build.targets.wheel] 26 | include = ["src"] 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | -------------------------------------------------------------------------------- /child_workflows/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import webbrowser 4 | 5 | from watchfiles import run_process 6 | 7 | from src.client import client 8 | from src.functions.function import welcome 9 | from src.workflows.child import ChildWorkflow 10 | from src.workflows.parent import ParentWorkflow 11 | 12 | 13 | async def main(): 14 | await client.start_service( 15 | workflows= [ParentWorkflow, ChildWorkflow], 16 | functions= [welcome] 17 | ) 18 | 19 | def run_services(): 20 | try: 21 | asyncio.run(main()) 22 | except KeyboardInterrupt: 23 | print("Service interrupted by user. 
Exiting gracefully.") 24 | 25 | 26 | def watch_services(): 27 | watch_path = os.getcwd() 28 | print(f"Watching {watch_path} and its subdirectories for changes...") 29 | webbrowser.open("http://localhost:5233") 30 | run_process(watch_path, recursive=True, target=run_services) 31 | 32 | if __name__ == "__main__": 33 | run_services() 34 | -------------------------------------------------------------------------------- /agent_humanloop/schedule_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | 4 | from restack_ai import Restack 5 | 6 | 7 | async def main(): 8 | 9 | client = Restack() 10 | 11 | agent_id = f"{int(time.time() * 1000)}-AgentHumanLoop" 12 | runId = await client.schedule_agent( 13 | agent_name="AgentHumanLoop", 14 | agent_id=agent_id 15 | ) 16 | 17 | await client.send_agent_event( 18 | event_name="event_feedback", 19 | event_input={ 20 | "feedback": "This is a human feedback" 21 | }, 22 | agent_id=agent_id, 23 | run_id=runId, 24 | ) 25 | 26 | end = await client.send_agent_event( 27 | event_name="event_end", 28 | event_input={ 29 | "end": True 30 | }, 31 | agent_id=agent_id, 32 | run_id=runId, 33 | ) 34 | 35 | exit(0) 36 | 37 | def run_schedule_workflow(): 38 | asyncio.run(main()) 39 | 40 | if __name__ == "__main__": 41 | run_schedule_workflow() 42 | -------------------------------------------------------------------------------- /child_workflows/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "child_workflows" 3 | version = "0.0.1" 4 | description = "A simple example to get started with the restack-ai SDK" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "watchfiles>=1.0.4", 11 | "python-dotenv==1.0.1", 12 | "restack-ai>=0.0.89", 13 | "temporalio[opentelemetry]>=1.11.1", 14 
| "opentelemetry-exporter-otlp-proto-grpc==1.18.0", 15 | ] 16 | 17 | [project.scripts] 18 | dev = "src.services:watch_services" 19 | services = "src.services:run_services" 20 | schedule = "schedule_workflow:run_schedule_workflow" 21 | 22 | [dependency-groups] 23 | dev = ["pytest==6.2"] 24 | 25 | [tool.hatch.build.targets.sdist] 26 | include = ["src"] 27 | 28 | [tool.hatch.build.targets.wheel] 29 | include = ["src"] 30 | 31 | [build-system] 32 | requires = ["hatchling"] 33 | build-backend = "hatchling.build" 34 | -------------------------------------------------------------------------------- /agent_rag/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import webbrowser 4 | from pathlib import Path 5 | 6 | from watchfiles import run_process 7 | 8 | from src.agents.chat_rag import AgentRag 9 | from src.client import client 10 | from src.functions.llm_chat import llm_chat 11 | from src.functions.lookup_sales import lookup_sales 12 | 13 | 14 | async def main() -> None: 15 | await client.start_service(agents=[AgentRag], functions=[lookup_sales, llm_chat]) 16 | 17 | 18 | def run_services() -> None: 19 | try: 20 | asyncio.run(main()) 21 | except KeyboardInterrupt: 22 | logging.info("Service interrupted by user. 
Exiting gracefully.") 23 | 24 | 25 | def watch_services() -> None: 26 | watch_path = Path.cwd() 27 | logging.info("Watching %s and its subdirectories for changes...", watch_path) 28 | webbrowser.open("http://localhost:5233") 29 | run_process(watch_path, recursive=True, target=run_services) 30 | 31 | 32 | if __name__ == "__main__": 33 | run_services() 34 | -------------------------------------------------------------------------------- /production_demo/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "production_demo" 3 | version = "0.0.1" 4 | description = "A simple example to get started with the restack-ai SDK" 5 | authors = [{ name = "Restack Team", email = "service@restack.io" }] 6 | requires-python = ">=3.10,<3.14" 7 | readme = "README.md" 8 | dependencies = [ 9 | "pydantic>=2.10.6", 10 | "restack-ai==0.0.94", 11 | "watchfiles>=1.0.4", 12 | "python-dotenv==1.0.1", 13 | "openai>=1.61.0", 14 | ] 15 | 16 | [project.scripts] 17 | dev = "src.services:watch_services" 18 | services = "src.services:run_services" 19 | workflow = "schedule_workflow:run_schedule_workflow" 20 | interval = "schedule_interval:run_schedule_interval" 21 | scale = "schedule_scale:run_schedule_scale" 22 | 23 | [dependency-groups] 24 | dev = ["pytest==6.2"] 25 | 26 | [tool.hatch.build.targets.sdist] 27 | include = ["src"] 28 | 29 | [tool.hatch.build.targets.wheel] 30 | include = ["src"] 31 | 32 | [build-system] 33 | requires = ["hatchling"] 34 | build-backend = "hatchling.build" 35 | -------------------------------------------------------------------------------- /agent_apis/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import webbrowser 4 | from pathlib import Path 5 | 6 | from watchfiles import run_process 7 | 8 | from src.client import client 9 | from src.functions.llm import llm 10 | from src.functions.weather import weather 11 
import asyncio
import time

from dotenv import load_dotenv
from restack_ai import Restack

from src.workflows.transcribe_translate import WorkflowInputParams

load_dotenv()


async def main(input: WorkflowInputParams) -> None:
    """Schedule a TranscribeTranslateWorkflow run and wait for its result."""
    client = Restack()

    # Unique, time-sortable workflow id: millisecond timestamp + name.
    workflow_id = f"{int(time.time() * 1000)}-TranscribeTranslateWorkflow"

    run_id = await client.schedule_workflow(
        workflow_name="TranscribeTranslateWorkflow",
        workflow_id=workflow_id,
        # Pass the already-typed params straight through; re-wrapping them in
        # a fresh WorkflowInputParams (as before) was redundant.
        input=input,
    )

    # Block until the workflow completes; the result itself is not needed
    # here. Previously the process was killed with exit(0) inside the
    # coroutine, which prevented a clean event-loop shutdown.
    await client.get_workflow_result(
        workflow_id=workflow_id,
        run_id=run_id,
    )


def run_schedule_workflow() -> None:
    """Console-script entry point (see pyproject `schedule` script)."""
    asyncio.run(
        main(WorkflowInputParams(file_path="./test.mp3", target_language="Spanish"))
    )


if __name__ == "__main__":
    run_schedule_workflow()
import aiohttp
from restack_ai.function import NonRetryableError, function, log

HTTP_OK = 200


def raise_exception(message: str) -> None:
    """Log *message* and raise it as a plain Exception.

    Kept as a helper so the happy path in `weather` stays linear; the caller
    converts the exception into a NonRetryableError.
    """
    log.error(message)
    raise Exception(message)


@function.defn()
async def weather() -> str:
    """Fetch current and hourly weather for 52.52N/13.41E from Open-Meteo.

    Returns:
        The JSON payload rendered with str().

    Raises:
        NonRetryableError: On any network failure or non-200 response.
    """
    # BUG FIX: the query string previously contained a mangled HTML entity
    # ("¤t=" where "&current=" was intended), so the `current` parameter
    # never reached the API.
    url = (
        "https://api.open-meteo.com/v1/forecast"
        "?latitude=52.52&longitude=13.41"
        "&current=temperature_2m,wind_speed_10m"
        "&hourly=temperature_2m,relative_humidity_2m,wind_speed_10m"
    )
    try:
        async with aiohttp.ClientSession() as session, session.get(url) as response:
            log.info("response", response=response)
            if response.status == HTTP_OK:
                data = await response.json()
                log.info("weather data", data=data)
                return str(data)
            error_message = f"Error: {response.status}"
            raise_exception(error_message)
    except Exception as e:
        error_message = f"Error: {e}"
        raise NonRetryableError(error_message) from e
import aiohttp
from restack_ai.function import NonRetryableError, function, log


async def fetch_content_from_url(url: str) -> str:
    """Download *url* and return the response body as text.

    Raises:
        NonRetryableError: On any non-200 status.
    """
    async with (
        aiohttp.ClientSession() as http_session,
        http_session.get(url) as resp,
    ):
        if resp.status != 200:
            raise NonRetryableError(
                f"Failed to fetch content: {resp.status}"
            )
        return await resp.text()


@function.defn()
async def context_docs() -> str:
    """Fetch the full Restack docs dump used as LLM context.

    Raises:
        NonRetryableError: If the download fails for any reason.
    """
    try:
        content = await fetch_content_from_url(
            "https://docs.restack.io/llms-full.txt"
        )
        # Log the size rather than the body to keep the log readable.
        log.info(
            "Fetched content from URL", content=len(content)
        )
        return content
    except Exception as exc:
        raise NonRetryableError(
            f"context_docs function failed: {exc}"
        ) from exc
AgentChatToolFunctions 9 | from src.client import client 10 | from src.functions.llm_chat import llm_chat 11 | from src.functions.lookup_sales import lookup_sales 12 | 13 | 14 | async def main() -> None: 15 | await client.start_service( 16 | agents=[AgentChatToolFunctions], 17 | functions=[lookup_sales, llm_chat], 18 | ) 19 | 20 | 21 | def run_services() -> None: 22 | try: 23 | asyncio.run(main()) 24 | except KeyboardInterrupt: 25 | logging.info("Service interrupted by user. Exiting gracefully.") 26 | 27 | 28 | def watch_services() -> None: 29 | watch_path = Path.cwd() 30 | logging.info("Watching %s and its subdirectories for changes...", watch_path) 31 | webbrowser.open("http://localhost:5233") 32 | run_process(watch_path, recursive=True, target=run_services) 33 | 34 | 35 | if __name__ == "__main__": 36 | run_services() 37 | -------------------------------------------------------------------------------- /pdf_ocr/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import webbrowser 4 | 5 | from watchfiles import run_process 6 | 7 | from src.client import client 8 | from src.functions.openai_chat import openai_chat 9 | from src.functions.torch_ocr import torch_ocr 10 | from src.workflows.files import FilesWorkflow 11 | from src.workflows.pdf import PdfWorkflow 12 | 13 | 14 | async def main(): 15 | 16 | await asyncio.gather( 17 | await client.start_service( 18 | workflows= [PdfWorkflow, FilesWorkflow], 19 | functions= [torch_ocr, openai_chat] 20 | ) 21 | ) 22 | 23 | def run_services(): 24 | try: 25 | asyncio.run(main()) 26 | except KeyboardInterrupt: 27 | print("Service interrupted by user. 
Exiting gracefully.") 28 | 29 | def watch_services(): 30 | watch_path = os.getcwd() 31 | print(f"Watching {watch_path} and its subdirectories for changes...") 32 | webbrowser.open("http://localhost:5233") 33 | run_process(watch_path, recursive=True, target=run_services) 34 | 35 | if __name__ == "__main__": 36 | run_services() 37 | -------------------------------------------------------------------------------- /production_demo/src/functions/generate.py: -------------------------------------------------------------------------------- 1 | from restack_ai.function import function, NonRetryableError, log 2 | from openai import OpenAI 3 | 4 | from pydantic import BaseModel 5 | 6 | class GenerateInput(BaseModel): 7 | prompt: str 8 | 9 | @function.defn() 10 | async def llm_generate(input: GenerateInput) -> str: 11 | 12 | try: 13 | client = OpenAI(base_url="http://192.168.178.57:1234/v1/",api_key="llmstudio") 14 | except Exception as e: 15 | log.error(f"Failed to create LLM client {e}") 16 | raise NonRetryableError(message=f"Failed to create OpenAI client {e}") from e 17 | 18 | try: 19 | response = client.chat.completions.create( 20 | model="llama-3.2-3b-instruct", 21 | messages=[ 22 | { 23 | "role": "user", 24 | "content": input.prompt 25 | } 26 | ], 27 | temperature=0.5, 28 | ) 29 | 30 | except Exception as e: 31 | log.error(f"Failed to generate {e}") 32 | 33 | return response.choices[0].message.content 34 | 35 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit-trunk-setup/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Twilio Trunking 3 | 4 | https://docs.livekit.io/sip/quickstarts/configuring-twilio-trunk/ 5 | 6 | 7 | ## Add envs 8 | 9 | ``` 10 | cp .env.example .env 11 | ``` 12 | 13 | ## Start python shell 14 | 15 | ```bash 16 | uv venv && source .venv/bin/activate 17 | ``` 18 | 19 | 20 | ## Install dependencies 21 | 22 | ```bash 23 | uv sync 
24 | ``` 25 | 26 | ## Run setup trunk script 27 | 28 | ```bash 29 | uv run python twilio_trunk.py 30 | ``` 31 | 32 | # Outbound 33 | 34 | ## Step 1: Create a credential list 35 | 36 | https://console.twilio.com/us1/develop/voice/manage/cls?frameUrl=/console/voice/sip/cls 37 | 38 | 39 | ## Step 2: Associate the credential list with your SIP trunk 40 | 41 | https://console.twilio.com/us1/develop/sip-trunking/manage/trunks?frameUrl=%2Fconsole%2Fsip-trunking%2Ftrunks%3Fx-target-region%3Dus1 42 | 43 | Select Elastic SIP Trunking » Manage » Trunks and select the outbound trunk created in the previous steps. 44 | Select Termination » Authentication » Credential Lists and select the credential list you just created. 45 | Select Save. -------------------------------------------------------------------------------- /agent_voice/livekit/agent/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import webbrowser 4 | from pathlib import Path 5 | 6 | from watchfiles import run_process 7 | 8 | from src.agents.agent import AgentVoice 9 | from src.client import client 10 | from src.functions.livekit_dispatch import livekit_dispatch 11 | from src.functions.llm_chat import llm_chat 12 | 13 | 14 | async def main() -> None: 15 | await client.start_service( 16 | agents=[AgentVoice], 17 | functions=[ 18 | llm_chat, 19 | livekit_dispatch, 20 | ], 21 | ) 22 | 23 | 24 | def run_services() -> None: 25 | try: 26 | asyncio.run(main()) 27 | except KeyboardInterrupt: 28 | logging.info("Service interrupted by user. 
Exiting gracefully.") 29 | 30 | 31 | def watch_services() -> None: 32 | watch_path = Path.cwd() 33 | logging.info("Watching %s and its subdirectories for changes...", watch_path) 34 | webbrowser.open("http://localhost:5233") 35 | run_process(watch_path, recursive=True, target=run_services) 36 | 37 | 38 | if __name__ == "__main__": 39 | run_services() 40 | -------------------------------------------------------------------------------- /child_workflows/src/workflows/child.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | 3 | from pydantic import BaseModel 4 | from restack_ai.workflow import NonRetryableError, import_functions, log, workflow 5 | 6 | with import_functions(): 7 | from src.functions.function import welcome 8 | 9 | 10 | class ChildInput(BaseModel): 11 | name: str = "world" 12 | 13 | class ChildOutput(BaseModel): 14 | result: str 15 | 16 | @workflow.defn() 17 | class ChildWorkflow: 18 | @workflow.run 19 | async def run(self, workflow_input: ChildInput) -> ChildOutput: 20 | log.info("ChildWorkflow started") 21 | try: 22 | result = await workflow.step( 23 | function=welcome, 24 | function_input=workflow_input.name, 25 | start_to_close_timeout=timedelta(seconds=120) 26 | ) 27 | except Exception as e: 28 | error_message = f"Error during welcome: {e}" 29 | raise NonRetryableError(error_message) from e 30 | else: 31 | log.info("ChildWorkflow completed", result=result) 32 | return ChildOutput(result=result) 33 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/src/functions/livekit_token.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from livekit import api 4 | from pydantic import BaseModel 5 | from restack_ai.function import NonRetryableError, function, log 6 | 7 | 8 | class LivekitTokenInput(BaseModel): 9 | room_id: str 10 | 11 | 12 | @function.defn() 
13 | async def livekit_token(function_input: LivekitTokenInput) -> str: 14 | try: 15 | token = ( 16 | api.AccessToken( 17 | os.getenv("LIVEKIT_API_KEY"), 18 | os.getenv("LIVEKIT_API_SECRET"), 19 | ) 20 | .with_identity("identity") 21 | .with_name("dev_user") 22 | .with_grants( 23 | api.VideoGrants( 24 | room_join=True, 25 | room=function_input.room_id, 26 | ) 27 | ) 28 | ) 29 | log.info("Token generated", token=token.to_jwt()) 30 | except Exception as e: 31 | error_message = f"livekit_room function failed: {e}" 32 | raise NonRetryableError(error_message) from e 33 | 34 | else: 35 | return token.to_jwt() 36 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/src/restack/client.py: -------------------------------------------------------------------------------- 1 | """Client. 2 | 3 | Initializes and exposes the Restack client used for sending events from the LiveKit pipeline agent. 4 | """ 5 | 6 | import os 7 | 8 | from dotenv import load_dotenv 9 | from restack_ai import Restack 10 | from restack_ai.restack import CloudConnectionOptions 11 | from src.utils import logger 12 | 13 | # Load environment variables from the .env file. 
import os

from livekit import api
from livekit.api import SendDataRequest, SendDataResponse
from pydantic import BaseModel
from restack_ai.function import NonRetryableError, function


class LivekitSendDataInput(BaseModel):
    # Room to deliver the data packet to.
    room_id: str
    # UTF-8 text payload sent to the room participants.
    text: str


@function.defn()
async def livekit_send_data(
    function_input: LivekitSendDataInput,
) -> SendDataResponse:
    """Send a text data packet to a LiveKit room.

    Raises:
        NonRetryableError: If the LiveKit API call fails.
    """
    try:
        lkapi = api.LiveKitAPI(
            url=os.getenv("LIVEKIT_API_URL"),
            api_key=os.getenv("LIVEKIT_API_KEY"),
            api_secret=os.getenv("LIVEKIT_API_SECRET"),
        )
    except Exception as e:
        error_message = f"livekit_send_data function failed: {e}"
        raise NonRetryableError(error_message) from e

    try:
        send_data_response = await lkapi.room.send_data(
            SendDataRequest(
                room=function_input.room_id,
                data=function_input.text.encode("utf-8"),
            )
        )
    except Exception as e:
        # The original message named livekit_delete_room — a copy-paste slip.
        error_message = f"livekit_send_data function failed: {e}"
        raise NonRetryableError(error_message) from e
    finally:
        # Close the client even when send_data fails (previously leaked on
        # the error path, since aclose() only ran after a successful send).
        await lkapi.aclose()

    return send_data_response
run_services() -> None: 26 | try: 27 | asyncio.run(main()) 28 | except KeyboardInterrupt: 29 | logging.info("Service interrupted by user. Exiting gracefully.") 30 | 31 | 32 | def watch_services() -> None: 33 | watch_path = Path.cwd() 34 | logging.info("Watching %s and its subdirectories for changes...", watch_path) 35 | webbrowser.open("http://localhost:5233") 36 | run_process(watch_path, recursive=True, target=run_services) 37 | 38 | 39 | if __name__ == "__main__": 40 | run_services() 41 | -------------------------------------------------------------------------------- /agent_video/src/services.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import webbrowser 4 | from pathlib import Path 5 | 6 | from watchfiles import run_process 7 | 8 | from src.agents.agent import AgentVideo 9 | from src.client import client 10 | from src.functions.context_docs import context_docs 11 | from src.functions.llm_chat import llm_chat 12 | from src.functions.pipeline import pipecat_pipeline 13 | from src.workflows.room import RoomWorkflow 14 | 15 | 16 | async def main() -> None: 17 | await client.start_service( 18 | agents=[AgentVideo], 19 | workflows=[RoomWorkflow], 20 | functions=[ 21 | llm_chat, 22 | pipecat_pipeline, 23 | context_docs, 24 | ], 25 | ) 26 | 27 | 28 | def run_services() -> None: 29 | try: 30 | asyncio.run(main()) 31 | except KeyboardInterrupt: 32 | logging.info("Service interrupted by user. 
@dataclass
class TranslateTextInput:
    # Text to translate.
    text: str
    # Target language name or code (e.g. "fr").
    target_language: str

@function.defn()
async def translate_text(input: TranslateTextInput) -> str:
    """Translate ``input.text`` into ``input.target_language`` via OpenAI.

    Returns:
        str: The translated text from the model.

    Raises:
        FunctionFailure: If OPENAI_API_KEY is unset (non-retryable) or the
            chat completion request fails.
    """
    if os.environ.get("OPENAI_API_KEY") is None:
        raise FunctionFailure("OPENAI_API_KEY is not set", non_retryable=True)

    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

    try:
        response = client.chat.completions.create(
            model="gpt-4.1-mini",
            messages=[
                {
                    "role": "system",
                    "content": "You are a helpful assistant that translates text from one language to another."
                },
                {
                    "role": "user",
                    "content": f"Translate the following text to {input.target_language}: {input.text}"
                }
            ]
        )
    except Exception as error:
        log.error("An error occurred during translation", error=error)
        # Fix: the error was previously swallowed, so the function fell
        # through to `return response...` and raised NameError on the
        # unbound `response`. Re-raise so the engine sees the real failure.
        raise FunctionFailure("Translation request failed") from error

    return response.choices[0].message.content
class OpenAiChatInput(BaseModel):
    # The user message to send to the model.
    user_content: str
    # Optional system prompt prepended to the conversation.
    system_content: str | None = None
    # OpenAI model name; defaults to "gpt-4.1-mini" when omitted.
    model: str | None = None

@function.defn()
async def openai_chat(input: OpenAiChatInput) -> str:
    """Run a single OpenAI chat completion and return the reply text.

    Raises:
        NonRetryableError: If OPENAI_API_KEY is not configured.
    """
    try:
        log.info("openai_chat function started", input=input)

        if os.environ.get("OPENAI_API_KEY") is None:
            raise NonRetryableError("OPENAI_API_KEY is not set")

        client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

        messages = []
        if input.system_content:
            messages.append({"role": "system", "content": input.system_content})
        messages.append({"role": "user", "content": input.user_content})

        response = client.chat.completions.create(
            model=input.model or "gpt-4.1-mini",
            messages=messages
        )
        log.info("openai_chat function completed", response=response)
        return response.choices[0].message.content
    except Exception as e:
        log.error("openai_chat function failed", error=e)
        # Bare `raise` preserves the original traceback; `raise e` re-raised
        # the same object but is the less idiomatic form.
        raise
protobuf==5.29.3 36 | # via temporalio 37 | pydantic==2.10.6 38 | # via restack-ai 39 | pydantic-core==2.27.2 40 | # via pydantic 41 | python-dotenv==1.0.1 42 | # via restack-ai 43 | restack-ai==0.0.94 44 | # via encryption (pyproject.toml) 45 | temporalio==1.10.0 46 | # via restack-ai 47 | types-protobuf==5.29.1.20241207 48 | # via temporalio 49 | typing-extensions==4.12.2 50 | # via 51 | # pydantic 52 | # pydantic-core 53 | # restack-ai 54 | # temporalio 55 | websockets==14.2 56 | # via restack-ai 57 | yarl==1.18.3 58 | # via aiohttp 59 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/src/env_check.py: -------------------------------------------------------------------------------- 1 | """Env Check. 2 | 3 | This module checks that all required environment variables are set. 4 | It is intended to be used during application startup to warn developers about missing configurations. 5 | """ 6 | 7 | import os 8 | from dotenv import load_dotenv 9 | from src.utils import logger # Using the shared logger from utils 10 | 11 | REQUIRED_ENVS: dict[str, str] = { 12 | "LIVEKIT_URL": "LiveKit server URL", 13 | "LIVEKIT_API_KEY": "API Key for LiveKit", 14 | "LIVEKIT_API_SECRET": "API Secret for LiveKit", 15 | "DEEPGRAM_API_KEY": "API key for Deepgram (used for STT)", 16 | "ELEVEN_API_KEY": "API key for ElevenLabs (used for TTS)", 17 | } 18 | 19 | 20 | # Load environment variables from the .env file. 
@dataclass
class LivekitDispatchInput:
    # Room to dispatch the agent into; defaults to the workflow run id.
    room_id: str | None = None


@function.defn()
async def livekit_dispatch(function_input: LivekitDispatchInput) -> AgentDispatch:
    """Create a LiveKit agent dispatch for the current agent run.

    The dispatch carries the agent name/id/run id as metadata so the
    LiveKit worker can route events back to this Restack agent.

    Returns:
        AgentDispatch: The created dispatch.

    Raises:
        NonRetryableError: If the LiveKit API call fails.
    """
    try:
        lkapi = api.LiveKitAPI(
            url=os.getenv("LIVEKIT_API_URL"),
            api_key=os.getenv("LIVEKIT_API_KEY"),
            api_secret=os.getenv("LIVEKIT_API_SECRET"),
        )

        try:
            # Snapshot the workflow context once instead of calling
            # function_info() three times.
            info = function_info()
            agent_name = info.workflow_type
            agent_id = info.workflow_id
            run_id = info.workflow_run_id

            metadata = {"agent_name": agent_name, "agent_id": agent_id, "run_id": run_id}

            room = function_input.room_id or run_id

            dispatch = await lkapi.agent_dispatch.create_dispatch(
                api.CreateAgentDispatchRequest(
                    agent_name=agent_name, room=room, metadata=str(metadata)
                )
            )
        finally:
            # Always release the HTTP session, even when create_dispatch
            # fails (previously the client leaked on error).
            await lkapi.aclose()

    except Exception as e:
        # Include the underlying cause (the message was previously opaque),
        # consistent with the twilio_livekit variant of this function.
        error_message = f"livekit_dispatch function failed: {e}"
        raise NonRetryableError(error_message) from e

    else:
        return dispatch
@function.defn()
async def llm_evaluate(input: EvaluateInput) -> str:
    """Score a generated joke for humor, creativity and originality.

    Sends the joke to a local OpenAI-compatible endpoint and returns the
    model's raw score text.

    Raises:
        NonRetryableError: If the client cannot be created or the
            completion request fails.
    """
    try:
        client = OpenAI(base_url="http://192.168.205.1:1234/v1/", api_key="llmstudio")
    except Exception as e:
        log.error(f"Failed to create LLM client {e}")
        raise NonRetryableError(message=f"Failed to create OpenAI client {e}") from e

    # Fix: the format lines previously lacked newlines, so the prompt read
    # "Humor: [score]/10Creativity: [score]/10..." as one run-on line.
    prompt = (
        f"Evaluate the following joke for humor, creativity, and originality. "
        f"Provide a score out of 10 for each category for your score.\n\n"
        f"Joke: {input.generated_text}\n\n"
        f"Response format:\n"
        f"Humor: [score]/10\n"
        f"Creativity: [score]/10\n"
        f"Originality: [score]/10\n"
        f"Average score: [score]/10\n"
        f"Only answer with the scores"
    )

    try:
        response = client.chat.completions.create(
            model="llama-3.2-3b-instruct",
            messages=[
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            temperature=0.5,
        )
    except Exception as e:
        log.error(f"Failed to generate {e}")
        # Fix: the error was previously swallowed, after which the return
        # statement raised NameError on the unbound `response`.
        raise NonRetryableError(message=f"Failed to generate evaluation {e}") from e

    return response.choices[0].message.content
functions=[llm_generate, llm_evaluate], 28 | options=ServiceOptions( 29 | rate_limit=1, 30 | max_concurrent_function_runs=1 31 | ) 32 | ), 33 | ) 34 | 35 | def run_services(): 36 | try: 37 | asyncio.run(main()) 38 | except KeyboardInterrupt: 39 | print("Service interrupted by user. Exiting gracefully.") 40 | 41 | def watch_services(): 42 | watch_path = os.getcwd() 43 | print(f"Watching {watch_path} and its subdirectories for changes...") 44 | webbrowser.open("http://localhost:5233") 45 | run_process(watch_path, recursive=True, target=run_services) 46 | 47 | if __name__ == "__main__": 48 | run_services() -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/livekit_pipeline/src/restack/utils.py: -------------------------------------------------------------------------------- 1 | """Utils. 2 | 3 | Provides utility functions and a shared logger configuration for the LiveKit pipeline example project. 4 | """ 5 | 6 | import os 7 | from typing import Any 8 | 9 | 10 | def extract_restack_agent_info( 11 | metadata_obj: Any, 12 | ) -> tuple[str | None, str | None, str | None]: 13 | """Extract agent-related information from the metadata object. 14 | 15 | Args: 16 | metadata_obj (Any): The metadata object. 17 | 18 | Returns: 19 | tuple[str | None, str | None, str | None]: A tuple of (agent_name, agent_id, run_id). 20 | 21 | """ 22 | return ( 23 | metadata_obj.get("agent_name"), 24 | metadata_obj.get("agent_id"), 25 | metadata_obj.get("run_id"), 26 | ) 27 | 28 | 29 | def get_restack_agent_url( 30 | agent_name: str, agent_id: str, run_id: str 31 | ) -> str: 32 | """Retrieve the agent base URL. 33 | 34 | Args: 35 | agent_name (str): The name of the agent. 36 | agent_id (str): The ID of the agent. 37 | run_id (str): The run ID of the agent. 38 | 39 | Returns: 40 | str: The agent base URL. 
@dataclass
class FunctionInputParams:
    # The user message to send to the model.
    user_content: str
    # Optional system prompt prepended to the conversation.
    system_content: str | None = None
    # OpenAI model name; defaults to "gpt-4.1-mini" when omitted.
    model: str | None = None


def raise_exception(message: str) -> None:
    """Log *message* and raise it as an exception."""
    log.error(message)
    raise Exception(message)


@function.defn()
async def llm(function_input: FunctionInputParams) -> str:
    """Run a single OpenAI chat completion and return the reply text.

    Raises:
        NonRetryableError: On any failure (missing API key, request error);
            the original exception is chained as the cause.
    """
    try:
        log.info("llm function started", input=function_input)

        if os.environ.get("OPENAI_API_KEY") is None:
            error_message = "OPENAI_API_KEY is not set"
            raise_exception(error_message)

        client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

        messages = []
        if function_input.system_content:
            messages.append(
                {"role": "system", "content": function_input.system_content}
            )
        messages.append({"role": "user", "content": function_input.user_content})

        response = client.chat.completions.create(
            model=function_input.model or "gpt-4.1-mini", messages=messages
        )
        log.info("llm function completed", response=response)
        return response.choices[0].message.content
    except Exception as e:
        # Surface the underlying cause instead of a fixed opaque message,
        # consistent with the other function examples in this repository.
        error_message = f"llm function failed: {e}"
        raise NonRetryableError(error_message) from e
49 | -------------------------------------------------------------------------------- /agent_humanloop/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile pyproject.toml -o requirements.txt 3 | aiohappyeyeballs==2.4.4 4 | # via aiohttp 5 | aiohttp==3.11.11 6 | # via restack-ai 7 | aiosignal==1.3.2 8 | # via aiohttp 9 | annotated-types==0.7.0 10 | # via pydantic 11 | anyio==4.8.0 12 | # via watchfiles 13 | asyncio==3.4.3 14 | # via restack-ai 15 | attrs==25.1.0 16 | # via aiohttp 17 | colorama==0.4.6 18 | # via restack-ai 19 | frozenlist==1.5.0 20 | # via 21 | # aiohttp 22 | # aiosignal 23 | idna==3.10 24 | # via 25 | # anyio 26 | # yarl 27 | msgspec==0.18.6 28 | # via restack-ai 29 | multidict==6.1.0 30 | # via 31 | # aiohttp 32 | # yarl 33 | propcache==0.2.1 34 | # via 35 | # aiohttp 36 | # yarl 37 | protobuf==5.29.3 38 | # via temporalio 39 | pydantic==2.10.6 40 | # via 41 | # human-loop (pyproject.toml) 42 | # restack-ai 43 | pydantic-core==2.27.2 44 | # via pydantic 45 | python-dotenv==1.0.1 46 | # via 47 | # human-loop (pyproject.toml) 48 | # restack-ai 49 | restack-ai==0.0.94 50 | # via human-loop (pyproject.toml) 51 | sniffio==1.3.1 52 | # via anyio 53 | temporalio==1.10.0 54 | # via restack-ai 55 | types-protobuf==5.29.1.20241207 56 | # via temporalio 57 | typing-extensions==4.12.2 58 | # via 59 | # anyio 60 | # pydantic 61 | # pydantic-core 62 | # restack-ai 63 | # temporalio 64 | watchfiles==1.0.4 65 | # via human-loop (pyproject.toml) 66 | websockets==14.2 67 | # via restack-ai 68 | yarl==1.18.3 69 | # via aiohttp 70 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/src/functions/livekit_dispatch.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dataclasses import dataclass 3 | 4 | from 
@dataclass
class LivekitDispatchInput:
    # Room to dispatch the agent into; defaults to the workflow run id.
    room_id: str | None = None


@function.defn()
async def livekit_dispatch(
    function_input: LivekitDispatchInput,
) -> AgentDispatch:
    """Create a LiveKit agent dispatch for the current agent run.

    The dispatch metadata carries the agent name/id/run id so the LiveKit
    worker can route events back to this Restack agent.
    """
    try:
        lkapi = api.LiveKitAPI(
            url=os.getenv("LIVEKIT_API_URL"),
            api_key=os.getenv("LIVEKIT_API_KEY"),
            api_secret=os.getenv("LIVEKIT_API_SECRET"),
        )

        info = function_info()
        agent_name = info.workflow_type
        agent_id = info.workflow_id
        run_id = info.workflow_run_id

        metadata = {
            "agent_name": agent_name,
            "agent_id": agent_id,
            "run_id": run_id,
        }

        dispatch = await lkapi.agent_dispatch.create_dispatch(
            api.CreateAgentDispatchRequest(
                agent_name=agent_name,
                room=function_input.room_id or run_id,
                metadata=str(metadata),
            )
        )

        await lkapi.aclose()

    except Exception as e:
        error_message = f"livekit_dispatch function failed: {e}"
        raise NonRetryableError(error_message) from e

    else:
        return dispatch
aiohttp 22 | # aiosignal 23 | idna==3.10 24 | # via 25 | # anyio 26 | # yarl 27 | msgspec==0.18.6 28 | # via restack-ai 29 | multidict==6.1.0 30 | # via 31 | # aiohttp 32 | # yarl 33 | propcache==0.2.1 34 | # via 35 | # aiohttp 36 | # yarl 37 | protobuf==5.29.3 38 | # via temporalio 39 | pydantic==2.10.6 40 | # via 41 | # child-workflows (pyproject.toml) 42 | # restack-ai 43 | pydantic-core==2.27.2 44 | # via pydantic 45 | python-dotenv==1.0.1 46 | # via 47 | # child-workflows (pyproject.toml) 48 | # restack-ai 49 | restack-ai==0.0.94 50 | # via child-workflows (pyproject.toml) 51 | sniffio==1.3.1 52 | # via anyio 53 | temporalio==1.10.0 54 | # via restack-ai 55 | types-protobuf==5.29.1.20241207 56 | # via temporalio 57 | typing-extensions==4.12.2 58 | # via 59 | # anyio 60 | # pydantic 61 | # pydantic-core 62 | # restack-ai 63 | # temporalio 64 | watchfiles==1.0.4 65 | # via child-workflows (pyproject.toml) 66 | websockets==14.2 67 | # via restack-ai 68 | yarl==1.18.3 69 | # via aiohttp 70 | -------------------------------------------------------------------------------- /child_workflows/README.md: -------------------------------------------------------------------------------- 1 | # Restack AI - Child Workflows Example 2 | 3 | This repository contains a simple example project to help you get started with child workflows 4 | 5 | ## Prerequisites 6 | 7 | - Python 3.8 or higher 8 | - Uv (for dependency management) 9 | - Docker (for running the Restack services) 10 | 11 | ## Usage 12 | 13 | ## Start Restack 14 | 15 | To start the Restack, use the following Docker command: 16 | 17 | ```bash 18 | docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:7233 -p 9233:9233 -p 10233:10233 ghcr.io/restackio/restack:main 19 | ``` 20 | 21 | ## Start python shell 22 | 23 | If using uv: 24 | 25 | ```bash 26 | uv venv && source .venv/bin/activate 27 | ``` 28 | 29 | If using pip: 30 | 31 | ```bash 32 | python -m venv .venv && source .venv/bin/activate 33 | ``` 
@dataclass
class LivekitCallInput:
    # SIP trunk used to place the outbound call.
    sip_trunk_id: str
    # Destination phone number.
    phone_number: str
    # LiveKit room the callee joins.
    room_id: str
    # Agent identity fields attached to the SIP participant.
    agent_name: str
    agent_id: str
    run_id: str


@function.defn()
async def livekit_call(
    function_input: LivekitCallInput,
) -> SIPParticipantInfo:
    """Dial a phone number into a LiveKit room as a SIP participant."""
    try:
        livekit_api = api.LiveKitAPI()

        sip_request = CreateSIPParticipantRequest(
            sip_trunk_id=function_input.sip_trunk_id,
            sip_call_to=function_input.phone_number,
            room_name=function_input.room_id,
            participant_identity=function_input.agent_id,
            participant_name=function_input.agent_name,
            play_dialtone=True,
        )

        log.info(
            "livekit_call CreateSIPParticipantRequest: ",
            request=sip_request,
        )

        participant = await livekit_api.sip.create_sip_participant(sip_request)

        await livekit_api.aclose()

        log.info(
            "livekit_call SIPParticipantInfo:",
            participant=participant,
        )
    except Exception as e:
        error_message = f"livekit_call function failed: {e}"
        raise NonRetryableError(error_message) from e
    else:
        return participant
class Message(BaseModel):
    # Chat role as accepted by the OpenAI chat completions API.
    role: Literal["system", "user", "assistant"]
    content: str


class LlmChatInput(BaseModel):
    # Optional system prompt inserted at the start of the conversation.
    system_content: str | None = None
    # OpenAI model name; defaults to "gpt-4.1-mini" when omitted.
    model: str | None = None
    # Accumulated conversation; None is treated as an empty history.
    messages: list[Message] | None = None


def raise_exception(message: str) -> None:
    """Log *message* and raise it as a NonRetryableError."""
    log.error(message)
    raise NonRetryableError(message)


@function.defn()
async def llm_chat(function_input: LlmChatInput) -> dict:
    """Run an OpenAI chat completion over the accumulated conversation.

    Returns:
        dict: The ChatCompletion response serialized via ``model_dump()``.

    Raises:
        NonRetryableError: If the API key is missing or the request fails.
    """
    try:
        log.info("llm_chat function started", function_input=function_input)

        if os.environ.get("OPENAI_API_KEY") is None:
            error_message = "OPENAI_API_KEY is not set"
            raise_exception(error_message)

        client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

        # Fix: `messages` defaults to None, so appending to it crashed;
        # copy into a local list instead of mutating the input.
        messages = list(function_input.messages or [])

        if function_input.system_content:
            # Fix: the system prompt must lead the conversation; it was
            # previously appended AFTER the user/assistant messages.
            messages.insert(
                0, Message(role="system", content=function_input.system_content)
            )

        response = client.chat.completions.create(
            model=function_input.model or "gpt-4.1-mini",
            # The OpenAI SDK expects plain dicts, not pydantic models.
            messages=[message.model_dump() for message in messages],
        )
    except Exception as e:
        error_message = f"LLM chat failed: {e}"
        raise NonRetryableError(error_message) from e
    else:
        log.info("llm_chat function completed", response=response)
        return response.model_dump()
from pydantic import BaseModel
from restack_ai.workflow import NonRetryableError, log, workflow, workflow_info

from .child import ChildInput, ChildWorkflow


class ParentInput(BaseModel):
    # When True, execute a child workflow; otherwise finish immediately.
    child: bool = True


class ParentOutput(BaseModel):
    result: str


@workflow.defn()
class ParentWorkflow:
    @workflow.run
    async def run(self, workflow_input: ParentInput) -> ParentOutput:
        """Optionally run ChildWorkflow and wait for its result."""
        log.info("ParentWorkflow started", workflow_input=workflow_input)

        done = ParentOutput(result="ParentWorkflow completed")

        if not workflow_input.child:
            log.info("ParentWorkflow without starting or executing child workflow")
            return done

        # Derive deterministic child workflow ids from the parent's run id.
        parent_workflow_id = workflow_info().workflow_id

        log.info("Start ChildWorkflow and dont wait for result")
        # Fire-and-forget variant (intentionally left as an example only):
        # result = await workflow.child_start(ChildWorkflow, input=ChildInput(name="world"), workflow_id=f"{parent_workflow_id}-child-start")

        log.info("Start ChildWorkflow and wait for result")
        try:
            child_result = await workflow.child_execute(
                workflow=ChildWorkflow,
                workflow_input=ChildInput(name="world"),
                workflow_id=f"{parent_workflow_id}-child-execute",
            )
        except Exception as e:
            error_message = f"Error during child_execute: {e}"
            raise NonRetryableError(error_message) from e

        log.info("ChildWorkflow completed", result=child_result)
        return done
from dataclasses import dataclass
from datetime import timedelta

from pydantic import BaseModel
from restack_ai.agent import NonRetryableError, agent, import_functions, log

with import_functions():
    from src.functions.llm_chat import LlmChatInput, Message, llm_chat


class MessagesEvent(BaseModel):
    messages: list[Message]


class EndEvent(BaseModel):
    end: bool


@dataclass
class AgentStreamInput:
    room_id: str | None = None


@agent.defn()
class AgentStream:
    """Chat agent that streams LLM answers until an end event arrives."""

    def __init__(self) -> None:
        self.end = False
        self.messages: list[Message] = []

    @agent.event
    async def messages(self, messages_event: MessagesEvent) -> list[Message]:
        """Append the incoming messages, query the LLM, return the history."""
        log.info(f"Received message: {messages_event.messages}")
        self.messages.extend(messages_event.messages)

        try:
            assistant_message = await agent.step(
                function=llm_chat,
                function_input=LlmChatInput(messages=self.messages),
                start_to_close_timeout=timedelta(seconds=120),
            )
        except Exception as e:
            error_message = f"Error during llm_chat: {e}"
            raise NonRetryableError(error_message) from e

        reply = Message(role="assistant", content=str(assistant_message))
        self.messages.append(reply)
        return self.messages

    @agent.event
    async def end(self, end: EndEvent) -> EndEvent:
        """Flag the agent as finished so ``run`` can return."""
        log.info("Received end")
        self.end = True
        return end

    @agent.run
    async def run(self, agent_input: AgentStreamInput) -> None:
        """Block until an end event is received."""
        log.info("run agent", agent_input=agent_input)
        await agent.condition(lambda: self.end)
from datetime import timedelta

from pydantic import BaseModel, Field
from restack_ai.workflow import NonRetryableError, import_functions, log, workflow

with import_functions():
    from src.functions.openai_chat import OpenAiChatInput, openai_chat
    from src.functions.torch_ocr import OcrInput, torch_ocr


class PdfWorkflowInput(BaseModel):
    # Restack UI renders this field as a file-upload widget.
    file_upload: list[dict] = Field(files=True)


@workflow.defn()
class PdfWorkflow:
    @workflow.run
    async def run(self, input: PdfWorkflowInput):
        """OCR the first uploaded PDF, then summarize the text with OpenAI."""
        log.info("PdfWorkflow started")

        uploaded = input.file_upload[0]

        try:
            ocr_result = await workflow.step(
                function=torch_ocr,
                function_input=OcrInput(
                    file_type=uploaded["type"],
                    file_name=uploaded["name"],
                ),
                start_to_close_timeout=timedelta(seconds=120),
            )
        except Exception as e:
            error_message = f"torch_ocr function failed: {e}"
            raise NonRetryableError(error_message) from e

        try:
            llm_result = await workflow.step(
                function=openai_chat,
                function_input=OpenAiChatInput(
                    user_content=f"Make a summary of that PDF. Here is the OCR result: {ocr_result}",
                    model="gpt-4.1-mini",
                ),
                start_to_close_timeout=timedelta(seconds=120),
            )
        except Exception as e:
            error_message = f"openai_chat function failed: {e}"
            raise NonRetryableError(error_message) from e

        log.info("PdfWorkflow completed")
        return llm_result
from datetime import timedelta

from pydantic import BaseModel, Field
from restack_ai.workflow import (
    NonRetryableError,
    RetryPolicy,
    import_functions,
    log,
    workflow,
)

with import_functions():
    from src.functions.evaluate import EvaluateInput, llm_evaluate
    from src.functions.function import ExampleFunctionInput, example_function
    from src.functions.generate import GenerateInput, llm_generate


class ChildWorkflowInput(BaseModel):
    prompt: str = Field(default="Generate a random joke in max 20 words.")


@workflow.defn()
class ChildWorkflow:
    @workflow.run
    async def run(self, input: ChildWorkflowInput):
        """Run an example step, then generate and evaluate text via LLM steps."""
        log.info("ChildWorkflow started")

        try:
            # Plain example step, retried up to three times on failure.
            await workflow.step(
                function=example_function,
                function_input=ExampleFunctionInput(name='John Doe'),
                start_to_close_timeout=timedelta(minutes=2),
                retry_policy=RetryPolicy(maximum_attempts=3),
            )

            await workflow.sleep(1)

            # Both LLM steps run on the dedicated "llm" task queue.
            generated_text = await workflow.step(
                function=llm_generate,
                function_input=GenerateInput(prompt=input.prompt),
                task_queue="llm",
                start_to_close_timeout=timedelta(minutes=2),
            )

            evaluation = await workflow.step(
                function=llm_evaluate,
                function_input=EvaluateInput(generated_text=generated_text),
                task_queue="llm",
                start_to_close_timeout=timedelta(minutes=5),
            )
        except Exception as e:
            log.error(f"ChildWorkflow failed {e}")
            raise NonRetryableError(message=f"ChildWorkflow failed {e}") from e

        return {
            "generated_text": generated_text,
            "evaluation": evaluation,
        }
from datetime import timedelta

from pydantic import BaseModel
from restack_ai.agent import NonRetryableError, agent, import_functions, log

with import_functions():
    from src.functions.llm_chat import LlmChatInput, Message, llm_chat


class MessagesEvent(BaseModel):
    messages: list[Message]


class EndEvent(BaseModel):
    end: bool


@agent.defn()
class AgentChat:
    """Long-running chat agent that answers each incoming batch of messages."""

    def __init__(self) -> None:
        self.end = False
        self.messages = []

    @agent.event
    async def messages(self, messages_event: MessagesEvent) -> list[Message]:
        """Store incoming messages, query the LLM, and return the history."""
        log.info(f"Received messages: {messages_event.messages}")
        self.messages.extend(messages_event.messages)

        log.info(f"Calling llm_chat with messages: {self.messages}")
        try:
            assistant_message = await agent.step(
                function=llm_chat,
                function_input=LlmChatInput(messages=self.messages),
                start_to_close_timeout=timedelta(seconds=120),
            )
        except Exception as e:
            error_message = f"Error during llm_chat: {e}"
            raise NonRetryableError(error_message) from e

        self.messages.append(assistant_message)
        return self.messages

    @agent.event
    async def end(self, end: EndEvent) -> EndEvent:
        """Flag the agent for shutdown."""
        log.info("Received end")
        self.end = True
        return end

    @agent.run
    async def run(self, function_input: dict) -> None:
        """Wait until an end event arrives or a history rollover is due."""
        log.info("AgentChat function_input", function_input=function_input)
        await agent.condition(lambda: self.end or agent.should_continue_as_new())

        if self.end is True:
            log.info("AgentChat end")
            return

        # Roll over to a fresh run to keep the event history bounded.
        await agent.agent_continue_as_new()
49 | python -c "from src.services import watch_services; watch_services()" 50 | ``` 51 | 52 | ## Run workflows 53 | 54 | ### from UI 55 | 56 | You can run workflows from the UI by clicking the "Run" button. 57 | 58 | ![Run workflows from UI](./ui-screenshot.png) 59 | 60 | ### from API 61 | 62 | You can run workflows from the API by using the generated endpoint: 63 | 64 | `POST http://localhost:6233/api/workflows/PdfWorkflow` 65 | 66 | ### from any client 67 | 68 | You can run workflows with any client connected to Restack, for example: 69 | 70 | If using uv: 71 | 72 | ```bash 73 | uv run schedule 74 | ``` 75 | 76 | If using pip: 77 | 78 | ```bash 79 | python -c "from schedule_workflow import run_schedule_workflow; run_schedule_workflow()" 80 | ``` 81 | 82 | executes `schedule_workflow.py` which will connect to Restack and execute the `PdfWorkflow` workflow. 83 | 84 | ## Deploy on Restack Cloud 85 | 86 | To deploy the application on Restack, you can create an account at [https://console.restack.io](https://console.restack.io) 87 | -------------------------------------------------------------------------------- /audio_transcript/README.md: -------------------------------------------------------------------------------- 1 | # Restack AI - Audio translation example 2 | 3 | This example showcases how to transcribe an mp3 audio and then translate the generated text to a target language, all done in a single workflow defined with Restack AI.
4 | 5 | ## Prerequisites 6 | 7 | - Docker (for running Restack) 8 | - Python 3.10 or higher 9 | - Uv (for dependency management) 10 | 11 | ## Start Restack 12 | 13 | To start the Restack, use the following Docker command: 14 | 15 | ```bash 16 | docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:7233 -p 9233:9233 -p 10233:10233 ghcr.io/restackio/restack:main 17 | ``` 18 | 19 | ## Start python shell 20 | 21 | If using uv: 22 | 23 | ```bash 24 | uv venv && source .venv/bin/activate 25 | ``` 26 | 27 | If using pip: 28 | 29 | ```bash 30 | python -m venv .venv && source .venv/bin/activate 31 | ``` 32 | 33 | ## Install dependencies 34 | 35 | If using uv: 36 | 37 | ```bash 38 | uv sync 39 | uv run dev 40 | ``` 41 | 42 | If using pip: 43 | 44 | ```bash 45 | pip install -e . 46 | python -c "from src.services import watch_services; watch_services()" 47 | ``` 48 | 49 | ## Run workflows 50 | 51 | ### from UI 52 | 53 | You can run workflows from the UI by clicking the "Run" button. 54 | 55 | ![Run workflows from UI](./ui-screenshot.png) 56 | 57 | ### from API 58 | 59 | You can run workflows from the API by using the generated endpoint: 60 | 61 | `POST http://localhost:6233/api/workflows/TranscribeTranslateWorkflow` 62 | 63 | ### from any client 64 | 65 | You can run workflows with any client connected to Restack, for example: 66 | 67 | If using uv: 68 | 69 | ```bash 70 | uv run schedule 71 | ``` 72 | 73 | If using pip: 74 | 75 | ```bash 76 | python -c "from schedule_workflow import run_schedule_workflow; run_schedule_workflow()" 77 | ``` 78 | 79 | executes `schedule_workflow.py` which will connect to Restack and execute the `TranscribeTranslateWorkflow` workflow. 
import os
from typing import Literal

from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel
from restack_ai.function import NonRetryableError, function, log

load_dotenv()


class Message(BaseModel):
    role: Literal["system", "user", "assistant"]
    content: str


class LlmChatInput(BaseModel):
    system_content: str | None = None
    model: str | None = None
    messages: list[Message] | None = None


def raise_exception(message: str) -> None:
    """Log *message* and raise it as a NonRetryableError."""
    log.error(message)
    raise NonRetryableError(message)


@function.defn()
async def llm_chat(agent_input: LlmChatInput) -> dict[str, str]:
    """Run a (non-streamed) chat completion and return the assistant reply.

    Returns a ``{"role": ..., "content": ...}`` dict.
    Raises NonRetryableError if OPENAI_API_KEY is unset or the call fails.
    """
    try:
        log.info("llm_chat function started", agent_input=agent_input)

        if os.environ.get("OPENAI_API_KEY") is None:
            error_message = "OPENAI_API_KEY is not set"
            raise_exception(error_message)

        client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

        # Tolerate a missing message list (messages is Optional) instead of
        # crashing with AttributeError on None.
        messages = list(agent_input.messages or [])

        if agent_input.system_content:
            # The system prompt must be the FIRST message, not appended after
            # the user turns (appending it last weakens or ignores it); this
            # matches every other llm_chat implementation in this repository.
            messages.insert(
                0, Message(role="system", content=agent_input.system_content)
            )

        # Normalize to plain dicts for the OpenAI SDK (entries are pydantic
        # Message models after input validation).
        messages_dicts = [
            m.model_dump() if isinstance(m, Message) else m for m in messages
        ]

        assistant_raw_response = client.chat.completions.create(
            model=agent_input.model or "gpt-4.1-mini",
            messages=messages_dicts,
        )
    except Exception as e:
        error_message = f"LLM chat failed: {e}"
        raise NonRetryableError(error_message) from e
    else:
        log.info(
            "llm_chat function completed", assistant_raw_response=assistant_raw_response
        )

        assistant_response = {
            "role": assistant_raw_response.choices[0].message.role,
            "content": assistant_raw_response.choices[0].message.content,
        }

        log.info("assistant_response", assistant_response=assistant_response)

        return assistant_response
completed", room_url=room_url) 58 | 59 | return RoomWorkflowOutput(room_url=room_url) 60 | -------------------------------------------------------------------------------- /encryption/README.md: -------------------------------------------------------------------------------- 1 | # Restack AI - Encryption Example 2 | 3 | This repository contains a simple example project to help you get started with the Restack AI SDK. It demonstrates how to set up a basic workflow and functions using the SDK. 4 | 5 | ## Prerequisites 6 | 7 | - Python 3.8 or higher 8 | - Uv (for dependency management) 9 | - Docker (for running the Restack services) 10 | 11 | ## Start Restack 12 | 13 | To start the Restack, use the following Docker command: 14 | 15 | ```bash 16 | docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:7233 -p 9233:9233 -p 10233:10233 ghcr.io/restackio/restack:main 17 | ``` 18 | 19 | ## Start python shell 20 | 21 | If using uv: 22 | 23 | ```bash 24 | uv venv && source .venv/bin/activate 25 | ``` 26 | 27 | If using pip: 28 | 29 | ```bash 30 | python -m venv .venv && source .venv/bin/activate 31 | ``` 32 | 33 | ## Install dependencies 34 | 35 | If using uv: 36 | 37 | ```bash 38 | uv sync 39 | uv run services 40 | ``` 41 | 42 | If using pip: 43 | 44 | ```bash 45 | pip install -e . 
import os

from livekit import api
from livekit.api import (
    EgressInfo,
    EncodedFileType,
    RoomCompositeEgressRequest,
)
from pydantic import BaseModel
from restack_ai.function import NonRetryableError, function, log


class LivekitStartRecordingInput(BaseModel):
    room_id: str


@function.defn()
async def livekit_start_recording(
    function_input: LivekitStartRecordingInput,
) -> EgressInfo:
    """Start an audio-only composite recording of a LiveKit room.

    The recording is uploaded to the ``livekit-local-recordings`` GCP bucket.
    Raises NonRetryableError when GCP_CREDENTIALS is unset or egress fails.
    """
    try:
        credentials = os.getenv("GCP_CREDENTIALS")
        if credentials is None:
            raise NonRetryableError(
                message="GCP_CREDENTIALS is not set"
            )

        # SECURITY: never log the credential material itself — it is a
        # secret; only record that it is present.
        log.info("GCP_CREDENTIALS is set")

        lkapi = api.LiveKitAPI(
            url=os.getenv("LIVEKIT_API_URL"),
            api_key=os.getenv("LIVEKIT_API_KEY"),
            api_secret=os.getenv("LIVEKIT_API_SECRET"),
        )

        try:
            recording = await lkapi.egress.start_room_composite_egress(
                RoomCompositeEgressRequest(
                    room_name=function_input.room_id,
                    layout="grid",
                    audio_only=True,
                    file_outputs=[
                        api.EncodedFileOutput(
                            file_type=EncodedFileType.MP4,
                            filepath=f"{function_input.room_id}-audio.mp4",
                            gcp=api.GCPUpload(
                                credentials=credentials,
                                bucket="livekit-local-recordings",
                            ),
                        )
                    ],
                )
            )
        finally:
            # Always release the API client, even when egress fails
            # (previously leaked on any exception before aclose()).
            await lkapi.aclose()

    except Exception as e:
        error_message = (
            f"livekit_start_recording function failed: {e}"
        )
        raise NonRetryableError(error_message) from e

    else:
        return recording
as e: 28 | error_message = f"Error during weather: {e}" 29 | raise NonRetryableError(error_message) from e 30 | else: 31 | # Step 2 Generate greeting with LLM based on name and weather data 32 | try: 33 | llm_message = await workflow.step( 34 | function=llm, 35 | function_input=FunctionInputParams( 36 | system_content=f"You are a personal assitant and have access to weather data {weather_data}. Always greet person with relevant info from weather data", 37 | user_content=user_content, 38 | model="gpt-4.1-mini", 39 | ), 40 | start_to_close_timeout=timedelta(seconds=120), 41 | ) 42 | except Exception as e: 43 | error_message = f"Error during llm: {e}" 44 | raise NonRetryableError(error_message) from e 45 | else: 46 | log.info("MultistepWorkflow completed", llm_message=llm_message) 47 | return {"message": llm_message, "weather": weather_data} 48 | -------------------------------------------------------------------------------- /agent_telephony/twilio_livekit/agent_twilio/src/functions/llm_logic.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Literal 3 | 4 | from openai import OpenAI 5 | from pydantic import BaseModel 6 | from restack_ai.function import NonRetryableError, function 7 | 8 | class Message(BaseModel): 9 | role: str 10 | content: str 11 | 12 | class LlmLogicResponse(BaseModel): 13 | """Structured AI decision output used to interrupt conversations.""" 14 | 15 | action: Literal["interrupt", "update_context", "end_call"] 16 | reason: str 17 | updated_context: str 18 | 19 | 20 | class LlmLogicInput(BaseModel): 21 | messages: list[Message] 22 | documentation: str 23 | 24 | 25 | @function.defn() 26 | async def llm_logic( 27 | function_input: LlmLogicInput, 28 | ) -> LlmLogicResponse: 29 | try: 30 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY")) 31 | 32 | user_messages = [msg for msg in function_input.messages if msg.role == "user"] 33 | if len(user_messages) == 1: 34 | 
This repository contains an agent with third party APIs. It demonstrates how to set up a multi-step workflow with a Weather API and OpenAI.
46 | python -c "from src.services import watch_services; watch_services()" 47 | ``` 48 | 49 | ## Run agent 50 | 51 | ### from UI 52 | 53 | You can run workflows from the UI by clicking the "Run" button. 54 | 55 | ![Run workflows from UI](./workflow_get.png) 56 | 57 | ### from API 58 | 59 | You can run workflows from the API by using the generated endpoint: 60 | 61 | `POST http://localhost:6233/api/workflows/MultistepWorkflow` 62 | 63 | ### from any client 64 | 65 | You can run workflows with any client connected to Restack, for example: 66 | 67 | If using uv: 68 | 69 | ```bash 70 | uv run schedule-workflow 71 | ``` 72 | 73 | If using pip: 74 | 75 | ```bash 76 | python -c "from src.schedule_workflow import run_schedule_workflow; run_schedule_workflow()" 77 | ``` 78 | 79 | executes `schedule_workflow.py` which will connect to Restack and execute the `MultistepWorkflow` workflow. 80 | 81 | ## See run results 82 | 83 | ### from UI 84 | 85 | You can see the run results in the UI. 86 | 87 | ![See run results from UI](./workflow_run.png) 88 | 89 | ## Deploy on Restack Cloud 90 | 91 | To deploy the application on Restack, you can create an account at [https://console.restack.io](https://console.restack.io) 92 | -------------------------------------------------------------------------------- /agent_chat/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile pyproject.toml -o requirements.txt 3 | aiohappyeyeballs==2.4.4 4 | # via aiohttp 5 | aiohttp==3.11.11 6 | # via restack-ai 7 | aiosignal==1.3.2 8 | # via aiohttp 9 | annotated-types==0.7.0 10 | # via pydantic 11 | anyio==4.8.0 12 | # via 13 | # httpx 14 | # openai 15 | # watchfiles 16 | asyncio==3.4.3 17 | # via restack-ai 18 | attrs==25.1.0 19 | # via aiohttp 20 | certifi==2025.1.31 21 | # via 22 | # httpcore 23 | # httpx 24 | colorama==0.4.6 25 | # via restack-ai 26 | distro==1.9.0 27 | # 
via openai 28 | frozenlist==1.5.0 29 | # via 30 | # aiohttp 31 | # aiosignal 32 | h11==0.14.0 33 | # via httpcore 34 | httpcore==1.0.7 35 | # via httpx 36 | httpx==0.28.1 37 | # via openai 38 | idna==3.10 39 | # via 40 | # anyio 41 | # httpx 42 | # yarl 43 | jiter==0.8.2 44 | # via openai 45 | msgspec==0.18.6 46 | # via restack-ai 47 | multidict==6.1.0 48 | # via 49 | # aiohttp 50 | # yarl 51 | openai==1.61.0 52 | # via agent-chat (pyproject.toml) 53 | propcache==0.2.1 54 | # via 55 | # aiohttp 56 | # yarl 57 | protobuf==5.29.3 58 | # via temporalio 59 | pydantic==2.10.6 60 | # via 61 | # agent-chat (pyproject.toml) 62 | # openai 63 | # restack-ai 64 | pydantic-core==2.27.2 65 | # via pydantic 66 | python-dotenv==1.0.1 67 | # via 68 | # agent-chat (pyproject.toml) 69 | # restack-ai 70 | restack-ai==0.0.94 71 | # via agent-chat (pyproject.toml) 72 | sniffio==1.3.1 73 | # via 74 | # anyio 75 | # openai 76 | temporalio==1.10.0 77 | # via restack-ai 78 | tqdm==4.67.1 79 | # via openai 80 | types-protobuf==5.29.1.20241207 81 | # via temporalio 82 | typing-extensions==4.12.2 83 | # via 84 | # anyio 85 | # openai 86 | # pydantic 87 | # pydantic-core 88 | # restack-ai 89 | # temporalio 90 | watchfiles==1.0.4 91 | # via agent-chat (pyproject.toml) 92 | websockets==14.2 93 | # via restack-ai 94 | yarl==1.18.3 95 | # via aiohttp 96 | -------------------------------------------------------------------------------- /agent_todo/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile pyproject.toml -o requirements.txt 3 | aiohappyeyeballs==2.4.4 4 | # via aiohttp 5 | aiohttp==3.11.11 6 | # via restack-ai 7 | aiosignal==1.3.2 8 | # via aiohttp 9 | annotated-types==0.7.0 10 | # via pydantic 11 | anyio==4.8.0 12 | # via 13 | # httpx 14 | # openai 15 | # watchfiles 16 | asyncio==3.4.3 17 | # via restack-ai 18 | attrs==25.1.0 19 | # via aiohttp 20 | 
certifi==2025.1.31 21 | # via 22 | # httpcore 23 | # httpx 24 | colorama==0.4.6 25 | # via restack-ai 26 | distro==1.9.0 27 | # via openai 28 | frozenlist==1.5.0 29 | # via 30 | # aiohttp 31 | # aiosignal 32 | h11==0.14.0 33 | # via httpcore 34 | httpcore==1.0.7 35 | # via httpx 36 | httpx==0.28.1 37 | # via openai 38 | idna==3.10 39 | # via 40 | # anyio 41 | # httpx 42 | # yarl 43 | jiter==0.8.2 44 | # via openai 45 | msgspec==0.18.6 46 | # via restack-ai 47 | multidict==6.1.0 48 | # via 49 | # aiohttp 50 | # yarl 51 | openai==1.65.4 52 | # via quickstart (pyproject.toml) 53 | propcache==0.2.1 54 | # via 55 | # aiohttp 56 | # yarl 57 | protobuf==5.29.3 58 | # via temporalio 59 | pydantic==2.10.6 60 | # via 61 | # quickstart (pyproject.toml) 62 | # openai 63 | # restack-ai 64 | pydantic-core==2.27.2 65 | # via pydantic 66 | python-dotenv==1.0.1 67 | # via 68 | # quickstart (pyproject.toml) 69 | # restack-ai 70 | restack-ai==0.0.94 71 | # via quickstart (pyproject.toml) 72 | sniffio==1.3.1 73 | # via 74 | # anyio 75 | # openai 76 | temporalio==1.10.0 77 | # via restack-ai 78 | tqdm==4.67.1 79 | # via openai 80 | types-protobuf==5.29.1.20241207 81 | # via temporalio 82 | typing-extensions==4.12.2 83 | # via 84 | # anyio 85 | # openai 86 | # pydantic 87 | # pydantic-core 88 | # restack-ai 89 | # temporalio 90 | watchfiles==1.0.4 91 | # via quickstart (pyproject.toml) 92 | websockets==14.2 93 | # via restack-ai 94 | yarl==1.18.3 95 | # via aiohttp 96 | -------------------------------------------------------------------------------- /production_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile pyproject.toml -o requirements.txt 3 | aiohappyeyeballs==2.4.4 4 | # via aiohttp 5 | aiohttp==3.11.11 6 | # via restack-ai 7 | aiosignal==1.3.2 8 | # via aiohttp 9 | annotated-types==0.7.0 10 | # via pydantic 11 | anyio==4.8.0 12 | # via 13 
import asyncio
from datetime import timedelta

from pydantic import BaseModel, Field
from restack_ai.workflow import (
    NonRetryableError,
    import_functions,
    log,
    workflow,
    workflow_info,
)

from .child import ChildWorkflow, ChildWorkflowInput

with import_functions():
    from src.functions.generate import GenerateInput, llm_generate


class ExampleWorkflowInput(BaseModel):
    # How many child workflows to fan out in parallel.
    amount: int = Field(default=50)


@workflow.defn()
class ExampleWorkflow:
    """Fan out child workflows that each generate a joke, then rank the results."""

    @workflow.run
    async def run(self, input: ExampleWorkflowInput):
        try:
            # Derive deterministic child workflow ids from the parent run id.
            parent_id = workflow_info().workflow_id

            pending = []
            for child_number in range(1, input.amount + 1):
                log.info(f"Queue ChildWorkflow {child_number} for execution")
                pending.append(
                    workflow.child_execute(
                        workflow=ChildWorkflow,
                        workflow_id=f"{parent_id}-child-execute-{child_number}",
                        workflow_input=ChildWorkflowInput(
                            prompt="Generate a random joke in max 20 words."
                        ),
                    )
                )

            # Run all child workflows in parallel and wait for their results.
            results = await asyncio.gather(*pending)

            for child_number, result in enumerate(results, start=1):
                log.info(f"ChildWorkflow {child_number} completed", result=result)

            # Final step: ask the LLM to pick the best jokes out of all results.
            generated_text = await workflow.step(
                function=llm_generate,
                function_input=GenerateInput(
                    prompt=f"Give me the top 3 unique jokes according to the results. {results}"
                ),
                task_queue="llm",
                start_to_close_timeout=timedelta(minutes=2),
            )
        except Exception as e:
            log.error(f"ExampleWorkflow failed {e}")
            raise NonRetryableError(message=f"ExampleWorkflow failed {e}") from e
        else:
            return {"top_jokes": generated_text, "results": results}
sniffio==1.3.1 73 | # via 74 | # anyio 75 | # openai 76 | temporalio==1.10.0 77 | # via restack-ai 78 | tqdm==4.67.1 79 | # via openai 80 | types-protobuf==5.29.1.20241207 81 | # via temporalio 82 | typing-extensions==4.12.2 83 | # via 84 | # anyio 85 | # openai 86 | # pydantic 87 | # pydantic-core 88 | # restack-ai 89 | # temporalio 90 | watchfiles==1.0.4 91 | # via audio-transcript (pyproject.toml) 92 | websockets==14.2 93 | # via restack-ai 94 | yarl==1.18.3 95 | # via aiohttp 96 | -------------------------------------------------------------------------------- /agent_apis/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile pyproject.toml -o requirements.txt 3 | aiohappyeyeballs==2.4.4 4 | # via aiohttp 5 | aiohttp==3.11.12 6 | # via 7 | # openai-greet (pyproject.toml) 8 | # restack-ai 9 | aiosignal==1.3.2 10 | # via aiohttp 11 | annotated-types==0.7.0 12 | # via pydantic 13 | anyio==4.8.0 14 | # via 15 | # httpx 16 | # openai 17 | # watchfiles 18 | asyncio==3.4.3 19 | # via restack-ai 20 | attrs==25.1.0 21 | # via aiohttp 22 | certifi==2025.1.31 23 | # via 24 | # httpcore 25 | # httpx 26 | colorama==0.4.6 27 | # via restack-ai 28 | distro==1.9.0 29 | # via openai 30 | frozenlist==1.5.0 31 | # via 32 | # aiohttp 33 | # aiosignal 34 | h11==0.14.0 35 | # via httpcore 36 | httpcore==1.0.7 37 | # via httpx 38 | httpx==0.28.1 39 | # via openai 40 | idna==3.10 41 | # via 42 | # anyio 43 | # httpx 44 | # yarl 45 | jiter==0.8.2 46 | # via openai 47 | msgspec==0.18.6 48 | # via restack-ai 49 | multidict==6.1.0 50 | # via 51 | # aiohttp 52 | # yarl 53 | openai==1.61.0 54 | # via openai-greet (pyproject.toml) 55 | propcache==0.2.1 56 | # via 57 | # aiohttp 58 | # yarl 59 | protobuf==5.29.3 60 | # via temporalio 61 | pydantic==2.10.6 62 | # via 63 | # openai-greet (pyproject.toml) 64 | # openai 65 | # restack-ai 66 | pydantic-core==2.27.2 67 
"""Pipeline.

This module provides functions to create and configure LiveKit pipeline.
"""

from livekit.agents import JobContext
from livekit.agents.pipeline import VoicePipelineAgent
from livekit.plugins import (
    deepgram,
    elevenlabs,
    openai,
    turn_detector,
)
from src.utils import logger


def create_livekit_pipeline(
    ctx: JobContext, agent_id: str, agent_url: str
) -> VoicePipelineAgent:
    """Create and configure a VoicePipelineAgent for the given agent.

    Args:
        ctx (JobContext): The job context containing user data and configuration.
        agent_id (str): The identifier for the agent.
        agent_url (str): The URL for the agent backend.

    Returns:
        VoicePipelineAgent: A configured agent instance.

    Raises:
        Exception: re-raised after logging if construction fails.
    """
    try:
        logger.info(
            "Creating VoicePipelineAgent with agent_id: %s and agent_url: %s",
            agent_id,
            agent_url,
        )

        # Speech-to-text front end.
        speech_to_text = deepgram.STT(model="nova-3-general")

        # The agent backend is reached through an OpenAI-compatible endpoint;
        # the api_key doubles as a routing token for this agent.
        language_model = openai.LLM(
            api_key=f"{agent_id}-livekit",
            base_url=agent_url,
        )

        # Text-to-speech voice configuration.
        voice = elevenlabs.tts.Voice(
            id="UgBBYS2sOqTuMpoF3BR0",
            name="Mark",
            category="premade",
            settings=elevenlabs.tts.VoiceSettings(
                stability=0,
                similarity_boost=0,
                style=0,
                speed=1.01,
                use_speaker_boost=False,
            ),
        )

        return VoicePipelineAgent(
            vad=ctx.proc.userdata["vad"],
            stt=speech_to_text,
            llm=language_model,
            tts=elevenlabs.TTS(voice=voice),
            turn_detector=turn_detector.EOUModel(),
        )
    except Exception as e:
        logger.exception("Error creating VoicePipelineAgent: %s", e)
        raise
import os
from typing import Literal

from dotenv import load_dotenv
from openai import OpenAI
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.chat.chat_completion_message_tool_call import (
    ChatCompletionMessageToolCall,
)
from openai.types.chat.chat_completion_tool_param import (
    ChatCompletionToolParam,
)
from pydantic import BaseModel
from restack_ai.function import NonRetryableError, function, log

load_dotenv()


class Message(BaseModel):
    """A single chat message in OpenAI chat-completion format."""

    role: Literal["system", "user", "assistant", "tool"]
    content: str
    tool_call_id: str | None = None
    tool_calls: list[ChatCompletionMessageToolCall] | None = None


class LlmChatInput(BaseModel):
    """Input for llm_chat: optional system prompt, model name, history, and tools."""

    system_content: str | None = None
    model: str | None = None
    messages: list[Message] | None = None
    tools: list[ChatCompletionToolParam] | None = None


def raise_exception(message: str) -> None:
    """Log *message* and raise it as a NonRetryableError."""
    log.error(message)
    raise NonRetryableError(message)


@function.defn()
async def llm_chat(function_input: LlmChatInput) -> ChatCompletion:
    """Call the OpenAI chat-completions API with the given history and tools.

    Returns the completion serialized via ``model_dump()`` so it survives
    workflow payload serialization.

    Raises:
        NonRetryableError: if the API key is missing or the API call fails.
    """
    try:
        log.info("llm_chat function started", function_input=function_input)

        if os.environ.get("OPENAI_API_KEY") is None:
            raise_exception("OPENAI_API_KEY is not set")

        client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

        log.info("pydantic_function_tool", tools=function_input.tools)

        # Bug fix: messages is Optional; appending the system message to None
        # crashed with AttributeError. Start from an empty history instead.
        if function_input.messages is None:
            function_input.messages = []

        if function_input.system_content:
            function_input.messages.append(
                Message(role="system", content=function_input.system_content or "")
            )

        result = client.chat.completions.create(
            model=function_input.model or "gpt-4.1-mini",
            messages=function_input.messages,
            tools=function_input.tools,
        )
    except Exception as e:
        error_message = f"LLM chat failed: {e}"
        raise NonRetryableError(error_message) from e
    else:
        # try/except/else structure matches the agent_todo llm_chat sibling.
        log.info("llm_chat function completed", result=result)
        return result.model_dump()
"""AES-GCM payload codec used to encrypt workflow payloads end to end."""

import os
from typing import Iterable, List

from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from restack_ai.security import Payload, PayloadCodec

# Demo key material only: 32 bytes selects AES-256-GCM. Replace both values
# (and load them from secure storage) in production.
default_key = b"test-key-test-key-test-key-test!"
default_key_id = "test-key-id"


class EncryptionCodec(PayloadCodec):
    """Encrypts every payload with AES-GCM and tags it with the key id used."""

    def __init__(self, key_id: str = default_key_id, key: bytes = default_key) -> None:
        super().__init__()
        self.key_id = key_id
        # We are using direct AESGCM to be compatible with samples from
        # TypeScript and Go. Pure Python samples may prefer the higher-level,
        # safer APIs.
        self.encryptor = AESGCM(key)

    async def encode(self, payloads: Iterable[Payload]) -> List[Payload]:
        """Encrypt all payloads, recording the encoding and key id in metadata."""
        # We blindly encode all payloads with the key and set the metadata
        # saying which key we used
        return [
            Payload(
                metadata={
                    "encoding": b"binary/encrypted",
                    "encryption-key-id": self.key_id.encode(),
                },
                data=self.encrypt(p.SerializeToString()),
            )
            for p in payloads
        ]

    async def decode(self, payloads: Iterable[Payload]) -> List[Payload]:
        """Decrypt payloads this codec encrypted; pass other encodings through.

        Raises:
            ValueError: if a payload was encrypted under a different key id,
                since decrypting with the wrong key would fail anyway.
        """
        ret: List[Payload] = []
        for p in payloads:
            # Ignore ones w/out our expected encoding
            if p.metadata.get("encoding", b"").decode() != "binary/encrypted":
                ret.append(p)
                continue
            # Confirm our key ID is the same
            key_id = p.metadata.get("encryption-key-id", b"").decode()
            if key_id != self.key_id:
                raise ValueError(
                    f"Unrecognized key ID {key_id}. Current key ID is {self.key_id}."
                )
            # Decrypt and append
            ret.append(Payload.FromString(self.decrypt(p.data)))
        return ret

    def encrypt(self, data: bytes) -> bytes:
        # A fresh 12-byte nonce is prepended to the ciphertext so decrypt()
        # can recover it; GCM nonces must never repeat under the same key.
        nonce = os.urandom(12)
        return nonce + self.encryptor.encrypt(nonce, data, None)

    def decrypt(self, data: bytes) -> bytes:
        # The first 12 bytes are the nonce written by encrypt(); the rest is
        # ciphertext + GCM tag.
        return self.encryptor.decrypt(data[:12], data[12:], None)
OpenAI(api_key=os.environ.get("OPENAI_API_KEY")) 47 | 48 | log.info("pydantic_function_tool", tools=function_input.tools) 49 | 50 | if function_input.system_content: 51 | function_input.messages.append( 52 | Message(role="system", content=function_input.system_content or "") 53 | ) 54 | 55 | response = client.chat.completions.create( 56 | model=function_input.model or "gpt-4.1-mini", 57 | messages=function_input.messages, 58 | tools=function_input.tools, 59 | ) 60 | except Exception as e: 61 | error_message = f"LLM chat failed: {e}" 62 | raise NonRetryableError(error_message) from e 63 | else: 64 | log.info("llm_chat function completed", response=response) 65 | return response.model_dump() 66 | -------------------------------------------------------------------------------- /agent_voice/livekit/agent/src/agents/agent.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | 3 | from pydantic import BaseModel, Field 4 | from restack_ai.agent import NonRetryableError, agent, import_functions, log 5 | 6 | with import_functions(): 7 | from src.functions.livekit_dispatch import LivekitDispatchInput, livekit_dispatch 8 | from src.functions.llm_chat import LlmChatInput, Message, llm_chat 9 | 10 | 11 | class MessagesEvent(BaseModel): 12 | messages: list[Message] 13 | 14 | 15 | class EndEvent(BaseModel): 16 | end: bool 17 | 18 | 19 | class AgentVoiceInput(BaseModel): 20 | room_id: str | None = Field(default="room-1") 21 | 22 | @agent.defn() 23 | class AgentVoice: 24 | def __init__(self) -> None: 25 | self.end = False 26 | self.messages: list[Message] = [] 27 | 28 | @agent.event 29 | async def messages(self, messages_event: MessagesEvent) -> list[Message]: 30 | log.info(f"Received message: {messages_event.messages}") 31 | self.messages.extend(messages_event.messages) 32 | try: 33 | assistant_message = await agent.step( 34 | function=llm_chat, 35 | function_input=LlmChatInput(messages=self.messages), 36 | 
from datetime import timedelta

from pydantic import BaseModel
from restack_ai.agent import NonRetryableError, agent, import_functions, log

with import_functions():
    from src.functions.context_docs import context_docs
    from src.functions.llm_chat import LlmChatInput, Message, llm_chat


class MessagesEvent(BaseModel):
    """Payload for the ``messages`` event: the newly received chat messages."""

    messages: list[Message]


class EndEvent(BaseModel):
    """Payload for the ``end`` event: signals the agent to shut down."""

    end: bool


@agent.defn()
class AgentVideo:
    """Interactive video agent that answers questions grounded in context docs."""

    def __init__(self) -> None:
        # Full conversation history, shared across all events of this run.
        self.messages: list[Message] = []
        # Flipped to True by the ``end`` event; run() blocks on this flag.
        self.end = False

    @agent.event
    async def messages(self, messages_event: MessagesEvent) -> list[Message]:
        """Append incoming messages, ask the LLM, and record its reply.

        Returns the updated conversation history.
        """
        log.info(f"Received message: {messages_event.messages}")
        self.messages.extend(messages_event.messages)

        try:
            reply = await agent.step(
                function=llm_chat,
                function_input=LlmChatInput(messages=self.messages),
                start_to_close_timeout=timedelta(seconds=120),
            )
        except Exception as e:
            raise NonRetryableError(f"llm_chat function failed: {e}") from e

        # The step result is stringified wholesale, matching the sibling agents.
        self.messages.append(Message(role="assistant", content=str(reply)))
        return self.messages

    @agent.event
    async def end(self, end: EndEvent) -> EndEvent:
        """Flag the run loop to finish and echo the event back."""
        log.info("Received end")
        self.end = True
        return end

    @agent.run
    async def run(self) -> None:
        """Fetch the docs, seed the system prompt, then wait for the end signal."""
        try:
            docs = await agent.step(function=context_docs)
        except Exception as e:
            raise NonRetryableError(f"context_docs function failed: {e}") from e

        # NOTE: the embedded indentation is part of the prompt text on purpose —
        # it is reproduced exactly as the original literal.
        system_prompt = f"""
            You are an interactive video assistant, your answers will be used in text to speech so try to keep answers short and concise so that interaction is seamless.
            You can answer questions about the following documentation:
            {docs}
            """
        self.messages.append(Message(role="system", content=system_prompt))
        await agent.condition(lambda: self.end)