├── .chainlit
│   └── config.toml
├── .env.example
├── .gitignore
├── LICENSE
├── README.md
├── app.py
├── chainlit.md
├── public
│   └── logo.png
├── render.yaml
└── requirements.txt

--------------------------------------------------------------------------------
/.chainlit/config.toml:
--------------------------------------------------------------------------------
[project]
# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = true


# List of environment variables to be provided by each user to use the app.
user_env = []

# Duration (in seconds) during which the session is saved when the connection is lost
session_timeout = 3600

# Enable third-party caching (e.g. LangChain cache)
cache = false

# Authorized origins
allow_origins = ["*"]

# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
# follow_symlink = false

[features]
# Show the prompt playground
prompt_playground = true

# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
unsafe_allow_html = false

# Process and display mathematical expressions. This can clash with "$" characters in messages.
latex = false

# Automatically tag threads with the current chat profile (if a chat profile is used)
auto_tag_thread = true

# Allow users to spontaneously upload files with messages
[features.spontaneous_file_upload]
enabled = true
accept = ["*/*"]
max_files = 20
max_size_mb = 500

[features.audio]
# Decibel threshold for audio recording
min_decibels = -45
# Delay (in ms) for the user to start speaking
initial_silence_timeout = 3000
# Delay (in ms) for the user to continue speaking. If the user stops speaking for this duration, the recording will stop.
silence_timeout = 1500
# Above this duration (in ms), the recording will forcefully stop.
max_duration = 15000
# Duration of the audio chunks in ms
chunk_duration = 1000
# Sample rate of the audio
sample_rate = 44100

[UI]
# Name of the app and chatbot.
name = "Chatbot"

# Show the readme while the thread is empty.
show_readme_as_default = false

# Description of the app and chatbot. This is used for HTML tags.
# description = ""

# Large content is collapsed by default for a cleaner UI
default_collapse_content = true

# The default value for the expand messages setting.
default_expand_messages = false

# Hide the chain of thought details from the user in the UI.
hide_cot = false

# Link to your GitHub repo. This will add a GitHub button in the UI's header.
# github = ""

# Specify a CSS file that can be used to customize the user interface.
# The CSS file can be served from the public directory or via an external link.
# custom_css = "/public/test.css"

# Specify a JavaScript file that can be used to customize the user interface.
# The JavaScript file can be served from the public directory.
# custom_js = "/public/test.js"

# Specify a custom font url.
# custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"

# Specify a custom build directory for the frontend.
# This can be used to customize the frontend code.
# Be careful: if this is a relative path, it should not start with a slash.
# custom_build = "./public/build"

[UI.theme]
#layout = "wide"
#font_family = "Inter, sans-serif"
# Override default MUI light theme. (Check theme.ts)
[UI.theme.light]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.light.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"

# Override default MUI dark theme. (Check theme.ts)
[UI.theme.dark]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.dark.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"


[meta]
generated_by = "1.1.0rc1"

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
OPENAI_API_KEY=
OPENAI_ASSISTANT_ID=
LITERAL_API_KEY=

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.env
translations
.files
__pycache__

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 chainlit

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# OpenAI Assistant

You can deploy your OpenAI assistant with Chainlit using this template.
![openai-assistant](https://github.com/Chainlit/openai-assistant/assets/13104895/5c095a89-e426-417e-977d-772c4d4974c2)

### Supported Assistant Features

| Streaming | Files | Code Interpreter | File Search | Voice |
| --------- | ----- | ---------------- | ----------- | ----- |
| ✅        | ✅    | ✅               | ✅          | ✅    |

### Get an OpenAI API key

Go to OpenAI's [API keys page](https://platform.openai.com/api-keys) and create an API key if you don't already have one.

### Create an Assistant

Go to OpenAI's [assistants page](https://platform.openai.com/assistants) and click `Create` at the top right.

Configure your assistant.
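
A quick way to sanity-check both values before deploying is a short script. This is a minimal sketch, assuming `OPENAI_API_KEY` and `OPENAI_ASSISTANT_ID` are set in your environment (e.g. copied from `.env.example` into a filled-in `.env`):

```python
import os

from openai import OpenAI

# Retrieve the assistant to confirm the API key and assistant ID are valid.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
assistant = client.beta.assistants.retrieve(os.environ["OPENAI_ASSISTANT_ID"])
print(f"Found assistant: {assistant.name} ({assistant.model})")
```

If this prints your assistant's name, `app.py` will be able to retrieve it the same way at startup.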

### [Optional] Get a Literal AI API key

> [!NOTE]
> Literal AI is an all-in-one observability, evaluation, and analytics platform for building LLM apps.

Go to [Literal AI](https://cloud.getliteral.ai/), create a project, and go to Settings to get your API key.

### Deploy

Click the button below, set the API keys in the form, and click `Apply`.

[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy)

--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import os
from io import BytesIO
from pathlib import Path
from typing import List, Optional

from openai import AsyncAssistantEventHandler, AsyncOpenAI, OpenAI

from literalai.helper import utc_now

import chainlit as cl
from chainlit.config import config
from chainlit.element import Element


async_openai_client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
sync_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

assistant = sync_openai_client.beta.assistants.retrieve(
    os.environ.get("OPENAI_ASSISTANT_ID")
)

config.ui.name = assistant.name


class EventHandler(AsyncAssistantEventHandler):

    def __init__(self, assistant_name: str) -> None:
        super().__init__()
        self.current_message: Optional[cl.Message] = None
        self.current_step: Optional[cl.Step] = None
        self.current_tool_call = None
        self.assistant_name = assistant_name

    async def on_text_created(self, text) -> None:
        self.current_message = await cl.Message(author=self.assistant_name, content="").send()

    async def on_text_delta(self, delta, snapshot):
        await self.current_message.stream_token(delta.value)

    async def on_text_done(self, text):
        await self.current_message.update()

    async def on_tool_call_created(self, tool_call):
        self.current_tool_call = tool_call.id
        self.current_step = cl.Step(name=tool_call.type, type="tool")
        self.current_step.language = "python"
        # Record the start time so error steps can reference it later.
        self.current_step.start = utc_now()
        await self.current_step.send()

    async def on_tool_call_delta(self, delta, snapshot):
        if snapshot.id != self.current_tool_call:
            self.current_tool_call = snapshot.id
            self.current_step = cl.Step(name=delta.type, type="tool")
            self.current_step.language = "python"
            self.current_step.start = utc_now()
            await self.current_step.send()

        if delta.type == "code_interpreter":
            if delta.code_interpreter.outputs:
                for output in delta.code_interpreter.outputs:
                    if output.type == "logs":
                        error_step = cl.Step(
                            name=delta.type,
                            type="tool"
                        )
                        error_step.is_error = True
                        error_step.output = output.logs
                        error_step.language = "markdown"
                        error_step.start = self.current_step.start
                        error_step.end = utc_now()
                        await error_step.send()
            else:
                if delta.code_interpreter.input:
                    await self.current_step.stream_token(delta.code_interpreter.input)

    async def on_tool_call_done(self, tool_call):
        self.current_step.end = utc_now()
        await self.current_step.update()

    async def on_image_file_done(self, image_file):
        image_id = image_file.file_id
        response = await async_openai_client.files.with_raw_response.content(image_id)
        image_element = cl.Image(
            name=image_id,
            content=response.content,
            display="inline",
            size="large"
        )
        if not self.current_message.elements:
            self.current_message.elements = []
        self.current_message.elements.append(image_element)
        await self.current_message.update()
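
# In short, EventHandler maps the Assistants streaming events onto Chainlit
# primitives: text deltas stream into a cl.Message, tool calls render as
# cl.Step objects, code interpreter log outputs surface as error steps, and
# generated images are attached to the current message as inline elements.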


@cl.step(type="tool")
async def speech_to_text(audio_file):
    # Transcribe the recorded audio with OpenAI's Whisper model.
    response = await async_openai_client.audio.transcriptions.create(
        model="whisper-1", file=audio_file
    )

    return response.text


async def upload_files(files: List[Element]):
    # Upload each attached file to OpenAI and collect the resulting file IDs.
    file_ids = []
    for file in files:
        uploaded_file = await async_openai_client.files.create(
            file=Path(file.path), purpose="assistants"
        )
        file_ids.append(uploaded_file.id)
    return file_ids


async def process_files(files: List[Element]):
    # Upload files if any and get file_ids
    file_ids = []
    if len(files) > 0:
        file_ids = await upload_files(files)

    return [
        {
            "file_id": file_id,
            "tools": [{"type": "code_interpreter"}, {"type": "file_search"}],
        }
        for file_id in file_ids
    ]


@cl.on_chat_start
async def start_chat():
    # Create a Thread
    thread = await async_openai_client.beta.threads.create()
    # Store thread ID in user session for later use
    cl.user_session.set("thread_id", thread.id)
    await cl.Avatar(name=assistant.name, path="./public/logo.png").send()
    await cl.Message(content=f"Hello, I'm {assistant.name}!", disable_feedback=True).send()


@cl.on_message
async def main(message: cl.Message):
    thread_id = cl.user_session.get("thread_id")

    attachments = await process_files(message.elements)

    # Add a Message to the Thread
    await async_openai_client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=message.content,
        attachments=attachments,
    )

    # Create and Stream a Run
    async with async_openai_client.beta.threads.runs.stream(
        thread_id=thread_id,
        assistant_id=assistant.id,
        event_handler=EventHandler(assistant_name=assistant.name),
    ) as stream:
        await stream.until_done()
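
# The handlers below add voice input: while the user speaks, audio chunks are
# buffered in the user session; when the recording ends, the full buffer is
# transcribed with Whisper and routed through main() like a normal text message.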


@cl.on_audio_chunk
async def on_audio_chunk(chunk: cl.AudioChunk):
    if chunk.isStart:
        buffer = BytesIO()
        # This is required for Whisper to recognize the file type
        buffer.name = f"input_audio.{chunk.mimeType.split('/')[1]}"
        # Initialize the session for a new audio stream
        cl.user_session.set("audio_buffer", buffer)
        cl.user_session.set("audio_mime_type", chunk.mimeType)

    # Write the chunks to a buffer and transcribe the whole audio at the end
    cl.user_session.get("audio_buffer").write(chunk.data)


@cl.on_audio_end
async def on_audio_end(elements: list[Element]):
    # Get the audio buffer from the session
    audio_buffer: BytesIO = cl.user_session.get("audio_buffer")
    audio_buffer.seek(0)  # Move the file pointer to the beginning
    audio_file = audio_buffer.read()
    audio_mime_type: str = cl.user_session.get("audio_mime_type")

    # Echo the recording back to the user as an inline audio element.
    input_audio_el = cl.Audio(
        mime=audio_mime_type, content=audio_file, name=audio_buffer.name
    )
    await cl.Message(
        author="You",
        type="user_message",
        content="",
        elements=[input_audio_el, *elements],
    ).send()

    # Whisper accepts a (filename, bytes, mime type) tuple for in-memory files.
    whisper_input = (audio_buffer.name, audio_file, audio_mime_type)
    transcription = await speech_to_text(whisper_input)

    msg = cl.Message(author="You", content=transcription, elements=elements)

    await main(message=msg)

--------------------------------------------------------------------------------
/chainlit.md:
--------------------------------------------------------------------------------
# Welcome to Chainlit! 🚀🤖

Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug, and share applications built on top of LLMs.

## Useful Links 🔗

- **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
- **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬

We can't wait to see what you create with Chainlit! Happy coding! 💻😊

## Welcome screen

To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.

--------------------------------------------------------------------------------
/public/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Chainlit/openai-assistant/7590d2fe737a28cc07f3f3da9b7eb93a199ee6c9/public/logo.png

--------------------------------------------------------------------------------
/render.yaml:
--------------------------------------------------------------------------------
# Exported from Render on 2024-05-20T20:56:35Z
services:
  - type: web
    name: Chainlit-OpenAI-Assistant
    runtime: python
    repo: https://github.com/Chainlit/openai-assistant
    plan: starter
    envVars:
      - key: OPENAI_API_KEY
        sync: false
      - key: OPENAI_ASSISTANT_ID
        sync: false
      - key: LITERAL_API_KEY
        sync: false
    region: frankfurt
    buildCommand: pip install -r requirements.txt
    startCommand: chainlit run app.py -h --port $PORT
version: "1"

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
openai
chainlit
literalai
--------------------------------------------------------------------------------