├── .gitignore
├── README.md
├── poetry.lock
├── pyproject.toml
├── scripts
│   ├── 1_prompt_demo.ipynb
│   ├── 2_backend_choice.ipynb
│   ├── 3_chat_prompting.ipynb
│   ├── 4_structured_output.ipynb
│   ├── 5_function_calling.ipynb
│   ├── 6_async_support.ipynb
│   └── 7_streaming_and_object_streaming.ipynb
└── thumb.png
/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | .env 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | cover/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # poetry 99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 100 | # This is especially recommended for binary packages to ensure reproducibility, and is more 101 | # commonly ignored for libraries. 102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 103 | #poetry.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 110 | # https://pdm.fming.dev/#use-with-ide 111 | .pdm.toml 112 | 113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 114 | __pypackages__/ 115 | 116 | # Celery stuff 117 | celerybeat-schedule 118 | celerybeat.pid 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | .dmypy.json 145 | dmypy.json 146 | 147 | # Pyre type checker 148 | .pyre/ 149 | 150 | # pytype static type analyzer 151 | .pytype/ 152 | 153 | # Cython debug symbols 154 | cython_debug/ 155 | 156 | # PyCharm 157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 159 | # and can be added to the global gitignore or merged into this file. For a more nuclear 160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 161 | #.idea/ 162 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Magentic + LiteLLM Tutorial 2 | 3 | This tutorial is available on my YouTube channel: 4 | 5 | [![Tutorial video thumbnail](thumb.png)](https://www.youtube.com/watch?v=VSfehUJUWQY) 6 | [YouTube Video](https://www.youtube.com/watch?v=VSfehUJUWQY). 7 | 8 | ## Overview 9 | 10 | This repository hosts a tutorial to help Python developers understand and leverage the Magentic API along with LiteLLM for integrating state-of-the-art language models into their projects. The tutorial is designed to be user-friendly and comprehensive, guiding users through the essentials of setting up, choosing backends, creating prompts, and streamlining interactions with large language models (LLMs). 11 | 12 | ## Features 13 | 14 | - **Simplified Integration**: Learn how to seamlessly add Magentic and LiteLLM capabilities to Python applications. 15 | - **Backend Selection**: Tutorials on choosing and switching between various LLM backends. 16 | - **Advanced Prompt Handling**: Including structured prompts, chat prompts, and the use of decorators for clean and maintainable code. 17 | - **Function Invocation**: Empower your LLM to perform function calls for extended functionality. 18 | - **Asynchronous Operations**: Discover how to utilize async support for better performance and responsiveness. 19 | - **Progressive Results**: Implement streaming and object streaming to manage large outputs and provide real-time feedback. 20 | 21 | ## Getting Started 22 | 23 | To start using the tutorial, clone this repository and install the required dependencies using Poetry: 24 | 25 | ``` 26 | git clone repo 27 | cd repo 28 | poetry install 29 | ```
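The notebooks load API credentials with `python-dotenv`, so before running them create a `.env` file at the repository root (it is already covered by `.gitignore`). A minimal sketch, assuming you want to call both OpenAI and Anthropic models through LiteLLM:

```
OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...
```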
30 | 31 | ## Prerequisites 32 | 33 | Ensure you have the following installed on your system: 34 | 35 | - Python 3.12 or later 36 | - Poetry for dependency management 37 | 38 | ## Tutorial Structure 39 | 40 | The tutorial is divided into executable Jupyter notebooks, each focusing on different aspects of using Magentic with LiteLLM: 41 | 42 | 1. **1_prompt_demo.ipynb**: Demonstrates the basic prompt functionality. 43 | 2. **2_backend_choice.ipynb**: Guides on how to select and work with different LLM backends. 44 | 3. 
**3_chat_prompting.ipynb**: Focuses on chat prompting for dialog-based applications. 45 | 4. **4_structured_output.ipynb**: Introduces structured outputs using Pydantic for more precise data handling. 46 | 5. **5_function_calling.ipynb**: Shows how to make LLMs call external functions for extended use cases. 47 | 6. **6_async_support.ipynb**: Provides examples of how to use asynchronous programming to improve efficiency. 48 | 7. **7_streaming_and_object_streaming.ipynb**: Details on managing streaming responses for better user experiences. 49 | Navigate through each notebook in order to gain a comprehensive understanding of the material covered. 50 | 51 | ## Support and Contribution 52 | 53 | For questions, support, or to contribute to this tutorial, please open an issue or pull request on the GitHub repository. We welcome contributions that help enhance and clarify the content. 54 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "litellm-magentic" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["jimzer "] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.12" 10 | magentic = { extras = ["litellm"], version = "^0.21.0" } 11 | python-dotenv = "^1.0.1" 12 | ipykernel = "^6.29.4" 13 | pydantic = "^2.7.1" 14 | 15 | [tool.poetry.group.dev.dependencies] 16 | mypy = "^1.10.0" 17 | ruff = "^0.4.2" 18 | 19 | [tool.mypy] 20 | disable_error_code = ["empty-body"] 21 | 22 | [build-system] 23 | requires = ["poetry-core"] 24 | build-backend = "poetry.core.masonry.api" 25 | -------------------------------------------------------------------------------- /scripts/1_prompt_demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Magentic demo: `@prompt`" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "Filter out some Pydantic warnings" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 8, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import warnings\n", 24 | "warnings.filterwarnings(\"ignore\")" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "Necessary imports" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 9, 37 | "metadata": {}, 38 | "outputs": [ 39 | { 40 | "data": { 41 | "text/plain": [ 42 | "True" 43 | ] 44 | }, 45 | "execution_count": 9, 46 | "metadata": {}, 47 | "output_type": "execute_result" 48 | } 49 | ], 50 | "source": [ 51 | "from dotenv import load_dotenv\n", 52 | "from magentic import prompt\n", 53 | "from magentic.chat_model.litellm_chat_model import LitellmChatModel\n", 54 | "\n", 55 | "load_dotenv()" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": {}, 61 | "source": [ 62 | "# Simple text completion with variables" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 10, 68 | "metadata": {}, 69 | "outputs": [ 70 | { 71 | "data": { 72 | "text/plain": [ 73 | "\"Hello Claude, it's nice to meet you! 
I'm an AI assistant.\"" 74 | ] 75 | }, 76 | "execution_count": 10, 77 | "metadata": {}, 78 | "output_type": "execute_result" 79 | } 80 | ], 81 | "source": [ 82 | "@prompt(\"Say hello to {name}\", model=LitellmChatModel(\"claude-3-opus-20240229\"))\n", 83 | "def hello(name: str) -> str: ...\n", 84 | "\n", 85 | "hello(\"Claude\")" 86 | ] 87 | } 88 | ], 89 | "metadata": { 90 | "kernelspec": { 91 | "display_name": "prometeeos-api-5W7JRlnu-py3.12", 92 | "language": "python", 93 | "name": "python3" 94 | }, 95 | "language_info": { 96 | "codemirror_mode": { 97 | "name": "ipython", 98 | "version": 3 99 | }, 100 | "file_extension": ".py", 101 | "mimetype": "text/x-python", 102 | "name": "python", 103 | "nbconvert_exporter": "python", 104 | "pygments_lexer": "ipython3", 105 | "version": "3.12.2" 106 | } 107 | }, 108 | "nbformat": 4, 109 | "nbformat_minor": 2 110 | } 111 | -------------------------------------------------------------------------------- /scripts/2_backend_choice.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Choosing Your Backend\n", 8 | "\n", 9 | "Magentic is compatible with many backends, including OpenAI, Anthropic, LiteLLM, ...\n", 10 | "\n", 11 | "In this tutorial, I will show you how to choose your backend. \n", 12 | "\n", 13 | "Specifically, I will demonstrate how to use LiteLLM, as I believe it's the most powerful one. It gives you access to 100+ LLMs with a unified API." 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "Filter out some Pydantic warnings" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 1, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import warnings\n", 30 | "warnings.filterwarnings(\"ignore\")" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "Necessary imports" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 1, 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stderr", 47 | "output_type": "stream", 48 | "text": [ 49 | "/Users/jimzer/Library/Caches/pypoetry/virtualenvs/prometeeos-api-5W7JRlnu-py3.12/lib/python3.12/site-packages/pydantic/_internal/_fields.py:160: UserWarning: Field \"model_name\" has conflict with protected namespace \"model_\".\n", 50 | "\n", 51 | "You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n", 52 | " warnings.warn(\n", 53 | "/Users/jimzer/Library/Caches/pypoetry/virtualenvs/prometeeos-api-5W7JRlnu-py3.12/lib/python3.12/site-packages/pydantic/_internal/_fields.py:160: UserWarning: Field \"model_info\" has conflict with protected namespace \"model_\".\n", 54 | "\n", 55 | "You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n", 56 | " warnings.warn(\n" 57 | ] 58 | }, 59 | { 60 | "data": { 61 | "text/plain": [ 62 | "True" 63 | ] 64 | }, 65 | "execution_count": 1, 66 | "metadata": {}, 67 | "output_type": "execute_result" 68 | } 69 | ], 70 | "source": [ 71 | "from dotenv import load_dotenv\n", 72 | "from magentic import prompt\n", 73 | "from magentic.chat_model.litellm_chat_model import LitellmChatModel\n", 74 | "\n", 75 | "load_dotenv()" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "metadata": {}, 81 | "source": [ 82 | "# Using the model parameter" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": 2, 88 | "metadata": 
{}, 89 | "outputs": [ 90 | { 91 | "data": { 92 | "text/plain": [ 93 | "\"Hello Claude, it's nice to meet you! I'm Claude, an AI assistant.\"" 94 | ] 95 | }, 96 | "execution_count": 2, 97 | "metadata": {}, 98 | "output_type": "execute_result" 99 | } 100 | ], 101 | "source": [ 102 | "@prompt(\"Say hello to {name}\", model=LitellmChatModel(\"claude-3-opus-20240229\"))\n", 103 | "def hello_anthropic(name: str) -> str: ...\n", 104 | "\n", 105 | "\n", 106 | "hello_anthropic(\"Claude\")" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 4, 112 | "metadata": {}, 113 | "outputs": [ 114 | { 115 | "data": { 116 | "text/plain": [ 117 | "'Hello GPT! How are you today?'" 118 | ] 119 | }, 120 | "execution_count": 4, 121 | "metadata": {}, 122 | "output_type": "execute_result" 123 | } 124 | ], 125 | "source": [ 126 | "@prompt(\"Say hello to {name}\", model=LitellmChatModel(\"gpt-3.5-turbo\"))\n", 127 | "def hello_openai(name: str) -> str: ...\n", 128 | "\n", 129 | "res = hello_openai(\"GPT\")\n", 130 | "res" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "# Using environment variables" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 5, 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [ 146 | "import os\n", 147 | "\n", 148 | "@prompt(\"Say hello to {name}\")\n", 149 | "def hello_env(name: str) -> str: ..." 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": 6, 155 | "metadata": {}, 156 | "outputs": [ 157 | { 158 | "data": { 159 | "text/plain": [ 160 | "'Hello, GPT! How can I assist you today?'" 161 | ] 162 | }, 163 | "execution_count": 6, 164 | "metadata": {}, 165 | "output_type": "execute_result" 166 | } 167 | ], 168 | "source": [ 169 | "os.environ[\"MAGENTIC_BACKEND\"] = \"litellm\"\n", 170 | "os.environ[\"MAGENTIC_LITELLM_MODEL\"] = \"gpt-3.5-turbo\"\n", 171 | "hello_env(\"GPT\")\n" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 7, 177 | "metadata": {}, 178 | "outputs": [ 179 | { 180 | "data": { 181 | "text/plain": [ 182 | "\"Hello Claude, it's nice to meet you! I'm an AI assistant. 
How are you doing today?\"" 183 | ] 184 | }, 185 | "execution_count": 7, 186 | "metadata": {}, 187 | "output_type": "execute_result" 188 | } 189 | ], 190 | "source": [ 191 | "os.environ[\"MAGENTIC_BACKEND\"] = \"litellm\"\n", 192 | "os.environ[\"MAGENTIC_LITELLM_MODEL\"] = \"claude-3-opus-20240229\"\n", 193 | "hello_env(\"Claude\")" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": null, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [] 202 | } 203 | ], 204 | "metadata": { 205 | "kernelspec": { 206 | "display_name": "prometeeos-api-5W7JRlnu-py3.12", 207 | "language": "python", 208 | "name": "python3" 209 | }, 210 | "language_info": { 211 | "codemirror_mode": { 212 | "name": "ipython", 213 | "version": 3 214 | }, 215 | "file_extension": ".py", 216 | "mimetype": "text/x-python", 217 | "name": "python", 218 | "nbconvert_exporter": "python", 219 | "pygments_lexer": "ipython3", 220 | "version": "3.12.2" 221 | } 222 | }, 223 | "nbformat": 4, 224 | "nbformat_minor": 2 225 | } 226 | -------------------------------------------------------------------------------- /scripts/3_chat_prompting.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Chat Prompting with `@chatprompt`\n", 8 | "\n", 9 | "In the first part, we saw how to do simple text completion using `@prompt`.\n", 10 | "\n", 11 | "Now, I will show you how to do chat completion, with System, User, and Assistant messages." 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "Filter out some Pydantic warnings" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 3, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "import warnings\n", 28 | "warnings.filterwarnings(\"ignore\")" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "Necessary imports" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 4, 41 | "metadata": {}, 42 | "outputs": [ 43 | { 44 | "data": { 45 | "text/plain": [ 46 | "True" 47 | ] 48 | }, 49 | "execution_count": 4, 50 | "metadata": {}, 51 | "output_type": "execute_result" 52 | } 53 | ], 54 | "source": [ 55 | "from dotenv import load_dotenv\n", 56 | "from magentic import chatprompt, AssistantMessage, SystemMessage, UserMessage\n", 57 | "from magentic.chat_model.litellm_chat_model import LitellmChatModel\n", 58 | "\n", 59 | "\n", 60 | "load_dotenv()" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "# Using the model parameter" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 5, 73 | "metadata": {}, 74 | "outputs": [ 75 | { 76 | "data": { 77 | "text/plain": [ 78 | "'\"My old man had a philosophy: peace means having a bigger stick than the other guy.\" - Tony Stark'" 79 | ] 80 | }, 81 | "execution_count": 5, 82 | "metadata": {}, 83 | "output_type": "execute_result" 84 | } 85 | ], 86 | "source": [ 87 | "@chatprompt(\n", 88 | " SystemMessage(\"You are a movie buff.\"),\n", 89 | " UserMessage(\"What is your favorite quote from {movie}?\"),\n", 90 | " model=LitellmChatModel(\"gpt-3.5-turbo\"),\n", 91 | ")\n", 92 | "def get_movie_quote(movie: str) -> str: ...\n", 93 | "\n", 94 | "\n", 95 | "get_movie_quote(\"Iron Man\")" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [] 
104 | } 105 | ], 106 | "metadata": { 107 | "kernelspec": { 108 | "display_name": "prometeeos-api-5W7JRlnu-py3.12", 109 | "language": "python", 110 | "name": "python3" 111 | }, 112 | "language_info": { 113 | "codemirror_mode": { 114 | "name": "ipython", 115 | "version": 3 116 | }, 117 | "file_extension": ".py", 118 | "mimetype": "text/x-python", 119 | "name": "python", 120 | "nbconvert_exporter": "python", 121 | "pygments_lexer": "ipython3", 122 | "version": "3.12.2" 123 | } 124 | }, 125 | "nbformat": 4, 126 | "nbformat_minor": 2 127 | } 128 | -------------------------------------------------------------------------------- /scripts/4_structured_output.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Structured Output\n", 8 | "\n", 9 | "Now I will show you how to leverage **structured output**.\n", 10 | "\n", 11 | "It allows you to get structured data like JSON from the AI model, which is particularly useful when you integrate the results into an application.\n", 12 | "\n", 13 | "With Magentic, we do this using Pydantic models to specify the shape of the data we expect and force the LLM to follow it.\n", 14 | "\n", 15 | "Then we get Pydantic models as output." 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "Filter out some Pydantic warnings" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import warnings\n", 32 | "warnings.filterwarnings(\"ignore\")" 33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "metadata": {}, 38 | "source": [ 39 | "Necessary imports" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "from dotenv import load_dotenv\n", 49 | "from magentic import chatprompt, AssistantMessage, SystemMessage, UserMessage, prompt\n", 50 | "from magentic.chat_model.litellm_chat_model import LitellmChatModel\n", 51 | "from pydantic import BaseModel\n", 52 | "from typing import Literal\n", 53 | "from datetime import datetime\n", 54 | "from pprint import pprint\n", 55 | "\n", 56 | "\n", 57 | "load_dotenv()" 58 | ] 59 | }, 60 | { 61 | "cell_type": "markdown", 62 | "metadata": {}, 63 | "source": [ 64 | "# Create Some Unstructured Data\n", 65 | "\n", 66 | "I have gathered data from 2 reviews on TrustPilot for ChatGPT.\n", 67 | "\n", 68 | "`review_1` is positive while `review_2` is negative." 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "metadata": {}, 75 | "outputs": [], 76 | "source": [ 77 | "review_1 = \"\"\"\n", 78 | "Rated 5 out of 5 stars\n", 79 | "Nov 16, 2023\n", 80 | "ChatGPT is a lovely tool that provides…\n", 81 | "ChatGPT is a lovely tool that provides quite good answers. With the wolfram alpha plugin it does calculations very good also.\n", 82 | "\n", 83 | "Date of experience: November 15, 2023\n", 84 | "\"\"\"\n", 85 | "\n", 86 | "review_2 = \"\"\"\n", 87 | "Rated 1 out of 5 stars\n", 88 | "Sep 14, 2023\n", 89 | "ChatGPT looping itself.\n", 90 | "It is repeating itself. I tell it I want to do something without doing something else, It gives me something that doesn't work. I tell it that, then it gives me something that includes what I don't want. 
It even does it in the same response!\n", 91 | "\"If you have an object that you want to write to a JSON file without it being stringified, you should first convert it to a JSON string using JSON.stringify and then write it to the file.\"\n", 92 | "\n", 93 | "Date of experience: September 14, 2023\n", 94 | "\"\"\"" 95 | ] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "metadata": {}, 100 | "source": [ 101 | "# Define A Pydantic Model For Reviews" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "class Review(BaseModel):\n", 111 | " sentiment: Literal[\"positive\", \"negative\", \"neutral\"]\n", 112 | " grade: int\n", 113 | " summary: str\n", 114 | " date: datetime" 115 | ] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "metadata": {}, 120 | "source": [ 121 | "# Structured Output From `@prompt`" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "@prompt(\"Extract a Review from: {review}\", model=LitellmChatModel(\"gpt-3.5-turbo\"))\n", 131 | "def extract(review: str) -> Review: ...\n", 132 | "\n", 133 | "print(\"Parsing review 1:\")\n", 134 | "res = extract(review_1)\n", 135 | "pprint(res.model_dump())\n", 136 | "\n", 137 | "print()\n", 138 | "\n", 139 | "print(\"Parsing review 2:\")\n", 140 | "res = extract(review_2)\n", 141 | "pprint(res.model_dump())" 142 | ] 143 | }, 144 | { 145 | "cell_type": "markdown", 146 | "metadata": {}, 147 | "source": [ 148 | "# Structured Output From `@chatprompt`" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 7, 154 | "metadata": {}, 155 | "outputs": [ 156 | { 157 | "name": "stdout", 158 | "output_type": "stream", 159 | "text": [ 160 | "Parsing review 1:\n", 161 | "{'date': datetime.datetime(2023, 11, 16, 0, 0),\n", 162 | " 'grade': 5,\n", 163 | " 'sentiment': 'positive',\n", 164 | " 'summary': 'ChatGPT is a lovely tool'}\n", 165 | "\n", 166 | "Parsing review 2:\n", 167 | "{'date': datetime.datetime(2023, 9, 14, 0, 0),\n", 168 | " 'grade': 1,\n", 169 | " 'sentiment': 'negative',\n", 170 | " 'summary': 'ChatGPT repeating and providing incorrect information'}\n" 171 | ] 172 | } 173 | ], 174 | "source": [ 175 | "@chatprompt(\n", 176 | " SystemMessage(\"You are an expert at extracting structured data from reviews.\"),\n", 177 | " UserMessage(\"Extract a Review from this review: {review}?\"),\n", 178 | " model=LitellmChatModel(\"gpt-3.5-turbo\"),\n", 179 | ")\n", 180 | "def extract_chat(review: str) -> Review: ...\n", 181 | "\n", 182 | "\n", 183 | "print(\"Parsing review 1:\")\n", 184 | "res = extract_chat(review_1)\n", 185 | "pprint(res.model_dump())\n", 186 | "\n", 187 | "print()\n", 188 | "\n", 189 | "print(\"Parsing review 2:\")\n", 190 | "res = extract_chat(review_2)\n", 191 | "pprint(res.model_dump())" 192 | ] 193 | },
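{ "cell_type": "markdown", "metadata": {}, "source": [ "Since `extract` and `extract_chat` return real Pydantic models, the usual Pydantic v2 API applies downstream. A minimal sketch of the validation the `Review` schema enforces (pure Pydantic, no LLM call; the payloads below are made up for illustration):\n", "\n", "```python\n", "# A valid payload passes, and the ISO date string is coerced to a datetime.\n", "review = Review.model_validate(\n", "    {\"sentiment\": \"positive\", \"grade\": 5, \"summary\": \"Great tool\", \"date\": \"2023-11-16\"}\n", ")\n", "print(review.model_dump_json())\n", "\n", "# A sentiment outside the Literal raises a pydantic.ValidationError.\n", "Review.model_validate({\"sentiment\": \"meh\", \"grade\": 3, \"summary\": \"...\", \"date\": \"2023-09-14\"})\n", "```" ] },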
221 | "nbformat": 4, 222 | "nbformat_minor": 2 223 | } 224 | -------------------------------------------------------------------------------- /scripts/5_function_calling.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Function Calling\n", 8 | "\n", 9 | "Function calling is a powerful capability that allows LLMs to call external functions.\n", 10 | "\n", 11 | "These functions can do anything, such as call APIs, perform computations, and more.\n", 12 | "\n", 13 | "Magentic makes it really simple to use." 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "Filter out some Pydantic warnings" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 1, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import warnings\n", 30 | "warnings.filterwarnings(\"ignore\")" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "Necessary imports" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "data": { 47 | "text/plain": [ 48 | "True" 49 | ] 50 | }, 51 | "execution_count": 2, 52 | "metadata": {}, 53 | "output_type": "execute_result" 54 | } 55 | ], 56 | "source": [ 57 | "from dotenv import load_dotenv\n", 58 | "from magentic import chatprompt, prompt, prompt_chain, SystemMessage, UserMessage, FunctionCall\n", 59 | "from magentic.chat_model.litellm_chat_model import LitellmChatModel\n", 60 | "from pydantic import BaseModel\n", 61 | "import requests\n", 62 | "\n", 63 | "\n", 64 | "load_dotenv()" 65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [ 71 | "# Define A Function To Get Realtime Weather" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 3, 77 | "metadata": {}, 78 | "outputs": [], 79 | "source": [ 80 | "\n", 81 | "def get_weather(city_name: str) -> str:\n", 82 | " \"\"\"\n", 83 | " Retrieves the weather information for a given city.\n", 84 | "\n", 85 | " Args:\n", 86 | " city_name (str): The name of the city for which to retrieve the weather information.\n", 87 | "\n", 88 | " Returns:\n", 89 | " str: The weather information for the specified city.\n", 90 | " \"\"\"\n", 91 | " base_url = f\"http://wttr.in/{city_name}?format=%C+%t\"\n", 92 | " response = requests.get(base_url)\n", 93 | " return response.text" 94 | ] 95 | }, 96 | { 97 | "cell_type": "markdown", 98 | "metadata": {}, 99 | "source": [ 100 | "# Function Calling With Simple `@prompt`" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 4, 106 | "metadata": {}, 107 | "outputs": [ 108 | { 109 | "name": "stdout", 110 | "output_type": "stream", 111 | "text": [ 112 | "FunctionCall(, 'Lausanne')\n" 113 | ] 114 | }, 115 | { 116 | "data": { 117 | "text/plain": [ 118 | "'Light rain +10°C'" 119 | ] 120 | }, 121 | "execution_count": 4, 122 | "metadata": {}, 123 | "output_type": "execute_result" 124 | } 125 | ], 126 | "source": [ 127 | "@prompt(\n", 128 | " \"Use the appropriate function to answer: {question}\",\n", 129 | " functions=[get_weather],\n", 130 | " model=LitellmChatModel(\"gpt-3.5-turbo\"),\n", 131 | ")\n", 132 | "def answer(question: str) -> FunctionCall[str]: ...\n", 133 | "\n", 134 | "\n", 135 | "output = answer(\"What is the weather in Lausanne?\")\n", 136 | "print(output)\n", 137 | "output()" 138 | ] 139 | }, 140 | { 141 | 
"cell_type": "markdown", 142 | "metadata": {}, 143 | "source": [ 144 | "# Function Calling With `pompt_chain`" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": 5, 150 | "metadata": {}, 151 | "outputs": [ 152 | { 153 | "data": { 154 | "text/plain": [ 155 | "'The weather in Lausanne is light rain with a temperature of +10°C.'" 156 | ] 157 | }, 158 | "execution_count": 5, 159 | "metadata": {}, 160 | "output_type": "execute_result" 161 | } 162 | ], 163 | "source": [ 164 | "@prompt_chain(\n", 165 | " \"Use the appropriate function to answer: {question}\",\n", 166 | " functions=[get_weather],\n", 167 | " model=LitellmChatModel(\"gpt-3.5-turbo\"),\n", 168 | ")\n", 169 | "def answer_chain(question: str) -> str: ...\n", 170 | "\n", 171 | "\n", 172 | "answer_chain(\"What is the weather in Lausanne?\")" 173 | ] 174 | }, 175 | { 176 | "cell_type": "markdown", 177 | "metadata": {}, 178 | "source": [ 179 | "# Function Calling With `@chatprompt`" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": 6, 185 | "metadata": {}, 186 | "outputs": [ 187 | { 188 | "data": { 189 | "text/plain": [ 190 | "'Light rain +10°C'" 191 | ] 192 | }, 193 | "execution_count": 6, 194 | "metadata": {}, 195 | "output_type": "execute_result" 196 | } 197 | ], 198 | "source": [ 199 | "@chatprompt(\n", 200 | " SystemMessage(\"You are a weather beast. Use the proper function to answer.\"),\n", 201 | " UserMessage(\"{message}?\"),\n", 202 | " model=LitellmChatModel(\"gpt-3.5-turbo\"),\n", 203 | " functions=[get_weather],\n", 204 | ")\n", 205 | "def answer_chat(message: str) -> FunctionCall[str]: ...\n", 206 | "\n", 207 | "\n", 208 | "output = answer_chat(\"What is the weather in Lausanne?\")\n", 209 | "output()" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": null, 215 | "metadata": {}, 216 | "outputs": [], 217 | "source": [] 218 | } 219 | ], 220 | "metadata": { 221 | "kernelspec": { 222 | "display_name": "prometeeos-api-5W7JRlnu-py3.12", 223 | "language": "python", 224 | "name": "python3" 225 | }, 226 | "language_info": { 227 | "codemirror_mode": { 228 | "name": "ipython", 229 | "version": 3 230 | }, 231 | "file_extension": ".py", 232 | "mimetype": "text/x-python", 233 | "name": "python", 234 | "nbconvert_exporter": "python", 235 | "pygments_lexer": "ipython3", 236 | "version": "3.12.2" 237 | } 238 | }, 239 | "nbformat": 4, 240 | "nbformat_minor": 2 241 | } 242 | -------------------------------------------------------------------------------- /scripts/6_async_support.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Asynchronous Support\n", 8 | "\n", 9 | "Async support is important to optimize performance.\n", 10 | "\n", 11 | "When you generate, you spend most of your time waiting for the API to respond.\n", 12 | "\n", 13 | "With async support, you can do other things while waiting, enabling multitasking on a single thread.\n", 14 | "\n", 15 | "For instance, if you use FastAPI, you should use async generation, so the FastAPI thread can answer other requests while you wait for the response instead of blocking the main thread." 
16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "Filter out some Pydantic warnings" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 1, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import warnings\n", 32 | "\n", 33 | "warnings.filterwarnings(\"ignore\")" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "Necessary imports" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 2, 46 | "metadata": {}, 47 | "outputs": [ 48 | { 49 | "data": { 50 | "text/plain": [ 51 | "True" 52 | ] 53 | }, 54 | "execution_count": 2, 55 | "metadata": {}, 56 | "output_type": "execute_result" 57 | } 58 | ], 59 | "source": [ 60 | "from dotenv import load_dotenv\n", 61 | "from magentic import (\n", 62 | " chatprompt,\n", 63 | " prompt,\n", 64 | " SystemMessage,\n", 65 | " UserMessage\n", 66 | ")\n", 67 | "from magentic.chat_model.litellm_chat_model import LitellmChatModel\n", 68 | "\n", 69 | "\n", 70 | "load_dotenv()" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "# Asynchronous `@prompt`" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 3, 83 | "metadata": {}, 84 | "outputs": [ 85 | { 86 | "data": { 87 | "text/plain": [ 88 | "'AI, or artificial intelligence, is a branch of computer science that aims to create machines or systems that can perform tasks that would typically require human intelligence. These tasks can include learning, reasoning, problem-solving, perception, and even speech recognition.\\n\\nThere are several different approaches to AI, including machine learning, deep learning, natural language processing, and computer vision. Machine learning is a method of teaching computers to learn from data and make decisions without being explicitly programmed. Deep learning is a subset of machine learning that uses artificial neural networks to mimic the way the human brain works. Natural language processing focuses on enabling computers to understand, interpret, and generate human language. Computer vision involves enabling machines to interpret and understand visual information.\\n\\nAI has a wide range of applications, from virtual assistants like Siri and Alexa to self-driving cars, medical diagnosis, financial trading, and even playing games like chess and Go. 
However, with the increasing capabilities of AI, there are also concerns about the ethical and societal implications of its use, such as privacy, bias, and job displacement.\\n\\nOverall, AI has the potential to greatly improve our lives and revolutionize various industries, but it also raises important questions that need to be addressed as the technology continues to advance.'" 89 | ] 90 | }, 91 | "execution_count": 3, 92 | "metadata": {}, 93 | "output_type": "execute_result" 94 | } 95 | ], 96 | "source": [ 97 | "@prompt(\n", 98 | " \"Tell me more about this topic: {topic}\",\n", 99 | " model=LitellmChatModel(\"gpt-3.5-turbo\"),\n", 100 | ")\n", 101 | "async def answer(topic: str) -> str: ...\n", 102 | "\n", 103 | "await answer(\"AI\")\n" 104 | ] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "metadata": {}, 109 | "source": [ 110 | "# Asynchronous `@chatprompt`" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 4, 116 | "metadata": {}, 117 | "outputs": [ 118 | { 119 | "data": { 120 | "text/plain": [ 121 | "'Artificial intelligence (AI) is the simulation of human intelligence processes by computer systems. This includes learning (the acquisition of information and rules for using the information), reasoning (using rules to reach approximate or definite conclusions), and self-correction.\\n\\nAI has the ability to analyze data, identify patterns, and make decisions with minimal human intervention. It is being used in various industries such as healthcare, finance, transportation, and customer service to improve efficiency and provide better services.\\n\\nThere are different types of AI including narrow AI, which is designed for specific tasks such as speech recognition or image recognition, and general AI, which has the ability to apply intelligence to any problem.\\n\\nOverall, AI has the potential to revolutionize many aspects of our lives and has already made significant advancements in areas such as healthcare, autonomous vehicles, and personalized recommendations. 
However, there are also ethical concerns surrounding AI, such as privacy issues, bias in algorithms, and the potential for job displacement.'" 122 | ] 123 | }, 124 | "execution_count": 4, 125 | "metadata": {}, 126 | "output_type": "execute_result" 127 | } 128 | ], 129 | "source": [ 130 | "@chatprompt(\n", 131 | " SystemMessage(\"You are a pro teacher.\"),\n", 132 | " UserMessage(\"Tell me more about {topic}?\"),\n", 133 | " model=LitellmChatModel(\"gpt-3.5-turbo\"),\n", 134 | ")\n", 135 | "async def answer_chat(topic: str) -> str: ...\n", 136 | "\n", 137 | "\n", 138 | "await answer_chat(\"AI\")" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": null, 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [] 147 | } 148 | ], 149 | "metadata": { 150 | "kernelspec": { 151 | "display_name": "prometeeos-api-5W7JRlnu-py3.12", 152 | "language": "python", 153 | "name": "python3" 154 | }, 155 | "language_info": { 156 | "codemirror_mode": { 157 | "name": "ipython", 158 | "version": 3 159 | }, 160 | "file_extension": ".py", 161 | "mimetype": "text/x-python", 162 | "name": "python", 163 | "nbconvert_exporter": "python", 164 | "pygments_lexer": "ipython3", 165 | "version": "3.12.2" 166 | } 167 | }, 168 | "nbformat": 4, 169 | "nbformat_minor": 2 170 | } 171 | -------------------------------------------------------------------------------- /scripts/7_streaming_and_object_streaming.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Streaming And Object Streaming" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "Filter out some Pydantic warnings" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import warnings\n", 24 | "\n", 25 | "warnings.filterwarnings(\"ignore\")" 26 | ] 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "metadata": {}, 31 | "source": [ 32 | "Necessary imports" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "from dotenv import load_dotenv\n", 42 | "from time import time\n", 43 | "from magentic import (\n", 44 | " StreamedStr,\n", 45 | " AsyncStreamedStr,\n", 46 | " prompt,\n", 47 | ")\n", 48 | "from magentic.chat_model.litellm_chat_model import LitellmChatModel\n", 49 | "from pydantic import BaseModel\n", 50 | "from typing import AsyncIterable, Iterable\n", 51 | "\n", 52 | "\n", 53 | "load_dotenv()" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "# Streaming\n", 61 | "\n", 62 | "Streaming is a technique to receive partial results while the response is being generated.\n", 63 | "\n", 64 | "It's convenient to show something to the user instead of letting them wait for the entire response.\n", 65 | "\n", 66 | "With Magentic we can either stream synchronously or asynchronously.\n" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "metadata": {}, 72 | "source": [ 73 | "## Synchronous Streaming" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "@prompt(\n", 83 | " \"Tell me more about this topic, stay concise: {topic}\",\n", 84 | " model=LitellmChatModel(\"gpt-4-turbo\"),\n", 85 | ")\n", 86 | "def answer_sync(topic: str) -> StreamedStr: ...\n", 87 | "\n", 88 | "for chunk in answer_sync(\"AI\"):\n", 89 | " print(chunk, end=\"\")\n" 90 | ] 91 | },
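{ "cell_type": "markdown", "metadata": {}, "source": [ "A `StreamedStr` is lazy: chunks are only fetched from the API while you iterate over it. A minimal sketch (reusing `answer_sync` from above) that collects the stream back into one plain string:\n", "\n", "```python\n", "# Iterating yields str chunks, so join() assembles the full completion.\n", "full_text = \"\".join(answer_sync(\"AI\"))\n", "print(len(full_text))\n", "```" ] },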
answer_sync(\"AI\"):\n", 89 | " print(chunk, end=\"\")\n" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "metadata": {}, 95 | "source": [ 96 | "## Asynchronous Streaming" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "@prompt(\n", 106 | " \"Tell me more about this topic stay concise: {topic}\",\n", 107 | " model=LitellmChatModel(\"gpt-4-turbo\"),\n", 108 | ")\n", 109 | "async def answer_async(topic: str) -> AsyncStreamedStr: ...\n", 110 | "\n", 111 | "async for chunk in await answer_async(\"AI\"):\n", 112 | " print(chunk, end=\"\")" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "# Object Streaming\n", 120 | "\n", 121 | "Object streaming is a powerful technique for structured output generation.\n", 122 | "\n", 123 | "Instead of streaming chunk by chunk, we stream object by object.\n", 124 | "\n", 125 | "We wait for an entire object to be generated, we display, and keep going until the end.\n", 126 | "\n", 127 | "Agaim, with Magentic we can do it synchronously or asynchronously." 128 | ] 129 | }, 130 | { 131 | "cell_type": "markdown", 132 | "metadata": {}, 133 | "source": [ 134 | "## Synchronous Object Streaming" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": 11, 140 | "metadata": {}, 141 | "outputs": [ 142 | { 143 | "name": "stdout", 144 | "output_type": "stream", 145 | "text": [ 146 | "3.07s : name='Ms. Sushi' age=28 power='Water manipulation' enemies=['Wasabi Fiend', 'Soy Slasher']\n", 147 | "3.88s : name='Boss Burger' age=35 power='Super strength' enemies=['Veggie Vandal', 'Keto Crusader']\n", 148 | "4.97s : name='Taco Titan' age=30 power='Time manipulation' enemies=['Guac Ghoul', 'Salsa Spectre']\n", 149 | "5.83s : name='Pasta Paladin' age=29 power='Elasticity' enemies=['Alfredo Archmage', 'Noodle Nemesis']\n" 150 | ] 151 | } 152 | ], 153 | "source": [ 154 | "class Superhero(BaseModel):\n", 155 | " name: str\n", 156 | " age: int\n", 157 | " power: str\n", 158 | " enemies: list[str]\n", 159 | "\n", 160 | "\n", 161 | "@prompt(\"Create a Superhero team named {name}.\")\n", 162 | "def create_superhero_team(name: str) -> Iterable[Superhero]: ...\n", 163 | "\n", 164 | "\n", 165 | "start_time = time()\n", 166 | "for hero in create_superhero_team(\"The Food Dudes\"):\n", 167 | " print(f\"{time() - start_time:.2f}s : {hero}\")" 168 | ] 169 | }, 170 | { 171 | "cell_type": "markdown", 172 | "metadata": {}, 173 | "source": [ 174 | "## Asynchronous Object Streaming" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 12, 180 | "metadata": {}, 181 | "outputs": [ 182 | { 183 | "name": "stdout", 184 | "output_type": "stream", 185 | "text": [ 186 | "2.22s : name='Captain Carrot' age=30 power='Super Sight' enemies=['Junkfood Joker']\n", 187 | "3.90s : name='Broccoli Bolt' age=25 power='Super Speed' enemies=['Soda Slammer']\n", 188 | "4.64s : name='Steak Slater' age=35 power='Super Strength' enemies=['Vegan Vandal']\n", 189 | "5.67s : name='Tamale Twister' age=27 power='Weather Control' enemies=['Frozen Foodster']\n" 190 | ] 191 | } 192 | ], 193 | "source": [ 194 | "\n", 195 | "class Superhero(BaseModel):\n", 196 | " name: str\n", 197 | " age: int\n", 198 | " power: str\n", 199 | " enemies: list[str]\n", 200 | "\n", 201 | "\n", 202 | "@prompt(\"Create a Superhero team named {name}.\")\n", 203 | "async def create_superhero_team(name: str) -> AsyncIterable[Superhero]: ...\n", 204 | "\n", 
205 | "\n", 206 | "start_time = time()\n", 207 | "async for hero in await create_superhero_team(\"The Food Dudes\"):\n", 208 | " print(f\"{time() - start_time:.2f}s : {hero}\")" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": null, 214 | "metadata": {}, 215 | "outputs": [], 216 | "source": [] 217 | } 218 | ], 219 | "metadata": { 220 | "kernelspec": { 221 | "display_name": "prometeeos-api-5W7JRlnu-py3.12", 222 | "language": "python", 223 | "name": "python3" 224 | }, 225 | "language_info": { 226 | "codemirror_mode": { 227 | "name": "ipython", 228 | "version": 3 229 | }, 230 | "file_extension": ".py", 231 | "mimetype": "text/x-python", 232 | "name": "python", 233 | "nbconvert_exporter": "python", 234 | "pygments_lexer": "ipython3", 235 | "version": "3.12.2" 236 | } 237 | }, 238 | "nbformat": 4, 239 | "nbformat_minor": 2 240 | } 241 | -------------------------------------------------------------------------------- /thumb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitswired/magentic-litellm-tutorial/93e9a627dbccb37be641f82efd96e4399d718d79/thumb.png --------------------------------------------------------------------------------