├── .gitignore ├── CONTRIBUTING.md ├── README.md ├── assets ├── langsmith-ui-01.jpg ├── langsmith-ui-02.jpg ├── langsmith-ui-03.jpg ├── langsmith-ui-04.jpg └── lcel-flow.png ├── chapters ├── 01-intro.ipynb ├── 02-langsmith.ipynb ├── 03-prompts.ipynb ├── 04-chat-memory.ipynb ├── 05-agents-intro.ipynb ├── 06-agent-executor.ipynb ├── 07-lcel.ipynb ├── 08-streaming.ipynb ├── 09-capstone │ ├── README.md │ ├── api │ │ ├── __init__.py │ │ ├── agent.py │ │ └── main.py │ ├── app │ │ ├── .gitignore │ │ ├── next.config.ts │ │ ├── package-lock.json │ │ ├── package.json │ │ ├── postcss.config.mjs │ │ ├── src │ │ │ ├── app │ │ │ │ ├── favicon.ico │ │ │ │ ├── globals.css │ │ │ │ ├── layout.tsx │ │ │ │ └── page.tsx │ │ │ ├── components │ │ │ │ ├── MarkdownRenderer.tsx │ │ │ │ ├── Output.tsx │ │ │ │ └── TextArea.tsx │ │ │ └── types.ts │ │ ├── tailwind.config.ts │ │ └── tsconfig.json │ ├── serapi-tool.ipynb │ └── streaming-test.ipynb └── ollama │ ├── 00-langsmith-ollama.ipynb │ ├── 01-intro-ollama.ipynb │ ├── 02-prompts-ollama.ipynb │ ├── 03-conversational-memory-ollama.ipynb │ ├── 04-WIP-agents-intro-ollama.ipynb │ ├── 05-WIP-agent-executor-ollama.ipynb │ └── 05-langchain-LCEL-Ollama.ipynb ├── env.example ├── pyproject.toml └── uv.lock /.gitignore: -------------------------------------------------------------------------------- 1 | *.env 2 | *.DS_Store 3 | *.egg-info 4 | *.pyc 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | These guidelines outline the workflow for creating and publishing new chapters 4 | to the course. 5 | 6 | ## Creating a Chapter 7 | 8 | We create a new chapter by adding a new directory, providing a complete 9 | venv via `uv`, and adding the relevant learning material. Let's work through 10 | an example chapter called `example-chapter`. 11 | 12 | ``` 13 | # first we create our new chapter directory and project env (from project root) 14 | uv init example-chapter --python 3.12.7 15 | # navigate into the new chapter directory 16 | cd example-chapter 17 | # delete the unnecessary hello.py file 18 | rm hello.py 19 | # add Jupyter notebook support (if needed) 20 | uv add ipykernel 21 | # add required libraries, for example: 22 | uv add numpy transformers 23 | # confirm everything is functional and synchronized 24 | uv sync 25 | # open your project in an editor 26 | cursor . # run via Cursor 27 | code . # run via VS Code 28 | ``` -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LangChain Course 2 | 3 | Welcome to the LangChain course by Aurelio AI! 4 | 5 | ## Getting Started 6 | 7 | ### Python Environment (IMPORTANT) 8 | 9 | This course repo contains everything you need to install an exact duplicate of the Python environment used during course creation. 10 | 11 | #### Installing Python Venvs 12 | 13 | The Python packages are managed using the [uv](https://github.com/astral-sh/uv) package manager, and so we must install `uv` as a prerequisite for the course. We do so by following the [installation guide](https://docs.astral.sh/uv/#getting-started).
For Mac users, as of 22 Oct 2024, enter the following in your terminal: 14 | 15 | ``` 16 | curl -LsSf https://astral.sh/uv/install.sh | sh 17 | ``` 18 | 19 | Once `uv` is installed and available in your terminal, you can navigate to the course root directory and execute: 20 | 21 | ``` 22 | uv python install 3.12.7 23 | uv venv --python 3.12.7 24 | uv sync 25 | ``` 26 | 27 | > ❗️ You may need to restart your terminal if the `uv` command is not recognized. 28 | 29 | With that, we have our venv installed. When working through the code for a specific chapter, always create a new venv to avoid dependency hell. 30 | 31 | #### Using Venv in VS Code / Cursor 32 | 33 | To use our new venv in VS Code or Cursor, we simply execute: 34 | 35 | ``` 36 | cd example-chapter 37 | cursor . # run via Cursor 38 | code . # run via VS Code 39 | ``` 40 | 41 | This command will open a new code window. From here, open the relevant files (like Jupyter notebook files), click **Select Environment** in the top right, click **Python Environments...**, and choose the top `.venv` environment provided. 42 | 43 | #### Uninstalling Venvs 44 | 45 | Naturally, we might not want to keep all of these venvs clogging up the disk space on our system, so after completing the course we recommend removing the venv with: 46 | 47 | ``` 48 | deactivate 49 | rm -rf .venv 50 | ``` 51 | 52 | ### Ollama 53 | 54 | The course can be run using OpenAI or Ollama. If using Ollama, you must go to [ollama.com](https://ollama.com/) and install Ollama for your respective OS (macOS is recommended). 55 | 56 | Whenever an LLM is used via Ollama you must: 57 | 58 | 1. Ensure Ollama is running by executing `ollama serve` in your terminal or running the Ollama application. Make sure to keep note of the port the server is running on; by default, Ollama runs on `http://localhost:11434` 59 | 60 | 2. Download the LLM being used in your current example using `ollama pull`. For example, to download Llama 3.2 3B, we execute `ollama pull llama3.2:3b` in our terminal.
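61 | 62 | As a quick sanity check (assuming the default `http://localhost:11434` address shown above), you can confirm that the model finished downloading and that the server is reachable by listing your local models: 63 | 64 | ``` 65 | # confirm the model was downloaded 66 | ollama list 67 | # confirm the server is reachable on the default port 68 | curl http://localhost:11434/api/tags 69 | ``` 70 | 71 | If the model you pulled appears in the output, the course notebooks will be able to use it via the default address.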
-------------------------------------------------------------------------------- /assets/langsmith-ui-01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aurelio-labs/langchain-course/766cd86fa402427a31a87009849d0a653d50f536/assets/langsmith-ui-01.jpg -------------------------------------------------------------------------------- /assets/langsmith-ui-02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aurelio-labs/langchain-course/766cd86fa402427a31a87009849d0a653d50f536/assets/langsmith-ui-02.jpg -------------------------------------------------------------------------------- /assets/langsmith-ui-03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aurelio-labs/langchain-course/766cd86fa402427a31a87009849d0a653d50f536/assets/langsmith-ui-03.jpg -------------------------------------------------------------------------------- /assets/langsmith-ui-04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aurelio-labs/langchain-course/766cd86fa402427a31a87009849d0a653d50f536/assets/langsmith-ui-04.jpg -------------------------------------------------------------------------------- /assets/lcel-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aurelio-labs/langchain-course/766cd86fa402427a31a87009849d0a653d50f536/assets/lcel-flow.png -------------------------------------------------------------------------------- /chapters/02-langsmith.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "ixSi6udtrYZk" 7 | }, 8 | "source": [ 9 | "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/aurelio-labs/langchain-course/blob/main/chapters/02-langsmith.ipynb)" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "id": "9NwdBTO0qCQi" 16 | }, 17 | "source": [ 18 | "#### LangChain Essentials Course" 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": { 24 | "id": "Swum099VqCQj" 25 | }, 26 | "source": [ 27 | "# LangSmith Starter" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": { 33 | "id": "_hk437ZjqCQj" 34 | }, 35 | "source": [ 36 | "LangSmith is a built-in observability service and platform that integrates _very easily_ with LangChain. You don't _need_ to use LangSmith for this course, but it can be very helpful in understanding what is happening, _and_ we recommend using it beyond this course for general development with LangChain — with all of that in mind we would recommend spending a little bit of time to get familiar with LangSmith." 
37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 1, 42 | "metadata": { 43 | "colab": { 44 | "base_uri": "https://localhost:8080/" 45 | }, 46 | "id": "-k9EfPofqG3g", 47 | "outputId": "71873d67-b511-40b8-9524-d1b0e0ddd3e2" 48 | }, 49 | "outputs": [ 50 | { 51 | "name": "stdout", 52 | "output_type": "stream", 53 | "text": [ 54 | "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/412.7 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m412.7/412.7 kB\u001b[0m \u001b[31m12.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 55 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.5/54.5 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 56 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.5/2.5 MB\u001b[0m \u001b[31m59.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 57 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m333.3/333.3 kB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 58 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m23.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 59 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.8/50.8 kB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 60 | "\u001b[?25h" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "!pip install -qU \\\n", 66 | " langchain-core==0.3.33 \\\n", 67 | " langchain-openai==0.3.3 \\\n", 68 | " langchain-community==0.3.16 \\\n", 69 | " langsmith==0.3.4" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "---\n", 77 | "\n", 78 | "> ⚠️ We will be using OpenAI for this example allowing us to run everything via API. If you would like to use Ollama instead, check out the [Ollama LangChain Course](https://github.com/aurelio-labs/langchain-course/tree/main/notebooks/ollama).\n", 79 | "\n", 80 | "---" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": { 86 | "id": "8UdyCjmqqCQk" 87 | }, 88 | "source": [ 89 | "## Setting up LangSmith" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "metadata": { 95 | "id": "rqURpTWbqCQk" 96 | }, 97 | "source": [ 98 | "LangSmith does require an API key, but it comes with a generous free tier. 
You can sign up an account and get your API key [here](https://smith.langchain.com).\n", 99 | "\n", 100 | "When using LangSmith, we need to setup our environment variables _and_ provide our API key, like so:" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 3, 106 | "metadata": { 107 | "colab": { 108 | "base_uri": "https://localhost:8080/" 109 | }, 110 | "id": "ezoLeIGrqCQk", 111 | "outputId": "252aa252-d8c3-4cd5-fb18-acf3cad2dee5" 112 | }, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "Enter LangSmith API Key: ··········\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "import os\n", 124 | "from getpass import getpass\n", 125 | "\n", 126 | "# must enter API key\n", 127 | "os.environ[\"LANGCHAIN_API_KEY\"] = os.getenv(\"LANGCHAIN_API_KEY\") or \\\n", 128 | " getpass(\"Enter LangSmith API Key: \")\n", 129 | "\n", 130 | "# below should not be changed\n", 131 | "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", 132 | "os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://api.smith.langchain.com\"\n", 133 | "# you can change this as preferred\n", 134 | "os.environ[\"LANGCHAIN_PROJECT\"] = \"aurelioai-langchain-course-langsmith-openai\"" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "metadata": { 140 | "id": "EuGDntgWqCQk" 141 | }, 142 | "source": [ 143 | "In most cases, this is all we need to start seeing logs and traces in the [LangSmith UI](https://smith.langchain.com). By default, LangChain will trace LLM calls, chains, etc. We'll take a look at a quick example of this below." 144 | ] 145 | }, 146 | { 147 | "cell_type": "markdown", 148 | "metadata": { 149 | "id": "KFDeEk0tqCQk" 150 | }, 151 | "source": [ 152 | "## Default Tracing" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "metadata": { 158 | "id": "Ii3hqGO2qCQk" 159 | }, 160 | "source": [ 161 | "As mentioned, LangSmith traces a lot of data without us needing to do anything. Let's see how that looks. We'll start by initializing our LLM. Again, this will need an [API key](https://platform.openai.com/api-keys)." 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": 4, 167 | "metadata": { 168 | "colab": { 169 | "base_uri": "https://localhost:8080/" 170 | }, 171 | "id": "1AfToUsEqCQl", 172 | "outputId": "4426952f-af46-440d-d275-a86610a9c1fe" 173 | }, 174 | "outputs": [ 175 | { 176 | "name": "stdout", 177 | "output_type": "stream", 178 | "text": [ 179 | "Enter OpenAI API Key: ··········\n" 180 | ] 181 | } 182 | ], 183 | "source": [ 184 | "import os\n", 185 | "from getpass import getpass\n", 186 | "from langchain_openai import ChatOpenAI\n", 187 | "\n", 188 | "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\") or getpass(\n", 189 | " \"Enter OpenAI API Key: \"\n", 190 | ")\n", 191 | "\n", 192 | "llm = ChatOpenAI(temperature=0.0, model=\"gpt-4o-mini\")" 193 | ] 194 | }, 195 | { 196 | "cell_type": "markdown", 197 | "metadata": { 198 | "id": "bAVDqCttqCQl" 199 | }, 200 | "source": [ 201 | "Let's invoke our LLM and then see what happens in the LangSmith UI." 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": 5, 207 | "metadata": { 208 | "colab": { 209 | "base_uri": "https://localhost:8080/" 210 | }, 211 | "id": "SCGtll-IqCQl", 212 | "outputId": "0f9ea347-b50f-481c-c237-fd1fd6a09589" 213 | }, 214 | "outputs": [ 215 | { 216 | "data": { 217 | "text/plain": [ 218 | "AIMessage(content='Hello! 
How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 8, 'total_tokens': 18, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_bd83329f63', 'finish_reason': 'stop', 'logprobs': None}, id='run-e73b0928-0ea5-4e88-90f7-28470b499ae2-0', usage_metadata={'input_tokens': 8, 'output_tokens': 10, 'total_tokens': 18, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})" 219 | ] 220 | }, 221 | "execution_count": 5, 222 | "metadata": {}, 223 | "output_type": "execute_result" 224 | } 225 | ], 226 | "source": [ 227 | "llm.invoke(\"hello\")" 228 | ] 229 | }, 230 | { 231 | "cell_type": "markdown", 232 | "metadata": { 233 | "id": "eYM-pIIaqCQl" 234 | }, 235 | "source": [ 236 | "After this we should see that a new project (`aurelioai-langchain-course-langsmith-openai`) has been created in the LangSmith UI. Inside that project, we should see the trace from our LLM call:\n", 237 | "\n", 238 | "\n", 239 | "![LangSmith LLM Trace](https://github.com/aurelio-labs/langchain-course/blob/main/assets/langsmith-ui-01.jpg?raw=1)" 240 | ] 241 | }, 242 | { 243 | "cell_type": "markdown", 244 | "metadata": { 245 | "id": "S7VktC46qCQl" 246 | }, 247 | "source": [ 248 | "By default, LangSmith will capture plenty — however, it won't capture functions from outside of LangChain. Let's see how we can trace those." 249 | ] 250 | }, 251 | { 252 | "cell_type": "markdown", 253 | "metadata": { 254 | "id": "VNQRKbp9qCQl" 255 | }, 256 | "source": [ 257 | "## Tracing Non-LangChain Code" 258 | ] 259 | }, 260 | { 261 | "cell_type": "markdown", 262 | "metadata": { 263 | "id": "hbYn3yY7qCQl" 264 | }, 265 | "source": [ 266 | "LangSmith can trace functions that are not part of LangChain, we just need to add the `@traceable` decorator. Let's try this for a few simple functions." 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": 6, 272 | "metadata": { 273 | "id": "NVo50uq6qCQl" 274 | }, 275 | "outputs": [], 276 | "source": [ 277 | "from langsmith import traceable\n", 278 | "import random\n", 279 | "import time\n", 280 | "\n", 281 | "\n", 282 | "@traceable\n", 283 | "def generate_random_number():\n", 284 | " return random.randint(0, 100)\n", 285 | "\n", 286 | "@traceable\n", 287 | "def generate_string_delay(input_str: str):\n", 288 | " number = random.randint(1, 5)\n", 289 | " time.sleep(number)\n", 290 | " return f\"{input_str} ({number})\"\n", 291 | "\n", 292 | "@traceable\n", 293 | "def random_error():\n", 294 | " number = random.randint(0, 1)\n", 295 | " if number == 0:\n", 296 | " raise ValueError(\"Random error\")\n", 297 | " else:\n", 298 | " return \"No error\"" 299 | ] 300 | }, 301 | { 302 | "cell_type": "markdown", 303 | "metadata": { 304 | "id": "Rx4b1yJtqCQl" 305 | }, 306 | "source": [ 307 | "Let's run these a few times and see what happens." 
308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": 7, 313 | "metadata": { 314 | "colab": { 315 | "base_uri": "https://localhost:8080/", 316 | "height": 49, 317 | "referenced_widgets": [ 318 | "2af699bd631846bdb2e064981a48ae67", 319 | "1507edeb2bcf43238ed2431731b51f24", 320 | "76c4884af2ed4474a07a6f8964cd7b10", 321 | "302271efa18349fb8fd20e2fa922f17f", 322 | "59d44325b22241a0828dd1df4c87eb46", 323 | "fb6ca9c9ad144bc3947e454cc3f3ab77", 324 | "1e91ab27a61c4530ac1dd2e05a4455ae", 325 | "a087fea20eb040469312b40081517d67", 326 | "ba1df7e62b114c6d9062c65a04bb349d", 327 | "e7967f66202744d1b41ec3ae02a45617", 328 | "fe1edcf8e1fa4ae2bdae23d2235395e0" 329 | ] 330 | }, 331 | "id": "iTDAR7b4qCQl", 332 | "outputId": "084ca65c-2497-41cf-e6f6-5b946163b5f6" 333 | }, 334 | "outputs": [ 335 | { 336 | "data": { 337 | "application/vnd.jupyter.widget-view+json": { 338 | "model_id": "2af699bd631846bdb2e064981a48ae67", 339 | "version_major": 2, 340 | "version_minor": 0 341 | }, 342 | "text/plain": [ 343 | " 0%| | 0/10 [00:00 "Article": 55 | return cls( 56 | title=result["title"], 57 | source=result["source"], 58 | link=result["link"], 59 | snippet=result["snippet"], 60 | ) 61 | 62 | # Tools definition 63 | # note: we define all tools as async to simplify later code, but only the serpapi 64 | # tool is actually async 65 | @tool 66 | async def add(x: float, y: float) -> float: 67 | """Add 'x' and 'y'.""" 68 | return x + y 69 | 70 | @tool 71 | async def multiply(x: float, y: float) -> float: 72 | """Multiply 'x' and 'y'.""" 73 | return x * y 74 | 75 | @tool 76 | async def exponentiate(x: float, y: float) -> float: 77 | """Raise 'x' to the power of 'y'.""" 78 | return x ** y 79 | 80 | @tool 81 | async def subtract(x: float, y: float) -> float: 82 | """Subtract 'x' from 'y'.""" 83 | return y - x 84 | 85 | @tool 86 | async def serpapi(query: str) -> list[Article]: 87 | """Use this tool to search the web.""" 88 | params = { 89 | "api_key": SERPAPI_API_KEY.get_secret_value(), 90 | "engine": "google", 91 | "q": query, 92 | } 93 | async with aiohttp.ClientSession() as session: 94 | async with session.get( 95 | "https://serpapi.com/search", 96 | params=params 97 | ) as response: 98 | results = await response.json() 99 | return [Article.from_serpapi_result(result) for result in results["organic_results"]] 100 | 101 | @tool 102 | async def final_answer(answer: str, tools_used: list[str]) -> dict[str, str | list[str]]: 103 | """Use this tool to provide a final answer to the user.""" 104 | return {"answer": answer, "tools_used": tools_used} 105 | 106 | tools = [add, subtract, multiply, exponentiate, final_answer, serpapi] 107 | # note when we have sync tools we use tool.func, when async we use tool.coroutine 108 | name2tool = {tool.name: tool.coroutine for tool in tools} 109 | 110 | # Streaming Handler 111 | class QueueCallbackHandler(AsyncCallbackHandler): 112 | def __init__(self, queue: asyncio.Queue): 113 | self.queue = queue 114 | self.final_answer_seen = False 115 | 116 | async def __aiter__(self): 117 | while True: 118 | if self.queue.empty(): 119 | await asyncio.sleep(0.1) 120 | continue 121 | token_or_done = await self.queue.get() 122 | if token_or_done == "<>": 123 | return 124 | if token_or_done: 125 | yield token_or_done 126 | 127 | async def on_llm_new_token(self, *args, **kwargs) -> None: 128 | chunk = kwargs.get("chunk") 129 | if chunk and chunk.message.additional_kwargs.get("tool_calls"): 130 | if chunk.message.additional_kwargs["tool_calls"][0]["function"]["name"] == "final_answer": 131 | 
self.final_answer_seen = True 132 | self.queue.put_nowait(kwargs.get("chunk")) 133 | 134 | async def on_llm_end(self, *args, **kwargs) -> None: 135 | if self.final_answer_seen: 136 | self.queue.put_nowait("<>") 137 | else: 138 | self.queue.put_nowait("<>") 139 | 140 | async def execute_tool(tool_call: AIMessage) -> ToolMessage: 141 | tool_name = tool_call.tool_calls[0]["name"] 142 | tool_args = tool_call.tool_calls[0]["args"] 143 | tool_out = await name2tool[tool_name](**tool_args) 144 | return ToolMessage( 145 | content=f"{tool_out}", 146 | tool_call_id=tool_call.tool_calls[0]["id"] 147 | ) 148 | 149 | # Agent Executor 150 | class CustomAgentExecutor: 151 | def __init__(self, max_iterations: int = 3): 152 | self.chat_history: list[BaseMessage] = [] 153 | self.max_iterations = max_iterations 154 | self.agent = ( 155 | { 156 | "input": lambda x: x["input"], 157 | "chat_history": lambda x: x["chat_history"], 158 | "agent_scratchpad": lambda x: x.get("agent_scratchpad", []) 159 | } 160 | | prompt 161 | | llm.bind_tools(tools, tool_choice="any") 162 | ) 163 | 164 | async def invoke(self, input: str, streamer: QueueCallbackHandler, verbose: bool = False) -> dict: 165 | # invoke the agent but we do this iteratively in a loop until 166 | # reaching a final answer 167 | count = 0 168 | final_answer: str | None = None 169 | agent_scratchpad: list[AIMessage | ToolMessage] = [] 170 | # streaming function 171 | async def stream(query: str) -> list[AIMessage]: 172 | response = self.agent.with_config( 173 | callbacks=[streamer] 174 | ) 175 | # we initialize the output dictionary that we will be populating with 176 | # our streamed output 177 | outputs = [] 178 | # now we begin streaming 179 | async for token in response.astream({ 180 | "input": query, 181 | "chat_history": self.chat_history, 182 | "agent_scratchpad": agent_scratchpad 183 | }): 184 | tool_calls = token.additional_kwargs.get("tool_calls") 185 | if tool_calls: 186 | # first check if we have a tool call id - this indicates a new tool 187 | if tool_calls[0]["id"]: 188 | outputs.append(token) 189 | else: 190 | outputs[-1] += token 191 | else: 192 | pass 193 | return [ 194 | AIMessage( 195 | content=x.content, 196 | tool_calls=x.tool_calls, 197 | tool_call_id=x.tool_calls[0]["id"] 198 | ) for x in outputs 199 | ] 200 | 201 | while count < self.max_iterations: 202 | # invoke a step for the agent to generate a tool call 203 | tool_calls = await stream(query=input) 204 | # gather tool execution coroutines 205 | tool_obs = await asyncio.gather( 206 | *[execute_tool(tool_call) for tool_call in tool_calls] 207 | ) 208 | # append tool calls and tool observations to the scratchpad in order 209 | id2tool_obs = {tool_call.tool_call_id: tool_obs for tool_call, tool_obs in zip(tool_calls, tool_obs)} 210 | for tool_call in tool_calls: 211 | agent_scratchpad.extend([ 212 | tool_call, 213 | id2tool_obs[tool_call.tool_call_id] 214 | ]) 215 | 216 | count += 1 217 | # if the tool call is the final answer tool, we stop 218 | found_final_answer = False 219 | for tool_call in tool_calls: 220 | if tool_call.tool_calls[0]["name"] == "final_answer": 221 | final_answer_call = tool_call.tool_calls[0] 222 | final_answer = final_answer_call["args"]["answer"] 223 | found_final_answer = True 224 | break 225 | 226 | # Only break the loop if we found a final answer 227 | if found_final_answer: 228 | break 229 | 230 | # add the final output to the chat history, we only add the "answer" field 231 | self.chat_history.extend([ 232 | HumanMessage(content=input), 233 | 
AIMessage(content=final_answer if final_answer else "No answer found") 234 | ]) 235 | # return the final answer in dict form 236 | return final_answer_call if final_answer else {"answer": "No answer found", "tools_used": []} 237 | 238 | # Initialize agent executor 239 | agent_executor = CustomAgentExecutor() -------------------------------------------------------------------------------- /chapters/09-capstone/api/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from agent import QueueCallbackHandler, agent_executor 4 | from fastapi import FastAPI 5 | from fastapi.responses import StreamingResponse 6 | from fastapi.middleware.cors import CORSMiddleware 7 | 8 | # initializing our application 9 | app = FastAPI() 10 | 11 | app.add_middleware( 12 | CORSMiddleware, 13 | allow_origins=["http://localhost:3000"], # Your frontend URL 14 | allow_credentials=True, 15 | allow_methods=["*"], # Allows all methods 16 | allow_headers=["*"], # Allows all headers 17 | ) 18 | 19 | # streaming function 20 | async def token_generator(content: str, streamer: QueueCallbackHandler): 21 | task = asyncio.create_task(agent_executor.invoke( 22 | input=content, 23 | streamer=streamer, 24 | verbose=True # set to True to see verbose output in console 25 | )) 26 | # initialize various components to stream 27 | async for token in streamer: 28 | try: 29 | if token == "<>": 30 | # send end of step token 31 | yield "</step>" 32 | elif tool_calls := token.message.additional_kwargs.get("tool_calls"): 33 | if tool_name := tool_calls[0]["function"]["name"]: 34 | # send start of step token followed by step name tokens 35 | yield f"<step><step_name>{tool_name}</step_name>" 36 | if tool_args := tool_calls[0]["function"]["arguments"]: 37 | # tool args are streamed directly, ensure it's properly encoded 38 | yield tool_args 39 | except Exception as e: 40 | print(f"Error streaming token: {e}") 41 | continue 42 | await task 43 | 44 | # invoke function 45 | @app.post("/invoke") 46 | async def invoke(content: str): 47 | queue: asyncio.Queue = asyncio.Queue() 48 | streamer = QueueCallbackHandler(queue) 49 | # return the streaming response 50 | return StreamingResponse( 51 | token_generator(content, streamer), 52 | media_type="text/event-stream", 53 | headers={ 54 | "Cache-Control": "no-cache", 55 | "Connection": "keep-alive", 56 | } 57 | ) 58 | -------------------------------------------------------------------------------- /chapters/09-capstone/app/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.* 7 | .yarn/* 8 | !.yarn/patches 9 | !.yarn/plugins 10 | !.yarn/releases 11 | !.yarn/versions 12 | 13 | # testing 14 | /coverage 15 | 16 | # next.js 17 | /.next/ 18 | /out/ 19 | 20 | # production 21 | /build 22 | 23 | # misc 24 | .DS_Store 25 | *.pem 26 | 27 | # debug 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | .pnpm-debug.log* 32 | 33 | # env files (can opt-in for committing if needed) 34 | .env* 35 | 36 | # vercel 37 | .vercel 38 | 39 | # typescript 40 | *.tsbuildinfo 41 | next-env.d.ts 42 | -------------------------------------------------------------------------------- /chapters/09-capstone/app/next.config.ts: -------------------------------------------------------------------------------- 1 | import type { NextConfig } from "next"; 2 | 3 | const nextConfig: NextConfig = { 4 | /* config options here */ 5 | }; 6 | 7 | export default nextConfig; 8 | -------------------------------------------------------------------------------- /chapters/09-capstone/app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "new-next-app", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev --turbopack", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "incomplete-json-parser": "^1.1.4", 13 | "next": "15.1.7", 14 | "react": "^19.0.0", 15 | "react-dom": "^19.0.0", 16 | "react-markdown": "^10.0.0", 17 | "remark-gfm": "^4.0.1" 18 | }, 19 | "devDependencies": { 20 | "@tailwindcss/typography": "^0.5.16", 21 | "@types/node": "^20", 22 | "@types/react": "^19", 23 | "@types/react-dom": "^19", 24 | "postcss": "^8", 25 | "tailwindcss": "^3.4.1", 26 | "typescript": "^5" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /chapters/09-capstone/app/postcss.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('postcss-load-config').Config} */ 2 | const config = { 3 | plugins: { 4 | tailwindcss: {}, 5 | }, 6 | }; 7 | 8 | export default config; 9 | -------------------------------------------------------------------------------- /chapters/09-capstone/app/src/app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aurelio-labs/langchain-course/766cd86fa402427a31a87009849d0a653d50f536/chapters/09-capstone/app/src/app/favicon.ico -------------------------------------------------------------------------------- /chapters/09-capstone/app/src/app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | :root { 6 | --foreground: #ffffff; 7 | --background: #131313; 8 | } 9 | 10 | body { 11 | color: var(--foreground); 12 | background: var(--background); 13 | } 14 | 15 | .container { 16 | @apply max-w-[48rem]; 17 | } 18 | -------------------------------------------------------------------------------- /chapters/09-capstone/app/src/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import type { Metadata } from "next"; 2 | import { Inter } from "next/font/google"; 3 | import "./globals.css"; 4 | 5 | const inter = Inter({ 6 | subsets: ['latin'], 7 | display: 'swap', 8 | }) 9 | 10 | export const metadata: Metadata = { 11 | title: "Langchain Course", 12 | 
description: "", 13 | }; 14 | 15 | export default function RootLayout({ 16 | children, 17 | }: Readonly<{ 18 | children: React.ReactNode; 19 | }>) { 20 | return ( 21 | 22 | 25 | {children} 26 | 27 | 28 | ); 29 | } 30 | -------------------------------------------------------------------------------- /chapters/09-capstone/app/src/app/page.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import Output from "@/components/Output"; 4 | import TextArea from "@/components/TextArea"; 5 | import { type ChatOutput } from "@/types"; 6 | import { useState } from "react"; 7 | 8 | export default function Home() { 9 | const [outputs, setOutputs] = useState([]); 10 | const [isGenerating, setIsGenerating] = useState(false); 11 | 12 | return ( 13 |
13 | <main> 18 | <div> 19 | {outputs.length === 0 && ( 20 | <h1> 21 | What do you want to know? 22 | </h1> 23 | )} 24 | 25 |