├── .env.example
├── .gitignore
├── Makefile
├── README.md
├── basic-diagrams.excalidraw
├── notebooks
├── 1.0-intro-to-langchain.ipynb
├── 1.1-intro-to-runnable-interface.ipynb
├── 2.0-qa-with-langchain.ipynb
├── 2.1-langchain-query-csv.ipynb
├── 2.2-dynamic-quiz-over-pdf.ipynb
├── 2.3-qa-fully-local.ipynb
├── 2.4-simple-rag-langchain-langgraph.ipynb
├── 3.0-building-llm-agents-with-langchain.ipynb
├── 3.1-langchain-github-agent-prototype.ipynb
├── 3.2-langchain-agent-with-langgraph.ipynb
├── 4.0-langgraph-quick-introduction.ipynb
├── 5.0-demos-research-workflows.ipynb
├── 6.0-live-demo-chat-with-langchain-docs-urls.ipynb
├── README.md
├── assets-resources
│ ├── Augmenting-Human-Intellect_ A-Conceptual-Framework-1962-Doug Engelbart.pdf
│ ├── agent-loop-langgraph-version.png
│ ├── agent-loop.png
│ ├── agent_loop.svg
│ ├── ai.png
│ ├── attention-paper.pdf
│ ├── attributes_runnable.png
│ ├── basic-agent-diagram.png
│ ├── chatgpt-demo.png
│ ├── components_input_output.png
│ ├── components_input_type_output_type.png
│ ├── diagram_langchain_agent_find_source_code.png
│ ├── embeddings-similarity.png
│ ├── embeddings-similarity2.png
│ ├── embeddings.png
│ ├── google-icon.png
│ ├── google-translate.png
│ ├── images_tbn.jpg
│ ├── langchain-icon.svg
│ ├── langchain-project-structure.png
│ ├── langchain-toolkits.png
│ ├── langgraph-components.png
│ ├── lcel-image.png
│ ├── llm_paper_know_dont_know.pdf
│ ├── llm_predicts_pancakes.png
│ ├── pancake_maker.png
│ ├── paper-llm-components.pdf
│ ├── prompts.csv
│ ├── pydantic.png
│ ├── rag-docs.png
│ ├── rag-langchain-retrieval.png
│ ├── rag-langchain.png
│ ├── superheroes.csv
│ ├── tagging.png
│ └── vectordb.png
├── dev-notebooks
│ ├── 2023-12-13-14-12-45.png
│ ├── 2023-12-13-14-25-37.png
│ ├── 5.1-demo-playwright-scrape-articles.py
│ ├── 6.0-langchain-deploy-recipe-walkthrough.ipynb
│ ├── 6.1-langchain-app-dev-structure.ipynb
│ ├── 7.0-fun-demos-adept.ipynb
│ ├── create-knowledge-graph.ipynb
│ ├── knowledge_graph.gv
│ ├── knowledge_graph.gv.pdf
│ ├── live-coding-demo-agents-student-request.ipynb
│ ├── live-coding-demo-agents.ipynb
│ ├── live-coding-demo-qa-pdf.ipynb
│ ├── llama3-langchain-basic-chain.ipynb
│ └── research-assistant_test.py
├── intro-rag-basics.ipynb
├── jira-agent.py
├── langchain-app.py
├── langchain-lcel-cheatsheet.md
├── langchain-structured-output-ui.py
├── live-example-liner-regression-langgraph.ipynb
├── live-notebook-rag-basics.ipynb
├── live-session-intro.ipynb
├── paper.pdf
├── rag_methods.py
├── research-agent-langgraph.ipynb
├── simple-product-info-chatbot.py
└── testing-langchain-app.ipynb
├── presentation_slides
├── 2024-02-20-21-43-40.png
├── Getting-Started-with-LangChain-presentation.pdf
├── extract-presentation.applescript
├── presentation-langgraph.html
└── presentation.html
└── requirements
├── requirements.in
└── requirements.txt
/.env.example:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/.env.example
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore node_modules directory
2 | node_modules/
3 |
4 | # Ignore build artifacts
5 | dist/
6 | build/
7 |
8 | # Ignore environment-specific files
9 | .env
10 | .env.local
11 | .env.*.local
12 |
13 | # Ignore log files
14 | *.log
15 | *.sqlite
16 | *.bin
17 |
18 | # Ignore editor-specific files
19 | .vscode/
20 | .idea/
21 |
22 | # Ignore OS-specific files
23 | .DS_Store
24 | Thumbs.db
25 |
26 | # Project files
27 |
28 | ## presentation-slides folder
29 | presentation-slides/
30 |
31 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | ENV_NAME ?= oreilly-langchain
2 | PYTHON_VERSION ?= 3.11
3 |
4 | # Install exact Python and CUDA versions
5 | conda-create:
6 | conda create -n $(ENV_NAME) python=$(PYTHON_VERSION)
7 |
8 | repo-setup:
9 | mkdir requirements
10 | touch requirements/requirements.in
11 | pip install uv
12 | uv pip install pip-tools
13 |
14 | notebook-setup:
15 | python -m ipykernel install --user --name=$(ENV_NAME)
16 |
17 | env-update:
18 | uv pip compile ./requirements/requirements.in -o ./requirements/requirements.txt
19 | uv pip sync ./requirements/requirements.txt
20 |
21 | pip-tools-setup:
22 | pip install uv
23 | uv pip install pip-tools setuptools
24 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Notebooks for the O'Reilly live-training: "Getting Started with LangChain"
2 |
3 | - [Live-training official website from O'Reilly](https://learning.oreilly.com/live-events/getting-started-with-langchain/0636920098586/0636920098585/)
4 | # Overview
5 |
6 |
7 | ## Notebooks
8 |
9 | 1. **Getting Started with LangChain**
10 | - [Introduction to LangChain](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/1.0-intro-to-langchain.ipynb)
11 |
12 | 2. **Advanced Query and Dynamic Content**
13 | - [QA with LangChain](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/2.0-qa-with-langchain.ipynb)
14 | - [Querying CSV Data with LangChain](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/2.1-langchain-query-csv.ipynb)
15 | - [Dynamic Quiz over PDF](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/2.2-dynamic-quiz-over-pdf.ipynb)
16 | - [QA Fully Local](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/2.3-qa-fully-local.ipynb)
17 |
18 | 3. **Building Intelligent Agents**
19 | - [Building LLM Agents with LangChain](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/3.0-building-llm-agents-with-langchain.ipynb)
20 | - [LangChain GitHub Agent Prototype](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/3.1-langchain-github-agent-prototype.ipynb)
21 |
22 | 4. **Quick Introduction to LangGraph**
23 | - [Quick Introduction to LangGraph](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/4.0-langgraph-quick-introduction.ipynb)
24 |
25 | 5. **Demonstrations and Practical Applications**
26 | - [Research Workflows](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/5.0-demos-research-workflows.ipynb)
27 |
28 | 6. **Additional Resources**
29 | - [Live Coding Demo QA PDF](https://colab.research.google.com/github/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/blob/main/notebooks/live-coding-demo-qa-pdf.ipynb)
30 |
31 |
32 | ## Setup
33 |
34 | **Conda**
35 |
36 | - Install [anaconda](https://www.anaconda.com/download)
37 | - Create an environment: `conda create -n oreilly-langchain python=3.11`
38 | - Activate your environment with: `conda activate oreilly-langchain`
39 | - Install requirements with: `pip install -r requirements/requirements.txt`
40 | - Setup your openai [API key](https://platform.openai.com/)
41 |
42 | **If you're used to Makefiles**
43 | - Install `uv` with `pip install uv`
44 | - Run `make conda-create` to create the environment (the Makefile has no `all` target)
45 | - Activate your environment just like above, then run `make env-update` to install dependencies, and you're done
46 |
47 | **Pip**
48 |
49 | 1. **Create a Virtual Environment:**
50 | Navigate to your project directory. If using Python 3's built-in `venv`:
51 | ```bash
52 | python -m venv oreilly_env
53 | ```
54 | If you're using `virtualenv`:
55 | ```bash
56 | virtualenv oreilly_env
57 | ```
58 |
59 | 2. **Activate the Virtual Environment:**
60 | - **On Windows:**
61 | ```bash
62 | .\oreilly_env\Scripts\activate
63 | ```
64 | - **On macOS and Linux:**
65 | ```bash
66 | source oreilly_env/bin/activate
67 | ```
68 |
69 | 3. **Install Dependencies from `requirements.txt`:**
70 | ```bash
71 | pip install python-dotenv
72 |    pip install -r requirements/requirements.txt
73 | ```
74 |
75 | 4. Setup your openai [API key](https://platform.openai.com/)
76 |
77 | Remember to deactivate the virtual environment once you're done by simply typing:
78 | ```bash
79 | deactivate
80 | ```
81 |
82 | ## Setup your .env file
83 |
84 | - Change the `.env.example` file to `.env` and add your OpenAI API key.
85 |
86 | ## To use this Environment with Jupyter Notebooks:
87 |
88 | ```python3 -m ipykernel install --user --name=oreilly-langchain```
89 |
90 | ## Official Training Website
91 |
92 | For more information about the live-training, visit the [official website](https://learning.oreilly.com/live-events/getting-started-with-langchain/0636920098586/0636920098585/).
93 |
--------------------------------------------------------------------------------
/notebooks/2.1-langchain-query-csv.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 4,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "# %pip install langchain\n",
10 | "# # %pip install langchain-openai\n",
11 | "# # %pip install langchainhub\n",
12 | "# # %pip install pypdf\n",
13 | "# %pip install chromadb\n",
14 | "# %pip install pandas"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "# DISCLAIMER: \n",
22 | "\n",
23 | "```\n",
24 | "FOR THIS NOTEBOOK YOU WILL NEED TO BE RUNNING LOCALLY OLLAMA + HAVE THE GEMMA 3 MODEL AND THE NOMIC-EMBED-TEXT EMBEDDING MODEL INSTALLED\n",
25 | "```\n",
26 | "\n",
27 | "See [here](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/) for how to set that up."
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 1,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "import os\n",
37 | "import getpass\n",
38 | "\n",
39 | "# Set OPENAI API Key\n",
40 | "\n",
41 | "import os\n",
42 | "import getpass\n",
43 | "\n",
44 | "def _set_env(var: str):\n",
45 | " if not os.environ.get(var):\n",
46 | "        os.environ[var] = getpass.getpass(f\"{var}: \")\n",
47 | "\n",
48 | "_set_env(\"OPENAI_API_KEY\")"
49 | ]
50 | },
51 | {
52 | "cell_type": "markdown",
53 | "metadata": {},
54 | "source": [
55 | "A vector database is a way to store these embeddings, these numerical representations that we just discussed.\n",
56 | "\n",
57 | "The pipeline is:\n",
58 | "- Incoming document\n",
59 | "- Create chunks of text from that document\n",
60 | "- Embed each chunk\n",
61 | "- Store these embeddings\n",
62 | "\n",
63 | "\n",
64 | "\n",
65 | "[LangChain for LLM Application Development by Deeplearning.ai](https://learn.deeplearning.ai/langchain/lesson/1/introduction)"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 2,
71 | "metadata": {},
72 | "outputs": [
73 | {
74 | "data": {
75 | "text/html": [
76 | "
\n",
77 | "\n",
90 | "
\n",
91 | " \n",
92 | " \n",
93 | " | \n",
94 | " Superhero Name | \n",
95 | " Superpower | \n",
96 | " Power Level | \n",
97 | " Catchphrase | \n",
98 | "
\n",
99 | " \n",
100 | " \n",
101 | " \n",
102 | " 0 | \n",
103 | " Captain Thunder | \n",
104 | " Bolt Manipulation | \n",
105 | " 90 | \n",
106 | " Feel the power of the storm! | \n",
107 | "
\n",
108 | " \n",
109 | " 1 | \n",
110 | " Silver Falcon | \n",
111 | " Flight and Agility | \n",
112 | " 85 | \n",
113 | " Soar high, fearlessly! | \n",
114 | "
\n",
115 | " \n",
116 | " 2 | \n",
117 | " Mystic Shadow | \n",
118 | " Invisibility and Illusions | \n",
119 | " 78 | \n",
120 | " Disappear into the darkness! | \n",
121 | "
\n",
122 | " \n",
123 | " 3 | \n",
124 | " Blaze Runner | \n",
125 | " Pyrokinesis | \n",
126 | " 88 | \n",
127 | " Burn bright and fierce! | \n",
128 | "
\n",
129 | " \n",
130 | " 4 | \n",
131 | " Electra-Wave | \n",
132 | " Electric Manipulation | \n",
133 | " 82 | \n",
134 | " Unleash the electric waves! | \n",
135 | "
\n",
136 | " \n",
137 | "
\n",
138 | "
"
139 | ],
140 | "text/plain": [
141 | " Superhero Name Superpower Power Level \\\n",
142 | "0 Captain Thunder Bolt Manipulation 90 \n",
143 | "1 Silver Falcon Flight and Agility 85 \n",
144 | "2 Mystic Shadow Invisibility and Illusions 78 \n",
145 | "3 Blaze Runner Pyrokinesis 88 \n",
146 | "4 Electra-Wave Electric Manipulation 82 \n",
147 | "\n",
148 | " Catchphrase \n",
149 | "0 Feel the power of the storm! \n",
150 | "1 Soar high, fearlessly! \n",
151 | "2 Disappear into the darkness! \n",
152 | "3 Burn bright and fierce! \n",
153 | "4 Unleash the electric waves! "
154 | ]
155 | },
156 | "execution_count": 2,
157 | "metadata": {},
158 | "output_type": "execute_result"
159 | }
160 | ],
161 | "source": [
162 | "import pandas as pd\n",
163 | "\n",
164 | "file_path=\"./assets-resources/superheroes.csv\"\n",
165 | "df = pd.read_csv(file_path)\n",
166 | "\n",
167 | "df.head()"
168 | ]
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": 3,
173 | "metadata": {},
174 | "outputs": [],
175 | "source": [
176 | "from langchain.document_loaders.csv_loader import CSVLoader"
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": 4,
182 | "metadata": {},
183 | "outputs": [
184 | {
185 | "data": {
186 | "text/plain": [
187 | "[Document(metadata={'source': './assets-resources/superheroes.csv', 'row': 0}, page_content='Superhero Name: Captain Thunder\\nSuperpower: Bolt Manipulation\\nPower Level: 90\\nCatchphrase: Feel the power of the storm!'),\n",
188 | " Document(metadata={'source': './assets-resources/superheroes.csv', 'row': 1}, page_content='Superhero Name: Silver Falcon\\nSuperpower: Flight and Agility\\nPower Level: 85\\nCatchphrase: Soar high, fearlessly!'),\n",
189 | " Document(metadata={'source': './assets-resources/superheroes.csv', 'row': 2}, page_content='Superhero Name: Mystic Shadow\\nSuperpower: Invisibility and Illusions\\nPower Level: 78\\nCatchphrase: Disappear into the darkness!'),\n",
190 | " Document(metadata={'source': './assets-resources/superheroes.csv', 'row': 3}, page_content='Superhero Name: Blaze Runner\\nSuperpower: Pyrokinesis\\nPower Level: 88\\nCatchphrase: Burn bright and fierce!'),\n",
191 | " Document(metadata={'source': './assets-resources/superheroes.csv', 'row': 4}, page_content='Superhero Name: Electra-Wave\\nSuperpower: Electric Manipulation\\nPower Level: 82\\nCatchphrase: Unleash the electric waves!')]"
192 | ]
193 | },
194 | "execution_count": 4,
195 | "metadata": {},
196 | "output_type": "execute_result"
197 | }
198 | ],
199 | "source": [
200 | "loader = CSVLoader(file_path)\n",
201 | "data = loader.load()\n",
202 | "data[:5]"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": 5,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "from langchain_ollama import ChatOllama\n",
212 | "\n",
213 | "llm = ChatOllama(\n",
214 | " model=\"gemma3\",\n",
215 | " verbose=True,\n",
216 | ")"
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": 6,
222 | "metadata": {},
223 | "outputs": [],
224 | "source": [
225 | "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
226 | "\n",
227 | "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)\n",
228 | "all_splits = text_splitter.split_documents(data)"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": 7,
234 | "metadata": {},
235 | "outputs": [],
236 | "source": [
237 | "# Embed and store\n",
238 | "from langchain_chroma import Chroma\n",
239 | "from langchain_ollama import OllamaEmbeddings\n",
240 | "\n",
241 | "vectordb = Chroma.from_documents(documents=all_splits, embedding=OllamaEmbeddings(model='nomic-embed-text'))"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": 8,
247 | "metadata": {},
248 | "outputs": [
249 | {
250 | "data": {
251 | "text/plain": [
252 | "4"
253 | ]
254 | },
255 | "execution_count": 8,
256 | "metadata": {},
257 | "output_type": "execute_result"
258 | }
259 | ],
260 | "source": [
261 | "question = \"What is the name of the thunder super hero?\"\n",
262 | "docs = vectordb.similarity_search(question)\n",
263 | "len(docs)"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": 9,
269 | "metadata": {},
270 | "outputs": [
271 | {
272 | "data": {
273 | "text/plain": [
274 | "[Document(metadata={'row': 0, 'source': './assets-resources/superheroes.csv'}, page_content='Superhero Name: Captain Thunder\\nSuperpower: Bolt Manipulation\\nPower Level: 90\\nCatchphrase: Feel the power of the storm!'),\n",
275 | " Document(metadata={'row': 30, 'source': './assets-resources/superheroes.csv'}, page_content='Superhero Name: Thunderstrike\\nSuperpower: Lightning Control\\nPower Level: 91\\nCatchphrase: Electrify the battlefield!'),\n",
276 | " Document(metadata={'row': 20, 'source': './assets-resources/superheroes.csv'}, page_content=\"Superhero Name: Stormbringer\\nSuperpower: Weather Manipulation\\nPower Level: 93\\nCatchphrase: Unleash the storm's fury!\"),\n",
277 | " Document(metadata={'row': 8, 'source': './assets-resources/superheroes.csv'}, page_content='Superhero Name: Steel Titan\\nSuperpower: Super Strength and Durability\\nPower Level: 95\\nCatchphrase: Indestructible force of nature!')]"
278 | ]
279 | },
280 | "execution_count": 9,
281 | "metadata": {},
282 | "output_type": "execute_result"
283 | }
284 | ],
285 | "source": [
286 | "docs"
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": 10,
292 | "metadata": {},
293 | "outputs": [],
294 | "source": [
295 | "from langchain.chains.combine_documents import create_stuff_documents_chain\n",
296 | "from langchain.chains import create_retrieval_chain\n",
297 | "from langchain_core.prompts import ChatPromptTemplate\n",
298 | "from langchain_core.runnables import RunnablePassthrough\n",
299 | "from langchain_core.output_parsers import StrOutputParser\n",
300 | "\n",
301 | "def format_docs(docs):\n",
302 | " return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
303 | "\n",
304 | "\n",
305 | "system_prompt = (\n",
306 | " \"You are an assistant for question-answering tasks. \"\n",
307 | " \"Use the following pieces of retrieved context to answer \"\n",
308 | " \"the question. If you don't know the answer, say that you \"\n",
309 | " \"don't know. Use three sentences maximum and keep the \"\n",
310 | " \"answer concise.\"\n",
311 | " \"\\n\\n\"\n",
312 | " \"{context}\"\n",
313 | ")\n",
314 | "\n",
315 | "prompt = ChatPromptTemplate.from_messages([\n",
316 | " ('system', system_prompt),\n",
317 | " ('human', '{input}')\n",
318 | "])\n",
319 | "\n",
320 | "\n",
321 | "rag_chain_from_docs = (\n",
322 | " {\n",
323 | " 'input': lambda x: x['input'],\n",
324 | " 'context': lambda x: format_docs(x['context']), \n",
325 | " }\n",
326 | " | prompt\n",
327 | " | llm\n",
328 | " | StrOutputParser()\n",
329 | ")\n",
330 | "\n",
331 | "retriever = vectordb.as_retriever()\n",
332 | "\n",
333 | "# passing the input query to the retriever\n",
334 | "retrieve_docs = (lambda x: x['input']) | retriever\n",
335 | "\n",
336 | "qa_chain = RunnablePassthrough.assign(context=retrieve_docs).assign(\n",
337 | " answer=rag_chain_from_docs\n",
338 | ")"
339 | ]
340 | },
341 | {
342 | "cell_type": "code",
343 | "execution_count": 11,
344 | "metadata": {},
345 | "outputs": [
346 | {
347 | "data": {
348 | "text/plain": [
349 | "{'input': 'What is the catch phrase for the super hero with the power of producing balls of fire?',\n",
350 | " 'context': [Document(metadata={'row': 24, 'source': './assets-resources/superheroes.csv'}, page_content='Superhero Name: Blazing Comet\\nSuperpower: Fireball Projection\\nPower Level: 82\\nCatchphrase: Burn brighter than a comet!'),\n",
351 | " Document(metadata={'row': 42, 'source': './assets-resources/superheroes.csv'}, page_content='Superhero Name: Solar Flare\\nSuperpower: Solar Energy Projection\\nPower Level: 85\\nCatchphrase: Feel the burning light!'),\n",
352 | " Document(metadata={'row': 3, 'source': './assets-resources/superheroes.csv'}, page_content='Superhero Name: Blaze Runner\\nSuperpower: Pyrokinesis\\nPower Level: 88\\nCatchphrase: Burn bright and fierce!'),\n",
353 | " Document(metadata={'row': 11, 'source': './assets-resources/superheroes.csv'}, page_content='Superhero Name: Starburst\\nSuperpower: Energy Projection\\nPower Level: 83\\nCatchphrase: Ignite the cosmos!')],\n",
354 | " 'answer': 'Blazing Comet’s catchphrase is “Burn brighter than a comet!” He possesses the power of fireball projection and has a power level of 82.'}"
355 | ]
356 | },
357 | "execution_count": 11,
358 | "metadata": {},
359 | "output_type": "execute_result"
360 | }
361 | ],
362 | "source": [
363 | "question = \"What is the catch phrase for the super hero with the power of producing balls of fire?\"\n",
364 | "result = qa_chain.invoke({'input': question})\n",
365 | "result"
366 | ]
367 | },
368 | {
369 | "cell_type": "code",
370 | "execution_count": 12,
371 | "metadata": {},
372 | "outputs": [
373 | {
374 | "data": {
375 | "text/plain": [
376 | "Superhero Name Blazing Comet\n",
377 | "Superpower Fireball Projection\n",
378 | "Power Level 82\n",
379 | "Catchphrase Burn brighter than a comet!\n",
380 | "Name: 24, dtype: object"
381 | ]
382 | },
383 | "execution_count": 12,
384 | "metadata": {},
385 | "output_type": "execute_result"
386 | }
387 | ],
388 | "source": [
389 | "df.iloc[24]"
390 | ]
391 | },
392 | {
393 | "cell_type": "code",
394 | "execution_count": null,
395 | "metadata": {},
396 | "outputs": [],
397 | "source": []
398 | }
399 | ],
400 | "metadata": {
401 | "kernelspec": {
402 | "display_name": "oreilly-langchain",
403 | "language": "python",
404 | "name": "oreilly-langchain"
405 | },
406 | "language_info": {
407 | "codemirror_mode": {
408 | "name": "ipython",
409 | "version": 3
410 | },
411 | "file_extension": ".py",
412 | "mimetype": "text/x-python",
413 | "name": "python",
414 | "nbconvert_exporter": "python",
415 | "pygments_lexer": "ipython3",
416 | "version": "3.11.9"
417 | }
418 | },
419 | "nbformat": 4,
420 | "nbformat_minor": 2
421 | }
422 |
--------------------------------------------------------------------------------
/notebooks/3.1-langchain-github-agent-prototype.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 12,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "%pip install -qU langchain\n",
10 | "%pip install langchain-openai\n",
11 | "%pip install langchainhub\n",
12 | "%pip install tiktoken"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": 17,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "import os\n",
22 | "import getpass\n",
23 | "\n",
24 | "# Set OPENAI API Key\n",
25 | "\n",
26 | "import os\n",
27 | "import getpass\n",
28 | "\n",
29 | "def _set_env(var: str):\n",
30 | " if not os.environ.get(var):\n",
31 | "        os.environ[var] = getpass.getpass(f\"{var}: \")\n",
32 | "\n",
33 | "_set_env(\"OPENAI_API_KEY\")"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "The more complex version of this would use this pygithub package as shown in here:\n",
41 | "- https://python.langchain.com/docs/integrations/toolkits/github\n",
42 | "\n",
43 | "But let's try a simpler version that just sets up a bunch of python functions to automate different github actions like add, commit and pull requests."
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": []
50 | },
51 | {
52 | "cell_type": "markdown",
53 | "metadata": {},
54 | "source": [
55 | "Let's start with a simple commit to a branch of some repository."
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "First, I'll create a github repository using some simple commands."
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {},
68 | "source": []
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 14,
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "# git init -b main\n",
77 | "\n",
78 | "# git add . && git commit -m \"Some commit\"\n",
79 | "\n",
80 | "# gh repo create"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": 1,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "MODEL='gpt-4o-mini'\n",
90 | "TEMP=0.0"
91 | ]
92 | },
93 | {
94 | "cell_type": "markdown",
95 | "metadata": {},
96 | "source": [
97 | "Now, let's try asking an agent to write some code and commit the resulting code to the current branch of this repository.\n",
98 | "\n",
99 | "To do that, let's give the agent the necessary tools it will need which in this case will be python functions that perform different github actions using the `subprocess` package."
100 | ]
101 | },
102 | {
103 | "cell_type": "markdown",
104 | "metadata": {},
105 | "source": [
106 | "We'll follow the basic steps for building a langchain agent:\n",
107 | "\n",
108 | "# Steps for building a simple agent:\n",
109 | "- Set up the LLM\n",
110 | "- Define the tool or toolkit (list of tools)\n",
111 | "- Set up a prompt template\n",
112 | "- Connect llm with your tools\n",
113 | "- Define your agent as a dict with keys: input, and agent scratchpad \n",
114 | "- Use the Langchain LCEL language to pipe agent, prompt the llm_with_tools variable and the output parser (which can use OpenAI function calling parser)\n",
115 | "- Create the agent loop\n",
116 | "- Wrap everything into the AgentExecutor\n",
117 | "- Invoke the agent with some query input\n"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": 18,
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "# setup the llm\n",
127 | "from langchain_openai import ChatOpenAI\n",
128 | "\n",
129 | "llm = ChatOpenAI(model=MODEL, temperature=TEMP)"
130 | ]
131 | },
132 | {
133 | "cell_type": "markdown",
134 | "metadata": {},
135 | "source": [
136 | "Now, let's create a tool for the LLM"
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": 4,
142 | "metadata": {},
143 | "outputs": [],
144 | "source": [
145 | "import subprocess\n",
146 | "\n",
147 | "def github_commit_tool(commit_message=\"Some commit\"):\n",
148 | " subprocess.run([\"git\", \"add\", \".\"])\n",
149 | " subprocess.run([\"git\", \"commit\", \"-m\", commit_message])\n",
150 | " subprocess.run([\"git\", \"push\", \"-u\", \"origin\", \"main\"])\n",
151 | " \n",
152 | " return \"Committed to Github\""
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "metadata": {},
158 | "source": [
159 | "Now! Before we use it with langchain, let's test it by itself."
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": 5,
165 | "metadata": {},
166 | "outputs": [
167 | {
168 | "name": "stdout",
169 | "output_type": "stream",
170 | "text": [
171 | "[main 079c67c] Testing commit for github agent demo.\n",
172 | " 2 files changed, 73 insertions(+), 113 deletions(-)\n",
173 | " create mode 100644 notebooks/2024-08-26-12-39-39.png\n",
174 | "branch 'main' set up to track 'origin/main'.\n"
175 | ]
176 | },
177 | {
178 | "name": "stderr",
179 | "output_type": "stream",
180 | "text": [
181 | "To https://github.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain.git\n",
182 | " 3a97192..079c67c main -> main\n"
183 | ]
184 | },
185 | {
186 | "data": {
187 | "text/plain": [
188 | "'Committed to Github'"
189 | ]
190 | },
191 | "execution_count": 5,
192 | "metadata": {},
193 | "output_type": "execute_result"
194 | }
195 | ],
196 | "source": [
197 | "github_commit_tool(\"Testing commit for github agent demo.\")"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "metadata": {},
203 | "source": [
204 | "Now, let's take a look at our github repository from the terminal.\n",
205 | "\n",
206 | ""
207 | ]
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "metadata": {},
212 | "source": [
213 | "Nice! It looks like we are good to go with this first simple tool!"
214 | ]
215 | },
216 | {
217 | "cell_type": "markdown",
218 | "metadata": {},
219 | "source": [
220 | "Let's now make it a tool for our langchain agent by adding the @tool decorator."
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": 19,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "from langchain.tools import tool\n",
230 | "\n",
231 | "@tool\n",
232 | "def github_commit_tool(commit_message=\"Some commit\"):\n",
233 | " \"\"\"This function uses the subprocess package to make commits to a github repo pre-defined.\"\"\"\n",
234 | " subprocess.run([\"git\", \"add\", \".\"])\n",
235 | " subprocess.run([\"git\", \"commit\", \"-m\", commit_message])\n",
236 | " subprocess.run([\"git\", \"push\", \"-u\", \"origin\", \"main\"])\n",
237 | " \n",
238 | " return \"Committed to Github\"\n",
239 | "\n",
240 | "tools = [github_commit_tool]"
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "metadata": {},
246 | "source": [
247 | "We added some documentation to our function to abide by the requirements of the tool decorator from langchain."
248 | ]
249 | },
250 | {
251 | "cell_type": "markdown",
252 | "metadata": {},
253 | "source": [
254 | "Now, let's test if a simple agent can use that tool!"
255 | ]
256 | },
257 | {
258 | "cell_type": "markdown",
259 | "metadata": {},
260 | "source": [
261 | "Let's start by setting up our prompt template."
262 | ]
263 | },
264 | {
265 | "cell_type": "code",
266 | "execution_count": 1,
267 | "metadata": {},
268 | "outputs": [
269 | {
270 | "data": {
271 | "text/plain": [
272 | "[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant'), additional_kwargs={}),\n",
273 | " MessagesPlaceholder(variable_name='chat_history', optional=True),\n",
274 | " HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], input_types={}, partial_variables={}, template='{input}'), additional_kwargs={}),\n",
275 | " MessagesPlaceholder(variable_name='agent_scratchpad')]"
276 | ]
277 | },
278 | "execution_count": 1,
279 | "metadata": {},
280 | "output_type": "execute_result"
281 | }
282 | ],
283 | "source": [
284 | "from langchain import hub\n",
285 | "\n",
286 | "prompt = hub.pull(\"hwchase17/openai-tools-agent\")\n",
287 | "prompt.messages"
288 | ]
289 | },
290 | {
291 | "cell_type": "code",
292 | "execution_count": 8,
293 | "metadata": {},
294 | "outputs": [],
295 | "source": [
296 | "from langchain.agents import create_tool_calling_agent\n",
297 | "from langchain_openai import ChatOpenAI\n",
298 | "\n",
299 | "agent = create_tool_calling_agent(llm, tools, prompt)"
300 | ]
301 | },
302 | {
303 | "cell_type": "code",
304 | "execution_count": 9,
305 | "metadata": {},
306 | "outputs": [],
307 | "source": [
308 | "from langchain.agents import AgentExecutor\n",
309 | "\n",
310 | "agent_executor = AgentExecutor(agent=agent,tools=tools, verbose=True)"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": 10,
316 | "metadata": {},
317 | "outputs": [
318 | {
319 | "name": "stdout",
320 | "output_type": "stream",
321 | "text": [
322 | "\n",
323 | "\n",
324 | "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
325 | "\u001b[32;1m\u001b[1;3m\n",
326 | "Invoking: `github_commit_tool` with `{'commit_message': 'Testing LLM agent!'}`\n",
327 | "\n",
328 | "\n",
329 | "\u001b[0mOn branch main\n",
330 | "Your branch is up to date with 'origin/main'.\n",
331 | "\n",
332 | "nothing to commit, working tree clean\n"
333 | ]
334 | },
335 | {
336 | "name": "stderr",
337 | "output_type": "stream",
338 | "text": [
339 | "Everything up-to-date\n"
340 | ]
341 | },
342 | {
343 | "name": "stdout",
344 | "output_type": "stream",
345 | "text": [
346 | "branch 'main' set up to track 'origin/main'.\n",
347 | "\u001b[36;1m\u001b[1;3mCommitted to Github\u001b[0m\u001b[32;1m\u001b[1;3mThe commit with the message \"Testing LLM agent!\" has been successfully made to GitHub.\u001b[0m\n",
348 | "\n",
349 | "\u001b[1m> Finished chain.\u001b[0m\n"
350 | ]
351 | },
352 | {
353 | "data": {
354 | "text/plain": [
355 | "{'input': \"Create a github commit with the message 'Testing LLM agent!'\",\n",
356 | " 'output': 'The commit with the message \"Testing LLM agent!\" has been successfully made to GitHub.'}"
357 | ]
358 | },
359 | "execution_count": 10,
360 | "metadata": {},
361 | "output_type": "execute_result"
362 | }
363 | ],
364 | "source": [
365 | "agent_executor.invoke({\"input\": \"Create a github commit with the message 'Testing LLM agent!'\"})"
366 | ]
367 | },
368 | {
369 | "cell_type": "markdown",
370 | "metadata": {},
371 | "source": [
372 | ""
373 | ]
374 | },
375 | {
376 | "cell_type": "markdown",
377 | "metadata": {},
378 | "source": [
379 | "And voila, the agent calls the right function and creates the commit!\n",
380 | "\n",
381 | "(Ignore the GitHub-related error; let's focus on the agent's capabilities.)"
382 | ]
383 | },
384 | {
385 | "cell_type": "markdown",
386 | "metadata": {},
387 | "source": [
388 | "And voila! We made a commit!\n",
389 | "\n",
390 | ""
391 | ]
392 | },
393 | {
394 | "cell_type": "markdown",
395 | "metadata": {},
396 | "source": [
397 | "Ok, this is cool, but we can go beyond that and add the ability to create a file and then commit that file to our desired repository. Let's do that!\n",
398 | "\n",
399 | "To do it, all we have to do is extend the agent's toolkit with another tool that can create files in the current folder."
400 | ]
401 | },
402 | {
403 | "cell_type": "markdown",
404 | "metadata": {},
405 | "source": [
406 | "Perfect! Now, let's add this to our github agent"
407 | ]
408 | },
409 | {
410 | "cell_type": "code",
411 | "execution_count": 21,
412 | "metadata": {},
413 | "outputs": [],
414 | "source": [
415 | "@tool\n",
416 | "def create_file_tool(filename, contents):\n",
417 | " \"\"\"This function creates a file given its filename and its contents provided as inputs.\"\"\"\n",
418 | " with open(filename, \"w\") as f:\n",
419 | " f.write(contents)\n",
420 | " \n",
421 | " return \"File created\""
422 | ]
423 | },
424 | {
425 | "cell_type": "markdown",
426 | "metadata": {},
427 | "source": [
428 | "Now, let's add that functionality to our agent by just updating the tools list."
429 | ]
430 | },
431 | {
432 | "cell_type": "code",
433 | "execution_count": 22,
434 | "metadata": {},
435 | "outputs": [],
436 | "source": [
437 | "tools = [github_commit_tool, create_file_tool]"
438 | ]
439 | },
440 | {
441 | "cell_type": "code",
442 | "execution_count": 23,
443 | "metadata": {},
444 | "outputs": [],
445 | "source": [
446 | "agent = create_tool_calling_agent(llm, tools, prompt)"
447 | ]
448 | },
449 | {
450 | "cell_type": "code",
451 | "execution_count": 24,
452 | "metadata": {},
453 | "outputs": [],
454 | "source": [
455 | "# Let's also inspect this agent execution in langsmith!\n",
456 | "agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
457 | ]
458 | },
459 | {
460 | "cell_type": "markdown",
461 | "metadata": {},
462 | "source": [
463 | "Let's also inspect this agent execution in langsmith!"
464 | ]
465 | },
466 | {
467 | "cell_type": "code",
468 | "execution_count": 26,
469 | "metadata": {},
470 | "outputs": [
471 | {
472 | "name": "stdout",
473 | "output_type": "stream",
474 | "text": [
475 | "\n",
476 | "\n",
477 | "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
478 | "\u001b[32;1m\u001b[1;3m\n",
479 | "Invoking: `create_file_tool` with `{'filename': 'pancakes-are-the-best-breakfast.md', 'contents': '# Why Pancakes are the Most Delicious Breakfast\\n\\n- **Fluffy Texture**: Pancakes have a light and fluffy texture that melts in your mouth.\\n- **Versatile Toppings**: They can be topped with a variety of delicious options like syrup, fruits, whipped cream, and nuts.\\n- **Quick to Make**: Pancakes are quick and easy to prepare, making them a perfect breakfast choice.\\n- **Comfort Food**: They provide a sense of comfort and nostalgia, often reminding us of family breakfasts.\\n- **Customizable**: You can easily customize the batter with flavors like chocolate, vanilla, or spices.\\n- **Great for Sharing**: Pancakes can be made in large batches, perfect for sharing with family and friends.\\n- **Satisfying**: They are filling and satisfying, providing a good start to the day.\\n- **Healthy Options**: You can make healthier versions using whole grains, fruits, or even protein powder.\\n- **Cultural Variations**: Pancakes come in many forms across different cultures, from crepes to flapjacks.\\n- **Fun to Make**: Cooking pancakes can be a fun activity, especially for kids who enjoy flipping them!'}`\n",
480 | "\n",
481 | "\n",
482 | "\u001b[0m\u001b[33;1m\u001b[1;3mFile created\u001b[0m\u001b[32;1m\u001b[1;3m\n",
483 | "Invoking: `github_commit_tool` with `{'commit_message': 'Add pancakes-are-the-best-breakfast.md with reasons why pancakes are the most delicious breakfast.'}`\n",
484 | "\n",
485 | "\n",
486 | "\u001b[0m[main abe2980] Add pancakes-are-the-best-breakfast.md with reasons why pancakes are the most delicious breakfast.\n",
487 | " 3 files changed, 30 insertions(+), 37 deletions(-)\n"
488 | ]
489 | },
490 | {
491 | "name": "stderr",
492 | "output_type": "stream",
493 | "text": [
494 | "To https://github.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain.git\n",
495 | " d5c44bc..abe2980 main -> main\n"
496 | ]
497 | },
498 | {
499 | "name": "stdout",
500 | "output_type": "stream",
501 | "text": [
502 | "branch 'main' set up to track 'origin/main'.\n",
503 | "\u001b[36;1m\u001b[1;3mCommitted to Github\u001b[0m\u001b[32;1m\u001b[1;3mI have created the file named \"pancakes-are-the-best-breakfast.md\" containing 10 bullet points on why pancakes are the most delicious breakfast, and it has been successfully committed to GitHub.\u001b[0m\n",
504 | "\n",
505 | "\u001b[1m> Finished chain.\u001b[0m\n"
506 | ]
507 | },
508 | {
509 | "data": {
510 | "text/plain": [
511 | "{'input': 'Create and commit a file named: \"pancakes-are-the-best-breakfast.md\" containing 10 bullet points on why pancakes are the most delicious breakfast.',\n",
512 | " 'output': 'I have created the file named \"pancakes-are-the-best-breakfast.md\" containing 10 bullet points on why pancakes are the most delicious breakfast, and it has been successfully committed to GitHub.'}"
513 | ]
514 | },
515 | "execution_count": 26,
516 | "metadata": {},
517 | "output_type": "execute_result"
518 | }
519 | ],
520 | "source": [
521 | "agent_executor.invoke({'input': 'Create and commit a file named: \"pancakes-are-the-best-breakfast.md\" containing 10 bullet points on why\\\n",
522 | " pancakes are the most delicious breakfast.'})"
523 | ]
524 | },
525 | {
526 | "cell_type": "markdown",
527 | "metadata": {},
528 | "source": [
529 | "We can inspect this more thoroughly in the [langsmith platform](https://smith.langchain.com/)."
530 | ]
531 | },
532 | {
533 | "cell_type": "markdown",
534 | "metadata": {},
535 | "source": [
536 | "Ok, before we finish with this example, let's add a last tool to read files from a repo, for situations where we might write some Python code and we want our agent to integrate that Python code into our repository. "
537 | ]
538 | },
539 | {
540 | "cell_type": "markdown",
541 | "metadata": {},
542 | "source": [
543 | "Again, we follow the same procedure:\n",
544 | "\n",
545 | "- Create the tool\n",
546 | "- Update our toolkit\n",
547 | "- Update our agent\n",
548 | "- Run a test task to check if it works "
549 | ]
550 | },
551 | {
552 | "cell_type": "markdown",
553 | "metadata": {},
554 | "source": [
555 | "I think this is a great use case for agents like these, even if sometimes the performance and reliability are a bit off, because now it's a matter of optimizing the edges to get it to work for complex use cases."
556 | ]
557 | },
558 | {
559 | "cell_type": "markdown",
560 | "metadata": {},
561 | "source": [
562 | "# DISCLAIMER\n",
563 | "\n",
564 | "This example is for demonstration purposes; it does not put in place any measures to counteract potential security vulnerabilities\n",
565 | "associated with giving large language models the ability to execute shell commands on our behalf without authentication procedures\n",
566 | "and sandbox environments."
567 | ]
568 | }
569 | ],
570 | "metadata": {
571 | "kernelspec": {
572 | "display_name": "oreilly-langchain",
573 | "language": "python",
574 | "name": "oreilly-langchain"
575 | },
576 | "language_info": {
577 | "codemirror_mode": {
578 | "name": "ipython",
579 | "version": 3
580 | },
581 | "file_extension": ".py",
582 | "mimetype": "text/x-python",
583 | "name": "python",
584 | "nbconvert_exporter": "python",
585 | "pygments_lexer": "ipython3",
586 | "version": "3.11.9"
587 | }
588 | },
589 | "nbformat": 4,
590 | "nbformat_minor": 2
591 | }
592 |
--------------------------------------------------------------------------------
/notebooks/3.2-langchain-agent-with-langgraph.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": []
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "# Import relevant functionality\n",
17 | "from langchain_openai import ChatOpenAI\n",
18 | "from langchain_community.tools.tavily_search import TavilySearchResults\n",
19 | "from langchain_core.messages import HumanMessage\n",
20 | "from langgraph.checkpoint.memory import MemorySaver\n",
21 | "from langgraph.prebuilt import create_react_agent"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 2,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "# Create the agent\n",
31 | "memory = MemorySaver()"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 3,
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "data": {
41 | "text/plain": [
42 | "AIMessage(content='An agent based on large language models (LLMs) refers to a software entity or system that utilizes the capabilities of LLMs to perform tasks, interact with users, and make decisions based on natural language input. These agents can be designed for a variety of applications, including customer support, content generation, virtual assistants, and more. Here are some key characteristics and functionalities of such agents:\\n\\n1. **Natural Language Understanding (NLU)**: Agents leverage LLMs to comprehend and interpret user inputs in natural language, allowing for more intuitive interactions.\\n\\n2. **Natural Language Generation (NLG)**: They can generate human-like text responses, making conversations more engaging and contextually relevant.\\n\\n3. **Context Awareness**: Advanced agents can maintain context over a conversation, enabling more coherent and meaningful interactions over multiple exchanges.\\n\\n4. **Task Execution**: Agents may perform specific tasks such as answering questions, providing recommendations, or even executing commands, depending on their design and the context of the interaction.\\n\\n5. **Learning and Adaptation**: Some agents can learn from user interactions over time, improving their responses and functionalities based on feedback and usage patterns.\\n\\n6. **Integration with Other Systems**: Agents can often be integrated with other software systems and databases to access real-time information, manage tasks, or pull in relevant data to enhance their responses.\\n\\n7. **Personalization**: They can be designed to tailor responses based on user preferences or previous interactions, creating a more personalized experience.\\n\\nExamples of agents based on large language models include chatbots, virtual personal assistants like Siri or Google Assistant, and automated customer service representatives. 
These agents are increasingly being used across various industries to enhance user experience and streamline operations.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 343, 'prompt_tokens': 17, 'total_tokens': 360, 'completion_tokens_details': {'audio_tokens': 0, 'reasoning_tokens': 0, 'accepted_prediction_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_06737a9306', 'finish_reason': 'stop', 'logprobs': None}, id='run-cf74da99-bcea-44ae-a8ce-dcc26ac0d85b-0', usage_metadata={'input_tokens': 17, 'output_tokens': 343, 'total_tokens': 360, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})"
43 | ]
44 | },
45 | "execution_count": 3,
46 | "metadata": {},
47 | "output_type": "execute_result"
48 | }
49 | ],
50 | "source": [
51 | "llm = ChatOpenAI(model_name=\"gpt-4o-mini\")\n",
52 | "\n",
53 | "\n",
54 | "llm.invoke(\"What is an agent based on large language models?\")"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": 4,
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "search = TavilySearchResults(max_results=3)"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 5,
69 | "metadata": {},
70 | "outputs": [],
71 | "source": [
72 | "tools = [search]"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 6,
78 | "metadata": {},
79 | "outputs": [
80 | {
81 | "ename": "NameError",
82 | "evalue": "name 'agent_executor' is not defined",
83 | "output_type": "error",
84 | "traceback": [
85 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
86 | "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
87 | "Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43magent_executor\u001b[49m\n",
88 | "\u001b[0;31mNameError\u001b[0m: name 'agent_executor' is not defined"
89 | ]
90 | }
91 | ],
92 | "source": [
93 | "agent_executor"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": 7,
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "agent_executor = create_react_agent(llm, tools, checkpointer=memory)"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 8,
108 | "metadata": {},
109 | "outputs": [
110 | {
111 | "name": "stdout",
112 | "output_type": "stream",
113 | "text": [
114 | "{'agent': {'messages': [AIMessage(content=\"Hi Bob! It's great to meet you. How's life in San Francisco?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 88, 'total_tokens': 105, 'completion_tokens_details': {'audio_tokens': 0, 'reasoning_tokens': 0, 'accepted_prediction_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_bba3c8e70b', 'finish_reason': 'stop', 'logprobs': None}, id='run-af9de20a-f905-4972-bfc5-5ec5b707ff4b-0', usage_metadata={'input_tokens': 88, 'output_tokens': 17, 'total_tokens': 105, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]}}\n",
115 | "----\n",
116 | "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_UFdwE6U5TjsJpVKI40kHzmHu', 'function': {'arguments': '{\"query\":\"San Francisco weather\"}', 'name': 'tavily_search_results_json'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 20, 'prompt_tokens': 120, 'total_tokens': 140, 'completion_tokens_details': {'audio_tokens': 0, 'reasoning_tokens': 0, 'accepted_prediction_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_bba3c8e70b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-809ac445-45e1-45b7-9aec-33bbcf059ec9-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'San Francisco weather'}, 'id': 'call_UFdwE6U5TjsJpVKI40kHzmHu', 'type': 'tool_call'}], usage_metadata={'input_tokens': 120, 'output_tokens': 20, 'total_tokens': 140, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]}}\n",
117 | "----\n",
118 | "{'tools': {'messages': [ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.775, \\'lon\\': -122.4183, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1733951474, \\'localtime\\': \\'2024-12-11 13:11\\'}, \\'current\\': {\\'last_updated_epoch\\': 1733950800, \\'last_updated\\': \\'2024-12-11 13:00\\', \\'temp_c\\': 12.2, \\'temp_f\\': 54.0, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Partly cloudy\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/116.png\\', \\'code\\': 1003}, \\'wind_mph\\': 4.3, \\'wind_kph\\': 6.8, \\'wind_degree\\': 43, \\'wind_dir\\': \\'NE\\', \\'pressure_mb\\': 1021.0, \\'pressure_in\\': 30.15, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 66, \\'cloud\\': 75, \\'feelslike_c\\': 11.8, \\'feelslike_f\\': 53.3, \\'windchill_c\\': 7.7, \\'windchill_f\\': 45.8, \\'heatindex_c\\': 8.8, \\'heatindex_f\\': 47.9, \\'dewpoint_c\\': 6.7, \\'dewpoint_f\\': 44.0, \\'vis_km\\': 16.0, \\'vis_miles\\': 9.0, \\'uv\\': 1.8, \\'gust_mph\\': 5.7, \\'gust_kph\\': 9.2}}\"}, {\"url\": \"https://www.msn.com/en-us/weather/topstories/december-11-2024-san-francisco-bay-area-weather-forecast/vi-AA1vFB5g\", \"content\": \"KRON San Francisco. December 11, 2024 San Francisco Bay Area weather forecast. Posted: December 11, 2024 | Last updated: December 11, 2024. KRON4 Meteorologist John Shrable has the latest updates\"}, {\"url\": \"https://weatherspark.com/h/m/557/2024/11/Historical-Weather-in-November-2024-in-San-Francisco-California-United-States\", \"content\": \"This report shows the past weather for San Francisco, providing a weather history for November 2024. It features all historical weather data series we have available, including the San Francisco temperature history for November 2024. 
San Francisco Temperature History November 2024 Hourly Temperature in November 2024 in San Francisco Cloud Cover in November 2024 in San Francisco Daily Precipitation in November 2024 in San Francisco Observed Weather in November 2024 in San Francisco Hours of Daylight and Twilight in November 2024 in San Francisco Humidity Comfort Levels in November 2024 in San Francisco Wind Speed in November 2024 in San Francisco Hourly Wind Speed in November 2024 in San Francisco The details of the data sources used for this report can be found on the San Francisco International Airport page.\"}]', name='tavily_search_results_json', id='22ffeb9f-caf6-4d30-ba73-19add990aa4f', tool_call_id='call_UFdwE6U5TjsJpVKI40kHzmHu', artifact={'query': 'San Francisco weather', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Weather in San Francisco', 'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1733951474, 'localtime': '2024-12-11 13:11'}, 'current': {'last_updated_epoch': 1733950800, 'last_updated': '2024-12-11 13:00', 'temp_c': 12.2, 'temp_f': 54.0, 'is_day': 1, 'condition': {'text': 'Partly cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 4.3, 'wind_kph': 6.8, 'wind_degree': 43, 'wind_dir': 'NE', 'pressure_mb': 1021.0, 'pressure_in': 30.15, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 66, 'cloud': 75, 'feelslike_c': 11.8, 'feelslike_f': 53.3, 'windchill_c': 7.7, 'windchill_f': 45.8, 'heatindex_c': 8.8, 'heatindex_f': 47.9, 'dewpoint_c': 6.7, 'dewpoint_f': 44.0, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 1.8, 'gust_mph': 5.7, 'gust_kph': 9.2}}\", 'score': 0.9992536, 'raw_content': None}, {'title': 'December 11, 2024 San Francisco Bay Area weather forecast - MSN', 'url': 
'https://www.msn.com/en-us/weather/topstories/december-11-2024-san-francisco-bay-area-weather-forecast/vi-AA1vFB5g', 'content': 'KRON San Francisco. December 11, 2024 San Francisco Bay Area weather forecast. Posted: December 11, 2024 | Last updated: December 11, 2024. KRON4 Meteorologist John Shrable has the latest updates', 'score': 0.9957684, 'raw_content': None}, {'title': 'November 2024 Weather History in San Francisco', 'url': 'https://weatherspark.com/h/m/557/2024/11/Historical-Weather-in-November-2024-in-San-Francisco-California-United-States', 'content': 'This report shows the past weather for San Francisco, providing a weather history for November 2024. It features all historical weather data series we have available, including the San Francisco temperature history for November 2024. San Francisco Temperature History November 2024 Hourly Temperature in November 2024 in San Francisco Cloud Cover in November 2024 in San Francisco Daily Precipitation in November 2024 in San Francisco Observed Weather in November 2024 in San Francisco Hours of Daylight and Twilight in November 2024 in San Francisco Humidity Comfort Levels in November 2024 in San Francisco Wind Speed in November 2024 in San Francisco Hourly Wind Speed in November 2024 in San Francisco The details of the data sources used for this report can be found on the San Francisco International Airport page.', 'score': 0.99021614, 'raw_content': None}], 'response_time': 2.74})]}}\n",
119 | "----\n",
120 | "{'agent': {'messages': [AIMessage(content='The current weather in San Francisco is partly cloudy with a temperature of about 54°F (12.2°C). Here are some additional details:\\n\\n- **Humidity**: 66%\\n- **Wind**: Northeastern at 4.3 mph (6.8 kph)\\n- **Visibility**: 16 km (9 miles)\\n- **Pressure**: 30.15 inHg (1021.0 mb)\\n\\nIt feels slightly cooler at around 53.3°F (11.8°C) due to the wind chill.\\n\\nIf you want more detailed or updated information, you can check out [WeatherAPI](https://www.weatherapi.com/) or local news outlets.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 140, 'prompt_tokens': 867, 'total_tokens': 1007, 'completion_tokens_details': {'audio_tokens': 0, 'reasoning_tokens': 0, 'accepted_prediction_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_bba3c8e70b', 'finish_reason': 'stop', 'logprobs': None}, id='run-7c5358f7-2be9-4753-8375-1bb0470c58c5-0', usage_metadata={'input_tokens': 867, 'output_tokens': 140, 'total_tokens': 1007, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]}}\n",
121 | "----\n"
122 | ]
123 | }
124 | ],
125 | "source": [
126 | "# Use the agent\n",
127 | "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
128 | "for chunk in agent_executor.stream(\n",
129 | " {\"messages\": [HumanMessage(content=\"hi im bob! and i live in sf\")]}, config\n",
130 | "):\n",
131 | " print(chunk)\n",
132 | " print(\"----\")\n",
133 | "\n",
134 | "for chunk in agent_executor.stream(\n",
135 | " {\"messages\": [HumanMessage(content=\"whats the weather where I live?\")]}, config\n",
136 | "):\n",
137 | " print(chunk)\n",
138 | " print(\"----\")"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "metadata": {},
145 | "outputs": [],
146 | "source": []
147 | }
148 | ],
149 | "metadata": {
150 | "kernelspec": {
151 | "display_name": "oreilly-langchain",
152 | "language": "python",
153 | "name": "oreilly-langchain"
154 | },
155 | "language_info": {
156 | "codemirror_mode": {
157 | "name": "ipython",
158 | "version": 3
159 | },
160 | "file_extension": ".py",
161 | "mimetype": "text/x-python",
162 | "name": "python",
163 | "nbconvert_exporter": "python",
164 | "pygments_lexer": "ipython3",
165 | "version": "3.11.9"
166 | }
167 | },
168 | "nbformat": 4,
169 | "nbformat_minor": 2
170 | }
171 |
--------------------------------------------------------------------------------
/notebooks/6.0-live-demo-chat-with-langchain-docs-urls.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "urls = ['https://python.langchain.com/docs/concepts/#__docusaurus_skipToContent_fallback', \n",
10 | " 'https://python.langchain.com/', 'https://python.langchain.com/docs/integrations/providers/', \n",
11 | " 'https://python.langchain.com/api_reference/', 'https://python.langchain.com/docs/concepts/#', \n",
12 | " 'https://python.langchain.com/docs/contributing/', 'https://python.langchain.com/docs/people/', \n",
13 | " 'https://python.langchain.com/docs/troubleshooting/errors/', 'https://docs.smith.langchain.com/', \n",
14 | " 'https://langchain-ai.github.io/langgraph/', 'https://smith.langchain.com/hub', 'https://js.langchain.com/',\n",
15 | " 'https://python.langchain.com/docs/introduction/', 'https://python.langchain.com/v0.2/docs/introduction',\n",
16 | " 'https://python.langchain.com/v0.1/docs/get_started/introduction', 'https://chat.langchain.com/', \n",
17 | " 'https://github.com/langchain-ai/langchain', 'https://python.langchain.com/docs/tutorials/', \n",
18 | " 'https://python.langchain.com/docs/tutorials/graph/', 'https://python.langchain.com/docs/tutorials/llm_chain/',\n",
19 | " 'https://python.langchain.com/docs/tutorials/chatbot/', 'https://python.langchain.com/docs/tutorials/qa_chat_history/', \n",
20 | " 'https://python.langchain.com/docs/tutorials/extraction/', 'https://python.langchain.com/docs/tutorials/agents/', \n",
21 | " 'https://python.langchain.com/docs/tutorials/classification/', 'https://python.langchain.com/docs/tutorials/rag/',\n",
22 | " 'https://python.langchain.com/docs/tutorials/retrievers/', 'https://python.langchain.com/docs/tutorials/sql_qa/', \n",
23 | " 'https://python.langchain.com/docs/tutorials/summarization/', 'https://python.langchain.com/docs/how_to/', \n",
24 | " 'https://python.langchain.com/docs/how_to/tools_chain/', 'https://python.langchain.com/docs/how_to/vectorstore_retriever/',\n",
25 | " 'https://python.langchain.com/docs/how_to/chatbots_memory/', 'https://python.langchain.com/docs/how_to/example_selectors/', \n",
26 | " 'https://python.langchain.com/docs/how_to/graph_semantic/', 'https://python.langchain.com/docs/how_to/parallel/', \n",
27 | " 'https://python.langchain.com/docs/how_to/chat_streaming/', 'https://python.langchain.com/docs/how_to/binding/', \n",
28 | " 'https://python.langchain.com/docs/how_to/chatbots_retrieval/', 'https://python.langchain.com/docs/how_to/few_shot_examples_chat/', \n",
29 | " 'https://python.langchain.com/docs/how_to/function_calling/', 'https://python.langchain.com/docs/how_to/installation/', \n",
30 | " 'https://python.langchain.com/docs/how_to/query_few_shot/', 'https://python.langchain.com/docs/how_to/few_shot_examples/', \n",
31 | " 'https://python.langchain.com/docs/how_to/functions/', 'https://python.langchain.com/docs/how_to/output_parser_structured/', \n",
32 | " 'https://python.langchain.com/docs/how_to/query_no_queries/', 'https://python.langchain.com/docs/how_to/routing/', 'https://python.langchain.com/docs/how_to/structured_output/', 'https://python.langchain.com/docs/how_to/summarize_map_reduce/', 'https://python.langchain.com/docs/how_to/summarize_refine/', 'https://python.langchain.com/docs/how_to/summarize_stuff/', 'https://python.langchain.com/docs/how_to/toolkits/', 'https://python.langchain.com/docs/how_to/tools_prompting/', 'https://python.langchain.com/docs/how_to/agent_executor/', 'https://python.langchain.com/docs/how_to/graph_constructing/', 'https://python.langchain.com/docs/how_to/prompts_partial/', 'https://python.langchain.com/docs/how_to/query_multiple_queries/', 'https://python.langchain.com/docs/how_to/tools_builtin/', 'https://python.langchain.com/docs/how_to/passthrough/',\n",
33 | " 'https://python.langchain.com/docs/how_to/prompts_composition/', 'https://python.langchain.com/docs/how_to/query_multiple_retrievers/', 'https://python.langchain.com/docs/how_to/assign/', 'https://python.langchain.com/docs/how_to/query_constructing_filters/', 'https://python.langchain.com/docs/how_to/configure/', 'https://python.langchain.com/docs/how_to/query_high_cardinality/', 'https://python.langchain.com/docs/how_to/document_loader_custom/', 'https://python.langchain.com/docs/how_to/HTML_header_metadata_splitter/', 'https://python.langchain.com/docs/how_to/HTML_section_aware_splitter/', 'https://python.langchain.com/docs/how_to/MultiQueryRetriever/', 'https://python.langchain.com/docs/how_to/add_scores_retriever/', 'https://python.langchain.com/docs/how_to/caching_embeddings/', 'https://python.langchain.com/docs/how_to/callbacks_async/', 'https://python.langchain.com/docs/how_to/callbacks_attach/', 'https://python.langchain.com/docs/how_to/callbacks_constructor/', 'https://python.langchain.com/docs/how_to/callbacks_custom_events/', 'https://python.langchain.com/docs/how_to/callbacks_runtime/', 'https://python.langchain.com/docs/how_to/character_text_splitter/', 'https://python.langchain.com/docs/how_to/chat_model_caching/', 'https://python.langchain.com/docs/how_to/chat_model_rate_limiting/', 'https://python.langchain.com/docs/how_to/chat_models_universal_init/', 'https://python.langchain.com/docs/how_to/chat_token_usage_tracking/', 'https://python.langchain.com/docs/how_to/chatbots_tools/', 'https://python.langchain.com/docs/how_to/code_splitter/', 'https://python.langchain.com/docs/how_to/contextual_compression/', 'https://python.langchain.com/docs/how_to/convert_runnable_to_tool/', 'https://python.langchain.com/docs/how_to/custom_callbacks/',\n",
34 | " 'https://python.langchain.com/docs/how_to/custom_chat_model/', 'https://python.langchain.com/docs/how_to/custom_llm/', 'https://python.langchain.com/docs/how_to/custom_retriever/', 'https://python.langchain.com/docs/how_to/custom_tools/', 'https://python.langchain.com/docs/how_to/debugging/', 'https://python.langchain.com/docs/how_to/document_loader_csv/', 'https://python.langchain.com/docs/how_to/document_loader_directory/', 'https://python.langchain.com/docs/how_to/document_loader_html/', 'https://python.langchain.com/docs/how_to/document_loader_json/', 'https://python.langchain.com/docs/how_to/document_loader_markdown/', 'https://python.langchain.com/docs/how_to/document_loader_office_file/', 'https://python.langchain.com/docs/how_to/document_loader_pdf/']"
35 | ]
36 | }
37 | ],
38 | "metadata": {
39 | "language_info": {
40 | "name": "python"
41 | }
42 | },
43 | "nbformat": 4,
44 | "nbformat_minor": 2
45 | }
46 |
--------------------------------------------------------------------------------
/notebooks/README.md:
--------------------------------------------------------------------------------
1 | # O'Reilly Live Training - Getting Started with LangChain
2 |
3 | Link to the event: https://learning.oreilly.com/live-events/getting-started-with-langchain/0636920098586/
4 |
5 | ## Setup
6 |
7 | **Conda**
8 |
9 | - Install [anaconda](https://www.anaconda.com/download)
10 | - Create an environment: `conda create -n oreilly-langchain`
11 | - Activate your environment with: `conda activate oreilly-langchain`
12 | - Install requirements with:
13 | - `pip install -r ./notebooks/requirements.txt`
14 |
15 | - Set up your OpenAI [API key](https://platform.openai.com/)
16 |
17 | **Pip**
18 |
19 |
20 | 1. **Create a Virtual Environment:**
21 | Navigate to your project directory. If using Python 3's built-in `venv`:
22 | ```bash
23 | python -m venv oreilly_env
24 | ```
25 | If you're using `virtualenv`:
26 | ```bash
27 | virtualenv oreilly_env
28 | ```
29 |
30 | 2. **Activate the Virtual Environment:**
31 | - **On Windows:**
32 | ```bash
33 | .\oreilly_env\Scripts\activate
34 | ```
35 | - **On macOS and Linux:**
36 | ```bash
37 | source oreilly_env/bin/activate
38 | ```
39 |
40 | 3. **Install Dependencies from `requirements.txt`:**
41 | ```bash
42 | pip install python-dotenv
43 | pip install -r requirements.txt
44 | ```
45 |
46 | 4. Set up your OpenAI [API key](https://platform.openai.com/)
47 |
48 | Remember to deactivate the virtual environment once you're done by simply typing:
49 | ```bash
50 | deactivate
51 | ```
52 |
53 | ## Setup your .env file
54 |
55 | - Change the `.env.example` file to `.env` and add your OpenAI API key.
56 |
57 | ## To use this Environment with Jupyter Notebooks:
58 |
59 | ```python3 -m ipykernel install --user --name=oreilly-langchain```
--------------------------------------------------------------------------------
/notebooks/assets-resources/Augmenting-Human-Intellect_ A-Conceptual-Framework-1962-Doug Engelbart.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/Augmenting-Human-Intellect_ A-Conceptual-Framework-1962-Doug Engelbart.pdf
--------------------------------------------------------------------------------
/notebooks/assets-resources/agent-loop-langgraph-version.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/agent-loop-langgraph-version.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/agent-loop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/agent-loop.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/agent_loop.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/notebooks/assets-resources/ai.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/ai.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/attention-paper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/attention-paper.pdf
--------------------------------------------------------------------------------
/notebooks/assets-resources/attributes_runnable.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/attributes_runnable.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/basic-agent-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/basic-agent-diagram.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/chatgpt-demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/chatgpt-demo.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/components_input_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/components_input_output.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/components_input_type_output_type.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/components_input_type_output_type.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/diagram_langchain_agent_find_source_code.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/diagram_langchain_agent_find_source_code.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/embeddings-similarity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/embeddings-similarity.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/embeddings-similarity2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/embeddings-similarity2.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/embeddings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/embeddings.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/google-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/google-icon.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/google-translate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/google-translate.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/images_tbn.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/images_tbn.jpg
--------------------------------------------------------------------------------
/notebooks/assets-resources/langchain-project-structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/langchain-project-structure.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/langchain-toolkits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/langchain-toolkits.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/langgraph-components.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/langgraph-components.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/lcel-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/lcel-image.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/llm_paper_know_dont_know.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/llm_paper_know_dont_know.pdf
--------------------------------------------------------------------------------
/notebooks/assets-resources/llm_predicts_pancakes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/llm_predicts_pancakes.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/pancake_maker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/pancake_maker.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/paper-llm-components.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/paper-llm-components.pdf
--------------------------------------------------------------------------------
/notebooks/assets-resources/pydantic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/pydantic.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/rag-docs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/rag-docs.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/rag-langchain-retrieval.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/rag-langchain-retrieval.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/rag-langchain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/rag-langchain.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/superheroes.csv:
--------------------------------------------------------------------------------
1 | Superhero Name,Superpower,Power Level,Catchphrase
2 | Captain Thunder,Bolt Manipulation,90,"Feel the power of the storm!"
3 | Silver Falcon,Flight and Agility,85,"Soar high, fearlessly!"
4 | Mystic Shadow,Invisibility and Illusions,78,"Disappear into the darkness!"
5 | Blaze Runner,Pyrokinesis,88,"Burn bright and fierce!"
6 | Electra-Wave,Electric Manipulation,82,"Unleash the electric waves!"
7 | Crimson Cyclone,Super Speed,91,"Blazing fast and unstoppable!"
8 | Aqua Fury,Hydrokinesis,80,"Ride the waves of power!"
9 | Lunar Guardian,Lunar Manipulation,77,"Embrace the moon's might!"
10 | Steel Titan,Super Strength and Durability,95,"Indestructible force of nature!"
11 | Nightblade,Night Vision and Stealth,84,"Strike from the shadows!"
12 | Frostbite,Ice Manipulation,87,"Chill your bones!"
13 | Starburst,Energy Projection,83,"Ignite the cosmos!"
14 | Sonic Dash,Sound Manipulation,86,"Hear the sound of victory!"
15 | Nova Surge,Energy Absorption and Redirection,89,"Harness the energy within!"
16 | Shadowcat,Intangibility,76,"Phase through the impossible!"
17 | Neon Spark,Light Manipulation,79,"Glow and dazzle!"
18 | Phoenix Flame,Resurrection and Healing,94,"Rise from the ashes!"
19 | Crystal Guardian,Crystallokinesis,81,"Shatter your illusions!"
20 | Earthshaker,Geokinesis,92,"Feel the earth tremble beneath you!"
21 | Silver Seraph,Telekinesis,85,"Move objects with your mind!"
22 | Stormbringer,Weather Manipulation,93,"Unleash the storm's fury!"
23 | Scarlet Siren,Mind Control,88,"Obey my commands!"
24 | Rift Walker,Dimensional Travel,90,"Step between worlds!"
25 | Chrono-Wizard,Time Manipulation,91,"Master of time's flow!"
26 | Blazing Comet,Fireball Projection,82,"Burn brighter than a comet!"
27 | Phantom Wisp,Invisibility,75,"Disappear like a wisp in the wind!"
28 | Luminous Knight,Luminokinesis,78,"Illuminate the darkest night!"
29 | Gravity Shift,Gravity Manipulation,89,"Bend gravity to your will!"
30 | Solar Empress,Solar Energy Absorption,87,"Harness the power of the sun!"
31 | Twilight Specter,Shadow Manipulation,80,"Dance with shadows!"
32 | Thunderstrike,Lightning Control,91,"Electrify the battlefield!"
33 | Nebula Weaver,Reality Warping,96,"Shape the fabric of reality!"
34 | Frostglide,Cryo-Teleportation,85,"Freeze and glide through space!"
35 | Zenith Blaze,Heat Vision,83,"Gaze into the flames of justice!"
36 | Astral Drifter,Astral Projection,79,"Roam the astral plane!"
37 | Blade Dancer,Swordsmanship and Agility,88,"Dance with deadly grace!"
38 | Azure Tempest,Water Manipulation,82,"Unleash the tempest's rage!"
39 | Ghost Sentinel,Intangibility and Invisibility,76,"Haunt your nightmares!"
40 | Ember Fox,Pyrokinetic Fox Shapeshifter,89,"Outfox your enemies with fire!"
41 | Psy-Bender,Telepathy,84,"Read minds like an open book!"
42 | Celestial Sphinx,Cosmic Awareness,93,"Unravel the secrets of the universe!"
43 | Dragonfist,Dragon Summoning and Martial Arts,92,"Unleash the dragon's fury!"
44 | Solar Flare,Solar Energy Projection,85,"Feel the burning light!"
45 | Night Lotus,Darkness Manipulation,78,"Bloom in the shadows!"
46 | Quantum Strider,Quantum Manipulation,90,"Walk the edge of reality!"
47 | Ironclad,Invulnerability and Enhanced Strength,95,"Invincible and mighty!"
48 | Shadow Stalker,Shadow Shifting,81,"Disappear and reappear at will!"
49 | Aqua Archer,Water Arrow Projection,80,"Shoot through water's flow!"
50 | Crystal Gazer,Crystal Ball Scrying,77,"See what the future holds!"
51 |
--------------------------------------------------------------------------------
/notebooks/assets-resources/tagging.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/tagging.png
--------------------------------------------------------------------------------
/notebooks/assets-resources/vectordb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/assets-resources/vectordb.png
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/2023-12-13-14-12-45.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/dev-notebooks/2023-12-13-14-12-45.png
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/2023-12-13-14-25-37.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/dev-notebooks/2023-12-13-14-25-37.png
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/5.1-demo-playwright-scrape-articles.py:
--------------------------------------------------------------------------------
1 | # requires installing playwright!
2 | import pprint
3 | from langchain.text_splitter import RecursiveCharacterTextSplitter
4 | from langchain.document_loaders import AsyncChromiumLoader
5 | from langchain.document_transformers import BeautifulSoupTransformer
6 | from langchain.chains import create_extraction_chain
7 | from langchain_openai import ChatOpenAI
8 |
9 | def extract(content: str, schema: dict):
10 | return create_extraction_chain(schema=schema, llm=llm).run(content)
11 |
12 | def scrape_with_playwright(urls, schema):
13 | loader = AsyncChromiumLoader(urls)
14 | docs = loader.load()
15 | bs_transformer = BeautifulSoupTransformer()
16 | docs_transformed = bs_transformer.transform_documents(docs, tags_to_extract=["span"])
17 | print("Extracting content with LLM")
18 |
19 | # Grab the first 1000 tokens of the site
20 | splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=1000, chunk_overlap=0)
21 | splits = splitter.split_documents(docs_transformed)
22 |
23 | # Process the first split
24 | extracted_content = extract(schema=schema, content=splits[0].page_content)
25 | pprint.pprint(extracted_content)
26 | return extracted_content
27 |
28 | schema = {
29 | "properties": {
30 | "news_article_title": {"type": "string"},
31 | "news_article_summary": {"type": "string"},
32 | },
33 | "required": ["news_article_title", "news_article_summary"],
34 | }
35 |
36 | llm = ChatOpenAI(temperature=0, model="gpt-4o-mini")
37 |
38 | urls = ["https://www.wsj.com"]
39 |
40 | extracted_content = scrape_with_playwright(urls, schema=schema)
41 |
42 | print("Extracted content:")
43 | print(extracted_content)
44 |
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/6.1-langchain-app-dev-structure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "# uncomment and run below:\n",
10 | "%pip install langchain\n",
11 | "%pip install langchain-openai\n",
12 | "%pip install -U langsmith"
13 | ]
14 | },
15 | {
16 | "cell_type": "markdown",
17 | "metadata": {},
18 | "source": [
19 | "1. Environment setup\n",
20 | "\n",
21 |     "2. Prototype with Jupyter Notebook + LangSmith for inspection\n",
22 | "\n",
23 | "3. Set up the chain in the project app structure\n",
24 | "\n",
25 | "4. Set up server\n",
26 | "\n",
27 | "5. Deploy locally\n",
28 | "\n",
29 | "6. Test it on the playground\n",
30 | "\n",
31 | "7. Call it as an API"
32 | ]
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "metadata": {},
37 | "source": [
38 | "# 1. Environment setup\n",
39 | "\n",
40 | "```\n",
41 | "conda create -n rag-pdf-app python=3.11\n",
42 | "conda activate rag-pdf-app\n",
43 |     "pip install -U \"langchain-cli[serve]\" \"langserve[all]\"\n",
44 | "langchain app new .\n",
45 | "poetry add langchain\n",
46 | "poetry add langchain-community\n",
47 | "poetry add langchain-openai\n",
48 | "poetry add chromadb\n",
49 | "poetry add pypdf\n",
50 | "poetry add tiktoken\n",
51 | "poetry add openai\n",
52 | "poetry add jupyter\n",
53 | "poetry add python-dotenv\n",
54 | "```\n",
55 | "\n",
56 | "Below is optional depending on your development setup:\n",
57 | "\n",
58 | "```\n",
59 | "poetry run jupyter notebook\n",
60 | "```"
61 | ]
62 | },
63 | {
64 | "cell_type": "markdown",
65 | "metadata": {},
66 | "source": [
67 |     "# 2. Prototype with Jupyter Notebook + LangSmith for inspection"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 1,
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "import os\n",
77 | "import getpass\n",
78 | "\n",
79 | "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
80 | "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "# in rag-pdf-app/app/chain.py\n",
90 | "\n",
91 | "# inspired by this template from langchain: https://github.com/langchain-ai/langchain/blob/master/templates/rag-chroma-private/rag_chroma_private/chain.py\n",
92 | "\n",
93 | "from langchain_community.document_loaders import PyPDFLoader\n",
94 | "from langchain_community.embeddings import OllamaEmbeddings\n",
95 | "from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder\n",
96 | "from langchain.chains import RetrievalQA\n",
97 | "from langchain_core.output_parsers import StrOutputParser\n",
98 | "from langchain_core.runnables import RunnablePassthrough, RunnableParallel, RunnableLambda\n",
99 | "from langchain_community.vectorstores import Chroma\n",
100 | "from langchain_community.chat_models import ChatOllama\n",
101 | "from langchain_core.pydantic_v1 import BaseModel, Field\n",
102 | "from langchain import hub\n",
103 | "from typing import List, Tuple\n",
104 | "\n",
105 | "def load_pdf(file_path: str=\"./paper.pdf\") -> str:\n",
106 | " loader = PyPDFLoader(file_path)\n",
107 | " return loader.load()\n",
108 | "\n",
109 | "def index_docs(docs: List[str], \n",
110 | " persist_directory: str=\"./i-shall-persist\", \n",
111 | " embedding_model: str=\"llama3\"):\n",
112 | " embeddings = OllamaEmbeddings(model=embedding_model)\n",
113 | " vectordb = Chroma.from_documents(docs, embeddings, persist_directory=persist_directory)\n",
114 | " retriever = vectordb.as_retriever()\n",
115 | " \n",
116 | " return retriever\n",
117 | "\n",
118 | "\n",
119 | "file_path = \"./paper.pdf\"\n",
120 | "\n",
121 | "docs = load_pdf(file_path)\n",
122 | "\n",
123 | "retriever = index_docs(docs)\n",
124 | "\n",
125 | "template = \"\"\"\n",
126 |     "Answer the question based only on the following context:\n",
127 | "{context}\n",
128 | "\n",
129 | "Question: {question}\n",
130 | "\"\"\"\n",
131 | "\n",
132 | "prompt = ChatPromptTemplate.from_template(template)\n",
133 | "\n",
134 | "llm = ChatOllama(model=\"llama3\")\n",
135 | "\n",
136 | "# 2 suggestions for creating the rag chain:\n",
137 | "\n",
138 | "# chain = (\n",
139 | "# RunnableParallel({\"context\": retriever, \"question\": RunnablePassthrough()}) # RunnablePassthrough source: https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain-core-runnables-passthrough-runnablepassthrough:~:text=Runnable%20to%20passthrough,and%20experiment%20with.\n",
140 | "# # RunnableParallel source: https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html\n",
141 | "# | prompt\n",
142 | "# | llm\n",
143 | "# | StrOutputParser()\n",
144 | "# )\n",
145 | "\n",
146 | "chain = RetrievalQA.from_chain_type(llm, retriever=retriever, chain_type_kwargs={\"prompt\": prompt}, return_source_documents=True) | RunnableLambda(lambda x: x[\"result\"])\n",
147 | "# qa_chain = RetrievalQA.from_chain_type(llm, retriever=retriever, return_source_documents=True)\n",
148 | "\n",
149 | "# Add typing for input\n",
150 | "class Question(BaseModel):\n",
151 | " __root__: str\n",
152 | " # The __root__ field in Pydantic models is used to define a model\n",
153 | " # where you expect a single value or a list rather than a dictionary \n",
154 | " # of named fields. Essentially, it allows your model to handle instances \n",
155 | " # where data does not naturally fit into a key-value structure, \n",
156 | " # such as a single value or a list.\n",
157 | "\n",
158 | "\n",
159 | "rag_chain = chain.with_types(input_type=Question)"
160 | ]
161 | },
162 | {
163 | "cell_type": "markdown",
164 | "metadata": {},
165 | "source": [
166 | "# 3. Set up the chain in the project app structure\n",
167 | "\n",
168 | ""
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {},
174 | "source": [
175 | "Go to the folder: `rag-pdf-app/app/`\n",
176 | "\n",
177 | "and save the rag code from above in `chain.py`"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "# 4. Set up server\n",
185 | "\n",
186 | "Go to `rag-pdf-app/app/server.py`\n",
187 | "\n",
188 | "and change:\n",
189 | "\n",
190 | "```python\n",
191 | "from fastapi import FastAPI\n",
192 | "from fastapi.responses import RedirectResponse\n",
193 | "from langserve import add_routes\n",
194 | "from app.chain import rag_chain\n",
195 | "\n",
196 | "app = FastAPI()\n",
197 | "\n",
198 | "\n",
199 | "@app.get(\"/\")\n",
200 | "async def redirect_root_to_docs():\n",
201 | " return RedirectResponse(\"/docs\")\n",
202 | "\n",
203 | "\n",
204 | "# Edit this to add the chain you want to add\n",
205 | "# Add routes connects the chain to our app exposing the methods of the chain to our web server\n",
206 | "add_routes(app,rag_chain, path=\"/rag-local\")\n",
207 | "\n",
208 | "if __name__ == \"__main__\":\n",
209 | " import uvicorn\n",
210 | "\n",
211 | " uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n",
212 | "```"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "# 5. Deploy locally"
220 | ]
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "metadata": {},
225 | "source": [
226 | "From within the conda environment in the root folder of the project:\n",
227 | "\n",
228 | "`langchain serve`\n",
229 | "\n",
230 | "or with poetry:\n",
231 | "\n",
232 | "`poetry run langchain serve --port=8100`"
233 | ]
234 | },
235 | {
236 | "cell_type": "markdown",
237 | "metadata": {},
238 | "source": [
239 | "# 6. Test it on the playground\n",
240 | "\n",
241 | "`rag-local/playground/`"
242 | ]
243 | },
244 | {
245 | "cell_type": "markdown",
246 | "metadata": {},
247 | "source": []
248 | },
249 | {
250 | "cell_type": "markdown",
251 | "metadata": {},
252 | "source": [
253 | "# 7. Call it as an API\n",
254 | "\n",
255 | "```\n",
256 | "from langserve.client import RemoteRunnable\n",
257 | "\n",
258 | "runnable = RemoteRunnable(\"http://localhost:8000/rag-local\")\n",
259 | "\n",
260 | "runnable.invoke(\"What is attention in this paper?\")\n",
261 | "```"
262 | ]
263 | }
264 | ],
265 | "metadata": {
266 | "kernelspec": {
267 | "display_name": "oreilly-langchain",
268 | "language": "python",
269 | "name": "oreilly-langchain"
270 | },
271 | "language_info": {
272 | "codemirror_mode": {
273 | "name": "ipython",
274 | "version": 3
275 | },
276 | "file_extension": ".py",
277 | "mimetype": "text/x-python",
278 | "name": "python",
279 | "nbconvert_exporter": "python",
280 | "pygments_lexer": "ipython3",
281 | "version": "3.11.9"
282 | }
283 | },
284 | "nbformat": 4,
285 | "nbformat_minor": 2
286 | }
287 |
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/create-knowledge-graph.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 3,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "data": {
10 | "image/svg+xml": [
11 | "\n",
12 | "\n",
14 | "\n",
16 | "\n",
17 | "\n"
62 | ],
63 | "text/plain": [
64 | ""
65 | ]
66 | },
67 | "metadata": {},
68 | "output_type": "display_data"
69 | }
70 | ],
71 | "source": [
72 | "from pydantic import BaseModel, Field\n",
73 | "from typing import List\n",
74 | "from openai import OpenAI\n",
75 | "import instructor\n",
76 | "from graphviz import Digraph\n",
77 | "import argparse\n",
78 | "\n",
79 | "class Node(BaseModel):\n",
80 | " id: int\n",
81 | " label: str\n",
82 | " color: str\n",
83 | "\n",
84 | "class Edge(BaseModel):\n",
85 | " source: int\n",
86 | " target: int\n",
87 | " label: str\n",
88 | " color: str = \"black\"\n",
89 | "\n",
90 | "class KnowledgeGraph(BaseModel):\n",
91 | " nodes: List[Node] = Field(..., default_factory=list)\n",
92 | " edges: List[Edge] = Field(..., default_factory=list)\n",
93 | "\n",
94 | "# Adds response_model to ChatCompletion\n",
95 | "# Allows the return of Pydantic model rather than raw JSON\n",
96 | "client = instructor.patch(OpenAI())\n",
97 | "\n",
98 | "def generate_graph(input) -> KnowledgeGraph:\n",
99 | " return client.chat.completions.create(\n",
100 | " model=\"gpt-3.5-turbo\",\n",
101 | " messages=[\n",
102 | " {\n",
103 | " \"role\": \"user\",\n",
104 | " \"content\": f\"Help me understand the following by describing it as a detailed knowledge graph: {input}\",\n",
105 | " }\n",
106 | " ],\n",
107 | " response_model=KnowledgeGraph,\n",
108 | " ) # type: ignore\n",
109 | "\n",
110 | "def visualize_knowledge_graph(kg: KnowledgeGraph):\n",
111 | " dot = Digraph(comment=\"Knowledge Graph\")\n",
112 | "\n",
113 | " # Add nodes\n",
114 | " for node in kg.nodes:\n",
115 | " dot.node(str(node.id), node.label, color=node.color)\n",
116 | "\n",
117 | " # Add edges\n",
118 | " for edge in kg.edges:\n",
119 | " dot.edge(str(edge.source), str(edge.target), label=edge.label, color=edge.color)\n",
120 | "\n",
121 | " # Render the graph\n",
122 | " # dot.render(\"knowledge_graph.gv\", view=True)\n",
123 | " display(dot)\n",
124 | " \n",
125 | " \n",
126 | "\n",
127 | "graph: KnowledgeGraph = generate_graph(\"\"\"\n",
128 | "First, list the key decisions, follow-up items, and associated owners in a sketchpad.\n",
129 | "Then, check that the details in the sketchpad are factually consistent with the transcript.\n",
130 | "Finally, synthesize the key points into a concise summary.\"\"\")\n",
131 | "visualize_knowledge_graph(graph)"
132 | ]
133 | }
134 | ],
135 | "metadata": {
136 | "kernelspec": {
137 | "display_name": "automations",
138 | "language": "python",
139 | "name": "automations"
140 | },
141 | "language_info": {
142 | "codemirror_mode": {
143 | "name": "ipython",
144 | "version": 3
145 | },
146 | "file_extension": ".py",
147 | "mimetype": "text/x-python",
148 | "name": "python",
149 | "nbconvert_exporter": "python",
150 | "pygments_lexer": "ipython3",
151 | "version": "3.11.9"
152 | }
153 | },
154 | "nbformat": 4,
155 | "nbformat_minor": 2
156 | }
157 |
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/knowledge_graph.gv:
--------------------------------------------------------------------------------
1 | // Knowledge Graph
2 | digraph {
3 | 1 [label="Random Variable" color=lightblue]
4 | 2 [label="Probability Distribution" color=lightblue]
5 | 3 [label="Joint Probability Mass Function" color=lightgreen]
6 | 4 [label="Discrete Random Variables" color=lightblue]
7 | 5 [label="Probability Mass Function" color=lightgreen]
8 | 1 -> 2 [label=has color=black]
9 | 2 -> 3 [label=has color=black]
10 | 1 -> 4 [label="is a" color=black]
11 | 4 -> 5 [label=has color=black]
12 | }
13 |
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/knowledge_graph.gv.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/dev-notebooks/knowledge_graph.gv.pdf
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/llama3-langchain-basic-chain.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Basic Chain Example LangChain + Llama3"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 2,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "from langchain_community.chat_models import ChatOllama\n",
17 | "from langchain_core.output_parsers import StrOutputParser\n",
18 | "from langchain_core.prompts import ChatPromptTemplate"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "\n",
28 | "# Local Llama3\n",
29 | "llm = ChatOllama(\n",
30 | " model=\"llama3\",\n",
31 | " keep_alive=-1, # keep the model loaded indefinitely\n",
32 | " temperature=0,\n",
33 | " max_new_tokens=512\n",
34 | ")"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 4,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "prompt = ChatPromptTemplate.from_template(\"Write me a 500 word article on {topic} from the perspective of a {profession}.\")"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 7,
49 | "metadata": {},
50 | "outputs": [
51 | {
52 | "name": "stdout",
53 | "output_type": "stream",
54 | "text": [
55 | "**\"LLMs: The Game-Changers for Shipping Magnates Like Me\"**\n",
56 | "\n",
57 | "As a shipping magnate, I've spent my fair share of time navigating the complexities of global trade and logistics. From optimizing routes to managing inventory, every decision counts when it comes to keeping my fleet of vessels running smoothly and efficiently. That's why I'm excited about the potential of Large Language Models (LLMs) to revolutionize our industry.\n",
58 | "\n",
59 | "In recent years, LLMs have made headlines for their impressive language processing capabilities. These AI-powered models can understand and generate human-like text with uncanny accuracy. But what does this mean for shipping magnates like me? Let me tell you – it's a game-changer.\n",
60 | "\n",
61 | "Firstly, LLMs can help streamline communication between our teams and clients. Imagine being able to quickly generate responses to common customer inquiries or providing detailed reports on shipment status without breaking a sweat. With an LLM at your disposal, you can automate routine tasks and free up your team to focus on higher-value activities like strategic planning and problem-solving.\n",
62 | "\n",
63 | "But the benefits don't stop there. LLMs can also be used to analyze vast amounts of data related to shipping patterns, market trends, and regulatory compliance. By processing this information in real-time, we can identify opportunities for optimization and make more informed decisions about our operations. For instance, an LLM could help us predict demand fluctuations based on historical data and adjust our logistics accordingly.\n",
64 | "\n",
65 | "Another area where LLMs can make a significant impact is in the realm of documentation and compliance. Shipping companies are subject to numerous regulations and standards, which can be time-consuming and error-prone to navigate. An LLM can assist with generating accurate and compliant documents, such as customs forms and insurance claims, reducing the risk of errors and fines.\n",
66 | "\n",
67 | "Furthermore, LLMs have the potential to revolutionize our approach to supply chain management. By analyzing vast amounts of data on shipment routes, modes of transportation, and inventory levels, an LLM can help us identify areas for improvement and optimize our logistics networks. This could lead to significant cost savings, reduced transit times, and improved customer satisfaction.\n",
68 | "\n",
69 | "Of course, there are also concerns about the potential risks associated with relying on AI-powered models like LLMs. As a shipping magnate, I understand the importance of maintaining human oversight and control over critical decision-making processes. However, I believe that LLMs can be designed to augment our existing capabilities rather than replace them.\n",
70 | "\n",
71 | "In conclusion, Large Language Models have the potential to transform the shipping industry in numerous ways. By streamlining communication, analyzing data, generating documents, and optimizing supply chains, LLMs can help us operate more efficiently, effectively, and profitably. As a shipping magnate, I'm excited about the possibilities that these AI-powered models bring to our industry – and I'm eager to see how they will shape the future of global trade and logistics.\n",
72 | "\n",
73 | "**About the Author**\n",
74 | "\n",
75 | "John Smith is a seasoned shipping magnate with over two decades of experience in the industry. He has built his reputation on his ability to navigate complex logistical challenges and optimize supply chain operations. In this article, he shares his insights on the potential of Large Language Models to revolutionize the shipping industry.\n"
76 | ]
77 | }
78 | ],
79 | "source": [
80 | "# using LangChain Expressive Language chain syntax\n",
81 | "chain = prompt | llm | StrOutputParser()"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "print(chain.invoke({\"topic\": \"LLMs\", \"profession\": \"shipping magnate\"}))"
91 | ]
92 | }
93 | ],
94 | "metadata": {
95 | "kernelspec": {
96 | "display_name": "oreilly-llama3",
97 | "language": "python",
98 | "name": "oreilly-llama3"
99 | },
100 | "language_info": {
101 | "codemirror_mode": {
102 | "name": "ipython",
103 | "version": 3
104 | },
105 | "file_extension": ".py",
106 | "mimetype": "text/x-python",
107 | "name": "python",
108 | "nbconvert_exporter": "python",
109 | "pygments_lexer": "ipython3",
110 | "version": "3.11.9"
111 | }
112 | },
113 | "nbformat": 4,
114 | "nbformat_minor": 2
115 | }
116 |
--------------------------------------------------------------------------------
/notebooks/dev-notebooks/research-assistant_test.py:
--------------------------------------------------------------------------------
1 | from langchain.chat_models import ChatOpenAI
2 | from langchain.prompts import ChatPromptTemplate
3 | from langchain.schema.output_parser import StrOutputParser
4 | import requests
5 | from bs4 import BeautifulSoup
6 | from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
7 | from langchain.utilities import DuckDuckGoSearchAPIWrapper
8 | import json
9 |
# Number of search-result links fetched for each generated research question.
RESULTS_PER_QUESTION = 3

# Module-level DuckDuckGo client, shared by web_search() below.
ddg_search = DuckDuckGoSearchAPIWrapper()
13 |
14 |
def web_search(query: str, num_results: int = RESULTS_PER_QUESTION):
    """Run a DuckDuckGo search for `query` and return the result links."""
    search_results = ddg_search.results(query, num_results)
    return [result["link"] for result in search_results]
18 |
19 |
# Prompt used to summarize a scraped page with respect to a research question.
# Fixed typo: "imply summarize" -> "simply summarize".
SUMMARY_TEMPLATE = """{text}
-----------
Using the above text, answer in short the following question:
> {question}
-----------
if the question cannot be answered using the text, simply summarize the text. Include all factual information, numbers, stats etc if available.""" # noqa: E501
SUMMARY_PROMPT = ChatPromptTemplate.from_template(SUMMARY_TEMPLATE)
27 |
28 |
def scrape_text(url: str):
    """Fetch a webpage and return its visible text content.

    Returns an error-description string instead of raising when the request
    fails or the server responds with a non-200 status, so the downstream
    summarization chain always receives some text to work with.
    """
    # Send a GET request to the webpage
    try:
        # Fixed: without a timeout, requests.get can hang indefinitely on a
        # slow or unresponsive host, stalling the whole research chain.
        response = requests.get(url, timeout=10)

        # Check if the request was successful
        if response.status_code == 200:
            # Parse the content of the request with BeautifulSoup
            soup = BeautifulSoup(response.text, "html.parser")

            # Extract all text from the webpage
            page_text = soup.get_text(separator=" ", strip=True)

            return page_text
        else:
            return f"Failed to retrieve the webpage: Status code {response.status_code}"
    except Exception as e:
        print(e)
        return f"Failed to retrieve the webpage: {e}"
49 |
50 |
url = "https://blog.langchain.dev/announcing-langsmith/"

# Given {"question", "url"}: scrape the page (truncated to 10k chars), have the
# LLM summarize it w.r.t. the question, then format as a "URL + SUMMARY" string.
scrape_and_summarize_chain = RunnablePassthrough.assign(
    summary = RunnablePassthrough.assign(
        text=lambda x: scrape_text(x["url"])[:10000]
    ) | SUMMARY_PROMPT | ChatOpenAI(model="gpt-3.5-turbo-1106") | StrOutputParser()
) | (lambda x: f"URL: {x['url']}\n\nSUMMARY: {x['summary']}")

# Given {"question"}: search the web, fan out one scrape-and-summarize run per
# result URL via .map(), yielding a list of summary strings.
web_search_chain = RunnablePassthrough.assign(
    urls = lambda x: web_search(x["question"])
) | (lambda x: [{"question": x["question"], "url": u} for u in x["urls"]]) | scrape_and_summarize_chain.map()
62 |
63 |
# Prompt asking the LLM to produce 3 search queries as a JSON list of strings.
SEARCH_PROMPT = ChatPromptTemplate.from_messages(
    [
        (
            "user",
            "Write 3 google search queries to search online that form an "
            "objective opinion from the following: {question}\n"
            "You must respond with a list of strings in the following format: "
            '["query 1", "query 2", "query 3"].',
        ),
    ]
)

# Parses the model's JSON-list reply directly with json.loads; this assumes
# the model follows the requested format exactly.
search_question_chain = SEARCH_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser() | json.loads

# Generate queries, then run the full web-search/summarize pipeline per query.
full_research_chain = search_question_chain | (lambda x: [{"question": q} for q in x]) | web_search_chain.map()
79 |
# System persona for the report-writing model.
WRITER_SYSTEM_PROMPT = "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text." # noqa: E501


# Report prompts from https://github.com/assafelovic/gpt-researcher/blob/master/gpt_researcher/master/prompts.py
RESEARCH_REPORT_TEMPLATE = """Information:
--------
{research_summary}
--------
Using the above information, answer the following question or topic: "{question}" in a detailed report -- \
The report should focus on the answer to the question, should be well structured, informative, \
in depth, with facts and numbers if available and a minimum of 1,200 words.
You should strive to write the report as long as you can using all relevant and necessary information provided.
You must write the report with markdown syntax.
You MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.
Write all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each.
You must write the report in apa format.
Please do your best, this is very important to my career.""" # noqa: E501

# Writer prompt: system persona + report instructions with the collapsed
# research summaries substituted into {research_summary}.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", WRITER_SYSTEM_PROMPT),
        ("user", RESEARCH_REPORT_TEMPLATE),
    ]
)
104 |
def collapse_list_of_lists(list_of_lists):
    """Flatten a list of string lists into one blob, blank-line separated."""
    return "\n\n".join("\n\n".join(inner) for inner in list_of_lists)
110 |
# End-to-end pipeline: run the full research fan-out, collapse all summaries
# into one text blob, then ask the writer model for the final report.
chain = RunnablePassthrough.assign(
    research_summary= full_research_chain | collapse_list_of_lists
) | prompt | ChatOpenAI(model="gpt-3.5-turbo-1106") | StrOutputParser()
114 |
115 | #!/usr/bin/env python
116 | # from fastapi import FastAPI
117 | # from langserve import add_routes
118 |
119 | # app = FastAPI(
120 | # title="LangChain Server",
121 | # version="1.0",
122 | # description="A simple api server using Langchain's Runnable interfaces",
123 | # )
124 |
125 | # add_routes(
126 | # app,
127 | # chain,
128 | # path="/research-assistant",
129 | # )
130 |
131 |
132 | # if __name__ == "__main__":
133 | # import uvicorn
134 |
135 | # uvicorn.run(app, host="localhost", port=8000)
136 |
--------------------------------------------------------------------------------
/notebooks/jira-agent.py:
--------------------------------------------------------------------------------
1 | from jira import JIRA
2 | import argparse
3 | import os
4 | from langchain.tools import tool
5 | from langchain.agents import initialize_agent, AgentExecutor
6 | from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
7 | from langchain.tools.render import format_tool_to_openai_function
8 | from langchain.chat_models import ChatOpenAI
9 | from langchain.agents.format_scratchpad import format_to_openai_function_messages
10 | from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
11 | import sys
12 |
13 |
@tool
def list_issue_transition_options(issue_key,server_url=''):
    """Lists all the available transition options for a Jira issue given the issue key"""
    JIRA_USERNAME = os.environ["JIRA_USERNAME"]
    JIRA_TOKEN = os.environ["JIRA_TOKEN"]

    jira = JIRA(basic_auth=(JIRA_USERNAME, JIRA_TOKEN), server=server_url)
    transitions = jira.transitions(issue_key)
    # Fixed: the tool only printed the options; the agent only sees a tool's
    # return value, so also return them as a newline-joined string.
    lines = [f"{transition['name']} {transition['id']}" for transition in transitions]
    for line in lines:
        print(line)
    return "\n".join(lines)
24 |
@tool
def update_issue_status(issue_key, target_status_name, server_url=''):
    """Updates the status of a Jira issue given the issue key and the target status name"""

    JIRA_USERNAME = os.environ["JIRA_USERNAME"]
    JIRA_TOKEN = os.environ["JIRA_TOKEN"]

    jira = JIRA(basic_auth=(JIRA_USERNAME, JIRA_TOKEN), server=server_url)
    transitions = jira.transitions(issue_key)
    target_transition_id = None

    # Find the transition ID for the target status (case-insensitive,
    # ignoring surrounding whitespace on both sides).
    # Fixed: removed a leftover debug print of every transition name, and
    # strip() the target name too so the comparison is symmetric.
    for transition in transitions:
        if transition['name'].lower().strip() == target_status_name.lower().strip():
            target_transition_id = transition['id']
            break

    # Execute the transition if possible
    if target_transition_id:
        jira.transition_issue(issue_key, target_transition_id)
        print(f"Issue {issue_key} has been moved to '{target_status_name}'.")
    else:
        print(f"Transition to '{target_status_name}' not found.")
49 |
@tool
def create_issue(summary, description, issue_type, project_key='ML', server_url=''):
    """Creates a Jira issue with summary, description, issue type and a project key"""
    JIRA_USERNAME = os.environ["JIRA_USERNAME"]
    JIRA_TOKEN = os.environ["JIRA_TOKEN"]

    jira = JIRA(basic_auth=(JIRA_USERNAME, JIRA_TOKEN), server=server_url)
    # Field payload for the new issue, in the shape the Jira API expects.
    fields = {
        'project': {'key': project_key},
        'summary': summary,
        'description': description,
        'issuetype': {'name': issue_type},
    }
    new_issue = jira.create_issue(fields=fields)
    print(f'New issue created with key: {new_issue.key}')
66 |
67 |
@tool
def delete_issue(issue_key, server_url=''):
    """Deletes a Jira issue given the issue key"""
    JIRA_USERNAME = os.environ["JIRA_USERNAME"]
    JIRA_TOKEN = os.environ["JIRA_TOKEN"]
    jira = JIRA(basic_auth=(JIRA_USERNAME, JIRA_TOKEN), server=server_url)
    issue = jira.issue(issue_key)
    print(f'Deleting issue: {issue.key}')
    # Fixed: the confirmation variable used to shadow this function's name;
    # also strip whitespace so " y " still counts as confirmation.
    confirmation = input("Do you want to delete the issue? (y/n): ")
    if confirmation.strip().lower() in ['y', 'yes']:
        issue.delete()
        print('Issue deleted successfully')
80 |
81 |
@tool
def update_issue_summary(issue_key, summary, server_url=''):
    """Updates issue summary"""
    jira_user = os.environ["JIRA_USERNAME"]
    jira_token = os.environ["JIRA_TOKEN"]

    # Authenticate and fetch the issue, then apply the new summary.
    client = JIRA(basic_auth=(jira_user, jira_token), server=server_url)
    target = client.issue(issue_key)
    target.update(summary=summary)
    print(f'Issue {target.key} summary updated successfully')
92 |
93 |
@tool
def update_issue_description(issue_key, description, server_url=''):
    """Updates issue description"""
    jira_user = os.environ["JIRA_USERNAME"]
    jira_token = os.environ["JIRA_TOKEN"]

    # Authenticate and fetch the issue, then apply the new description.
    client = JIRA(basic_auth=(jira_user, jira_token), server=server_url)
    target = client.issue(issue_key)
    target.update(description=description)
    print(f'Issue {target.key} description updated successfully')
104 |
@tool
def view_issue(issue_key, server_url=''):
    """Views a Jira issue given the issue key"""
    JIRA_USERNAME = os.environ["JIRA_USERNAME"]
    JIRA_TOKEN = os.environ["JIRA_TOKEN"]

    jira = JIRA(basic_auth=(JIRA_USERNAME, JIRA_TOKEN), server=server_url)
    # Fixed: removed a stray duplicate of the docstring that sat here as a
    # no-op string statement.
    issue = jira.issue(issue_key)
    print(f'Viewing issue {issue.key}.')
115 |
116 |
def setup_prompt_template():
    """Build the chat prompt for the Jira agent: system persona, the user's
    request, and a placeholder for the agent's intermediate scratchpad."""
    system_message = (
        "system",
        "You are very powerful assistant that helps users to manage their issues in the Jira Software.",
    )
    return ChatPromptTemplate.from_messages([
        system_message,
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
129 |
130 |
def setup_agent(prompt, llm_with_tools, agent_tools=None):
    """Wire the prompt, the tool-bound LLM and the output parser into an
    AgentExecutor.

    agent_tools: tools the executor may run. Defaults to the module-level
    `tools` list for backward compatibility (the original read it as a
    global, which made the function untestable in isolation).
    """
    if agent_tools is None:
        agent_tools = tools
    agent = (
        {
            "input": lambda x: x["input"],
            # Turn intermediate (action, observation) steps into chat messages.
            "agent_scratchpad": lambda x: format_to_openai_function_messages(
                x["intermediate_steps"]
            ),
        }
        | prompt
        | llm_with_tools
        | OpenAIFunctionsAgentOutputParser())
    agent_executor = AgentExecutor(agent=agent, tools=agent_tools, verbose=True)

    return agent_executor
145 |
if __name__=="__main__":
    # The single CLI argument is the natural-language instruction for the agent.
    # Fixed: exit with a usage message instead of an IndexError when missing.
    if len(sys.argv) < 2:
        print("Usage: python jira-agent.py '<instruction for the Jira agent>'")
        sys.exit(1)
    action_input = sys.argv[1]
    prompt = setup_prompt_template()
    llm = ChatOpenAI()
    tools = [
        view_issue,
        create_issue,
        update_issue_summary,
        update_issue_description,
        delete_issue,
        update_issue_status,
        list_issue_transition_options,
    ]
    # Expose the tools to the model as OpenAI function definitions.
    llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
    agent_executor = setup_agent(prompt, llm_with_tools)
    agent_executor.invoke({"input": action_input})
162 |
--------------------------------------------------------------------------------
/notebooks/langchain-app.py:
--------------------------------------------------------------------------------
# source: https://python.langchain.com/docs/tutorials/llm_chain/#:~:text=Server%E2%80%8B,with%20langserve.add_routes
#!/usr/bin/env python
from fastapi import FastAPI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langserve import add_routes

# 1. Create prompt template
system_template = "Translate the following into {language}:"
prompt_template = ChatPromptTemplate.from_messages([
    ('system', system_template),
    ('user', '{text}')
])

# 2. Create model
model = ChatOpenAI(model="gpt-4o-mini", temperature=0)

# 3. Create parser
parser = StrOutputParser()

# 4. Create chain
chain = prompt_template | model | parser


# 5. App definition (renumbered: this step and the next both said "4."/"5.")
app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
)

# 6. Adding chain route — serves the chain at /chain via LangServe
add_routes(
    app,
    chain,
    path="/chain",
)

if __name__ == "__main__":
    import uvicorn

    # Serve locally on port 8000.
    uvicorn.run(app, host="localhost", port=8000)
--------------------------------------------------------------------------------
/notebooks/langchain-lcel-cheatsheet.md:
--------------------------------------------------------------------------------
1 | # LangChain Expression Language (LCEL) Cheat Sheet
2 |
3 | - **Invoke a runnable:** Use `Runnable.invoke()` for synchronous and `Runnable.ainvoke()` for asynchronous invocation.
4 | - **Batch a runnable:** Use `Runnable.batch()` for synchronous and `Runnable.abatch()` for asynchronous batching.
5 | - **Stream a runnable:** Use `Runnable.stream()` for synchronous and `Runnable.astream()` for asynchronous streaming.
6 | - **Compose runnables:** Use the pipe operator `|` to chain runnables.
7 | - **Invoke runnables in parallel:** Use `RunnableParallel` to run multiple runnables concurrently.
8 | - **Turn any function into a runnable:** Use `RunnableLambda` to convert a function into a runnable.
9 | - **Merge input and output dicts:** Use `RunnablePassthrough.assign` to merge input and output dictionaries.
10 | - **Include input dict in output dict:** Use `RunnablePassthrough` to include input in the output.
11 | - **Add default invocation args:** Use `Runnable.bind` to bind default arguments to a runnable.
12 | - **Add fallbacks:** Use `Runnable.with_fallbacks` to add fallback runnables.
13 | - **Add retries:** Use `Runnable.with_retry` to add retry logic to a runnable.
14 | - **Configure runnable execution:** Use `RunnableConfig` to specify execution configurations.
15 | - **Add default config to runnable:** Use `Runnable.with_config` to add default configurations.
- **Make runnable attributes configurable:** Use `Runnable.configurable_fields` for configurable attributes.
- **Make chain components configurable:** Use `Runnable.configurable_alternatives` for configurable chain components.
18 | - **Build a chain dynamically based on input:** Use conditionals to dynamically build chains.
19 | - **Generate a stream of events:** Use `Runnable.astream_events` for asynchronous event streams.
20 | - **Yield batched outputs as they complete:** Use `Runnable.batch_as_completed` for synchronous and `Runnable.abatch_as_completed` for asynchronous batching.
21 | - **Return subset of output dict:** Use `Runnable.pick` to select specific parts of the output.
22 | - **Declaratively make a batched version of a runnable:** Use `Runnable.map` for batching.
23 | - **Get a graph representation of a runnable:** Use `Runnable.get_graph` to visualize the runnable.
24 | - **Get all prompts in a chain:** Use `Runnable.get_prompts` to retrieve prompts from a chain.
25 | - **Add lifecycle listeners:** Use `Runnable.with_listeners` to add start and end listeners to a runnable.
--------------------------------------------------------------------------------
/notebooks/langchain-structured-output-ui.py:
--------------------------------------------------------------------------------
import streamlit as st
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
import pandas as pd


# Streamlit reruns the whole script on every interaction, so create the
# results table only once per browser session.
if 'df' not in st.session_state:
    st.session_state.df = pd.DataFrame(columns=["Product Name", "Price", "Description"])
9 |
# Target schema for structured extraction. The Field descriptions are sent to
# the model as part of the output schema, so they are behavior, not comments.
class Product(BaseModel):
    name: str = Field(description="The name of the product")
    price: float = Field(description="The price of the product")
    description: str = Field(description="A detailed description of the product")
14 |
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

# Wrap the model so invoke() returns a validated Product instance.
llm_structured_output = llm.with_structured_output(Product)


st.title("LangChain Structured Output UI")

# NOTE(review): these columns are created but never used below, and the final
# message mentions a "left column" — confirm whether a two-column layout was
# intended here.
col1, col2 = st.columns(2)


raw_text = st.text_area("Raw Text")

# Add a checkbox to show the current table
show_table = st.checkbox("Show current table")

# Display the current table if the checkbox is checked
if show_table:
    st.write("Current Product Information")
    st.table(st.session_state.df)

if raw_text:
    st.write("Product Information")
    if raw_text!="" and st.button("Parse"):
        product_info = llm_structured_output.invoke(raw_text)
        st.write("Product Name,Price,Description")
        # Append the parsed product as a new row of the session table.
        new_row = pd.DataFrame({
            "Product Name": [product_info.name],
            "Price": [product_info.price],
            "Description": [product_info.description]
        })
        st.session_state.df = pd.concat([st.session_state.df, new_row], ignore_index=True)
        st.table(st.session_state.df)
else:
    st.write("Enter text in the left column to see it here.")
--------------------------------------------------------------------------------
/notebooks/paper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/notebooks/paper.pdf
--------------------------------------------------------------------------------
/notebooks/rag_methods.py:
--------------------------------------------------------------------------------
1 | import os
2 | import dotenv
3 | from time import time
4 | import streamlit as st
5 |
6 | from langchain_community.document_loaders.text import TextLoader
7 | from langchain_community.document_loaders import (
8 | WebBaseLoader,
9 | PyPDFLoader,
10 | Docx2txtLoader,
11 | CSVLoader,
12 | )
13 | # pip install docx2txt, pypdf
14 | from langchain_community.vectorstores import Chroma
15 | from langchain.text_splitter import RecursiveCharacterTextSplitter
16 | from langchain_openai import OpenAIEmbeddings, AzureOpenAIEmbeddings
17 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
18 | from langchain.chains import create_history_aware_retriever, create_retrieval_chain
19 | from langchain.chains.combine_documents import create_stuff_documents_chain
20 |
21 | dotenv.load_dotenv()
22 |
23 | os.environ["USER_AGENT"] = "myagent"
24 | DB_DOCS_LIMIT = 10
25 |
26 | # Function to stream the response of the LLM
def stream_llm_response(llm_stream, messages):
    """Yield streamed LLM chunks, then record the assembled reply in history."""
    pieces = []
    for chunk in llm_stream.stream(messages):
        pieces.append(chunk.content)
        yield chunk

    st.session_state.messages.append(
        {"role": "assistant", "content": "".join(pieces)}
    )
35 |
36 |
37 | # --- Indexing Phase ---
38 |
def load_doc_to_db():
    """Load user-uploaded files from Streamlit state into the vector DB.

    Reads `st.session_state.rag_docs`, writes each new upload to a temp file
    (the LangChain loaders expect a path), indexes the loaded documents, and
    always deletes the temp copy. Already-loaded filenames are skipped.
    """
    # Use loader according to doc type
    if "rag_docs" in st.session_state and st.session_state.rag_docs:
        docs = []
        for doc_file in st.session_state.rag_docs:
            if doc_file.name not in st.session_state.rag_sources:
                if len(st.session_state.rag_sources) < DB_DOCS_LIMIT:
                    # Persist the upload to disk so a path-based loader can read it.
                    os.makedirs("source_files", exist_ok=True)
                    file_path = f"./source_files/{doc_file.name}"
                    with open(file_path, "wb") as file:
                        file.write(doc_file.read())

                    try:
                        # Choose a loader from the MIME type or file extension.
                        if doc_file.type == "application/pdf":
                            loader = PyPDFLoader(file_path)
                        elif doc_file.name.endswith(".docx"):
                            loader = Docx2txtLoader(file_path)
                        elif doc_file.type in ["text/plain", "text/markdown"]:
                            loader = TextLoader(file_path)
                        elif doc_file.name.endswith(".csv"):
                            loader = CSVLoader(file_path)
                        else:
                            st.warning(f"Document type {doc_file.type} not supported.")
                            continue

                        docs.extend(loader.load())
                        st.session_state.rag_sources.append(doc_file.name)

                    except Exception as e:
                        st.toast(f"Error loading document {doc_file.name}: {e}", icon="⚠️")
                        print(f"Error loading document {doc_file.name}: {e}")

                    finally:
                        # Always remove the temp copy, even on failure.
                        os.remove(file_path)

                else:
                    st.error(F"Maximum number of documents reached ({DB_DOCS_LIMIT}).")

        if docs:
            _split_and_load_docs(docs)
            st.toast(f"Document *{str([doc_file.name for doc_file in st.session_state.rag_docs])[1:-1]}* loaded successfully.", icon="✅")
80 |
81 |
def load_url_to_db():
    """Scrape the URL in `st.session_state.rag_url` and index its content.

    Skips URLs that were already loaded this session.
    """
    if "rag_url" in st.session_state and st.session_state.rag_url:
        url = st.session_state.rag_url
        docs = []
        if url not in st.session_state.rag_sources:
            # NOTE(review): the hard-coded 10 duplicates DB_DOCS_LIMIT used in
            # load_doc_to_db — keep the two in sync.
            if len(st.session_state.rag_sources) < 10:
                try:
                    loader = WebBaseLoader(url)
                    docs.extend(loader.load())
                    st.session_state.rag_sources.append(url)

                except Exception as e:
                    st.error(f"Error loading document from {url}: {e}")

                if docs:
                    _split_and_load_docs(docs)
                    st.toast(f"Document from URL *{url}* loaded successfully.", icon="✅")

            else:
                st.error("Maximum number of documents reached (10).")
102 |
103 |
def initialize_vector_db(docs):
    """Create a new persistent Chroma vector store from `docs` and return it."""
    embedding = OpenAIEmbeddings(api_key=st.session_state.openai_api_key)

    # Create a persistent directory for the database, namespaced by session id.
    persist_directory = f"./chroma_db_{st.session_state['session_id']}"

    vector_db = Chroma.from_documents(
        documents=docs,
        embedding=embedding,
        persist_directory=persist_directory,
    )

    # Persist the database
    # NOTE(review): Chroma >= 0.4 persists automatically and deprecates
    # persist(); confirm the installed version still supports this call.
    vector_db.persist()

    return vector_db
120 |
121 |
def _split_and_load_docs(docs):
    """Chunk `docs` and add them to the session's vector DB, creating it if needed."""
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
    )

    document_chunks = text_splitter.split_documents(docs)

    # Same session-scoped directory used by initialize_vector_db().
    persist_directory = f"./chroma_db_{st.session_state['session_id']}"

    if os.path.exists(persist_directory):
        # Load existing database
        embedding = OpenAIEmbeddings(api_key=st.session_state.openai_api_key)
        st.session_state.vector_db = Chroma(persist_directory=persist_directory, embedding_function=embedding)
        # Add new documents to existing database
        st.session_state.vector_db.add_documents(document_chunks)
    else:
        # Create new database
        st.session_state.vector_db = initialize_vector_db(document_chunks)
141 |
142 |
143 | # --- Retrieval Augmented Generation (RAG) Phase ---
144 |
def _get_context_retriever_chain(vector_db, llm):
    """Build a history-aware retriever: the LLM condenses the conversation into
    a standalone search query, which is run against the vector store."""
    retriever = vector_db.as_retriever()
    prompt = ChatPromptTemplate.from_messages([
        MessagesPlaceholder(variable_name="messages"),
        ("user", "{input}"),
        # Fixed typo in the prompt: "inforamtion" -> "information".
        ("user", "Given the above conversation, generate a search query to look up in order to get information relevant to the conversation, focusing on the most recent messages."),
    ])
    retriever_chain = create_history_aware_retriever(llm, retriever, prompt)

    return retriever_chain
155 |
156 |
def get_conversational_rag_chain(llm):
    """Compose the history-aware retriever with a stuff-documents answer chain.

    Uses the vector DB stored in `st.session_state.vector_db`.
    """
    retriever_chain = _get_context_retriever_chain(st.session_state.vector_db, llm)

    # Fixed garbled wording in the system prompt ("but now always would be
    # completely related" -> "but it will not always be completely related").
    prompt = ChatPromptTemplate.from_messages([
        ("system",
        """You are a helpful assistant. You will have to answer to user's queries.
        You will have some context to help with your answers, but it will not always be completely related or helpful.
        You can also use your knowledge to assist answering the user's queries.\n
        {context}"""),
        MessagesPlaceholder(variable_name="messages"),
        ("user", "{input}"),
    ])
    stuff_documents_chain = create_stuff_documents_chain(llm, prompt)

    return create_retrieval_chain(retriever_chain, stuff_documents_chain)
172 |
173 |
def stream_llm_rag_response(llm_stream, messages):
    """Stream a RAG-augmented answer chunk by chunk and record it in history.

    Yields answer chunks for `st.write_stream`; on failure, surfaces the error
    and yields a fallback apology message instead. The accumulated text (not
    the apology) is appended to the session chat history either way.
    """
    if st.session_state.vector_db is None:
        st.error("Vector database is not initialized. Please upload some documents first.")
        return

    rag_chain = get_conversational_rag_chain(llm_stream)
    answer_so_far = "*(RAG Response)*\n"
    try:
        payload = {"messages": messages[:-1], "input": messages[-1].content}
        for piece in rag_chain.pick("answer").stream(payload):
            answer_so_far += piece
            yield piece
    except Exception as e:
        st.error(f"An error occurred while processing the RAG response: {str(e)}")
        yield "I'm sorry, but I encountered an error while trying to process your request with the uploaded documents. Please try again or contact support if the issue persists."

    st.session_state.messages.append({"role": "assistant", "content": answer_so_far})
190 |
--------------------------------------------------------------------------------
/notebooks/simple-product-info-chatbot.py:
--------------------------------------------------------------------------------
"""Streamlit RAG chat app: upload documents/URLs into a Chroma DB and chat over them."""

import streamlit as st
import os
import dotenv
import uuid
from langchain.schema import HumanMessage, AIMessage
from langchain_openai import ChatOpenAI
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

from rag_methods import (
    load_doc_to_db,
    load_url_to_db,
    stream_llm_response,
    stream_llm_rag_response,
)

dotenv.load_dotenv()

# Available chat models. Defined unconditionally: previously this list was
# only created when AZ_OPENAI_API_KEY was absent, so the model selector below
# crashed with NameError whenever that env var was set.
# TODO: add the Azure model identifiers here if Azure support is wired up.
MODELS = [
    # "openai/o1-mini",
    "openai/gpt-4o",
    "openai/gpt-4o-mini",
]

st.set_page_config(
    page_title="RAG LLM app?",
    page_icon="📚",
    layout="centered",
    initial_sidebar_state="expanded"
)


# --- Header ---
st.html("""📚🔍 Do your LLM even RAG bro? 🤖💬
""")


# --- Initial Setup ---
# One UUID per browser session; it also namespaces the on-disk Chroma DB.
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())

if "rag_sources" not in st.session_state:
    st.session_state.rag_sources = []

# Seed the history so the chat view is never empty on first load.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there! How can I assist you today?"}
    ]

if "vector_db" not in st.session_state:
    st.session_state.vector_db = None

# Reload a previously persisted vector DB for this session, if one exists.
persist_directory = f"./chroma_db_{st.session_state['session_id']}"
if os.path.exists(persist_directory):
    # Use .get() with an env fallback: the sidebar widget that creates the
    # "openai_api_key" session key has not run yet at this point in the
    # script, so plain attribute access could raise AttributeError.
    _api_key = st.session_state.get("openai_api_key") or os.getenv("OPENAI_API_KEY", "")
    embedding = OpenAIEmbeddings(api_key=_api_key)
    st.session_state.vector_db = Chroma(persist_directory=persist_directory, embedding_function=embedding)

# --- Side Bar LLM API Tokens ---
with st.sidebar:
    default_openai_api_key = os.getenv("OPENAI_API_KEY") if os.getenv("OPENAI_API_KEY") is not None else ""  # only for development environment, otherwise it should return None
    with st.popover("🔐 OpenAI"):
        openai_api_key = st.text_input(
            "Introduce your OpenAI API Key (https://platform.openai.com/)",
            value=default_openai_api_key,
            type="password",
            key="openai_api_key",
        )


# --- Main Content ---
# Checking if the user has introduced the OpenAI API Key, if not, a warning is displayed
missing_openai = openai_api_key == "" or openai_api_key is None or "sk-" not in openai_api_key
if missing_openai:
    st.write("#")
    st.warning("⬅️ Please introduce an API Key to continue...")

else:
    # Sidebar: model selector, RAG toggle, and source ingestion widgets.
    with st.sidebar:
        st.divider()
        models = []
        for model in MODELS:
            if "openai" in model and not missing_openai:
                models.append(model)

        st.selectbox(
            "🤖 Select a Model",
            options=models,
            key="model",
        )

        cols0 = st.columns(2)
        with cols0[0]:
            is_vector_db_loaded = st.session_state.vector_db is not None
            # RAG can only be enabled once at least one source is ingested.
            st.toggle(
                "Use RAG",
                value=is_vector_db_loaded,
                key="use_rag",
                disabled=not is_vector_db_loaded,
            )

        with cols0[1]:
            st.button("Clear Chat", on_click=lambda: st.session_state.messages.clear(), type="primary")

        st.header("RAG Sources:")

        # File upload input for RAG with documents
        st.file_uploader(
            "📄 Upload a document",
            type=["pdf", "txt", "docx", "md", "csv"],
            accept_multiple_files=True,
            on_change=load_doc_to_db,
            key="rag_docs",
        )

        # URL input for RAG with websites
        st.text_input(
            "🌐 Introduce a URL",
            placeholder="https://example.com",
            on_change=load_url_to_db,
            key="rag_url",
        )

        with st.expander(f"📚 Documents in DB ({0 if not is_vector_db_loaded else len(st.session_state.rag_sources)})"):
            st.write([] if not is_vector_db_loaded else [source for source in st.session_state.rag_sources])


    # Main chat app
    model_provider = st.session_state.model.split("/")[0]
    if model_provider == "openai":
        llm_stream = ChatOpenAI(
            api_key=openai_api_key,
            model_name=st.session_state.model.split("/")[-1],
            temperature=0.3,
            streaming=True,
        )

    # Replay the stored conversation.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("Your message"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            # Convert the dict-based history into LangChain message objects.
            messages = [HumanMessage(content=m["content"]) if m["role"] == "user" else AIMessage(content=m["content"]) for m in st.session_state.messages]

            if not st.session_state.use_rag or st.session_state.vector_db is None:
                st.write_stream(stream_llm_response(llm_stream, messages))
            else:
                st.write_stream(stream_llm_rag_response(llm_stream, messages))


with st.sidebar:
    st.divider()
    st.video("https://youtu.be/abMwFViFFhI")
    st.write("📋[Medium Blog](https://medium.com/@enricdomingo/program-a-rag-llm-chat-app-with-langchain-streamlit-o1-gtp-4o-and-claude-3-5-529f0f164a5e)")
    st.write("📋[GitHub Repo](https://github.com/enricd/rag_llm_app)")
165 |
166 |
167 |
--------------------------------------------------------------------------------
/notebooks/testing-langchain-app.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Testing a LLM app\n",
8 | "\n",
9 | "1. Organize your parameters for experimentation\n",
10 | " 1. Atomic task that you're trying to solve\n",
11 | " 2. One or more prompts that solve either the whole thing or part of it\n",
12 | " 3. Define a metric that comprises performance\n",
13 | " 1. Summarization: g-eval method that involves consistency, relevancy, fluency, etc...\n",
    "    2. Extraction: produce a test coverage dataset with examples of successful extraction and then work from there\n",
15 | " 3. General applications: LLM-as-a-judge just uses gpt-4o or gpt-4o-mini or whatever LLM to judge the outputs of other LLMs and score them for comparison and performance tracking\n",
16 | " 4. RAG: \n",
17 | " 1. Hallucination\n",
18 | " 2. Latency\n",
19 | " 3. Token\n",
20 | " 4. Token/cost \n",
    "        5. Benchmarks for your specific use case context/scenario\n",
22 | " 6. Faithfulness (accuracy concerning the retrieved context, avoiding hallucinations), Context Relevancy (relevance of the retrieved context to the query), Answer Relevancy (relevance of the generated answer to the query)\n",
23 | " 7. Build your own small test dataset for whatever your task is, and iterate and grow it, by mixing manual annotations with semi-automated annotations and performance tracking\n",
24 | "2. Do a lot of few-shot examples in your prompts."
25 | ]
26 | }
27 | ],
28 | "metadata": {
29 | "language_info": {
30 | "name": "python"
31 | }
32 | },
33 | "nbformat": 4,
34 | "nbformat_minor": 2
35 | }
36 |
--------------------------------------------------------------------------------
/presentation_slides/2024-02-20-21-43-40.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/presentation_slides/2024-02-20-21-43-40.png
--------------------------------------------------------------------------------
/presentation_slides/Getting-Started-with-LangChain-presentation.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EnkrateiaLucca/oreilly_live_training_getting_started_with_langchain/9232c291a850aa62586ceadb8d7ebd453f2fa593/presentation_slides/Getting-Started-with-LangChain-presentation.pdf
--------------------------------------------------------------------------------
/presentation_slides/extract-presentation.applescript:
--------------------------------------------------------------------------------
-- Extract all text from every slide of the frontmost Keynote presentation
-- and save it to KeynoteText.txt on the desktop.
tell application "Keynote"
    activate
    -- Make sure a presentation is open
    if not (exists front document) then error "Please open a presentation first."
    set thePresentation to front document
    set slideText to ""

    -- Loop through each slide in the presentation
    repeat with i from 1 to the count of slides of thePresentation
        set thisSlide to slide i of thePresentation
        set slideText to slideText & "Slide " & i & ":"

        -- Loop through each text item in the slide
        repeat with j from 1 to the count of text items of thisSlide
            set thisTextItem to text item j of thisSlide
            set theText to object text of thisTextItem
            set slideText to slideText & return & theText
        end repeat
        set slideText to slideText & return & return
    end repeat
end tell

-- Writing the extracted text to a file on the desktop
set desktopPath to (path to desktop folder as text) & "KeynoteText.txt"
set fileReference to open for access file desktopPath with write permission
try
    -- Truncate before writing: "open for access" does not clear an existing
    -- file, so a previous longer run would leave stale bytes at the end.
    set eof of fileReference to 0
    write slideText to fileReference
    close access fileReference
on error errMsg number errNum
    -- Always release the file handle, then re-raise the original error.
    close access fileReference
    error errMsg number errNum
end try

-- Notifying the user that the script has finished
display notification "Extracted text has been saved to KeynoteText.txt on your desktop." with title "Extraction Complete"
--------------------------------------------------------------------------------
/presentation_slides/presentation-langgraph.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Module 4: Getting Started with LangGraph
5 |
6 |
24 |
25 |
26 |
388 |
390 |
393 |
394 |
--------------------------------------------------------------------------------
/presentation_slides/presentation.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Introduction to LangChain
5 |
6 |
18 |
19 |
20 |
870 |
871 |
874 |
875 |
876 |
877 | ```
--------------------------------------------------------------------------------
/requirements/requirements.in:
--------------------------------------------------------------------------------
1 | ipykernel
2 | langchain>=0.3
3 | langchain-community
4 | langchain-openai
5 | langchain-ollama
6 | langchain_experimental
7 | langchain-cli
8 | langchain_chroma
9 | langchainhub
10 | langgraph>=0.2.58
11 | langsmith
12 | fastapi
13 | langserve
14 | openai
15 | pypdf
16 | faiss-cpu
17 | arxiv
18 | xmltodict
19 | duckduckgo-search
20 | playwright
21 | graphviz
22 | ipydrawio
23 | ipydrawio_widgets
24 | nbformat
25 | python-dotenv
26 | pandas
27 | matplotlib
28 | wikipedia
29 | google-search-results
--------------------------------------------------------------------------------
/requirements/requirements.txt:
--------------------------------------------------------------------------------
1 | # This file was autogenerated by uv via the following command:
2 | # uv pip compile ./requirements/requirements.in -o ./requirements/requirements.txt
3 | aiofiles==22.1.0
4 | # via ypy-websocket
5 | aiohttp==3.9.5
6 | # via
7 | # langchain
8 | # langchain-community
9 | aiosignal==1.3.1
10 | # via aiohttp
11 | aiosqlite==0.20.0
12 | # via ypy-websocket
13 | annotated-types==0.7.0
14 | # via pydantic
15 | anyio==4.4.0
16 | # via
17 | # httpx
18 | # jupyter-server
19 | # openai
20 | # sse-starlette
21 | # starlette
22 | # watchfiles
23 | appnope==0.1.4
24 | # via ipykernel
25 | argon2-cffi==23.1.0
26 | # via
27 | # jupyter-server
28 | # notebook
29 | argon2-cffi-bindings==21.2.0
30 | # via argon2-cffi
31 | arrow==1.3.0
32 | # via isoduration
33 | arxiv==2.1.0
34 | # via -r ./requirements/requirements.in
35 | asgiref==3.8.1
36 | # via opentelemetry-instrumentation-asgi
37 | asttokens==2.4.1
38 | # via stack-data
39 | attrs==23.2.0
40 | # via
41 | # aiohttp
42 | # jsonschema
43 | # referencing
44 | babel==2.15.0
45 | # via jupyterlab-server
46 | backoff==2.2.1
47 | # via posthog
48 | bcrypt==4.1.3
49 | # via chromadb
50 | beautifulsoup4==4.12.3
51 | # via
52 | # nbconvert
53 | # wikipedia
54 | bleach==6.1.0
55 | # via nbconvert
56 | build==1.2.1
57 | # via chromadb
58 | cachetools==5.3.3
59 | # via google-auth
60 | certifi==2024.6.2
61 | # via
62 | # httpcore
63 | # httpx
64 | # kubernetes
65 | # requests
66 | cffi==1.16.0
67 | # via argon2-cffi-bindings
68 | charset-normalizer==3.3.2
69 | # via requests
70 | chroma-hnswlib==0.7.3
71 | # via chromadb
72 | chromadb==0.5.0
73 | # via langchain-chroma
74 | click==8.1.7
75 | # via
76 | # duckduckgo-search
77 | # typer
78 | # uvicorn
79 | colorama==0.4.6
80 | # via typer
81 | coloredlogs==15.0.1
82 | # via onnxruntime
83 | comm==0.2.2
84 | # via
85 | # ipykernel
86 | # ipywidgets
87 | contourpy==1.2.1
88 | # via matplotlib
89 | cycler==0.12.1
90 | # via matplotlib
91 | dataclasses-json==0.6.7
92 | # via langchain-community
93 | debugpy==1.8.1
94 | # via ipykernel
95 | decorator==5.1.1
96 | # via ipython
97 | defusedxml==0.7.1
98 | # via nbconvert
99 | deprecated==1.2.14
100 | # via
101 | # opentelemetry-api
102 | # opentelemetry-exporter-otlp-proto-grpc
103 | distro==1.9.0
104 | # via openai
105 | duckduckgo-search==6.1.6
106 | # via -r ./requirements/requirements.in
107 | entrypoints==0.4
108 | # via jupyter-client
109 | executing==2.0.1
110 | # via stack-data
111 | faiss-cpu==1.8.0
112 | # via -r ./requirements/requirements.in
113 | fastapi==0.110.3
114 | # via
115 | # -r ./requirements/requirements.in
116 | # chromadb
117 | # langchain-chroma
118 | # langserve
119 | # sse-starlette
120 | fastjsonschema==2.19.1
121 | # via nbformat
122 | feedparser==6.0.10
123 | # via arxiv
124 | filelock==3.15.1
125 | # via huggingface-hub
126 | flatbuffers==24.3.25
127 | # via onnxruntime
128 | fonttools==4.53.0
129 | # via matplotlib
130 | fqdn==1.5.1
131 | # via jsonschema
132 | frozenlist==1.4.1
133 | # via
134 | # aiohttp
135 | # aiosignal
136 | fsspec==2024.6.0
137 | # via huggingface-hub
138 | gitdb==4.0.11
139 | # via gitpython
140 | gitpython==3.1.43
141 | # via langchain-cli
142 | google-auth==2.30.0
143 | # via kubernetes
144 | google-search-results==2.4.2
145 | # via -r ./requirements/requirements.in
146 | googleapis-common-protos==1.63.1
147 | # via opentelemetry-exporter-otlp-proto-grpc
148 | graphviz==0.20.3
149 | # via -r ./requirements/requirements.in
150 | greenlet==3.0.3
151 | # via playwright
152 | grpcio==1.64.1
153 | # via
154 | # chromadb
155 | # opentelemetry-exporter-otlp-proto-grpc
156 | h11==0.14.0
157 | # via
158 | # httpcore
159 | # uvicorn
160 | httpcore==1.0.5
161 | # via httpx
162 | httptools==0.6.1
163 | # via uvicorn
164 | httpx==0.27.0
165 | # via
166 | # langgraph-sdk
167 | # langserve
168 | # langsmith
169 | # ollama
170 | # openai
171 | huggingface-hub==0.23.3
172 | # via tokenizers
173 | humanfriendly==10.0
174 | # via coloredlogs
175 | idna==3.7
176 | # via
177 | # anyio
178 | # httpx
179 | # jsonschema
180 | # requests
181 | # yarl
182 | importlib-metadata==7.1.0
183 | # via opentelemetry-api
184 | importlib-resources==6.4.0
185 | # via chromadb
186 | ipydrawio==1.3.0
187 | # via -r ./requirements/requirements.in
188 | ipydrawio-widgets==1.3.0
189 | # via
190 | # -r ./requirements/requirements.in
191 | # ipydrawio
192 | ipykernel==6.29.4
193 | # via
194 | # -r ./requirements/requirements.in
195 | # nbclassic
196 | # notebook
197 | ipython==8.25.0
198 | # via
199 | # ipykernel
200 | # ipywidgets
201 | # jupyterlab
202 | ipython-genutils==0.2.0
203 | # via
204 | # nbclassic
205 | # notebook
206 | ipywidgets==8.1.3
207 | # via ipydrawio-widgets
208 | isoduration==20.11.0
209 | # via jsonschema
210 | jedi==0.19.1
211 | # via ipython
212 | jinja2==3.1.4
213 | # via
214 | # jupyter-server
215 | # jupyterlab
216 | # jupyterlab-server
217 | # nbconvert
218 | # notebook
219 | jiter==0.6.1
220 | # via openai
221 | json5==0.9.25
222 | # via jupyterlab-server
223 | jsonpatch==1.33
224 | # via langchain-core
225 | jsonpointer==3.0.0
226 | # via
227 | # jsonpatch
228 | # jsonschema
229 | jsonschema==4.22.0
230 | # via
231 | # ipydrawio-widgets
232 | # jupyter-events
233 | # jupyterlab-server
234 | # nbformat
235 | jsonschema-specifications==2023.12.1
236 | # via jsonschema
237 | jupyter-client==7.4.9
238 | # via
239 | # ipykernel
240 | # jupyter-server
241 | # nbclient
242 | # notebook
243 | jupyter-core==5.7.2
244 | # via
245 | # ipykernel
246 | # jupyter-client
247 | # jupyter-server
248 | # jupyterlab
249 | # nbclient
250 | # nbconvert
251 | # nbformat
252 | # notebook
253 | jupyter-events==0.10.0
254 | # via
255 | # jupyter-server
256 | # jupyter-server-fileid
257 | jupyter-server==2.14.1
258 | # via
259 | # jupyter-server-fileid
260 | # jupyterlab
261 | # jupyterlab-server
262 | # notebook-shim
263 | jupyter-server-fileid==0.9.2
264 | # via jupyter-server-ydoc
265 | jupyter-server-terminals==0.5.3
266 | # via jupyter-server
267 | jupyter-server-ydoc==0.8.0
268 | # via jupyterlab
269 | jupyter-ydoc==0.2.5
270 | # via
271 | # jupyter-server-ydoc
272 | # jupyterlab
273 | jupyterlab==3.6.7
274 | # via ipydrawio
275 | jupyterlab-pygments==0.3.0
276 | # via nbconvert
277 | jupyterlab-server==2.27.2
278 | # via jupyterlab
279 | jupyterlab-widgets==3.0.11
280 | # via ipywidgets
281 | kiwisolver==1.4.5
282 | # via matplotlib
283 | kubernetes==30.1.0
284 | # via chromadb
285 | langchain==0.3.3
286 | # via
287 | # -r ./requirements/requirements.in
288 | # langchain-community
289 | langchain-chroma==0.1.4
290 | # via -r ./requirements/requirements.in
291 | langchain-cli==0.0.24
292 | # via -r ./requirements/requirements.in
293 | langchain-community==0.3.2
294 | # via
295 | # -r ./requirements/requirements.in
296 | # langchain-experimental
297 | langchain-core==0.3.24
298 | # via
299 | # langchain
300 | # langchain-chroma
301 | # langchain-community
302 | # langchain-experimental
303 | # langchain-ollama
304 | # langchain-openai
305 | # langchain-text-splitters
306 | # langgraph
307 | # langgraph-checkpoint
308 | # langserve
309 | langchain-experimental==0.3.2
310 | # via -r ./requirements/requirements.in
311 | langchain-ollama==0.2.0
312 | # via -r ./requirements/requirements.in
313 | langchain-openai==0.2.2
314 | # via -r ./requirements/requirements.in
315 | langchain-text-splitters==0.3.0
316 | # via langchain
317 | langchainhub==0.1.20
318 | # via -r ./requirements/requirements.in
319 | langgraph==0.2.58
320 | # via -r ./requirements/requirements.in
321 | langgraph-checkpoint==2.0.8
322 | # via langgraph
323 | langgraph-sdk==0.1.43
324 | # via langgraph
325 | langserve==0.3.0
326 | # via
327 | # -r ./requirements/requirements.in
328 | # langchain-cli
329 | langsmith==0.1.134
330 | # via
331 | # -r ./requirements/requirements.in
332 | # langchain
333 | # langchain-community
334 | # langchain-core
335 | libcst==1.4.0
336 | # via langchain-cli
337 | markdown-it-py==3.0.0
338 | # via rich
339 | markupsafe==2.1.5
340 | # via
341 | # jinja2
342 | # nbconvert
343 | marshmallow==3.21.3
344 | # via dataclasses-json
345 | matplotlib==3.9.0
346 | # via -r ./requirements/requirements.in
347 | matplotlib-inline==0.1.7
348 | # via
349 | # ipykernel
350 | # ipython
351 | mdurl==0.1.2
352 | # via markdown-it-py
353 | mistune==3.0.2
354 | # via nbconvert
355 | mmh3==4.1.0
356 | # via chromadb
357 | monotonic==1.6
358 | # via posthog
359 | mpmath==1.3.0
360 | # via sympy
361 | msgpack==1.1.0
362 | # via langgraph-checkpoint
363 | multidict==6.0.5
364 | # via
365 | # aiohttp
366 | # yarl
367 | mypy-extensions==1.0.0
368 | # via typing-inspect
369 | nbclassic==1.1.0
370 | # via
371 | # jupyterlab
372 | # notebook
373 | nbclient==0.10.0
374 | # via nbconvert
375 | nbconvert==7.16.4
376 | # via
377 | # jupyter-server
378 | # notebook
379 | nbformat==5.10.4
380 | # via
381 | # -r ./requirements/requirements.in
382 | # jupyter-server
383 | # nbclient
384 | # nbconvert
385 | # notebook
386 | nest-asyncio==1.6.0
387 | # via
388 | # ipykernel
389 | # jupyter-client
390 | # nbclassic
391 | # notebook
392 | notebook==6.5.7
393 | # via jupyterlab
394 | notebook-shim==0.2.4
395 | # via nbclassic
396 | numpy==1.26.4
397 | # via
398 | # chroma-hnswlib
399 | # chromadb
400 | # contourpy
401 | # faiss-cpu
402 | # langchain
403 | # langchain-chroma
404 | # langchain-community
405 | # matplotlib
406 | # onnxruntime
407 | # pandas
408 | oauthlib==3.2.2
409 | # via
410 | # kubernetes
411 | # requests-oauthlib
412 | ollama==0.3.1
413 | # via langchain-ollama
414 | onnxruntime==1.18.0
415 | # via chromadb
416 | openai==1.51.2
417 | # via
418 | # -r ./requirements/requirements.in
419 | # langchain-openai
420 | opentelemetry-api==1.25.0
421 | # via
422 | # chromadb
423 | # opentelemetry-exporter-otlp-proto-grpc
424 | # opentelemetry-instrumentation
425 | # opentelemetry-instrumentation-asgi
426 | # opentelemetry-instrumentation-fastapi
427 | # opentelemetry-sdk
428 | # opentelemetry-semantic-conventions
429 | opentelemetry-exporter-otlp-proto-common==1.25.0
430 | # via opentelemetry-exporter-otlp-proto-grpc
431 | opentelemetry-exporter-otlp-proto-grpc==1.25.0
432 | # via chromadb
433 | opentelemetry-instrumentation==0.46b0
434 | # via
435 | # opentelemetry-instrumentation-asgi
436 | # opentelemetry-instrumentation-fastapi
437 | opentelemetry-instrumentation-asgi==0.46b0
438 | # via opentelemetry-instrumentation-fastapi
439 | opentelemetry-instrumentation-fastapi==0.46b0
440 | # via chromadb
441 | opentelemetry-proto==1.25.0
442 | # via
443 | # opentelemetry-exporter-otlp-proto-common
444 | # opentelemetry-exporter-otlp-proto-grpc
445 | opentelemetry-sdk==1.25.0
446 | # via
447 | # chromadb
448 | # opentelemetry-exporter-otlp-proto-grpc
449 | opentelemetry-semantic-conventions==0.46b0
450 | # via
451 | # opentelemetry-instrumentation-asgi
452 | # opentelemetry-instrumentation-fastapi
453 | # opentelemetry-sdk
454 | opentelemetry-util-http==0.46b0
455 | # via
456 | # opentelemetry-instrumentation-asgi
457 | # opentelemetry-instrumentation-fastapi
458 | orjson==3.10.4
459 | # via
460 | # chromadb
461 | # duckduckgo-search
462 | # langgraph-sdk
463 | # langserve
464 | # langsmith
465 | overrides==7.7.0
466 | # via
467 | # chromadb
468 | # jupyter-server
469 | packaging==23.2
470 | # via
471 | # build
472 | # huggingface-hub
473 | # ipykernel
474 | # jupyter-server
475 | # jupyterlab
476 | # jupyterlab-server
477 | # langchain-core
478 | # langchainhub
479 | # marshmallow
480 | # matplotlib
481 | # nbconvert
482 | # onnxruntime
483 | pandas==2.2.2
484 | # via -r ./requirements/requirements.in
485 | pandocfilters==1.5.1
486 | # via nbconvert
487 | parso==0.8.4
488 | # via jedi
489 | pexpect==4.9.0
490 | # via ipython
491 | pillow==10.3.0
492 | # via matplotlib
493 | platformdirs==4.2.2
494 | # via jupyter-core
495 | playwright==1.44.0
496 | # via -r ./requirements/requirements.in
497 | posthog==3.5.0
498 | # via chromadb
499 | prometheus-client==0.20.0
500 | # via
501 | # jupyter-server
502 | # notebook
503 | prompt-toolkit==3.0.47
504 | # via ipython
505 | protobuf==4.25.3
506 | # via
507 | # googleapis-common-protos
508 | # onnxruntime
509 | # opentelemetry-proto
510 | psutil==5.9.8
511 | # via ipykernel
512 | ptyprocess==0.7.0
513 | # via
514 | # pexpect
515 | # terminado
516 | pure-eval==0.2.2
517 | # via stack-data
518 | pyasn1==0.6.0
519 | # via
520 | # pyasn1-modules
521 | # rsa
522 | pyasn1-modules==0.4.0
523 | # via google-auth
524 | pycparser==2.22
525 | # via cffi
526 | pydantic==2.7.4
527 | # via
528 | # chromadb
529 | # fastapi
530 | # langchain
531 | # langchain-core
532 | # langserve
533 | # langsmith
534 | # openai
535 | # pydantic-settings
536 | pydantic-core==2.18.4
537 | # via pydantic
538 | pydantic-settings==2.5.2
539 | # via langchain-community
540 | pyee==11.1.0
541 | # via playwright
542 | pygments==2.18.0
543 | # via
544 | # ipython
545 | # nbconvert
546 | # rich
547 | pyparsing==3.1.2
548 | # via matplotlib
549 | pypdf==4.2.0
550 | # via -r ./requirements/requirements.in
551 | pypika==0.48.9
552 | # via chromadb
553 | pyproject-hooks==1.1.0
554 | # via build
555 | pyreqwest-impersonate==0.4.7
556 | # via duckduckgo-search
557 | python-dateutil==2.9.0.post0
558 | # via
559 | # arrow
560 | # jupyter-client
561 | # kubernetes
562 | # matplotlib
563 | # pandas
564 | # posthog
565 | python-dotenv==1.0.1
566 | # via
567 | # -r ./requirements/requirements.in
568 | # pydantic-settings
569 | # uvicorn
570 | python-json-logger==2.0.7
571 | # via jupyter-events
572 | pytz==2024.1
573 | # via pandas
574 | pyyaml==6.0.1
575 | # via
576 | # chromadb
577 | # huggingface-hub
578 | # jupyter-events
579 | # kubernetes
580 | # langchain
581 | # langchain-community
582 | # langchain-core
583 | # libcst
584 | # uvicorn
585 | pyzmq==26.0.3
586 | # via
587 | # ipykernel
588 | # jupyter-client
589 | # jupyter-server
590 | # notebook
591 | referencing==0.35.1
592 | # via
593 | # jsonschema
594 | # jsonschema-specifications
595 | # jupyter-events
596 | regex==2024.5.15
597 | # via tiktoken
598 | requests==2.31.0
599 | # via
600 | # arxiv
601 | # chromadb
602 | # google-search-results
603 | # huggingface-hub
604 | # jupyterlab-server
605 | # kubernetes
606 | # langchain
607 | # langchain-community
608 | # langchainhub
609 | # langsmith
610 | # posthog
611 | # requests-oauthlib
612 | # requests-toolbelt
613 | # tiktoken
614 | # wikipedia
615 | requests-oauthlib==2.0.0
616 | # via kubernetes
617 | requests-toolbelt==1.0.0
618 | # via langsmith
619 | rfc3339-validator==0.1.4
620 | # via
621 | # jsonschema
622 | # jupyter-events
623 | rfc3986-validator==0.1.1
624 | # via
625 | # jsonschema
626 | # jupyter-events
627 | rich==13.7.1
628 | # via typer
629 | rpds-py==0.18.1
630 | # via
631 | # jsonschema
632 | # referencing
633 | rsa==4.9
634 | # via google-auth
635 | send2trash==1.8.3
636 | # via
637 | # jupyter-server
638 | # notebook
639 | setuptools==70.0.0
640 | # via opentelemetry-instrumentation
641 | sgmllib3k==1.0.0
642 | # via feedparser
643 | shellingham==1.5.4
644 | # via typer
645 | six==1.16.0
646 | # via
647 | # asttokens
648 | # bleach
649 | # kubernetes
650 | # posthog
651 | # python-dateutil
652 | # rfc3339-validator
653 | smmap==5.0.1
654 | # via gitdb
655 | sniffio==1.3.1
656 | # via
657 | # anyio
658 | # httpx
659 | # openai
660 | soupsieve==2.5
661 | # via beautifulsoup4
662 | sqlalchemy==2.0.30
663 | # via
664 | # langchain
665 | # langchain-community
666 | sse-starlette==1.8.2
667 | # via langserve
668 | stack-data==0.6.3
669 | # via ipython
670 | starlette==0.37.2
671 | # via
672 | # fastapi
673 | # sse-starlette
674 | sympy==1.12.1
675 | # via onnxruntime
676 | tenacity==8.3.0
677 | # via
678 | # chromadb
679 | # langchain
680 | # langchain-community
681 | # langchain-core
682 | terminado==0.18.1
683 | # via
684 | # jupyter-server
685 | # jupyter-server-terminals
686 | # notebook
687 | tiktoken==0.7.0
688 | # via langchain-openai
689 | tinycss2==1.3.0
690 | # via nbconvert
691 | tokenizers==0.19.1
692 | # via chromadb
693 | tomlkit==0.12.5
694 | # via langchain-cli
695 | tornado==6.4.1
696 | # via
697 | # ipykernel
698 | # jupyter-client
699 | # jupyter-server
700 | # jupyterlab
701 | # notebook
702 | # terminado
703 | tqdm==4.66.4
704 | # via
705 | # chromadb
706 | # huggingface-hub
707 | # openai
708 | traitlets==5.14.3
709 | # via
710 | # comm
711 | # ipykernel
712 | # ipython
713 | # ipywidgets
714 | # jupyter-client
715 | # jupyter-core
716 | # jupyter-events
717 | # jupyter-server
718 | # matplotlib-inline
719 | # nbclient
720 | # nbconvert
721 | # nbformat
722 | # notebook
723 | typer==0.9.4
724 | # via
725 | # chromadb
726 | # langchain-cli
727 | types-python-dateutil==2.9.0.20240316
728 | # via arrow
729 | types-requests==2.32.0.20240602
730 | # via langchainhub
731 | typing-extensions==4.12.2
732 | # via
733 | # aiosqlite
734 | # chromadb
735 | # fastapi
736 | # huggingface-hub
737 | # ipython
738 | # langchain-core
739 | # openai
740 | # opentelemetry-sdk
741 | # pydantic
742 | # pydantic-core
743 | # pyee
744 | # sqlalchemy
745 | # typer
746 | # typing-inspect
747 | typing-inspect==0.9.0
748 | # via dataclasses-json
749 | tzdata==2024.1
750 | # via pandas
751 | uri-template==1.3.0
752 | # via jsonschema
753 | urllib3==2.2.1
754 | # via
755 | # kubernetes
756 | # requests
757 | # types-requests
758 | uvicorn==0.23.2
759 | # via
760 | # chromadb
761 | # langchain-cli
762 | # sse-starlette
763 | uvloop==0.19.0
764 | # via uvicorn
765 | watchfiles==0.22.0
766 | # via uvicorn
767 | wcwidth==0.2.13
768 | # via prompt-toolkit
769 | webcolors==24.6.0
770 | # via jsonschema
771 | webencodings==0.5.1
772 | # via
773 | # bleach
774 | # tinycss2
775 | websocket-client==1.8.0
776 | # via
777 | # jupyter-server
778 | # kubernetes
779 | websockets==12.0
780 | # via uvicorn
781 | widgetsnbextension==4.0.11
782 | # via ipywidgets
783 | wikipedia==1.4.0
784 | # via -r ./requirements/requirements.in
785 | wrapt==1.16.0
786 | # via
787 | # deprecated
788 | # opentelemetry-instrumentation
789 | xmltodict==0.13.0
790 | # via -r ./requirements/requirements.in
791 | y-py==0.6.2
792 | # via
793 | # jupyter-ydoc
794 | # ypy-websocket
795 | yarl==1.9.4
796 | # via aiohttp
797 | ypy-websocket==0.8.4
798 | # via jupyter-server-ydoc
799 | zipp==3.19.2
800 | # via importlib-metadata
801 |
--------------------------------------------------------------------------------