├── L-11
│   ├── .env
│   ├── README.md
│   ├── README.md.bak
│   └── demo.ipynb
├── L-13
│   ├── .env
│   ├── README.md
│   └── solution.ipynb
├── L-15
│   ├── 1.py
│   └── README.md
├── L-16
│   ├── README.md
│   └── sol.ipynb
├── L-3
│   └── first_genai_app
│       ├── .env
│       ├── README.md
│       ├── gemini_app_qa.py
│       └── gemini_applanguage_translator.py
├── L-4
│   └── chatgpt_clone
│       ├── .env
│       ├── README.md
│       ├── chatgpt_like_app.py
│       └── requirements.txt
├── L-5
│   ├── .env
│   ├── README.md
│   ├── database
│   │   └── retail_sales.sql
│   ├── myapp2.py
│   ├── requirements.txt
│   └── retail_sales_db.ipynb
├── L-6
│   ├── README.md
│   ├── app1.py
│   └── requirements.txt
├── L-7
│   └── RAG_demo
│       ├── .env
│       ├── README.md
│       ├── app1.py
│       ├── basics_RAG.ipynb
│       ├── requirements.txt
│       └── ~$script.docx
├── L-8
│   └── gemini_rag_demo
│       ├── .env
│       ├── README.md
│       ├── app1.py
│       ├── basics_RAG_pdf.ipynb
│       ├── requirements.txt
│       └── yolov9_paper.pdf
├── L-9
│   ├── README.md
│   ├── huggingface_llm_RAG.ipynb
│   └── requirements.txt
├── deepseek-r1_langchain
│   ├── 1.py
│   └── README.md
├── langgraph_agents.ipynb
├── langgraph_rag.ipynb
├── langgraph_tools_Bindings_agents.ipynb
├── multi_agents_langgraph.ipynb
└── qwen_implementation
    ├── README.md
    ├── README.md.bak
    ├── demo_huggingface.ipynb
    └── demo_langgraph_ollama.py
/L-11/.env:
--------------------------------------------------------------------------------
1 | SERPAPI_API_KEY="d36e*************5d9b85a7679a78"
2 | 
3 | OPENAI_API_KEY='sk-proj-eGTsZBr*******************QsPkQHrtvM7'
4 | 
--------------------------------------------------------------------------------
/L-11/README.md:
--------------------------------------------------------------------------------
1 | ### Video Tutorial: https://youtu.be/Z_OvWHR8C7M
2 | 
3 | ## Environment setup:
4 | 
5 | Install packages
6 | 
7 | pip install --upgrade langchain langchain-community google-search-results
8 | 
9 | 
10 | #### Provides access to the OpenAI GPT models. You’ll also need an OpenAI API key, which you can get by signing up at OpenAI.
11 | pip install openai
12 | 
13 | #### A tool for searching the web. You’ll need a SerpAPI key, which you can get by signing up at SerpAPI.
14 | pip install google-search-results
15 | 
16 | #### To manage environment variables using a .env file.
17 | pip install python-dotenv
18 | 
19 | In the same directory as your script, create a .env file and add your API keys.
20 | 
21 | 
22 | Create a .env file and paste in these environment variables
23 | 
24 | 
25 | 
26 | 
27 | 
28 | 
--------------------------------------------------------------------------------
/L-11/README.md.bak:
--------------------------------------------------------------------------------
1 | ### Video Tutorial: https://youtu.be/425N7n86QGw
2 | 
3 | ## Environment setup:
4 | 
5 | conda create -n env_langchain1 python=3.10
6 | conda activate env_langchain1
7 | python -m pip install --upgrade pip
8 | Install packages:
9 | pip install -r requirements.txt
10 | 
11 | 
12 | 
13 | Gemini Pro: https://deepmind.google/technologies/gemini/pro/
14 | 
15 | ### Get an API key:
16 | Head to https://ai.google.dev/gemini-api/docs/api-key to generate a Google AI API key.
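For example, the generated key then goes into your .env file like this (a sketch — the value below is a placeholder, not a real key):

    GOOGLE_API_KEY="your-google-ai-api-key"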
17 | 
18 | Once you've done this, set the GOOGLE_API_KEY environment variable.
19 | 
20 | ### mysql:
21 | Official site link: https://dev.mysql.com/downloads/installer/
22 | 
23 | ### mysql workbench:
24 | Official site link: https://dev.mysql.com/downloads/workbench/
25 | 
--------------------------------------------------------------------------------
/L-11/demo.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "code",
5 |    "execution_count": 4,
6 |    "id": "f684a4d0-1cd9-459e-9762-0c597f8f0165",
7 |    "metadata": {},
8 |    "outputs": [],
9 |    "source": [
10 |     "from langchain.agents import initialize_agent, Tool\n",
11 |     "from langchain.llms import OpenAI\n",
12 |     "from langchain_community.utilities import SerpAPIWrapper\n",
13 |     "\n",
14 |     "\n",
15 |     "from dotenv import load_dotenv\n",
16 |     "import os\n",
17 |     "\n",
18 |     "load_dotenv()\n",
19 |     "openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n",
20 |     "serpapi_api_key = os.getenv(\"SERPAPI_API_KEY\")"
21 |    ]
22 |   },
23 |   {
24 |    "cell_type": "code",
25 |    "execution_count": 5,
26 |    "id": "25eefd32-6140-42c4-99c9-9f6eb47a7d3d",
27 |    "metadata": {},
28 |    "outputs": [],
29 |    "source": [
30 |     "# https://serpapi.com/manage-api-key"
31 |    ]
32 |   },
33 |   {
34 |    "cell_type": "code",
35 |    "execution_count": 6,
36 |    "id": "f0781f96-f573-447f-977c-d693380fd20d",
37 |    "metadata": {},
38 |    "outputs": [
39 |     {
40 |      "name": "stderr",
41 |      "output_type": "stream",
42 |      "text": [
43 |       "C:\\Users\\f\\AppData\\Local\\Temp\\ipykernel_18320\\3059657962.py:3: LangChainDeprecationWarning: The class `OpenAI` was deprecated in LangChain 0.0.10 and will be removed in 1.0. An updated version of the class exists in the :class:`~langchain-openai package and should be used instead. To use it run `pip install -U :class:`~langchain-openai` and import as `from :class:`~langchain_openai import OpenAI``.\n",
44 |       "  llm = OpenAI(temperature=0.5)\n",
45 |       "C:\\Users\\f\\AppData\\Local\\Temp\\ipykernel_18320\\3059657962.py:14: LangChainDeprecationWarning: LangChain agents will continue to be supported, but it is recommended for new use cases to be built with LangGraph. LangGraph offers a more flexible and full-featured framework for building agents, including support for tool-calling, persistence of state, and human-in-the-loop workflows. For details, refer to the `LangGraph documentation `_ as well as guides for `Migrating from AgentExecutor `_ and LangGraph's `Pre-built ReAct agent `_.\n",
46 |       "  agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)\n",
47 |       "C:\\Users\\f\\AppData\\Local\\Temp\\ipykernel_18320\\3059657962.py:16: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 1.0. 
Use :meth:`~invoke` instead.\n", 48 | " response = agent.run(\"What’s the weather in Delhi?\")\n" 49 | ] 50 | }, 51 | { 52 | "name": "stdout", 53 | "output_type": "stream", 54 | "text": [ 55 | "\n", 56 | "\n", 57 | "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", 58 | "\u001b[32;1m\u001b[1;3m I should search for a weather website that can give me the current weather in Delhi\n", 59 | "Action: Search\n", 60 | "Action Input: \"weather in Delhi\"\u001b[0m\n", 61 | "Observation: \u001b[36;1m\u001b[1;3m{'type': 'weather_result', 'temperature': '56', 'unit': 'Fahrenheit', 'precipitation': '0%', 'humidity': '86%', 'wind': '2 mph', 'location': 'Delhi, India', 'date': 'Saturday 6:00 PM', 'weather': 'Mostly cloudy'}\u001b[0m\n", 62 | "\u001b[32;1m\u001b[1;3m I should check the temperature in Fahrenheit\n", 63 | "Action: Search\n", 64 | "Action Input: \"temperature in Fahrenheit\"\u001b[0m\n", 65 | "Observation: \u001b[36;1m\u001b[1;3m[{'link': 'https://www.reddit.com/r/AskAnAmerican/comments/tddlh6/what_is_your_ideal_weather_temperature_in/', 'source': 'Reddit', 'question': 'What is your ideal weather temperature (in fahrenheit)?', 'answer': 'About 72-75 is great for me. Quite low to “ideal” humidity preferred, small breezes ok, and sunny. That’s the weather today so it should be nice, until about 5pm when I’ll switch to a sweater.', 'votes': 62}, {'link': 'https://support.google.com/pixelphone/thread/295288209/at-a-glance-widget-temperature-in-f-not-c?hl=en', 'source': 'Google Help', 'question': 'At a Glance Widget Temperature in F not C', 'answer': 'I cleared the storage/cache of the weather app data and followed these steps :Settings > System > Languages > Regional preferences > Default to Celsius Once i restarted the Weather app, at a glance changed from F to C.', 'votes': 17}, {'link': 'https://android.stackexchange.com/questions/138322/google-app-displaying-temperature-in-fahrenheit', 'source': 'Android Enthusiasts Stack Exchange', 'question': 'Google app displaying temperature in Fahrenheit', 'answer': 'This problem apparently is due to the device language selection. If English (US) is chosen, Google defaults to temperature units used in US (Fahrenheit) This is remedied by switching over to English (UK), where temperature units are in Celsius Source: Accepted answer on Android wear,How do I change temperature units in Android wear weather app?', 'votes': 7}, {'link': 'https://stackoverflow.com/questions/75074546/need-both-celsius-and-fahrenheit-with-weatherkit', 'source': 'Stack Overflow', 'question': 'I have a weather app that tells the temperature in Celsius and Fahrenheit at the same time. I\\'d like to use WeatherKit, but I\\'m having trouble rounding Celsius down to no decimal places. I can do it with Fahrenheit with .formatted because I\\'m based in the US, but I can\\'t round down Celsius at the same time. Is there an easy way to do that in SwiftUI? Or is it possible to manually set the locale for just a single property? if let weather{ let celsiusWeather = weather.currentWeather.temperature.converted(to: .celsius).description VStack{ Text(\"New York\") .font(.largeTitle) Text(\"\\\\(weather.currentWeather.temperature.converted(to: .fahrenheit).formatted().description)\") Text(celsiusWeather) } } } This current code comes up with it half there: New York 45°F 5.98°C But I would like it simply be 6°C instead. 
I\\'ve tried string interpolation: let celsiusFormatted = String(format: \"%.0f\", celsiusWeather) and that just came up with 0, not even any of the temperature, so I\\'m not sure if because it\\'s from WeatherKit that it can do that or not. Any help would be great. Let me know if you need more code to help clarify.', 'answer': 'You can format the temperature using MeasurementFormatter. Remember to set numberFormatter.maximumFractionDigits = 0 to round the number as required and unitOptions = .providedUnit to ensure the correct units are displayed. struct ContentView: View { let temp: Measurement var body: some View { VStack { HStack { Text(\"New York\") Text(temp, formatter: formatter) Text(temp.converted(to: .celsius), formatter: formatter) } } } var formatter: MeasurementFormatter { let formatter = MeasurementFormatter() formatter.unitStyle = .medium formatter.numberFormatter.maximumFractionDigits = 0 formatter.unitOptions = .providedUnit return formatter } }', 'votes': 4}]\u001b[0m\n", 66 | "\u001b[32;1m\u001b[1;3m I now know the final answer\n", 67 | "Final Answer: The temperature in Delhi is 56 degrees Fahrenheit.\u001b[0m\n", 68 | "\n", 69 | "\u001b[1m> Finished chain.\u001b[0m\n", 70 | "The temperature in Delhi is 56 degrees Fahrenheit.\n" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "search_tool = SerpAPIWrapper()\n", 76 | "\n", 77 | "llm = OpenAI(temperature=0.5)\n", 78 | "\n", 79 | "tools = [\n", 80 | " Tool(\n", 81 | " name=\"Search\",\n", 82 | " func=search_tool.run,\n", 83 | " description=\"Use this tool to perform web searches.\"\n", 84 | " )\n", 85 | "]\n", 86 | "\n", 87 | "\n", 88 | "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)\n", 89 | "\n", 90 | "response = agent.run(\"What’s the weather in Delhi?\")\n", 91 | "print(response)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "id": "8c1c0273-acbf-438a-997d-374e00ff0bb9", 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": null, 105 | "id": "665631f7-5623-40d3-a617-b18651f5f5a3", 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [] 109 | } 110 | ], 111 | "metadata": { 112 | "kernelspec": { 113 | "display_name": "Python 3 (ipykernel)", 114 | "language": "python", 115 | "name": "python3" 116 | }, 117 | "language_info": { 118 | "codemirror_mode": { 119 | "name": "ipython", 120 | "version": 3 121 | }, 122 | "file_extension": ".py", 123 | "mimetype": "text/x-python", 124 | "name": "python", 125 | "nbconvert_exporter": "python", 126 | "pygments_lexer": "ipython3", 127 | "version": "3.10.11" 128 | } 129 | }, 130 | "nbformat": 4, 131 | "nbformat_minor": 5 132 | } 133 | -------------------------------------------------------------------------------- /L-13/.env: -------------------------------------------------------------------------------- 1 | 2 | OPENAI_API_KEY='sk-proj****************QsPkQHrtvM7' 3 | 4 | -------------------------------------------------------------------------------- /L-13/README.md: -------------------------------------------------------------------------------- 1 | ### Video Tutorial: https://youtu.be/FQyrd26U3MU 2 | 3 | ## Environment setup: 4 | 5 | Install packages 6 | 7 | pip install --upgrade langchain langchain-community 8 | 9 | 10 | #### Provides access to the OpenAI GPT models. You’ll also need an OpenAI API key, which you can get by signing up at OpenAI. 
11 | pip install openai
12 | 
13 | 
14 | #### To manage environment variables using a .env file.
15 | pip install python-dotenv
16 | 
17 | In the same directory as your script, create a .env file and add your API keys.
18 | 
19 | 
20 | Create a .env file and paste in these environment variables
21 | 
22 | 
23 | 
24 | 
25 | 
26 | 
--------------------------------------------------------------------------------
/L-15/1.py:
--------------------------------------------------------------------------------
1 | # pip install --upgrade langchain langchain-community langgraph
2 | 
3 | # pip install langchain-ollama
4 | 
5 | from typing import List, Dict
6 | from langgraph.graph import StateGraph, START, END
7 | from langchain_ollama.llms import OllamaLLM
8 | 
9 | 
10 | # Step 1: Define State
11 | class State(Dict):
12 |     messages: List[Dict[str, str]]
13 | 
14 | 
15 | # Step 2: Initialize StateGraph
16 | graph_builder = StateGraph(State)
17 | 
18 | # Initialize the LLM
19 | llm = OllamaLLM(model="llama3.1")
20 | 
21 | 
22 | # Define chatbot function
23 | def chatbot(state: State):
24 |     response = llm.invoke(state["messages"])
25 |     state["messages"].append({"role": "assistant", "content": response})  # Treat response as a string
26 |     return {"messages": state["messages"]}
27 | 
28 | 
29 | 
30 | # Add nodes and edges
31 | graph_builder.add_node("chatbot", chatbot)
32 | graph_builder.add_edge(START, "chatbot")
33 | graph_builder.add_edge("chatbot", END)
34 | 
35 | 
36 | # Compile the graph
37 | graph = graph_builder.compile()
38 | 
39 | 
40 | # Stream updates
41 | def stream_graph_updates(user_input: str):
42 |     state = {"messages": [{"role": "user", "content": user_input}]}
43 |     for event in graph.stream(state):
44 |         for value in event.values():
45 |             print("Assistant:", value["messages"][-1]["content"])
46 | 
47 | 
48 | 
49 | # Run chatbot in a loop
50 | if __name__ == "__main__":
51 |     while True:
52 |         try:
53 |             user_input = input("User: ")
54 |             if user_input.lower() in ["quit", "exit", "q"]:
55 |                 print("Goodbye!")
56 |                 break
57 | 
58 |             stream_graph_updates(user_input)
59 |         except Exception as e:
60 |             print(f"An error occurred: {e}")
61 |             break
62 | 
--------------------------------------------------------------------------------
/L-15/README.md:
--------------------------------------------------------------------------------
1 | ### Video Tutorial: https://youtu.be/gjTvGg0HOB8
2 | 
3 | ## Environment setup:
4 | 
5 | Install packages
6 | 
7 | pip install --upgrade langchain langchain-community langgraph
8 | 
9 | 
10 | #### Provides access to Ollama models.
11 | pip install langchain-ollama
12 | 
13 | 
14 | #### To run this code, open a command prompt and type:
15 | python 1.py
16 | 
17 | 
18 | 
19 | 
20 | 
21 | 
22 | 
--------------------------------------------------------------------------------
/L-16/README.md:
--------------------------------------------------------------------------------
1 | ### Video Tutorial (Hindi): https://youtu.be/VL9PFXqpf9Q
2 | 
3 | ## Environment setup:
4 | 
5 | Install packages
6 | 
7 | pip install --upgrade langchain langchain-community langgraph
8 | 
9 | 
10 | #### Provides access to Ollama models.
11 | pip install langchain-ollama 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /L-16/sol.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "c37a48e9-002a-4d1e-8bd9-78b082b2656d", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stdout", 11 | "output_type": "stream", 12 | "text": [ 13 | "Name: langchain\n", 14 | "Version: 0.3.17\n", 15 | "Summary: Building applications with LLMs through composability\n", 16 | "Home-page: https://github.com/langchain-ai/langchain\n", 17 | "Author: \n", 18 | "Author-email: \n", 19 | "License: MIT\n", 20 | "Location: e:\\genertaive_ai_playlist\\0_langchain\\l-12_langgraph\\myvenv\\lib\\site-packages\n", 21 | "Requires: aiohttp, async-timeout, langchain-core, langchain-text-splitters, langsmith, numpy, pydantic, PyYAML, requests, SQLAlchemy, tenacity\n", 22 | "Required-by: langchain-community\n" 23 | ] 24 | } 25 | ], 26 | "source": [ 27 | "!pip show langchain" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 2, 33 | "id": "d8acd74d-036a-476e-9f86-2ae59e004e8d", 34 | "metadata": {}, 35 | "outputs": [ 36 | { 37 | "name": "stdout", 38 | "output_type": "stream", 39 | "text": [ 40 | "Name: langgraph\n", 41 | "Version: 0.2.69\n", 42 | "Summary: Building stateful, multi-actor applications with LLMs\n", 43 | "Home-page: https://www.github.com/langchain-ai/langgraph\n", 44 | "Author: \n", 45 | "Author-email: \n", 46 | "License: MIT\n", 47 | "Location: e:\\genertaive_ai_playlist\\0_langchain\\l-12_langgraph\\myvenv\\lib\\site-packages\n", 48 | "Requires: langchain-core, langgraph-checkpoint, langgraph-sdk\n", 49 | "Required-by: \n" 50 | ] 51 | } 52 | ], 53 | "source": [ 54 | "!pip show langgraph\n" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 3, 60 | "id": "b5dff37a-1e15-467d-83d7-f69e40f4a347", 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "from langchain_ollama.llms import OllamaLLM\n", 65 | "\n", 66 | "#model = OllamaLLM(model=\"llama3.1\")\n", 67 | "model = OllamaLLM(model=\"deepseek-r1\")" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "id": "3142f8be-006b-4251-b8e1-bbb32b2bbe03", 74 | "metadata": {}, 75 | "outputs": [ 76 | { 77 | "name": "stdin", 78 | "output_type": "stream", 79 | "text": [ 80 | "User: hi\n" 81 | ] 82 | }, 83 | { 84 | "name": "stdout", 85 | "output_type": "stream", 86 | "text": [ 87 | "Assistant: \n", 88 | "\n", 89 | "\n", 90 | "\n", 91 | "Hello! How can I assist you today? 😊\n" 92 | ] 93 | }, 94 | { 95 | "name": "stdin", 96 | "output_type": "stream", 97 | "text": [ 98 | "User: what is AGI\n" 99 | ] 100 | }, 101 | { 102 | "name": "stdout", 103 | "output_type": "stream", 104 | "text": [ 105 | "Assistant: \n", 106 | "\n", 107 | "\n", 108 | "\n", 109 | "AGI stands for Artificial General Intelligence. It refers to a type of artificial intelligence that possesses the ability to understand, learn, and apply knowledge across a wide range of tasks at a level comparable to human intelligence. Unlike Narrow AI, which is designed for specific tasks (e.g., facial recognition, language translation), AGI can perform any intellectual task that a human can do.\n", 110 | "\n", 111 | "Key characteristics of AGI include:\n", 112 | "\n", 113 | "1. **Generalization**: Ability to apply knowledge and skills across diverse domains.\n", 114 | "2. 
**Learning**: Capacity to learn from experience and improve over time.\n", 115 | "3. **Reasoning**: Capability to solve complex problems, make decisions, and reason abstractly.\n", 116 | "4. **Autonomy**: Functioning independently without human intervention.\n", 117 | "\n", 118 | "AGI remains a theoretical concept and has not yet been achieved. Current AI systems, such as Large Language Models (e.g., GPT-4), are examples of Narrow AI and have demonstrated impressive capabilities in specific areas but lack the general intelligence of AGI.\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "from typing import List, Dict\n", 124 | "from langgraph.graph import StateGraph, START, END\n", 125 | "\n", 126 | "# Step 1: Define State\n", 127 | "class State(Dict):\n", 128 | " messages: List[Dict[str, str]] \n", 129 | "\n", 130 | "\n", 131 | "# Step 2: Initialize StateGraph\n", 132 | "graph_builder = StateGraph(State)\n", 133 | "\n", 134 | "llm = OllamaLLM(model=\"deepseek-r1\")\n", 135 | "\n", 136 | "# Define chatbot function\n", 137 | "def chatbot(state: State):\n", 138 | " response = llm.invoke(state[\"messages\"])\n", 139 | " state[\"messages\"].append({\"role\": \"assistant\", \"content\": response}) # Treat response as a string\n", 140 | " return {\"messages\": state[\"messages\"]}\n", 141 | "\n", 142 | "\n", 143 | "# Add nodes and edges\n", 144 | "graph_builder.add_node(\"chatbot\", chatbot)\n", 145 | "graph_builder.add_edge(START, \"chatbot\")\n", 146 | "graph_builder.add_edge(\"chatbot\", END)\n", 147 | "\n", 148 | "# Compile the graph\n", 149 | "graph = graph_builder.compile()\n", 150 | "\n", 151 | "\n", 152 | "\n", 153 | "# Stream updates\n", 154 | "def stream_graph_updates(user_input: str): \n", 155 | " state = {\"messages\": [{\"role\": \"user\", \"content\": user_input}]}\n", 156 | " for event in graph.stream(state):\n", 157 | " for value in event.values():\n", 158 | " print(\"Assistant:\", value[\"messages\"][-1][\"content\"])\n", 159 | "\n", 160 | "\n", 161 | "\n", 162 | "# Run chatbot in a loop\n", 163 | "if __name__ == \"__main__\":\n", 164 | " while True:\n", 165 | " try:\n", 166 | " user_input = input(\"User: \")\n", 167 | " if user_input.lower() in [\"quit\", \"exit\", \"q\"]:\n", 168 | " print(\"Goodbye!\")\n", 169 | " break\n", 170 | "\n", 171 | " stream_graph_updates(user_input)\n", 172 | " except Exception as e:\n", 173 | " print(f\"An error occurred: {e}\")\n", 174 | " break" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": null, 180 | "id": "864e39de-e1d7-4536-ba94-2d394b162b8c", 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [] 184 | } 185 | ], 186 | "metadata": { 187 | "kernelspec": { 188 | "display_name": "Python 3 (ipykernel)", 189 | "language": "python", 190 | "name": "python3" 191 | }, 192 | "language_info": { 193 | "codemirror_mode": { 194 | "name": "ipython", 195 | "version": 3 196 | }, 197 | "file_extension": ".py", 198 | "mimetype": "text/x-python", 199 | "name": "python", 200 | "nbconvert_exporter": "python", 201 | "pygments_lexer": "ipython3", 202 | "version": "3.10.11" 203 | } 204 | }, 205 | "nbformat": 4, 206 | "nbformat_minor": 5 207 | } 208 | -------------------------------------------------------------------------------- /L-3/first_genai_app/.env: -------------------------------------------------------------------------------- 1 | GOOGLE_API_KEY='AIzaSy***********RylVdfgdfgdfgdfgx9JKE' 2 | 3 | LANGCHAIN_API_KEY="lsv2_pt_****************53c75226a8" 4 | 5 | LANGCHAIN_PROJECT="geminiChatbottutorial" 6 | 7 | 
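# A sketch, not part of the original file: with the LANGCHAIN_API_KEY and
# LANGCHAIN_PROJECT values above, LangSmith tracing typically also requires
# this flag to be enabled:
# LANGCHAIN_TRACING_V2="true"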
--------------------------------------------------------------------------------
/L-3/first_genai_app/README.md:
--------------------------------------------------------------------------------
1 | ### Check this video: https://youtu.be/VvpuGpXOYrQ
2 | 
3 | ### Environment setup:
4 | 
5 | py -3.10 -m venv myvenv
6 | 
7 | myvenv\Scripts\activate
8 | 
9 | python -m pip install --upgrade pip
10 | pip install --upgrade --quiet langchain-google-genai pillow
11 | pip install streamlit
12 | pip install python-dotenv
13 | 
14 | Get a Google API key:
15 | Head to https://ai.google.dev/gemini-api/docs/api-key to generate a Google AI API key.
16 | 
17 | 
18 | 
19 | ### To run the question-answering app:
20 | 
21 | streamlit run gemini_app_qa.py
22 | 
23 | 
24 | ### To run the language translator app:
25 | 
26 | streamlit run gemini_applanguage_translator.py
27 | 
--------------------------------------------------------------------------------
/L-3/first_genai_app/gemini_app_qa.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | from dotenv import load_dotenv
3 | from langchain_core.prompts import ChatPromptTemplate
4 | from langchain_google_genai import ChatGoogleGenerativeAI
5 | from langchain_core.output_parsers import StrOutputParser
6 | 
7 | load_dotenv()
8 | 
9 | # Now we can instantiate our model object and generate chat completions:
10 | llm = ChatGoogleGenerativeAI(
11 |     model="gemini-1.5-pro",
12 |     temperature=0,
13 |     max_tokens=None,
14 |     timeout=None,
15 |     max_retries=2,
16 | )
17 | 
18 | prompt = ChatPromptTemplate.from_messages(
19 |     [
20 |         ("system", "You are a chatbot"),
21 |         ("human", "Question:{question}")
22 |     ]
23 | )
24 | 
25 | 
26 | 
27 | st.title('Langchain Demo With Gemini')
28 | input_text = st.text_input("Enter your question here")
29 | 
30 | 
31 | output_parser = StrOutputParser()
32 | 
33 | chain = prompt | llm | output_parser
34 | 
35 | if input_text:
36 |     st.write(chain.invoke({'question': input_text}))
37 | 
38 | 
39 | 
40 | # To run this app: streamlit run gemini_app_qa.py
--------------------------------------------------------------------------------
/L-3/first_genai_app/gemini_applanguage_translator.py:
--------------------------------------------------------------------------------
1 | '''
2 | https://python.langchain.com/v0.2/docs/integrations/chat/google_generative_ai/
3 | '''
4 | 
5 | import getpass
6 | import os
7 | import streamlit as st
8 | from dotenv import load_dotenv
9 | from langchain_core.output_parsers import StrOutputParser
10 | 
11 | load_dotenv()
12 | 
13 | # Now we can instantiate our model object and generate chat completions:
14 | from langchain_google_genai import ChatGoogleGenerativeAI
15 | 
16 | llm = ChatGoogleGenerativeAI(
17 |     model="gemini-1.5-pro",
18 |     temperature=0,
19 |     max_tokens=None,
20 |     timeout=None,
21 |     max_retries=2,
22 |     # other params...
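    # For illustration (a sketch based on the langchain-google-genai docs;
    # these are assumptions and are not set in the original script):
    # top_p=0.95,
    # top_k=40,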
23 | )
24 | 
25 | 
26 | # We can chain our model with a prompt template like so:
27 | from langchain_core.prompts import ChatPromptTemplate
28 | 
29 | prompt = ChatPromptTemplate.from_messages(
30 |     [
31 |         (
32 |             "system",
33 |             "You are a helpful assistant that translates {input_language} to {output_language}.",
34 |         ),
35 |         ("human", "{input}"),
36 |     ]
37 | )
38 | 
39 | 
40 | st.title('Langchain Demo With Gemini (language translator)')
41 | input_text = st.text_input("Write a sentence in English and it will be translated into German")
42 | 
43 | 
44 | 
45 | # chain = prompt | llm
46 | output_parser = StrOutputParser()
47 | 
48 | chain = prompt | llm | output_parser
49 | 
50 | if input_text:
51 |     st.write(chain.invoke(
52 |         {
53 |             "input_language": "English",
54 |             "output_language": "German",
55 |             "input": input_text,
56 |             # "input": "I love programming.",
57 |         }
58 |     ))
59 | 
60 | 
61 | # Run the app: streamlit run gemini_applanguage_translator.py
--------------------------------------------------------------------------------
/L-4/chatgpt_clone/.env:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY='sk-proj-eGTsZBrDDDDDDDDDDDDDDDDlBq3QsPkQHrtvM7'
2 | 
--------------------------------------------------------------------------------
/L-4/chatgpt_clone/README.md:
--------------------------------------------------------------------------------
1 | ### Video Tutorial: https://youtu.be/z-moiQlcC6c
2 | 
3 | ### Setup Environment:
4 | 
5 | conda create -n env_langchain1 python=3.10
6 | 
7 | conda activate env_langchain1
8 | 
9 | Install packages:
10 | pip install -r requirements.txt
11 | 
12 | API key:
13 | https://platform.openai.com/api-keys
14 | 
15 | Run App:
16 | streamlit run chatgpt_like_app.py
17 | 
--------------------------------------------------------------------------------
/L-4/chatgpt_clone/chatgpt_like_app.py:
--------------------------------------------------------------------------------
1 | import os
2 | import streamlit as st
3 | 
4 | 
5 | 
6 | 
7 | 
8 | 
9 | 
10 | 
11 | 
12 | from openai import OpenAI
13 | 
14 | from dotenv import load_dotenv
15 | load_dotenv()
16 | 
17 | 
18 | 
19 | st.title("ChatGPT-like clone")
20 | 
21 | # Create the OpenAI client with the API key loaded from the .env file
22 | client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
23 | 
24 | # Set a default model
25 | if "openai_model" not in st.session_state:
26 |     st.session_state["openai_model"] = "gpt-3.5-turbo"  # we've added a default model to st.session_state
27 | 
28 | # Initialize chat history
29 | if "messages" not in st.session_state:
30 |     st.session_state.messages = []
31 | 
32 | # Display chat messages from history on app rerun
33 | for message in st.session_state.messages:
34 |     with st.chat_message(message["role"]):
35 |         st.markdown(message["content"])
36 | 
37 | # Accept user input
38 | if prompt := st.chat_input("What is up?"):
39 |     # Add user message to chat history
40 |     st.session_state.messages.append({"role": "user", "content": prompt})
41 |     # Display user message in chat message container
42 |     with st.chat_message("user"):
43 |         st.markdown(prompt)
44 | 
45 |     # Display assistant response in chat message container
46 |     with st.chat_message("assistant"):
47 |         stream = client.chat.completions.create(
48 |             model=st.session_state["openai_model"],
49 |             messages=[
50 |                 {"role": m["role"], "content": m["content"]}
51 |                 for m in st.session_state.messages
52 |             ],
53 |             stream=True,
54 |         )
55 |         response = st.write_stream(stream)
56 |         st.session_state.messages.append({"role": "assistant", "content": response})
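A practical refinement to consider (a sketch, not part of the original app): the clone resends the entire chat history on every turn, so a very long conversation can eventually exceed the model's context window. Capping the history before the API call keeps requests bounded; MAX_TURNS below is an assumed constant.

    # Send only the most recent turns to the API (hypothetical MAX_TURNS cap).
    MAX_TURNS = 20
    recent = st.session_state.messages[-MAX_TURNS:]
    stream = client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[{"role": m["role"], "content": m["content"]} for m in recent],
        stream=True,
    )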
--------------------------------------------------------------------------------
/L-4/chatgpt_clone/requirements.txt:
--------------------------------------------------------------------------------
1 | langchain
2 | langchain-openai
3 | python-dotenv
4 | streamlit
5 | tiktoken
6 | faiss-cpu
7 | langchain_experimental
8 | sentence-transformers
9 | 
--------------------------------------------------------------------------------
/L-5/.env:
--------------------------------------------------------------------------------
1 | 
2 | GOOGLE_API_KEY='AIATN2bxIPFvUUUUUUUUUU9PfnGuJKE'
3 | 
--------------------------------------------------------------------------------
/L-5/README.md:
--------------------------------------------------------------------------------
1 | ### Video Tutorial: https://youtu.be/425N7n86QGw
2 | 
3 | ## Environment setup:
4 | 
5 | conda create -n env_langchain1 python=3.10
6 | conda activate env_langchain1
7 | python -m pip install --upgrade pip
8 | Install packages:
9 | pip install -r requirements.txt
10 | 
11 | 
12 | 
13 | Gemini Pro: https://deepmind.google/technologies/gemini/pro/
14 | 
15 | ### Get an API key:
16 | Head to https://ai.google.dev/gemini-api/docs/api-key to generate a Google AI API key.
17 | 
18 | Once you've done this, set the GOOGLE_API_KEY environment variable.
19 | 
20 | ### mysql:
21 | Official site link: https://dev.mysql.com/downloads/installer/
22 | 
23 | ### mysql workbench:
24 | Official site link: https://dev.mysql.com/downloads/workbench/
25 | 
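Before running the app, the sample database below can be loaded with the MySQL command-line client (a sketch — assumes the mysql CLI is on your PATH and uses the credentials hard-coded in myapp2.py):

    mysql -u root -p < database/retail_sales.sql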
--------------------------------------------------------------------------------
/L-5/database/retail_sales.sql:
--------------------------------------------------------------------------------
1 | -- Create the database if it doesn't exist
2 | CREATE DATABASE IF NOT EXISTS retail_sales_db;
3 | 
4 | -- Switch to the retail_sales database
5 | USE retail_sales_db;
6 | 
7 | -- Create the sales table
8 | CREATE TABLE IF NOT EXISTS sales_tb (
9 |     TransactionID INT,
10 |     Date DATE,
11 |     CustomerID VARCHAR(10),
12 |     Gender VARCHAR(10),
13 |     Age INT,
14 |     ProductCategory VARCHAR(50),
15 |     Quantity INT,
16 |     PriceperUnit DECIMAL(10,2),
17 |     TotalAmount DECIMAL(10,2)
18 | );
19 | 
20 | -- Insert data into the sales table
21 | INSERT INTO sales_tb (TransactionID, Date, CustomerID, Gender, Age, ProductCategory, Quantity, PriceperUnit, TotalAmount)
22 | VALUES
23 | (1, '2023-11-24', 'CUST001', 'Male', 34, 'Beauty', 3, 50, 150),
24 | (2, '2023-02-27', 'CUST002', 'Female', 26, 'Clothing', 2, 500, 1000),
25 | (3, '2023-01-13', 'CUST003', 'Male', 50, 'Electronics', 1, 30, 30),
26 | (4, '2023-05-21', 'CUST004', 'Male', 37, 'Clothing', 1, 500, 500),
27 | (5, '2023-05-06', 'CUST005', 'Male', 30, 'Beauty', 2, 50, 100),
28 | (6, '2023-04-25', 'CUST006', 'Female', 45, 'Beauty', 1, 30, 30),
29 | (7, '2023-03-13', 'CUST007', 'Male', 46, 'Clothing', 2, 25, 50),
30 | (8, '2023-02-22', 'CUST008', 'Male', 30, 'Electronics', 4, 25, 100),
31 | (9, '2023-12-13', 'CUST009', 'Male', 63, 'Electronics', 2, 300, 600),
32 | (10, '2023-10-07', 'CUST010', 'Female', 52, 'Clothing', 4, 50, 200),
33 | (11, '2023-02-14', 'CUST011', 'Male', 23, 'Clothing', 2, 50, 100),
34 | (12, '2023-10-30', 'CUST012', 'Male', 35, 'Beauty', 3, 25, 75),
35 | (13, '2023-08-05', 'CUST013', 'Male', 22, 'Electronics', 3, 500, 1500),
36 | (14, '2023-01-17', 'CUST014', 'Male', 64, 'Clothing', 4, 30, 120),
37 | (15, '2023-01-16', 'CUST015', 'Female', 42, 'Electronics', 4, 500, 2000),
38 | (16, '2023-02-17', 'CUST016', 'Male', 19, 'Clothing', 3, 500, 1500),
39 | (17, '2023-04-22', 'CUST017', 'Female', 27, 'Clothing', 4, 25, 100),
40 | (18, '2023-04-30', 'CUST018', 'Female', 47, 'Electronics', 2, 25, 50),
41 | (19, '2023-09-16', 'CUST019', 'Female', 62, 'Clothing', 2, 25, 50),
42 | (20, '2023-11-05', 'CUST020', 'Male', 22, 'Clothing', 3, 300, 900),
43 | (21, '2023-01-14', 'CUST021', 'Female', 50, 'Beauty', 1, 500, 500),
44 | (22, '2023-10-15', 'CUST022', 'Male', 18, 'Clothing', 2, 50, 100),
45 | (23, '2023-04-12', 'CUST023', 'Female', 35, 'Clothing', 4, 30, 120),
46 | (24, '2023-11-29', 'CUST024', 'Female', 49, 'Clothing', 1, 300, 300),
47 | (25, '2023-12-26', 'CUST025', 'Female', 64, 'Beauty', 1, 50, 50),
48 | (26, '2023-10-07', 'CUST026', 'Female', 28, 'Electronics', 2, 500, 1000),
49 | (27, '2023-08-03', 'CUST027', 'Female', 38, 'Beauty', 2, 25, 50),
50 | (28, '2023-04-23', 'CUST028', 'Female', 43, 'Beauty', 1, 500, 500),
51 | (29, '2023-08-18', 'CUST029', 'Female', 42, 'Electronics', 1, 30, 30);
--------------------------------------------------------------------------------
/L-5/myapp2.py:
--------------------------------------------------------------------------------
1 | import os
2 | import streamlit as st
3 | from langchain.chains import create_sql_query_chain
4 | from langchain_google_genai import GoogleGenerativeAI
5 | from sqlalchemy import create_engine
6 | from sqlalchemy.exc import ProgrammingError
7 | from langchain_community.utilities import SQLDatabase
8 | from dotenv import load_dotenv
9 | load_dotenv()
10 | 
11 | # Database connection parameters
12 | db_user = "root"
13 | db_password = "root123"
14 | db_host = "localhost"
15 | db_name = "retail_sales_db"
16 | 
17 | # Create SQLAlchemy engine
18 | engine = create_engine(f"mysql+pymysql://{db_user}:{db_password}@{db_host}/{db_name}")
19 | 
20 | # Initialize SQLDatabase
21 | db = SQLDatabase(engine, sample_rows_in_table_info=3)
22 | 
23 | # Initialize LLM
24 | llm = GoogleGenerativeAI(model="gemini-pro", google_api_key=os.environ["GOOGLE_API_KEY"])
25 | 
26 | # Create SQL query chain
27 | chain = create_sql_query_chain(llm, db)
28 | 
29 | def execute_query(question):
30 |     try:
31 |         # Generate SQL query from question
32 |         response = chain.invoke({"question": question})
33 | 
34 |         # Execute the query
35 |         result = db.run(response)
36 | 
37 |         # Return the query and the result
38 |         return response, result
39 |     except ProgrammingError as e:
40 |         st.error(f"An error occurred: {e}")
41 |         return None, None
42 | 
43 | # Streamlit interface
44 | st.title("Question Answering App")
45 | 
46 | # Input from user
47 | question = st.text_input("Enter your question:")
48 | 
49 | if st.button("Execute"):
50 |     if question:
51 |         cleaned_query, query_result = execute_query(question)
52 | 
53 |         if cleaned_query and query_result is not None:
54 |             st.write("Generated SQL Query:")
55 |             st.code(cleaned_query, language="sql")
56 |             st.write("Query Result:")
57 |             st.write(query_result)
58 |         else:
59 |             st.write("No result returned due to an error.")
60 |     else:
61 |         st.write("Please enter a question.")
62 | 
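One caveat worth knowing: Gemini sometimes wraps generated SQL in markdown code fences, which MySQL rejects. A small helper like the following (an assumption — the original code executes the raw chain output) could be applied to the response before db.run:

    def clean_sql(text: str) -> str:
        # Strip markdown code fences that LLMs sometimes add around SQL.
        return text.replace("```sql", "").replace("```", "").strip()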
--------------------------------------------------------------------------------
/L-5/requirements.txt:
--------------------------------------------------------------------------------
1 | langchain==0.0.284
2 | langchain_experimental
3 | langchain-google-genai
4 | python-dotenv==1.0.0
5 | streamlit==1.22.0
6 | tiktoken==0.4.0
7 | faiss-cpu==1.7.4
8 | protobuf~=3.19.0
9 | mysql-connector-python
10 | pymysql
11 | sentence-transformers
12 | chromadb
--------------------------------------------------------------------------------
/L-6/README.md:
--------------------------------------------------------------------------------
1 | ### Video Tutorial: https://youtu.be/6ExFTPcJJFs
2 | 
3 | ## Environment setup:
4 | 
5 | conda create -n env_langchain1 python=3.10
6 | conda activate env_langchain1
7 | python -m pip install --upgrade pip
8 | Install packages:
9 | pip install -r requirements.txt
10 | 
11 | 
12 | ## Run App:
13 | streamlit run app1.py
14 | 
--------------------------------------------------------------------------------
/L-6/app1.py:
--------------------------------------------------------------------------------
1 | from langchain_core.prompts import ChatPromptTemplate
2 | from langchain_ollama.llms import OllamaLLM
3 | import streamlit as st
4 | 
5 | 
6 | st.title("Langchain-Llama3.1 app")
7 | 
8 | template = """Question: {question}
9 | 
10 | Answer: Let's think step by step."""
11 | 
12 | prompt = ChatPromptTemplate.from_template(template)
13 | 
14 | model = OllamaLLM(model="llama3.1")
15 | 
16 | chain = prompt | model
17 | 
18 | 
19 | question = st.chat_input("Enter your question here")
20 | if question:
21 |     st.write(chain.invoke({"question": question}))
22 | 
--------------------------------------------------------------------------------
/L-6/requirements.txt:
--------------------------------------------------------------------------------
1 | langchain
2 | langchain-ollama
3 | streamlit
4 | langchain_experimental
5 | 
6 | 
--------------------------------------------------------------------------------
/L-7/RAG_demo/.env:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY='sk-proj-eTsZBrD000000000000000000q3QsPvM7'
2 | 
--------------------------------------------------------------------------------
/L-7/RAG_demo/README.md:
--------------------------------------------------------------------------------
1 | ### Video Tutorial: https://youtu.be/iA-UhFlIP80
2 | 
3 | ## Environment setup:
4 | 
5 | conda create -n env_langchain1 python=3.10
6 | conda activate env_langchain1
7 | python -m pip install --upgrade pip
8 | Install packages:
9 | pip install -r requirements.txt
10 | 
11 | 
12 | ## Run App:
13 | streamlit run app1.py
14 | 
--------------------------------------------------------------------------------
/L-7/RAG_demo/app1.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | import time
3 | from langchain_openai import OpenAI
4 | from langchain_community.document_loaders import UnstructuredURLLoader
5 | from langchain.text_splitter import RecursiveCharacterTextSplitter
6 | from langchain_chroma import Chroma
7 | from langchain_openai import OpenAIEmbeddings
8 | from langchain.chains import create_retrieval_chain
9 | from langchain.chains.combine_documents import create_stuff_documents_chain
10 | from langchain_core.prompts import ChatPromptTemplate
11 | 
12 | from dotenv import load_dotenv
13 | load_dotenv()
14 | 
15 | 
16 | st.title("RAG Application")
17 | 
18 | urls = ['url1','url2']
19 | 
20 | loader = UnstructuredURLLoader(urls=urls)
21 | data = loader.load()
22 | 
23 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000)
24 | docs = text_splitter.split_documents(data)
25 | 
26 | all_splits = docs
27 | vectorstore
= Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings()) 28 | 29 | retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6}) 30 | 31 | #retrieved_docs = retriever.invoke("What kind of services they provide?") 32 | 33 | llm = OpenAI(temperature=0.4, max_tokens=500) 34 | 35 | 36 | query = st.chat_input("Say something: ") 37 | prompt = query 38 | 39 | system_prompt = ( 40 | "You are an assistant for question-answering tasks. " 41 | "Use the following pieces of retrieved context to answer " 42 | "the question. If you don't know the answer, say that you " 43 | "don't know. Use three sentences maximum and keep the " 44 | "answer concise." 45 | "\n\n" 46 | "{context}" 47 | ) 48 | 49 | prompt = ChatPromptTemplate.from_messages( 50 | [ 51 | ("system", system_prompt), 52 | ("human", "{input}"), 53 | ] 54 | ) 55 | 56 | if query: 57 | question_answer_chain = create_stuff_documents_chain(llm, prompt) 58 | rag_chain = create_retrieval_chain(retriever, question_answer_chain) 59 | 60 | response = rag_chain.invoke({"input": query}) 61 | #print(response["answer"]) 62 | 63 | st.write(response["answer"]) 64 | 65 | 66 | -------------------------------------------------------------------------------- /L-7/RAG_demo/basics_RAG.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "id": "1e9cacce-6d9a-4570-965b-c74d41695a03", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "from langchain_community.document_loaders import UnstructuredURLLoader\n", 11 | "urls = ['https://www.victoriaonmove.com.au/local-removalists.html','https://victoriaonmove.com.au/index.html','https://victoriaonmove.com.au/contact.html']\n", 12 | "loader = UnstructuredURLLoader(urls=urls)\n", 13 | "data = loader.load() " 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 5, 19 | "id": "3b0ac3d6-0c7c-499f-a7af-e6330ac08373", 20 | "metadata": {}, 21 | "outputs": [ 22 | { 23 | "data": { 24 | "text/plain": [ 25 | "[Document(metadata={'source': 'https://www.victoriaonmove.com.au/local-removalists.html'}, page_content='Loading...\\n\\nHome\\n\\nLocal removalists\\n\\nLocal removalists\\n\\nInterstate removalists\\n\\nSydney interstate removalists\\n\\nMelbourne interstate removalists\\n\\nBrisbane interstate removalists\\n\\nAdelaide interstate removalists\\n\\nCanberra interstate removalists\\n\\nContact\\n\\nGet a quote\\n\\nLOCAL REMOVALS\\n\\nYour trusted partner in seamless moving and packing solutions!\\n\\nGoogle Rating\\n\\n5 stars, 111 reviews\\n\\nRequst A call for You:\\n\\nLocal removal services via \"Victoria on move\"\\r\\n Victoria on Move is your trusted local moving company in Melbourne, specializing in seamless relocation services. As experienced furniture movers and relocation experts, we provide top-notch packing and moving services tailored to your needs. Whether you\\'re moving across town or relocating interstate, our professional movers ensure a stress-free experience. Count on Victoria on Move for reliable removal services, making us the preferred choice among local movers in Melbourne. 
Discover why we\\'re recognized for our commitment to quality and customer satisfaction.\\n\\nApartment Moving\\r\\n Efficient and careful relocation services tailored for apartments of all sizes, ensuring smooth transitions to your new home.\\n\\nVilla Moving\\r\\n Comprehensive moving solutions for large residences and villas, handling valuable possessions with utmost care and precision.\\n\\nHousehold Moving\\r\\n Full-service moving options for households, including packing, loading, transportation, and unpacking services to simplify your move.\\n\\nOffice Moving\\r\\n Specialized expertise in office relocations, minimizing downtime and ensuring your business operations continue seamlessly.\\n\\nFurniture Moving\\r\\n Experienced in handling furniture of all sizes and types, ensuring safe transport and setup in your new location.\\n\\nPacking and Unpacking Services\\r\\n Optional packing and unpacking services available to save you time and effort, using high-quality packing materials.\\n\\nCustomized Moving Plans\\r\\n Tailored moving plans to fit your specific requirements and schedule, providing personalized service and peace of mind.\\n\\nProfessional Team\\r\\n Experienced and courteous moving professionals dedicated to delivering exceptional service and customer satisfaction throughout the entire moving process.\\n\\nFind removalists travelling to or from Melbourne\\n\\nFrom Melbourne\\n\\nTo Melbourne\\n\\nMoving from Ballarat to Melbourne\\n\\nMoving from Bendigo to Melbourne\\n\\nMoving from Geelong to Melbourne\\n\\nMoving from Melton to Melbourne\\n\\nMoving from Mildura to Melbourne\\n\\nMoving from Mooroopna to Melbourne\\n\\nMoving from Shepparton to Melbourne\\n\\nMoving from Sunbury to Melbourne\\n\\nMoving from Traralgon to Melbourne\\n\\nMoving from Warrnambool to Melbourne\\n\\nMoving from Wodonga to Melbourne\\n\\nMoving from Melbourne to Ballarat\\n\\nMoving from Melbourne to Bendigo\\n\\nMoving from Melbourne to Geelong\\n\\nMoving from Melbourne to Melton\\n\\nMoving from Melbourne to Mildura\\n\\nMoving from Melbourne to Mooroopna\\n\\nMoving from Melbourne to Shepparton\\n\\nMoving from Melbourne to Sunbury\\n\\nMoving from Melbourne to Traralgon\\n\\nMoving from Melbourne to Warrnambool\\n\\nMoving from Melbourne to Wodonga\\n\\nLooking for interstate removalists Melbourne? We can also assist with interstate moves to or from Melbourne.\\n\\nGet In Touch\\n\\nWollert Victoria\\n\\n0404922328\\n\\nvictoriaonmove07@gmail.com\\n\\nQuick Links\\n\\nAbout Us\\n\\nContact Us\\n\\nOur Services\\n\\nTerms & Condition\\n\\nPhoto Gallery\\n\\nCheck us out on Google!\\n\\n\\r\\n © \\n\\nVictoria On Move 2024, All Right Reserved. 
\\r\\n\\t\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\tDesigned By\\n\\nHTML Codex\\n\\nHome'),\n", 26 | " Document(metadata={'source': 'https://victoriaonmove.com.au/index.html'}, page_content=\"Loading...\\n\\nHome\\n\\nLocal removalists\\n\\nLocal removalists\\n\\nInterstate removalists\\n\\nSydney interstate removalists\\n\\nMelbourne interstate removalists\\n\\nBrisbane interstate removalists\\n\\nAdelaide interstate removalists\\n\\nCanberra interstate removalists\\n\\nContact\\n\\nGallery\\n\\nGet a quote\\n\\nRelocate with confidence\\n\\nYour trusted partner in seamless moving and packing solutions!\\n\\nGoogle Rating\\n\\n5 stars, 111 reviews\\n\\nRequst A call for You:\\n\\nExplore Our Fleet\\n\\nVehicles Tailored for Your Moving Needs\\n\\nSmall Truck\\r\\n With 2 Mover\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t From $130/hr\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t 4.5 ton 20 cubic \\r\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t \\r\\n Our Small Truck can manage a student apartment, small office .\\n\\nMedium Truck\\r\\n With 2 Mover\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t From $145 p/hr\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t 6 ton 30-35 cubic \\r\\n Our Medium trucks can manage 2-3 bedroom with single lounge outdoor boxes and miscellaneous.\\n\\nLarge Truck\\r\\n With 2 Mover\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t From $155 p/hr\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t 8 ton 40 cubic \\r\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t \\r\\n Our Large trucks can handle 3-4 bedrooms with double lounge outdoor boxes and miscellaneous.\\n\\nX-Large Truck\\r\\n With 2 Mover\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t From $165 p/hr\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t 4.5 ton 20 cubic \\r\\n\\t\\t\\t\\t\\t\\t\\t\\t (10 ton or 50 cubic meter)\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t \\r\\n Our X-Large trucks can handle \\r\\n 3-4 bedrooms double lounge outdoor miscellaneous items.\\n\\nOUR SERVICE\\n\\nWE OFFER QUICK & POWERFUL LOGISTICS SOLUTION\\n\\nFURNITURE REMOVALS: We are the furniture removals specialists in Victoria. Our Teams are experienced and comes with Furniture moving equipments like- Trolleys. Blankets, Loading Ramps.\\n\\nINSURANCE: Victoria On Move Pvt Ltd. look after your goods arrive safely at your destination. We have Transit and Public liability insurance. Transit insurance covers loading and unloading from vehicle , and Road accident cover.\\n\\n1 BED HOME: \\r\\n1 Bed, Mattress, Tall Boy, Bedside, Refrigerator, Washing Machine, Some Boxes.\\n\\n2 BED HOME: \\r\\n2 Beds, Mattress, Tall Boy, Bedside, Refrigerator, Washing Machine,Dresser, Barbeque, Some Boxes, Loose stuff.\\n\\nBig HOME: \\r\\n4-5 Bed Room, Double Lounge, Mattress, Tall Boy, Bedside, Refrigerator, Washing Machine,Dresser, Barbeque, Some Boxes,Furniture, Loose stuff.\\n\\nGet a Quote\\n\\nInterstate Removalists:\\n\\nSeamless Relocation Across State Lines\\n\\nMelbourne to Sydney Removalists\\n\\nBook Now\\n\\nMelbourne to Brisbane Removalists\\n\\nBook Now\\n\\nMelbourne to Adelaide Removalists\\n\\nBook Now\\n\\nMelbourne to Canberra Removalists\\n\\nBook Now\\n\\nOUR MOVING WORK & ETHICS\\n\\nMoving house furniture is an art that comes with experience and a steadfast commitment to our customers' needs. It's a challenging industry, and not everyone survives for long without the expertise and dedication that we bring to every job. Our long history as Removalists in Melbourne is a testament to our sincerity and hard work.\\n\\nUnlike some other moving companies, we value the trust our customers place in us. 
Our customer reviews on Google are genuine, and we never manipulate them. We trust in our quality and experience and believe in our destiny to provide top-notch moving and packing services to our clients.\\n\\nContact Us\\n\\nOur Clients Say!\\n\\nDiscover firsthand experiences from our valued clients through their heartfelt testimonials. From seamless moves to exceptional service, our customers share how we've made their relocation journey stress-free and rewarding. Explore their stories and see why they trust us with their moves time and again.\\n\\nGet In Touch\\n\\nWollert Victoria\\n\\n0404922328\\n\\nvictoriaonmove07@gmail.com\\n\\nQuick Links\\n\\nAbout Us\\n\\nContact Us\\n\\nOur Services\\n\\nTerms & Condition\\n\\nPhoto Gallery\\n\\nCheck us out on Google!\\n\\n\\r\\n © \\n\\nVictoria On Move 2024, All Right Reserved. \\r\\n\\t\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\tDesigned By\\n\\nHTML Codex\\n\\nHome\"),\n", 27 | " Document(metadata={'source': 'https://victoriaonmove.com.au/contact.html'}, page_content='Loading...\\n\\nHome\\n\\nLocal removalists\\n\\nLocal removalists\\n\\nInterstate removalists\\n\\nSydney interstate removalists\\n\\nMelbourne interstate removalists\\n\\nBrisbane interstate removalists\\n\\nAdelaide interstate removalists\\n\\nCanberra interstate removalists\\n\\nContact\\n\\nGet a quote\\n\\nContact Us\\n\\nYou can contact us any way that is convenient for you. We are available 24/7 via fax or email. You can also use a quick contact form below or visit our office personally. We would be happy to answer your questions.\\n\\nWollert Victoria\\n\\nvictoriaonmove07@gmail.com\\n\\n0404922328\\n\\nDo you require packing & unpacking?\\n\\nE.g. packing of crockery and clothes in boxes.\\n\\nHouse Size (if applicable)\\n\\nTruck Size\\n\\nExtra Heavy Item\\n\\nStairs?\\n\\nFind removalists travelling interstate to or from Sydney\\n\\nFrom Sydney\\n\\nTo Sydney\\n\\nMoving from Adelaide to Sydney\\n\\nMoving from Ballarat to Sydney\\n\\nMoving from Bendigo to Sydney\\n\\nMoving from Brisbane to Sydney\\n\\nMoving from Cairns to Sydney\\n\\nMoving from Canberra to Sydney\\n\\nMoving from Darwin to Sydney\\n\\nMoving from Geelong to Sydney\\n\\nMoving from Gold Coast to Sydney\\n\\nMoving from Hobart to Sydney\\n\\nMoving from Melbourne to Sydney\\n\\nMoving from Perth to Sydney\\n\\nMoving from Tasmania to Sydney\\n\\nMoving from Sydney to Adelaide\\n\\nMoving from Sydney to Ballarat\\n\\nMoving from Sydney to Bendigo\\n\\nMoving from Sydney to Brisbane\\n\\nMoving from Sydney to Bunbury\\n\\nMoving from Sydney to Bundaberg\\n\\nMoving from Sydney to Cairns\\n\\nMoving from Sydney to Canberra\\n\\nMoving from Sydney to Darwin\\n\\nMoving from Sydney to Devonport\\n\\nMoving from Sydney to Fraser Coast\\n\\nMoving from Sydney to Geelong\\n\\nMoving from Sydney to Geraldton\\n\\nMoving from Sydney to Gladstone\\n\\nMoving from Sydney to Gold Coast\\n\\nMoving from Sydney to Hervey Bay\\n\\nMoving from Sydney to Hobart\\n\\nMoving from Sydney to Launceston\\n\\nMoving from Sydney to Mackay\\n\\nMoving from Sydney to Mandurah\\n\\nMoving from Sydney to Melbourne\\n\\nMoving from Sydney to Melton\\n\\nMoving from Sydney to Perth\\n\\nMoving from Sydney to Rockhampton\\n\\nMoving from Sydney to Shepparton\\n\\nMoving from Sydney to Sunshine Coast\\n\\nMoving from Sydney to Tasmania\\n\\nMoving from Sydney to Toowoomba\\n\\nMoving from Sydney to Townsville\\n\\nMoving from Sydney to Traralgon\\n\\nMoving from Sydney to Warrnambool\\n\\nIf you 
are in need of local removalists services in Sydney we can also help with professional movers for local moves.\\n\\nGet In Touch\\n\\nWollert Victoria\\n\\n0404922328\\n\\nvictoriaonmove07@gmail.com\\n\\nQuick Links\\n\\nAbout Us\\n\\nContact Us\\n\\nOur Services\\n\\nTerms & Condition\\n\\nPhoto Gallery\\n\\nCheck us out on Google!\\n\\n\\r\\n © \\n\\nVictoria On Move 2024, All Right Reserved. \\r\\n\\t\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\tDesigned By\\n\\nHTML Codex\\n\\nHome')]" 28 | ] 29 | }, 30 | "execution_count": 5, 31 | "metadata": {}, 32 | "output_type": "execute_result" 33 | } 34 | ], 35 | "source": [ 36 | "data" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 6, 42 | "id": "f199854e-42d2-4357-bed2-f03d3e8e96d8", 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "Total number of documents: 15\n" 50 | ] 51 | } 52 | ], 53 | "source": [ 54 | "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", 55 | "\n", 56 | "# split data\n", 57 | "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000)\n", 58 | "docs = text_splitter.split_documents(data)\n", 59 | "\n", 60 | "\n", 61 | "print(\"Total number of documents: \",len(docs))" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 9, 67 | "id": "0ecf3ac7-3181-4617-9390-fe4f3368f7a4", 68 | "metadata": {}, 69 | "outputs": [ 70 | { 71 | "data": { 72 | "text/plain": [ 73 | "Document(metadata={'source': 'https://www.victoriaonmove.com.au/local-removalists.html'}, page_content='Loading...\\n\\nHome\\n\\nLocal removalists\\n\\nLocal removalists\\n\\nInterstate removalists\\n\\nSydney interstate removalists\\n\\nMelbourne interstate removalists\\n\\nBrisbane interstate removalists\\n\\nAdelaide interstate removalists\\n\\nCanberra interstate removalists\\n\\nContact\\n\\nGet a quote\\n\\nLOCAL REMOVALS\\n\\nYour trusted partner in seamless moving and packing solutions!\\n\\nGoogle Rating\\n\\n5 stars, 111 reviews\\n\\nRequst A call for You:')" 74 | ] 75 | }, 76 | "execution_count": 9, 77 | "metadata": {}, 78 | "output_type": "execute_result" 79 | } 80 | ], 81 | "source": [ 82 | "docs[0]" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": 11, 88 | "id": "5340b0ec-7555-4d58-a034-8acec8c70aff", 89 | "metadata": {}, 90 | "outputs": [], 91 | "source": [ 92 | "from langchain_chroma import Chroma\n", 93 | "from langchain_openai import OpenAIEmbeddings\n", 94 | "from langchain_openai import OpenAI\n", 95 | "from dotenv import load_dotenv\n", 96 | "load_dotenv()\n", 97 | "\n", 98 | "vectorstore = Chroma.from_documents(documents=docs, embedding=OpenAIEmbeddings())" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 12, 104 | "id": "95a78612-e69f-4a9f-9f66-77e11e52f794", 105 | "metadata": {}, 106 | "outputs": [], 107 | "source": [ 108 | "retriever = vectorstore.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 6})\n", 109 | "\n", 110 | "retrieved_docs = retriever.invoke(\"What kind of services they provide?\")\n" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 13, 116 | "id": "d4e7cca7-664b-462f-846c-6d63f67ad440", 117 | "metadata": {}, 118 | "outputs": [ 119 | { 120 | "data": { 121 | "text/plain": [ 122 | "6" 123 | ] 124 | }, 125 | "execution_count": 13, 126 | "metadata": {}, 127 | "output_type": "execute_result" 128 | } 129 | ], 130 | "source": [ 131 | "len(retrieved_docs)" 132 | ] 133 | }, 134 | { 135 
| "cell_type": "code", 136 | "execution_count": 14, 137 | "id": "12c016a2-08cf-44ec-9a24-b5c3593f8024", 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "name": "stdout", 142 | "output_type": "stream", 143 | "text": [ 144 | "Apartment Moving\n", 145 | " Efficient and careful relocation services tailored for apartments of all sizes, ensuring smooth transitions to your new home.\n", 146 | "\n", 147 | "Villa Moving\n", 148 | " Comprehensive moving solutions for large residences and villas, handling valuable possessions with utmost care and precision.\n", 149 | "\n", 150 | "Household Moving\n", 151 | " Full-service moving options for households, including packing, loading, transportation, and unpacking services to simplify your move.\n", 152 | "\n", 153 | "Office Moving\n", 154 | " Specialized expertise in office relocations, minimizing downtime and ensuring your business operations continue seamlessly.\n", 155 | "\n", 156 | "Furniture Moving\n", 157 | " Experienced in handling furniture of all sizes and types, ensuring safe transport and setup in your new location.\n" 158 | ] 159 | } 160 | ], 161 | "source": [ 162 | "print(retrieved_docs[0].page_content)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 15, 168 | "id": "2ef6353b-1fdd-4e7e-95e1-7c2e6846924f", 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "llm = OpenAI(temperature=0.4, max_tokens=500)" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": 16, 178 | "id": "b100ad98-7922-47e8-bdcf-f8b2bd24a056", 179 | "metadata": {}, 180 | "outputs": [], 181 | "source": [ 182 | "from langchain.chains import create_retrieval_chain\n", 183 | "from langchain.chains.combine_documents import create_stuff_documents_chain\n", 184 | "from langchain_core.prompts import ChatPromptTemplate\n", 185 | "\n", 186 | "system_prompt = (\n", 187 | " \"You are an assistant for question-answering tasks. \"\n", 188 | " \"Use the following pieces of retrieved context to answer \"\n", 189 | " \"the question. If you don't know the answer, say that you \"\n", 190 | " \"don't know. Use three sentences maximum and keep the \"\n", 191 | " \"answer concise.\"\n", 192 | " \"\\n\\n\"\n", 193 | " \"{context}\"\n", 194 | ")\n", 195 | "\n", 196 | "prompt = ChatPromptTemplate.from_messages(\n", 197 | " [\n", 198 | " (\"system\", system_prompt),\n", 199 | " (\"human\", \"{input}\"),\n", 200 | " ]\n", 201 | ")" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": 17, 207 | "id": "2a0168bb-7d01-43fe-bf4a-39c50962164d", 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [ 211 | "\n", 212 | "question_answer_chain = create_stuff_documents_chain(llm, prompt)\n", 213 | "rag_chain = create_retrieval_chain(retriever, question_answer_chain)" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 18, 219 | "id": "aaffc0a9-276c-44bd-879d-54318ae0f698", 220 | "metadata": {}, 221 | "outputs": [ 222 | { 223 | "name": "stdout", 224 | "output_type": "stream", 225 | "text": [ 226 | "\n", 227 | "\n", 228 | "System: Victoria on Move provides local and interstate removal services for residential and commercial properties. They also offer packing and unpacking services, as well as customized moving plans to fit specific requirements and schedules. 
Their team of experienced professionals ensures a stress-free and seamless moving experience for their customers.\n" 229 | ] 230 | } 231 | ], 232 | "source": [ 233 | "response = rag_chain.invoke({\"input\": \"What kind of services they provide?\"})\n", 234 | "print(response[\"answer\"])" 235 | ] 236 | }, 237 | { 238 | "cell_type": "code", 239 | "execution_count": null, 240 | "id": "d42dbf97-6559-407b-bd9d-499d3b13963f", 241 | "metadata": {}, 242 | "outputs": [], 243 | "source": [] 244 | } 245 | ], 246 | "metadata": { 247 | "kernelspec": { 248 | "display_name": "Python 3 (ipykernel)", 249 | "language": "python", 250 | "name": "python3" 251 | }, 252 | "language_info": { 253 | "codemirror_mode": { 254 | "name": "ipython", 255 | "version": 3 256 | }, 257 | "file_extension": ".py", 258 | "mimetype": "text/x-python", 259 | "name": "python", 260 | "nbconvert_exporter": "python", 261 | "pygments_lexer": "ipython3", 262 | "version": "3.10.14" 263 | } 264 | }, 265 | "nbformat": 4, 266 | "nbformat_minor": 5 267 | } 268 | -------------------------------------------------------------------------------- /L-7/RAG_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain 2 | langchain_community 3 | langchain_openai 4 | python-dotenv 5 | streamlit 6 | langchain_experimental 7 | sentence-transformers 8 | langchain_chroma 9 | langchainhub 10 | unstructured -------------------------------------------------------------------------------- /L-7/RAG_demo/~$script.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AarohiSingla/Generative_AI/6df5ac38368014e9337bb572af65cf24feda9b4a/L-7/RAG_demo/~$script.docx -------------------------------------------------------------------------------- /L-8/gemini_rag_demo/.env: -------------------------------------------------------------------------------- 1 | GOOGLE_API_KEY='AIaSyAT99999999999999999999nGusx9JE' -------------------------------------------------------------------------------- /L-8/gemini_rag_demo/README.md: -------------------------------------------------------------------------------- 1 | ### Video Tutorial: https://youtu.be/8cKf5GUz4TU 2 | 3 | ## Environment setup: 4 | 5 | conda create -n env_langchain1 python=3.10 6 | conda activate env_langchain1 7 | python -m pip install --upgrade pip 8 | Install packages: 9 | pip install -r requirements.txt 10 | 11 | 12 | ## Run App: 13 | streamlit run app1.py 14 | -------------------------------------------------------------------------------- /L-8/gemini_rag_demo/app1.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import time 3 | from langchain_community.document_loaders import PyPDFLoader 4 | from langchain.text_splitter import RecursiveCharacterTextSplitter 5 | from langchain_google_genai import GoogleGenerativeAIEmbeddings 6 | from langchain_chroma import Chroma 7 | from langchain_google_genai import ChatGoogleGenerativeAI 8 | from langchain.chains import create_retrieval_chain 9 | from langchain.chains.combine_documents import create_stuff_documents_chain 10 | from langchain_core.prompts import ChatPromptTemplate 11 | 12 | 13 | 14 | 15 | from dotenv import load_dotenv 16 | load_dotenv() 17 | 18 | 19 | st.title("RAG Application built on Gemini Model") 20 | 21 | loader = PyPDFLoader("yolov9_paper.pdf") 22 | data = loader.load() 23 | 24 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000) 25 | docs = 
text_splitter.split_documents(data) 26 | 27 | 28 | vectorstore = Chroma.from_documents(documents=docs, embedding=GoogleGenerativeAIEmbeddings(model="models/embedding-001")) 29 | 30 | retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 10}) 31 | 32 | llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro",temperature=0,max_tokens=None,timeout=None) 33 | 34 | 35 | query = st.chat_input("Say something: ") 36 | prompt = query 37 | 38 | system_prompt = ( 39 | "You are an assistant for question-answering tasks. " 40 | "Use the following pieces of retrieved context to answer " 41 | "the question. If you don't know the answer, say that you " 42 | "don't know. Use three sentences maximum and keep the " 43 | "answer concise." 44 | "\n\n" 45 | "{context}" 46 | ) 47 | 48 | prompt = ChatPromptTemplate.from_messages( 49 | [ 50 | ("system", system_prompt), 51 | ("human", "{input}"), 52 | ] 53 | ) 54 | 55 | if query: 56 | question_answer_chain = create_stuff_documents_chain(llm, prompt) 57 | rag_chain = create_retrieval_chain(retriever, question_answer_chain) 58 | 59 | response = rag_chain.invoke({"input": query}) 60 | #print(response["answer"]) 61 | 62 | st.write(response["answer"]) 63 | 64 | 65 | -------------------------------------------------------------------------------- /L-8/gemini_rag_demo/basics_RAG_pdf.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "ad9b4b85-9d17-468c-b244-73a1c5191e3e", 6 | "metadata": {}, 7 | "source": [ 8 | "# RAG application built on gemini " 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 5, 14 | "id": "7d49c22a-1ad0-4395-b93b-aa95660aa026", 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "from langchain_community.document_loaders import PyPDFLoader\n", 19 | "\n", 20 | "loader = PyPDFLoader(\"yolov9_paper.pdf\")\n", 21 | "data = loader.load() # entire PDF is loaded as a single Document\n", 22 | "#data" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 3, 28 | "id": "a374eb7c-e262-42bb-8f3f-308ba7dcdbe4", 29 | "metadata": {}, 30 | "outputs": [ 31 | { 32 | "data": { 33 | "text/plain": [ 34 | "18" 35 | ] 36 | }, 37 | "execution_count": 3, 38 | "metadata": {}, 39 | "output_type": "execute_result" 40 | } 41 | ], 42 | "source": [ 43 | "len(data)" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 6, 49 | "id": "29633e3b-ff24-4ace-a09b-c03b6e28c5cc", 50 | "metadata": {}, 51 | "outputs": [ 52 | { 53 | "name": "stdout", 54 | "output_type": "stream", 55 | "text": [ 56 | "Total number of documents: 96\n" 57 | ] 58 | } 59 | ], 60 | "source": [ 61 | "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", 62 | "\n", 63 | "# split data\n", 64 | "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000)\n", 65 | "docs = text_splitter.split_documents(data)\n", 66 | "\n", 67 | "\n", 68 | "print(\"Total number of documents: \",len(docs))" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 8, 74 | "id": "100b7d1a-1209-49d4-99ed-c51bc233a938", 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "data": { 79 | "text/plain": [ 80 | "Document(metadata={'source': 'yolov9_paper.pdf', 'page': 1}, page_content='ditional layers to combine repeatedly fed input data, which\\nwill significantly increase the inference cost. 
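One detail worth making explicit about the splitter used above: `RecursiveCharacterTextSplitter(chunk_size=1000)` silently falls back to the library default `chunk_overlap` of 200 characters. A minimal sketch spelling both parameters out, assuming the `data` loaded earlier in this notebook:

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Overlap keeps sentences that straddle a chunk boundary retrievable from both
# neighbouring chunks; 200 is the default, written out here for clarity.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
docs = text_splitter.split_documents(data)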
In addition,\\nsince the input data layer to the output layer cannot have a\\ntoo deep path, this limitation will make it difficult to model\\nhigh-order semantic information during the training pro-\\ncess. As for masked modeling, its reconstruction loss some-\\ntimes conflicts with the target loss. In addition, most mask\\nmechanisms also produce incorrect associations with data.\\nFor the deep supervision mechanism, it will produce error\\naccumulation, and if the shallow supervision loses informa-\\ntion during the training process, the subsequent layers will\\nnot be able to retrieve the required information. The above\\nphenomenon will be more significant on difficult tasks and\\nsmall models.\\nTo address the above-mentioned issues, we propose a\\nnew concept, which is programmable gradient information\\n(PGI). The concept is to generate reliable gradients through')" 81 | ] 82 | }, 83 | "execution_count": 8, 84 | "metadata": {}, 85 | "output_type": "execute_result" 86 | } 87 | ], 88 | "source": [ 89 | "docs[7]" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 9, 95 | "id": "1073ab7f-2632-4367-8dec-c19449d6ce71", 96 | "metadata": {}, 97 | "outputs": [ 98 | { 99 | "data": { 100 | "text/plain": [ 101 | "[0.05168594419956207,\n", 102 | " -0.030764883384108543,\n", 103 | " -0.03062233328819275,\n", 104 | " -0.02802734449505806,\n", 105 | " 0.01813092641532421]" 106 | ] 107 | }, 108 | "execution_count": 9, 109 | "metadata": {}, 110 | "output_type": "execute_result" 111 | } 112 | ], 113 | "source": [ 114 | "from langchain_chroma import Chroma\n", 115 | "from langchain_google_genai import GoogleGenerativeAIEmbeddings\n", 116 | "\n", 117 | "from dotenv import load_dotenv\n", 118 | "load_dotenv() \n", 119 | "\n", 120 | "#Get an API key: \n", 121 | "# Head to https://ai.google.dev/gemini-api/docs/api-key to generate a Google AI API key. 
Paste in .env file\n", 122 | "\n", 123 | "# Embedding models: https://python.langchain.com/v0.1/docs/integrations/text_embedding/\n", 124 | "\n", 125 | "embeddings = GoogleGenerativeAIEmbeddings(model=\"models/embedding-001\")\n", 126 | "vector = embeddings.embed_query(\"hello, world!\")\n", 127 | "vector[:5]\n", 128 | "#vector" 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 10, 134 | "id": "688b6e6a-d8ab-41fb-a665-b72c9c9b4026", 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "vectorstore = Chroma.from_documents(documents=docs, embedding=GoogleGenerativeAIEmbeddings(model=\"models/embedding-001\"))" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 11, 144 | "id": "2c674c5c-1b57-42e9-a99d-9e882c75da2d", 145 | "metadata": {}, 146 | "outputs": [], 147 | "source": [ 148 | "retriever = vectorstore.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 10})\n", 149 | "\n", 150 | "retrieved_docs = retriever.invoke(\"What is new in yolov9?\")\n" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 13, 156 | "id": "04c5c6bb-fd0e-45ec-b315-e3f7656e0329", 157 | "metadata": {}, 158 | "outputs": [ 159 | { 160 | "data": { 161 | "text/plain": [ 162 | "10" 163 | ] 164 | }, 165 | "execution_count": 13, 166 | "metadata": {}, 167 | "output_type": "execute_result" 168 | } 169 | ], 170 | "source": [ 171 | "len(retrieved_docs)" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 17, 177 | "id": "8a1c8321-1efd-4a11-9744-0d1a7c6f4e0a", 178 | "metadata": {}, 179 | "outputs": [ 180 | { 181 | "name": "stdout", 182 | "output_type": "stream", 183 | "text": [ 184 | "YOLOv9: Learning What You Want to Learn\n", 185 | "Using Programmable Gradient Information\n", 186 | "Chien-Yao Wang1,2, I-Hau Yeh2, and Hong-Yuan Mark Liao1,2,3\n", 187 | "1Institute of Information Science, Academia Sinica, Taiwan\n", 188 | "2National Taipei University of Technology, Taiwan\n", 189 | "3Department of Information and Computer Engineering, Chung Yuan Christian University, Taiwan\n", 190 | "kinyiu@iis.sinica.edu.tw, ihyeh@emc.com.tw, and liao@iis.sinica.edu.tw\n", 191 | "Abstract\n", 192 | "Today’s deep learning methods focus on how to design\n", 193 | "the most appropriate objective functions so that the pre-\n", 194 | "diction results of the model can be closest to the ground\n", 195 | "truth. Meanwhile, an appropriate architecture that can\n", 196 | "facilitate acquisition of enough information for prediction\n", 197 | "has to be designed. Existing methods ignore a fact that\n", 198 | "when input data undergoes layer-by-layer feature extrac-\n", 199 | "tion and spatial transformation, large amount of informa-\n", 200 | "tion will be lost. 
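Printing a single chunk only spot-checks one hit. A quick way to survey everything the retriever returned, using the `retrieved_docs` list built above (PyPDFLoader stores the source page number in each document's metadata, as the docs[7] output earlier shows):

for i, d in enumerate(retrieved_docs):
    # first 80 characters of each hit, with its page of origin
    print(f"[{i}] page {d.metadata.get('page')}: {d.page_content[:80]!r}")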
This paper will delve into the important is-\n" 201 | ] 202 | } 203 | ], 204 | "source": [ 205 | "print(retrieved_docs[5].page_content)" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": 18, 211 | "id": "7f991a1f-6ce9-4463-9941-b35014df94f6", 212 | "metadata": {}, 213 | "outputs": [], 214 | "source": [ 215 | "from langchain_google_genai import ChatGoogleGenerativeAI\n", 216 | "\n", 217 | "llm = ChatGoogleGenerativeAI(model=\"gemini-1.5-pro\",temperature=0.3, max_tokens=500)" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": 19, 223 | "id": "6ee17439-7bc3-4931-9f57-4ec7e82ce902", 224 | "metadata": {}, 225 | "outputs": [], 226 | "source": [ 227 | "from langchain.chains import create_retrieval_chain\n", 228 | "from langchain.chains.combine_documents import create_stuff_documents_chain\n", 229 | "from langchain_core.prompts import ChatPromptTemplate\n", 230 | "\n", 231 | "system_prompt = (\n", 232 | " \"You are an assistant for question-answering tasks. \"\n", 233 | " \"Use the following pieces of retrieved context to answer \"\n", 234 | " \"the question. If you don't know the answer, say that you \"\n", 235 | " \"don't know. Use three sentences maximum and keep the \"\n", 236 | " \"answer concise.\"\n", 237 | " \"\\n\\n\"\n", 238 | " \"{context}\"\n", 239 | ")\n", 240 | "\n", 241 | "prompt = ChatPromptTemplate.from_messages(\n", 242 | " [\n", 243 | " (\"system\", system_prompt),\n", 244 | " (\"human\", \"{input}\"),\n", 245 | " ]\n", 246 | ")" 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": 20, 252 | "id": "266e86e0-746b-4943-9470-fd842633ed85", 253 | "metadata": {}, 254 | "outputs": [], 255 | "source": [ 256 | "question_answer_chain = create_stuff_documents_chain(llm, prompt)\n", 257 | "rag_chain = create_retrieval_chain(retriever, question_answer_chain)" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 21, 263 | "id": "9db9500d-4c51-4a10-9b21-f1ef9c8f985e", 264 | "metadata": {}, 265 | "outputs": [ 266 | { 267 | "name": "stdout", 268 | "output_type": "stream", 269 | "text": [ 270 | "YOLOv9 introduces a new architecture called GELAN, which improves parameter usage and inference speed. It also proposes a novel Programmable Gradient Information (PGI) mechanism for better gradient flow during training. These improvements lead to state-of-the-art performance on object detection tasks, surpassing previous YOLO versions and other real-time detectors. 
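The Streamlit app app1.py earlier in this folder wires up this same chain, but with two rough edges: `prompt = query` is dead code (the name is rebound to a ChatPromptTemplate two lines later), and the PDF is re-loaded, re-split, and re-embedded on every rerun, i.e. on every chat message. A sketch of the usual fix, assuming app1.py's existing imports: cache the expensive indexing with `st.cache_resource` so it runs once per process.

@st.cache_resource  # Streamlit reuses the returned object across reruns
def load_retriever(pdf_path: str):
    docs = RecursiveCharacterTextSplitter(chunk_size=1000).split_documents(
        PyPDFLoader(pdf_path).load()
    )
    vectorstore = Chroma.from_documents(
        documents=docs,
        embedding=GoogleGenerativeAIEmbeddings(model="models/embedding-001"),
    )
    return vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 10})

retriever = load_retriever("yolov9_paper.pdf")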
\n", 271 | "\n" 272 | ] 273 | } 274 | ], 275 | "source": [ 276 | "response = rag_chain.invoke({\"input\": \"what is new in YOLOv9?\"})\n", 277 | "print(response[\"answer\"])" 278 | ] 279 | }, 280 | { 281 | "cell_type": "code", 282 | "execution_count": null, 283 | "id": "7cff65d0-2436-47f8-8572-6979a3378701", 284 | "metadata": {}, 285 | "outputs": [], 286 | "source": [] 287 | } 288 | ], 289 | "metadata": { 290 | "kernelspec": { 291 | "display_name": "Python 3 (ipykernel)", 292 | "language": "python", 293 | "name": "python3" 294 | }, 295 | "language_info": { 296 | "codemirror_mode": { 297 | "name": "ipython", 298 | "version": 3 299 | }, 300 | "file_extension": ".py", 301 | "mimetype": "text/x-python", 302 | "name": "python", 303 | "nbconvert_exporter": "python", 304 | "pygments_lexer": "ipython3", 305 | "version": "3.10.14" 306 | } 307 | }, 308 | "nbformat": 4, 309 | "nbformat_minor": 5 310 | } 311 | -------------------------------------------------------------------------------- /L-8/gemini_rag_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain 2 | langchain_community 3 | langchain-google-genai 4 | python-dotenv 5 | streamlit 6 | langchain_experimental 7 | sentence-transformers 8 | langchain_chroma 9 | langchainhub 10 | pypdf 11 | rapidocr-onnxruntime -------------------------------------------------------------------------------- /L-8/gemini_rag_demo/yolov9_paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AarohiSingla/Generative_AI/6df5ac38368014e9337bb572af65cf24feda9b4a/L-8/gemini_rag_demo/yolov9_paper.pdf -------------------------------------------------------------------------------- /L-9/README.md: -------------------------------------------------------------------------------- 1 | ### Video Tutorial: https://youtu.be/DQfBXRTeo3o 2 | 3 | ## Environment setup: 4 | 5 | conda create -n env_langchain1 python=3.10 6 | conda activate env_langchain1 7 | python -m pip install --upgrade pip 8 | Install packages: 9 | pip install -r requirements.txt 10 | 11 | 12 | -------------------------------------------------------------------------------- /L-9/huggingface_llm_RAG.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "7d49c22a-1ad0-4395-b93b-aa95660aa026", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "from langchain_community.document_loaders import UnstructuredURLLoader\n", 11 | "urls = ['https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html',\n", 12 | " 'https://cleartax.in/s/budget-2024-highlights',\n", 13 | " 'https://www.hindustantimes.com/budget',\n", 14 | " 'https://economictimes.indiatimes.com/news/economy/policy/budget-2024-highlights-india-nirmala-sitharaman-capex-fiscal-deficit-tax-slab-key-announcement-in-union-budget-2024-25/articleshow/111942707.cms?from=mdr']\n", 15 | "loader = UnstructuredURLLoader(urls=urls)\n", 16 | "data = loader.load() " 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "id": "a374eb7c-e262-42bb-8f3f-308ba7dcdbe4", 23 | "metadata": {}, 24 | "outputs": [ 25 | { 26 | "data": { 27 | "text/plain": [ 28 | "4" 29 | ] 30 | }, 31 | "execution_count": 2, 32 | "metadata": {}, 33 | "output_type": "execute_result" 34 | } 35 | ], 36 | "source": [ 37 | 
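A packaging note on the loader above: `UnstructuredURLLoader` imports the `unstructured` package at runtime, and L-9/requirements.txt does not list it, so `pip install unstructured` may be needed first. The loader can also be asked to skip URLs that fail rather than raising, as sketched here with the same `urls` list defined above:

from langchain_community.document_loaders import UnstructuredURLLoader

# `unstructured` is a runtime dependency of UnstructuredURLLoader that
# L-9/requirements.txt does not pin: pip install unstructured
loader = UnstructuredURLLoader(urls=urls, continue_on_failure=True)  # skip URLs that error
data = loader.load()
print(len(data), "documents loaded")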
"len(data)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 4, 43 | "id": "be3de203-f996-4a81-9c81-bd1501ad1aa0", 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "#data" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 6, 53 | "id": "29633e3b-ff24-4ace-a09b-c03b6e28c5cc", 54 | "metadata": {}, 55 | "outputs": [ 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "Total number of documents: 164\n" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", 66 | "\n", 67 | "# split data\n", 68 | "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000)\n", 69 | "docs = text_splitter.split_documents(data)\n", 70 | "\n", 71 | "\n", 72 | "print(\"Total number of documents: \",len(docs))" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 7, 78 | "id": "100b7d1a-1209-49d4-99ed-c51bc233a938", 79 | "metadata": {}, 80 | "outputs": [ 81 | { 82 | "data": { 83 | "text/plain": [ 84 | "Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='“It was an intriguing trading session. Key events like the Budget often set the tone for future trends, but today, the closing did not show significant changes. The impact of such events can sometimes be seen in the following sessions, making the next few days crucial. Observing the daily chart, the market shows a defined range. The upper end, marked by a bearish engulfing pattern formed on Friday at 24850, serves as a key resistance, followed by the psychological level of 25000. On the downside, the panic low around the 20EMA at 24100 - 24000 acts as a strong support zone. In our previous outlook, we highlighted the fresh sell signal by the RSI Smoothened in the overbought zone, which remains a key observation for our firm cautious stance. However, recent observations indicate that the bulls are very resilient and hard to give up. We expect the Index to hover within the 24000 - 25000 range and anticipate trending moves after some consolidation. In this scenario, it is advisable to')" 85 | ] 86 | }, 87 | "execution_count": 7, 88 | "metadata": {}, 89 | "output_type": "execute_result" 90 | } 91 | ], 92 | "source": [ 93 | "docs[7]" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 8, 99 | "id": "7fbdb7ae-aafc-48f0-a4a9-c52e51fdfb60", 100 | "metadata": { 101 | "scrolled": true 102 | }, 103 | "outputs": [ 104 | { 105 | "name": "stderr", 106 | "output_type": "stream", 107 | "text": [ 108 | "C:\\Users\\f\\anaconda3\\envs\\env_langchain1\\lib\\site-packages\\langchain_core\\_api\\deprecation.py:139: LangChainDeprecationWarning: The class `HuggingFaceEmbeddings` was deprecated in LangChain 0.2.2 and will be removed in 0.3.0. An updated version of the class exists in the langchain-huggingface package and should be used instead. To use it run `pip install -U langchain-huggingface` and import as `from langchain_huggingface import HuggingFaceEmbeddings`.\n", 109 | " warn_deprecated(\n", 110 | "C:\\Users\\f\\anaconda3\\envs\\env_langchain1\\lib\\site-packages\\sentence_transformers\\cross_encoder\\CrossEncoder.py:11: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. 
in jupyter console)\n", 111 | " from tqdm.autonotebook import tqdm, trange\n" 112 | ] 113 | }, 114 | { 115 | "data": { 116 | "text/plain": [ 117 | "[0.03492265194654465,\n", 118 | " 0.01883004419505596,\n", 119 | " -0.017854733392596245,\n", 120 | " 0.00013885056250728667,\n", 121 | " 0.07407364994287491]" 122 | ] 123 | }, 124 | "execution_count": 8, 125 | "metadata": {}, 126 | "output_type": "execute_result" 127 | } 128 | ], 129 | "source": [ 130 | "# Embedding models: https://python.langchain.com/v0.1/docs/integrations/text_embedding/\n", 131 | "# Let's load the Hugging Face Embedding class. sentence_transformers\n", 132 | "from langchain_community.embeddings import HuggingFaceEmbeddings\n", 133 | "embeddings = HuggingFaceEmbeddings()\n", 134 | "\n", 135 | "vector = embeddings.embed_query(\"hello, world!\")\n", 136 | "vector[:5]\n", 137 | "#vector" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 17, 143 | "id": "688b6e6a-d8ab-41fb-a665-b72c9c9b4026", 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "from langchain_chroma import Chroma\n", 148 | "vectorstore = Chroma.from_documents(documents=docs, embedding=HuggingFaceEmbeddings())" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 18, 154 | "id": "2c674c5c-1b57-42e9-a99d-9e882c75da2d", 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [ 158 | "retriever = vectorstore.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 3})\n", 159 | "\n", 160 | "retrieved_docs = retriever.invoke(\"Budget highlights\")" 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": 19, 166 | "id": "04c5c6bb-fd0e-45ec-b315-e3f7656e0329", 167 | "metadata": {}, 168 | "outputs": [ 169 | { 170 | "data": { 171 | "text/plain": [ 172 | "3" 173 | ] 174 | }, 175 | "execution_count": 19, 176 | "metadata": {}, 177 | "output_type": "execute_result" 178 | } 179 | ], 180 | "source": [ 181 | "len(retrieved_docs)" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": 20, 187 | "id": "8a1c8321-1efd-4a11-9744-0d1a7c6f4e0a", 188 | "metadata": {}, 189 | "outputs": [ 190 | { 191 | "name": "stdout", 192 | "output_type": "stream", 193 | "text": [ 194 | "25 Jul 2024, 08:43:33 AM IST\n", 195 | "\n", 196 | "Action-Packed Budget Session Ends with Minimal Change, says Rajesh Bhosale, Equity Technical Analyst, Angel One\n", 197 | "\n", 198 | "Rajesh Bhosale, Equity Technical Analyst, Angel One, said, “On the key Budget day, the Benchmark Index began on a positive note. However, as the day progressed, prices traded cautiously within a range, influenced by the Budget announcement. In the second half, volatility spiked, causing prices to drop sharply below 24100. Despite this, prices quickly rebounded, reclaiming lost ground and ending just below 24500 with a marginal loss of 0.14%.\"\n" 199 | ] 200 | } 201 | ], 202 | "source": [ 203 | "print(retrieved_docs[2].page_content)" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": 21, 209 | "id": "4af01de6-bb32-44ce-a01e-15e72f8e97c4", 210 | "metadata": {}, 211 | "outputs": [ 212 | { 213 | "data": { 214 | "application/vnd.jupyter.widget-view+json": { 215 | "model_id": "2993d1fd928c4620951379ca2e337957", 216 | "version_major": 2, 217 | "version_minor": 0 218 | }, 219 | "text/plain": [ 220 | "Loading checkpoint shards: 0%| | 0/2 [00:00\n", 254 | "Answer the question based on your knowledge. 
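A caveat about the outputs that follow: they repeat the entire prompt before the model's answer. That is the behaviour of the Hugging Face text-generation pipeline when `return_full_text=True` is set (as in the gpt-neo cell later in this notebook); if only the completion is wanted, the pipeline can be built with the flag flipped, reusing the `model` and `tokenizer` already loaded:

from transformers import pipeline
from langchain_huggingface import HuggingFacePipeline

text_generation_pipeline = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=400,
    return_full_text=False,  # drop the echoed prompt, keep only newly generated tokens
)
llm = HuggingFacePipeline(pipeline=text_generation_pipeline)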
Use the following context to help:\n", 255 | "\n", 256 | "{context}\n", 257 | "\n", 258 | "\n", 259 | "<|user|>\n", 260 | "{question}\n", 261 | "\n", 262 | "<|assistant|>\n", 263 | "\n", 264 | " \"\"\"\n", 265 | "\n", 266 | "prompt = PromptTemplate(\n", 267 | " input_variables=[\"context\", \"question\"],\n", 268 | " template=prompt_template,\n", 269 | ")\n", 270 | "\n", 271 | "llm_chain = prompt | llm | StrOutputParser()" 272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": 22, 277 | "id": "3ac3ae8d-d987-41d9-9eb0-4f4f9a099ce0", 278 | "metadata": {}, 279 | "outputs": [], 280 | "source": [ 281 | "from langchain_core.runnables import RunnablePassthrough\n", 282 | "\n", 283 | "rag_chain = {\"context\": retriever, \"question\": RunnablePassthrough()} | llm_chain" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": 23, 289 | "id": "19f1e96e-57e7-4274-bede-77dcee282dbd", 290 | "metadata": {}, 291 | "outputs": [], 292 | "source": [ 293 | "question = \"2024 Budget Highlights\"" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": 24, 299 | "id": "392ea3b8-8389-4927-a207-ed70c70c8f3c", 300 | "metadata": { 301 | "scrolled": true 302 | }, 303 | "outputs": [ 304 | { 305 | "name": "stderr", 306 | "output_type": "stream", 307 | "text": [ 308 | "Setting `pad_token_id` to `eos_token_id`:11 for open-end generation.\n" 309 | ] 310 | }, 311 | { 312 | "data": { 313 | "text/plain": [ 314 | "'\\n<|system|>\\nAnswer the question based on your knowledge. Use the following context to help:\\n\\n[Document(metadata={\\'source\\': \\'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html\\'}, page_content=\\'24 Jul 2024, 04:21:26 PM IST\\\\n\\\\nBudget 2024 Key Highlights Live Updates: Rajeev Tiwari, Co founder of STEMROBO Technologies, said\\\\n\\\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.\\'), Document(metadata={\\'source\\': \\'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html\\'}, page_content=\\'24 Jul 2024, 04:21:26 PM IST\\\\n\\\\nBudget 2024 Key Highlights Live Updates: Rajeev Tiwari, Co founder of STEMROBO Technologies, said\\\\n\\\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. 
The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.\\'), Document(metadata={\\'source\\': \\'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html\\'}, page_content=\"24 Jul 2024, 02:00:05 PM IST\\\\n\\\\nBudget 2024 Key Highlights Live Updates: Jayesh Jain highlights transformative budget for economic growth and fintech sector\\\\n\\\\nJayesh Jain, Group CFO of Balancehero India, praised the Union Budget 2024 as a transformative step towards strengthening India\\'s economic fabric, emphasizing employment, skilling, and MSMEs. The allocation of ₹2 lakh crore for job and skill development schemes over the next five years is set to empower the youth and foster a skilled workforce, essential for driving innovation in the fintech sector. Jain highlighted the budget\\'s focus on social justice and inclusive human resource development, ensuring growth benefits reach every segment of society.\")]\\n\\n\\n<|user|>\\n2024 Budget Highlights\\n\\n<|assistant|>\\n\\n <|system|>\\nAnswer the question based on your knowledge. Use the following context to help:\\n\\n[Document(metadata={\\'source\\': \\'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html\\'}, page_content=\\'24 Jul 2024, 04:21:26 PM IST\\\\n\\\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.\"), Document(metadata={\\'source\\': \\'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html\\'}, page_content=\\'24 Jul 2024, 04:21:26 PM IST\\\\n\\\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.\"), Document(metadata={\\'source\\': \\'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-'" 315 | ] 316 | }, 317 | "execution_count": 24, 318 | "metadata": {}, 319 | "output_type": "execute_result" 320 | } 321 | ], 322 | "source": [ 323 | "rag_chain.invoke(question)" 324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "execution_count": 26, 329 | "id": "30b37f59-9621-4c17-9b4e-5a2705df3c40", 330 | "metadata": {}, 331 | "outputs": [ 332 | { 333 | "name": "stderr", 334 | "output_type": "stream", 335 | "text": [ 336 | "Setting `pad_token_id` to `eos_token_id`:11 for open-end generation.\n" 337 | ] 338 | }, 339 | { 340 | "name": "stdout", 341 | "output_type": "stream", 342 | "text": [ 343 | "Response: <|system|>\n", 344 | "Answer the question based on your knowledge. 
Use the following context to help:\n", 345 | "\n", 346 | "[Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='24 Jul 2024, 04:21:26 PM IST\\n\\nBudget 2024 Key Highlights Live Updates: Rajeev Tiwari, Co founder of STEMROBO Technologies, said\\n\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.'), Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='24 Jul 2024, 04:21:26 PM IST\\n\\nBudget 2024 Key Highlights Live Updates: Rajeev Tiwari, Co founder of STEMROBO Technologies, said\\n\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.'), Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content=\"24 Jul 2024, 02:00:05 PM IST\\n\\nBudget 2024 Key Highlights Live Updates: Jayesh Jain highlights transformative budget for economic growth and fintech sector\\n\\nJayesh Jain, Group CFO of Balancehero India, praised the Union Budget 2024 as a transformative step towards strengthening India's economic fabric, emphasizing employment, skilling, and MSMEs. The allocation of ₹2 lakh crore for job and skill development schemes over the next five years is set to empower the youth and foster a skilled workforce, essential for driving innovation in the fintech sector. Jain highlighted the budget's focus on social justice and inclusive human resource development, ensuring growth benefits reach every segment of society.\")]\n", 347 | "\n", 348 | "\n", 349 | "<|user|>\n", 350 | "2024 Budget Highlights\n", 351 | "\n", 352 | "<|assistant|>\n", 353 | "\n", 354 | " <|system|>\n", 355 | "Answer the question based on your knowledge. Use the following context to help:\n", 356 | "\n", 357 | "[Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='24 Jul 2024, 04:21:26 PM IST\\n\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. 
The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.\"), Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='24 Jul 2024, 04:21:26 PM IST\\n\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.\"), Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-\n" 358 | ] 359 | } 360 | ], 361 | "source": [ 362 | "question = \"2024 Budget Highlights\"\n", 363 | "response = rag_chain.invoke(question)\n", 364 | "\n", 365 | "# Making the response readable\n", 366 | "response = response.replace(\"\", \"\").strip()\n", 367 | "print(\"Response:\", response)\n", 368 | "\n" 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "execution_count": null, 374 | "id": "4a60395f-c06d-4b79-b041-6445468766dd", 375 | "metadata": {}, 376 | "outputs": [], 377 | "source": [] 378 | }, 379 | { 380 | "cell_type": "code", 381 | "execution_count": 33, 382 | "id": "5605402f-fd76-4fd0-a530-956134f75dd3", 383 | "metadata": {}, 384 | "outputs": [ 385 | { 386 | "name": "stderr", 387 | "output_type": "stream", 388 | "text": [ 389 | "Setting `pad_token_id` to `eos_token_id`:11 for open-end generation.\n" 390 | ] 391 | }, 392 | { 393 | "name": "stdout", 394 | "output_type": "stream", 395 | "text": [ 396 | "Response: <|system|>\n", 397 | "Answer the question based on your knowledge. Use the following context to help:\n", 398 | "\n", 399 | "[Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='48 min read . Updated: 25 Jul 2024, 08:43 AM IST\\n\\nLivemint, Written By Ankit Gohel, Anubhav Mukherjee, Shivangini\\n\\nBudget 2024 Key Highlights: Finance Minister Nirmala Sitharaman presented the Union Budget 2024-25 in the Lok Sabha today. FM listed out roadmap for nine priorities for India opportunity.\\n\\nPremium\\n\\nRead Full Story\\n\\nBudget 2024 Key Highlights: Finance Minister Nirmala Sitharaman announced the Union Budget 2024-25 in the parliament on July 23. The Budget focused on provisioning for the agricultural sector, the introduction of schemes related to employment, loan schemes, announcements for financial support to the MSME sector, infrastructural development, and fiscal deficit projection at 4.9% with a commitment to reducing it down to 4.5%.'), Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='48 min read . Updated: 25 Jul 2024, 08:43 AM IST\\n\\nLivemint, Written By Ankit Gohel, Anubhav Mukherjee, Shivangini\\n\\nBudget 2024 Key Highlights: Finance Minister Nirmala Sitharaman presented the Union Budget 2024-25 in the Lok Sabha today. 
FM listed out roadmap for nine priorities for India opportunity.\\n\\nPremium\\n\\nRead Full Story\\n\\nBudget 2024 Key Highlights: Finance Minister Nirmala Sitharaman announced the Union Budget 2024-25 in the parliament on July 23. The Budget focused on provisioning for the agricultural sector, the introduction of schemes related to employment, loan schemes, announcements for financial support to the MSME sector, infrastructural development, and fiscal deficit projection at 4.9% with a commitment to reducing it down to 4.5%.'), Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='24 Jul 2024, 04:21:26 PM IST\\n\\nBudget 2024 Key Highlights Live Updates: Rajeev Tiwari, Co founder of STEMROBO Technologies, said\\n\\nBudget 2024 Key Highlights Live Updates: \"The Union Budget 2024 focuses on the provisions for the education and startup sectors, reflecting an approach to boost economic growth and development. The budget allocation for education stands at ₹1.48 lakh crore, marking an increase aimed at enhancing educational infrastructure, skilling, and employment opportunities.')]\n", 400 | "\n", 401 | "\n", 402 | "<|user|>\n", 403 | "What is the Union Budget?\n", 404 | "\n", 405 | "<|assistant|>\n", 406 | "\n", 407 | " <|system|>\n", 408 | "Answer the question based on your knowledge. Use the following context to help:\n", 409 | "\n", 410 | "[Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='48 min read. Updated: 25 Jul 2024, 08:43 AM IST\\n\\nLivemint, Written By Ankit Gohel, Anubhav Mukherjee, Shivangini\\n\\nBudget 2024 Key Highlights: Finance Minister Nirmala Sitharaman presented the Union Budget 2024-25 in the Lok Sabha today. FM listed out roadmap for nine priorities for India opportunity.\\n\\nPremium\\n\\nRead Full Story\\n\\nBudget 2024 Key Highlights: Finance Minister Nirmala Sitharaman announced the Union Budget 2024-25 in the parliament on July 23. The Budget focused on provisioning for the agricultural sector, the introduction of schemes related to employment, loan schemes, announcements for financial support to the MSME sector, infrastructural development, and fiscal deficit projection at 4.9% with a commitment to reducing it down to 4.5%.'), Document(metadata={'source': 'https://www.livemint.com/economy/budget-2024-key-highlights-live-updates-nirmala-sitharaman-infrastructure-defence-income-tax-modi-budget-23-july-11721654502862.html'}, page_content='24 Jul 2024, 04:21:26 PM IST\\n\\nBudget 202\n" 411 | ] 412 | } 413 | ], 414 | "source": [ 415 | "question = \"What is the Union Budget?\"\n", 416 | "response = rag_chain.invoke(question)\n", 417 | "\n", 418 | "# Making the response readable\n", 419 | "response = response.replace(\"\", \"\").strip()\n", 420 | "print(\"Response:\", response)" 421 | ] 422 | }, 423 | { 424 | "cell_type": "code", 425 | "execution_count": null, 426 | "id": "04d7f1bd-e761-40af-bb32-34014a1b296b", 427 | "metadata": {}, 428 | "outputs": [], 429 | "source": [] 430 | }, 431 | { 432 | "cell_type": "code", 433 | "execution_count": 27, 434 | "id": "c6baa476-160d-4132-a860-ca42e8e99342", 435 | "metadata": {}, 436 | "outputs": [ 437 | { 438 | "name": "stderr", 439 | "output_type": "stream", 440 | "text": [ 441 | "Hardware accelerator e.g. 
GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n" 442 | ] 443 | } 444 | ], 445 | "source": [ 446 | "from langchain_huggingface import HuggingFacePipeline\n", 447 | "from langchain.prompts import PromptTemplate\n", 448 | "from transformers import pipeline\n", 449 | "from langchain_core.output_parsers import StrOutputParser\n", 450 | "import torch\n", 451 | "from transformers import AutoModelForCausalLM, AutoTokenizer\n", 452 | "\n", 453 | "\n", 454 | "model_name = \"EleutherAI/gpt-neo-1.3B\"\n", 455 | "model = AutoModelForCausalLM.from_pretrained(model_name)\n", 456 | "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", 457 | "\n", 458 | "text_generation_pipeline = pipeline(\n", 459 | " model=model,\n", 460 | " tokenizer=tokenizer,\n", 461 | " task=\"text-generation\",\n", 462 | " temperature=0.1,\n", 463 | " do_sample=True,\n", 464 | " repetition_penalty=1.1,\n", 465 | " return_full_text=True,\n", 466 | " max_new_tokens=400,\n", 467 | ")\n", 468 | "llm = HuggingFacePipeline(pipeline=text_generation_pipeline)\n" 469 | ] 470 | }, 471 | { 472 | "cell_type": "code", 473 | "execution_count": 28, 474 | "id": "7cff65d0-2436-47f8-8572-6979a3378701", 475 | "metadata": {}, 476 | "outputs": [], 477 | "source": [ 478 | "\n", 479 | "\n", 480 | "prompt_template = \"\"\"\n", 481 | "<|system|>\n", 482 | "Answer the question based on your knowledge. Use the following context to help:\n", 483 | "\n", 484 | "{context}\n", 485 | "\n", 486 | "\n", 487 | "<|user|>\n", 488 | "{question}\n", 489 | "\n", 490 | "<|assistant|>\n", 491 | "\n", 492 | " \"\"\"\n", 493 | "\n", 494 | "prompt = PromptTemplate(\n", 495 | " input_variables=[\"context\", \"question\"],\n", 496 | " template=prompt_template,\n", 497 | ")\n", 498 | "\n" 499 | ] 500 | }, 501 | { 502 | "cell_type": "code", 503 | "execution_count": 29, 504 | "id": "36f9c819-e4df-43b9-93df-a462a89a2a92", 505 | "metadata": {}, 506 | "outputs": [], 507 | "source": [ 508 | "llm_chain = prompt | llm | StrOutputParser()" 509 | ] 510 | }, 511 | { 512 | "cell_type": "code", 513 | "execution_count": 30, 514 | "id": "e3460fb6-31f0-41e9-b5f9-863d4bd579ec", 515 | "metadata": {}, 516 | "outputs": [], 517 | "source": [ 518 | "from langchain_core.runnables import RunnablePassthrough\n", 519 | "rag_chain = {\"context\": retriever, \"question\": RunnablePassthrough()} | llm_chain" 520 | ] 521 | }, 522 | { 523 | "cell_type": "code", 524 | "execution_count": 32, 525 | "id": "465785b8-dc36-4e5d-bce6-41191a034a7d", 526 | "metadata": {}, 527 | "outputs": [ 528 | { 529 | "name": "stderr", 530 | "output_type": "stream", 531 | "text": [ 532 | "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" 533 | ] 534 | } 535 | ], 536 | "source": [ 537 | "question = \"What is the Union Budget?\"\n", 538 | "\n", 539 | "response = rag_chain.invoke(question)\n", 540 | "\n", 541 | "# Making the response readable\n", 542 | "response = response.replace(\"\", \"\").strip()\n", 543 | "print(\"Response:\", response)" 544 | ] 545 | }, 546 | { 547 | "cell_type": "code", 548 | "execution_count": null, 549 | "id": "ba32518d-e8d9-4577-a3fc-d1f3398e3236", 550 | "metadata": {}, 551 | "outputs": [], 552 | "source": [] 553 | }, 554 | { 555 | "cell_type": "code", 556 | "execution_count": null, 557 | "id": "276b9599-521b-4df9-b9a1-78adf71c58bf", 558 | "metadata": {}, 559 | "outputs": [], 560 | "source": [] 561 | } 562 | ], 563 | "metadata": { 564 | "kernelspec": { 565 | "display_name": "Python 3 (ipykernel)", 566 | 
"language": "python", 567 | "name": "python3" 568 | }, 569 | "language_info": { 570 | "codemirror_mode": { 571 | "name": "ipython", 572 | "version": 3 573 | }, 574 | "file_extension": ".py", 575 | "mimetype": "text/x-python", 576 | "name": "python", 577 | "nbconvert_exporter": "python", 578 | "pygments_lexer": "ipython3", 579 | "version": "3.10.14" 580 | } 581 | }, 582 | "nbformat": 4, 583 | "nbformat_minor": 5 584 | } 585 | -------------------------------------------------------------------------------- /L-9/requirements.txt: -------------------------------------------------------------------------------- 1 | torch 2 | transformers 3 | sentence-transformers 4 | langchain 5 | langchain_community 6 | langchain-huggingface 7 | langchain_experimental 8 | langchain_chroma 9 | langchainhub 10 | streamlit 11 | -------------------------------------------------------------------------------- /deepseek-r1_langchain/1.py: -------------------------------------------------------------------------------- 1 | from langchain_core.prompts import ChatPromptTemplate 2 | from langchain_ollama.llms import OllamaLLM 3 | import streamlit as st 4 | 5 | 6 | st.title("Langchain-DeepSeek-R1 app") 7 | 8 | template = """Question: {question} 9 | 10 | Answer: Let's think step by step.""" 11 | 12 | prompt = ChatPromptTemplate.from_template(template) 13 | 14 | #model = OllamaLLM(model="llama3.1") 15 | model = OllamaLLM(model="deepseek-r1") 16 | 17 | chain = prompt | model 18 | 19 | 20 | question = st.chat_input("Enter your question here") 21 | if question: 22 | st.write(chain.invoke({"question": question})) -------------------------------------------------------------------------------- /deepseek-r1_langchain/README.md: -------------------------------------------------------------------------------- 1 | ### Video Tutorial (Hindi): https://youtu.be/uBQiWk0buwM 2 | 3 | ### Video Tutorial (English): https://youtu.be/Yy2xXp0UGcM 4 | 5 | Download and install ollama - https://ollama.com/ 6 | 7 | ## Environment setup: 8 | 9 | Install packages 10 | 11 | pip install --upgrade langchain langchain-community 12 | 13 | 14 | #### Provides access to the ollama models. 15 | pip install -U langchain-ollama 16 | 17 | #### Install streamlit. 
18 | pip install streamlit 19 | 20 | 21 | #### To run this code- Open command prompt and type 22 | streamlit run 1.py 23 | 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /langgraph_agents.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [], 7 | "gpuType": "T4" 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | }, 13 | "language_info": { 14 | "name": "python" 15 | } 16 | }, 17 | "cells": [ 18 | { 19 | "cell_type": "code", 20 | "execution_count": 1, 21 | "metadata": { 22 | "colab": { 23 | "base_uri": "https://localhost:8080/" 24 | }, 25 | "id": "9r6mZKZf2Gmo", 26 | "outputId": "4042da4c-bf9c-4911-a716-33bc9f3bc90d" 27 | }, 28 | "outputs": [ 29 | { 30 | "output_type": "stream", 31 | "name": "stdout", 32 | "text": [ 33 | "Requirement already satisfied: langchain in /usr/local/lib/python3.11/dist-packages (0.3.17)\n", 34 | "Collecting langchain\n", 35 | " Downloading langchain-0.3.18-py3-none-any.whl.metadata (7.8 kB)\n", 36 | "Collecting langchain-community\n", 37 | " Downloading langchain_community-0.3.17-py3-none-any.whl.metadata (2.4 kB)\n", 38 | "Collecting langgraph\n", 39 | " Downloading langgraph-0.2.70-py3-none-any.whl.metadata (17 kB)\n", 40 | "Requirement already satisfied: openai in /usr/local/lib/python3.11/dist-packages (1.61.1)\n", 41 | "Collecting langchain_openai\n", 42 | " Downloading langchain_openai-0.3.4-py3-none-any.whl.metadata (2.3 kB)\n", 43 | "Collecting wikipedia\n", 44 | " Downloading wikipedia-1.4.0.tar.gz (27 kB)\n", 45 | " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", 46 | "Collecting langchain-core<1.0.0,>=0.3.34 (from langchain)\n", 47 | " Downloading langchain_core-0.3.34-py3-none-any.whl.metadata (5.9 kB)\n", 48 | "Collecting langchain-text-splitters<1.0.0,>=0.3.6 (from langchain)\n", 49 | " Downloading langchain_text_splitters-0.3.6-py3-none-any.whl.metadata (1.9 kB)\n", 50 | "Requirement already satisfied: langsmith<0.4,>=0.1.17 in /usr/local/lib/python3.11/dist-packages (from langchain) (0.3.6)\n", 51 | "Requirement already satisfied: pydantic<3.0.0,>=2.7.4 in /usr/local/lib/python3.11/dist-packages (from langchain) (2.10.6)\n", 52 | "Requirement already satisfied: SQLAlchemy<3,>=1.4 in /usr/local/lib/python3.11/dist-packages (from langchain) (2.0.37)\n", 53 | "Requirement already satisfied: requests<3,>=2 in /usr/local/lib/python3.11/dist-packages (from langchain) (2.32.3)\n", 54 | "Requirement already satisfied: PyYAML>=5.3 in /usr/local/lib/python3.11/dist-packages (from langchain) (6.0.2)\n", 55 | "Requirement already satisfied: aiohttp<4.0.0,>=3.8.3 in /usr/local/lib/python3.11/dist-packages (from langchain) (3.11.12)\n", 56 | "Requirement already satisfied: tenacity!=8.4.0,<10,>=8.1.0 in /usr/local/lib/python3.11/dist-packages (from langchain) (9.0.0)\n", 57 | "Requirement already satisfied: numpy<2,>=1.26.4 in /usr/local/lib/python3.11/dist-packages (from langchain) (1.26.4)\n", 58 | "Collecting dataclasses-json<0.7,>=0.5.7 (from langchain-community)\n", 59 | " Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n", 60 | "Collecting pydantic-settings<3.0.0,>=2.4.0 (from langchain-community)\n", 61 | " Downloading pydantic_settings-2.7.1-py3-none-any.whl.metadata (3.5 kB)\n", 62 | "Collecting httpx-sse<1.0.0,>=0.4.0 (from langchain-community)\n", 63 | " Downloading 
httpx_sse-0.4.0-py3-none-any.whl.metadata (9.0 kB)\n", 64 | "[... ~90 lines of routine pip dependency-resolution and wheel-download output trimmed; the run ends with ...]\n", 153 | "Successfully installed dataclasses-json-0.6.7 httpx-sse-0.4.0 langchain-0.3.18 langchain-community-0.3.17 langchain-core-0.3.34 langchain-text-splitters-0.3.6 langchain_openai-0.3.4 langgraph-0.2.70 langgraph-checkpoint-2.0.12 langgraph-sdk-0.1.51 marshmallow-3.26.1 mypy-extensions-1.0.0 pydantic-settings-2.7.1 python-dotenv-1.0.1 tiktoken-0.8.0 typing-inspect-0.9.0 wikipedia-1.4.0\n" 154 | ] 155 | } 156 | ], 157 | "source": [ 158 | "!pip install --upgrade langchain 
langchain-community langgraph openai langchain_openai wikipedia" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "source": [ 164 | "from langchain_community.tools import WikipediaQueryRun\n", 165 | "from langchain_community.utilities import WikipediaAPIWrapper\n", 166 | "\n", 167 | "\n", 168 | "api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=300)\n", 169 | "wiki_tool = WikipediaQueryRun(api_wrapper=api_wrapper)" 170 | ], 171 | "metadata": { 172 | "id": "dBJF7BEr4BXn" 173 | }, 174 | "execution_count": 4, 175 | "outputs": [] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "source": [ 180 | "wiki_tool.run({\"query\": \"AI agents\"})" 181 | ], 182 | "metadata": { 183 | "colab": { 184 | "base_uri": "https://localhost:8080/", 185 | "height": 70 186 | }, 187 | "id": "DMiuQhuq4fdA", 188 | "outputId": "74295346-2c98-4352-aafb-ede74d0318ad" 189 | }, 190 | "execution_count": 5, 191 | "outputs": [ 192 | { 193 | "output_type": "execute_result", 194 | "data": { 195 | "text/plain": [ 196 | "'Page: Intelligent agent\\nSummary: In artificial intelligence, an intelligent agent is an entity that perceives its environment, takes actions autonomously to achieve goals, and may improve its performance through machine learning or by acquiring knowledge. Leading AI textbooks define artificial intel'" 197 | ], 198 | "application/vnd.google.colaboratory.intrinsic+json": { 199 | "type": "string" 200 | } 201 | }, 202 | "metadata": {}, 203 | "execution_count": 5 204 | } 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "source": [ 210 | "from langchain_openai import ChatOpenAI\n", 211 | "llm = ChatOpenAI(temperature=0, api_key=\"sk*******************M7\", model=\"gpt-4o-mini\")\n" 212 | ], 213 | "metadata": { 214 | "id": "Tq9TI3Ho4ln6" 215 | }, 216 | "execution_count": 6, 217 | "outputs": [] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "source": [ 222 | "tools = [wiki_tool]\n", 223 | "\n", 224 | "# Tool binding\n", 225 | "llm_with_tools = llm.bind_tools(tools)\n", 226 | "\n", 227 | "#Tool calling\n", 228 | "result = llm_with_tools.invoke(\"Hello world!\")\n", 229 | "result\n", 230 | "result.content" 231 | ], 232 | "metadata": { 233 | "colab": { 234 | "base_uri": "https://localhost:8080/", 235 | "height": 35 236 | }, 237 | "id": "U2wQ7j-K47ZK", 238 | "outputId": "9af5429b-a11e-4ad8-942d-6febcb174c02" 239 | }, 240 | "execution_count": 7, 241 | "outputs": [ 242 | { 243 | "output_type": "execute_result", 244 | "data": { 245 | "text/plain": [ 246 | "'Hello! 
How can I assist you today?'" 247 | ], 248 | "application/vnd.google.colaboratory.intrinsic+json": { 249 | "type": "string" 250 | } 251 | }, 252 | "metadata": {}, 253 | "execution_count": 7 254 | } 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "source": [ 260 | "from langgraph.prebuilt import create_react_agent\n", 261 | "\n", 262 | "agent_executor = create_react_agent(llm, tools)" 263 | ], 264 | "metadata": { 265 | "id": "vhUDRGM_5ZtF" 266 | }, 267 | "execution_count": 8, 268 | "outputs": [] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "source": [ 273 | "from langchain_core.messages import HumanMessage\n", 274 | "\n", 275 | "#First up, let's see how it responds when there's no need to call a tool:\n", 276 | "response = agent_executor.invoke({\"messages\": [HumanMessage(content=\"hi!\")]})\n", 277 | "\n", 278 | "response[\"messages\"]" 279 | ], 280 | "metadata": { 281 | "colab": { 282 | "base_uri": "https://localhost:8080/" 283 | }, 284 | "id": "2PtbkBft5uqm", 285 | "outputId": "fddcd849-c21c-4fcc-a243-c4e44ec2da8f" 286 | }, 287 | "execution_count": 9, 288 | "outputs": [ 289 | { 290 | "output_type": "execute_result", 291 | "data": { 292 | "text/plain": [ 293 | "[HumanMessage(content='hi!', additional_kwargs={}, response_metadata={}, id='6945f09a-4798-49c1-8d4a-e369f165b671'),\n", 294 | " AIMessage(content='Hello! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 83, 'total_tokens': 94, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_72ed7ab54c', 'finish_reason': 'stop', 'logprobs': None}, id='run-15409985-a239-4005-a880-1bbdff55e74a-0', usage_metadata={'input_tokens': 83, 'output_tokens': 11, 'total_tokens': 94, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]" 295 | ] 296 | }, 297 | "metadata": {}, 298 | "execution_count": 9 299 | } 300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "source": [ 305 | "print(response[\"messages\"][-1].content)" 306 | ], 307 | "metadata": { 308 | "colab": { 309 | "base_uri": "https://localhost:8080/" 310 | }, 311 | "id": "nTMdWurE57G2", 312 | "outputId": "57c5c8fd-d9db-4b02-ae83-35d756abb3ac" 313 | }, 314 | "execution_count": 10, 315 | "outputs": [ 316 | { 317 | "output_type": "stream", 318 | "name": "stdout", 319 | "text": [ 320 | "Hello! 
How can I assist you today?\n" 321 | ] 322 | } 323 | ] 324 | }, 325 | { 326 | "cell_type": "code", 327 | "source": [ 328 | "from langchain_core.messages import HumanMessage\n", 329 | "\n", 330 | "#First up, let's see how it responds when there's no need to call a tool:\n", 331 | "response = agent_executor.invoke({\"messages\": [HumanMessage(content=\"what is agentic ai\")]})\n", 332 | "\n", 333 | "response[\"messages\"]" 334 | ], 335 | "metadata": { 336 | "colab": { 337 | "base_uri": "https://localhost:8080/" 338 | }, 339 | "id": "kjtFP-pv6F44", 340 | "outputId": "3dcd2ea0-9f51-44ed-fa8d-08d9101dba6c" 341 | }, 342 | "execution_count": 11, 343 | "outputs": [ 344 | { 345 | "output_type": "execute_result", 346 | "data": { 347 | "text/plain": [ 348 | "[HumanMessage(content='what is agentic ai', additional_kwargs={}, response_metadata={}, id='cc704242-a6b6-4beb-8540-1189d8ee727d'),\n", 349 | " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_nJETD0JhkN3atAANJQ5Jh0zs', 'function': {'arguments': '{\"query\":\"Agentic AI\"}', 'name': 'wikipedia'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 16, 'prompt_tokens': 86, 'total_tokens': 102, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_bd83329f63', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-72ada718-03e2-4ff9-ad0b-f1c9013bbc59-0', tool_calls=[{'name': 'wikipedia', 'args': {'query': 'Agentic AI'}, 'id': 'call_nJETD0JhkN3atAANJQ5Jh0zs', 'type': 'tool_call'}], usage_metadata={'input_tokens': 86, 'output_tokens': 16, 'total_tokens': 102, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}),\n", 350 | " ToolMessage(content='Page: Intelligent agent\\nSummary: In artificial intelligence, an intelligent agent is an entity that perceives its environment, takes actions autonomously to achieve goals, and may improve its performance through machine learning or by acquiring knowledge. Leading AI textbooks define artificial intel', name='wikipedia', id='8836c552-b430-4d34-a3e1-627a4ef420fe', tool_call_id='call_nJETD0JhkN3atAANJQ5Jh0zs'),\n", 351 | " AIMessage(content='Agentic AI refers to intelligent agents in artificial intelligence that can perceive their environment, take autonomous actions to achieve specific goals, and potentially improve their performance through learning or acquiring knowledge. 
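Aside: the traces above show the two possible outcomes of tool binding. `llm_with_tools.invoke("Hello world!")` came back as plain text, while the "what is agentic ai" turn produced a structured `wikipedia` tool call. A minimal sketch (variable names as in this notebook; the query string is illustrative) of inspecting that decision directly:

```python
# Sketch: an AIMessage from a tool-bound model carries either text in
# .content or structured requests in .tool_calls (dicts with 'name',
# 'args', and an 'id' that the matching ToolMessage echoes back).
result = llm_with_tools.invoke("Look up intelligent agents on Wikipedia")
if result.tool_calls:
    for call in result.tool_calls:
        print(call["name"], call["args"])  # e.g. wikipedia {'query': 'intelligent agents'}
else:
    print(result.content)
```

The `id` on each entry of `tool_calls` is what shows up as `tool_call_id` on the `ToolMessage` in the dumps above.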
These agents operate independently and can adapt their behavior based on their experiences.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 51, 'prompt_tokens': 159, 'total_tokens': 210, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_72ed7ab54c', 'finish_reason': 'stop', 'logprobs': None}, id='run-a8460c47-2361-4df5-8882-3f46a1c2dde2-0', usage_metadata={'input_tokens': 159, 'output_tokens': 51, 'total_tokens': 210, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]" 352 | ] 353 | }, 354 | "metadata": {}, 355 | "execution_count": 11 356 | } 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "source": [ 362 | "print(response[\"messages\"][-1].content)" 363 | ], 364 | "metadata": { 365 | "colab": { 366 | "base_uri": "https://localhost:8080/" 367 | }, 368 | "id": "K6XqJpTl6UsO", 369 | "outputId": "4f252e6e-ec18-42af-a0db-9699ae81a42e" 370 | }, 371 | "execution_count": 12, 372 | "outputs": [ 373 | { 374 | "output_type": "stream", 375 | "name": "stdout", 376 | "text": [ 377 | "Agentic AI refers to intelligent agents in artificial intelligence that can perceive their environment, take autonomous actions to achieve specific goals, and potentially improve their performance through learning or acquiring knowledge. These agents operate independently and can adapt their behavior based on their experiences.\n" 378 | ] 379 | } 380 | ] 381 | }, 382 | { 383 | "cell_type": "code", 384 | "source": [], 385 | "metadata": { 386 | "id": "XIAVu3UF6wND" 387 | }, 388 | "execution_count": null, 389 | "outputs": [] 390 | } 391 | ] 392 | } -------------------------------------------------------------------------------- /langgraph_tools_Bindings_agents.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [], 7 | "gpuType": "T4" 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | }, 13 | "language_info": { 14 | "name": "python" 15 | }, 16 | "accelerator": "GPU" 17 | }, 18 | "cells": [ 19 | { 20 | "cell_type": "code", 21 | "source": [ 22 | "!pip install --upgrade langchain langchain-community langgraph openai langchain_openai" 23 | ], 24 | "metadata": { 25 | "id": "CrEJVfjvHDvC" 26 | }, 27 | "execution_count": 1, 28 | "outputs": [] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "source": [ 33 | "from IPython.display import Image,display\n", 34 | "from langgraph.graph import StateGraph,START\n", 35 | "from langchain_openai import ChatOpenAI\n", 36 | "import requests\n", 37 | "from langchain_core.messages import SystemMessage, HumanMessage\n", 38 | "from langgraph.graph import MessagesState\n", 39 | "\n", 40 | "from langgraph.prebuilt import ToolNode, tools_condition\n", 41 | "\n", 42 | "from typing import Annotated\n", 43 | "from typing_extensions import TypedDict\n", 44 | "from langgraph.graph.message import add_messages" 45 | ], 46 | "metadata": { 47 | "id": "OGqH6zIhH3ZL" 48 | }, 49 | "execution_count": 2, 50 | "outputs": [] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "source": [ 55 | "class State(TypedDict):\n", 56 | " messages: Annotated[list, add_messages]" 57 | ], 58 | "metadata": { 59 | "id": "ivIYzyb_IFd9" 60 | 
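The `State` class defined above leans on `add_messages`, which is a reducer: when a node returns `{"messages": [...]}`, LangGraph appends the new messages to (and merges them by `id` into) the existing list instead of overwriting it. A small self-contained sketch of that behaviour (the message ids are illustrative):

```python
# Sketch: calling the add_messages reducer directly to show its merge rules.
from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph.message import add_messages

history = [HumanMessage(content="hi", id="1")]
update = [AIMessage(content="hello!", id="2")]

merged = add_messages(history, update)
print(len(merged))  # 2 -- the update is appended, not substituted

# An update that reuses an existing id replaces that message in place:
edited = add_messages(merged, [HumanMessage(content="hi there", id="1")])
print(edited[0].content)  # 'hi there'
```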
}, 61 | "execution_count": 3, 62 | "outputs": [] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "source": [ 67 | "!pip install -U duckduckgo-search" 68 | ], 69 | "metadata": { 70 | "colab": { 71 | "base_uri": "https://localhost:8080/" 72 | }, 73 | "id": "22T0Y_7dIPjj", 74 | "outputId": "0456ed38-6d3f-44e7-9c28-964970b23e43" 75 | }, 76 | "execution_count": 4, 77 | "outputs": [ 78 | { 79 | "output_type": "stream", 80 | "name": "stdout", 81 | "text": [ 82 | "Requirement already satisfied: duckduckgo-search in /usr/local/lib/python3.11/dist-packages (7.3.2)\n", 83 | "Requirement already satisfied: click>=8.1.8 in /usr/local/lib/python3.11/dist-packages (from duckduckgo-search) (8.1.8)\n", 84 | "Requirement already satisfied: primp>=0.11.0 in /usr/local/lib/python3.11/dist-packages (from duckduckgo-search) (0.12.1)\n", 85 | "Requirement already satisfied: lxml>=5.3.0 in /usr/local/lib/python3.11/dist-packages (from duckduckgo-search) (5.3.0)\n" 86 | ] 87 | } 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "source": [ 93 | "from langchain_community.tools import DuckDuckGoSearchRun\n", 94 | "search = DuckDuckGoSearchRun()\n", 95 | "search.invoke(\"Obama's first name?\")" 96 | ], 97 | "metadata": { 98 | "colab": { 99 | "base_uri": "https://localhost:8080/", 100 | "height": 139 101 | }, 102 | "id": "MwOf8Y5DIYvS", 103 | "outputId": "32663cc1-a8a5-416f-930d-d7b747f6c30e" 104 | }, 105 | "execution_count": 5, 106 | "outputs": [ 107 | { 108 | "output_type": "execute_result", 109 | "data": { 110 | "text/plain": [ 111 | "\"The Irish Sun, It's a fake Barack Obama's brother posts forged document he claims is ex-president's 'Kenyan birth certificate,' March 11, 2017 Salon, Orly Taitz is at it again , Sept. 4, 2009 The White House, official residence of the president of the United States, in July 2008. The president of the United States is the head of state and head of government of the United States, [1] indirectly elected to a four-year term via the Electoral College. [2] Under the U.S. Constitution, the officeholder leads the executive branch of the federal government and is the commander-in-chief of ... Born on August 4, 1961, in Honolulu, Hawaii, Obama is the first president born outside the continental United States. His full name is Barack Hussein Obama II, named after his father. Obama's mother, Ann Dunham, was from Kansas, while his father, Barack Obama Sr., hailed from Kenya. Here is a list of the presidents and vice presidents of the United States along with their parties and dates in office. ... Chester A Arthur: Twenty-First President of the United States. 10 Interesting Facts About James Buchanan. Martin Van Buren - Eighth President of the United States. Quotes From Harry S. Truman. The list of presidents and their full names are as follows (with shared names in bold): 1: George Washington 2: John Adams 3: Thomas Jefferson 4: James Madison Jr. 
5: James Monroe 6: John Quincy Adams\"" 112 | ], 113 | "application/vnd.google.colaboratory.intrinsic+json": { 114 | "type": "string" 115 | } 116 | }, 117 | "metadata": {}, 118 | "execution_count": 5 119 | } 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "source": [ 125 | "from langchain_community.tools import DuckDuckGoSearchRun\n", 126 | "\n", 127 | "def search_duckduckgo(query: str):\n", 128 | " \"\"\"Searches DuckDuckGo using LangChain's DuckDuckGoSearchRun tool.\"\"\"\n", 129 | " search = DuckDuckGoSearchRun()\n", 130 | " return search.invoke(query)\n", 131 | "\n", 132 | "# Example usage\n", 133 | "result = search_duckduckgo(\"what are AI agent\")\n", 134 | "print(result)" 135 | ], 136 | "metadata": { 137 | "colab": { 138 | "base_uri": "https://localhost:8080/" 139 | }, 140 | "id": "-DAzhxeaIYoC", 141 | "outputId": "0b441ff0-454f-406c-f407-19a923f32a61" 142 | }, 143 | "execution_count": 6, 144 | "outputs": [ 145 | { 146 | "output_type": "stream", 147 | "name": "stdout", 148 | "text": [ 149 | "AI agents use feedback mechanisms, such as other AI agents and human-in-the-loop (HITL), to improve the accuracy of their responses. Let's return to our previous surfing example to highlight this. After the agent forms its response to the user, the agent stores the learned information along with the user's feedback to improve performance ... In this article. Azure AI Agent Service is a fully managed service designed to empower developers to securely build, deploy, and scale high-quality, and extensible AI agents without needing to manage the underlying compute and storage resources. What originally took hundreds of lines of code to support client side function calling can now be done in just a few lines of code with Azure AI Agent ... AI Agents in Healthcare and Finance . Healthcare: Babylon Health uses AI agents to provide initial symptom diagnoses, suggest possible conditions, and connect patients with medical professionals in real-time. This not only speeds up the process of seeking care but also helps reduce the workload on healthcare providers. AI agents are also being ... AI agents are also difficult to evaluate in a repeatable way that shows progress without employing artificial constraints. This is especially challenging as the core capabilities of the underlying LLMs continue to rapidly improve, which makes it difficult to know whether your approach has improved results or if it's simply the underlying ... Limitations of AI Agents. AI agents have undoubtedly transformed many industries, but they come with limitations that impact their reliability, adaptability, and ethical use. 
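The `search_duckduckgo` wrapper above, and the `multiply`/`add` functions defined next, can be handed to `bind_tools`/`ToolNode` as bare Python functions: LangChain infers each tool's JSON schema from the type hints and uses the docstring as the description the model sees. An equivalent, more explicit form (a sketch, not from this notebook) uses the `@tool` decorator:

```python
# Sketch: declaring a tool explicitly instead of passing a bare function.
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply a and b."""
    return a * b

print(multiply.name)  # 'multiply'
print(multiply.args)  # argument schema inferred from the type hints
```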
Here are some of the main limitations: Limited Understanding of Context Many AI agents lack the deep understanding of context that humans have.\n" 150 | ] 151 | } 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "source": [ 157 | "def multiply(a:int,b:int) -> int:\n", 158 | " \"\"\"\n", 159 | " Multiply a and b\n", 160 | " \"\"\"\n", 161 | " return a* b\n", 162 | "\n", 163 | "def add(a:int,b:int) -> int:\n", 164 | " \"\"\"\n", 165 | " Adds a and b\n", 166 | " \"\"\"\n", 167 | " return a + b" 168 | ], 169 | "metadata": { 170 | "id": "_RWRl15NIxhv" 171 | }, 172 | "execution_count": 7, 173 | "outputs": [] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "source": [ 178 | "from langchain_openai import ChatOpenAI\n", 179 | "llm = ChatOpenAI(temperature=0, api_key='sk-proj-************************QHrtvM7', model=\"gpt-4o-mini\")\n" 180 | ], 181 | "metadata": { 182 | "id": "ZXmQdfkEJIZe" 183 | }, 184 | "execution_count": 8, 185 | "outputs": [] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "source": [ 190 | "llm.invoke('hello').content" 191 | ], 192 | "metadata": { 193 | "colab": { 194 | "base_uri": "https://localhost:8080/", 195 | "height": 35 196 | }, 197 | "id": "qVgcPrKYJRJD", 198 | "outputId": "dd01d0ef-8d3c-4f0a-e5fe-e82dcdf80d53" 199 | }, 200 | "execution_count": 9, 201 | "outputs": [ 202 | { 203 | "output_type": "execute_result", 204 | "data": { 205 | "text/plain": [ 206 | "'Hello! How can I assist you today?'" 207 | ], 208 | "application/vnd.google.colaboratory.intrinsic+json": { 209 | "type": "string" 210 | } 211 | }, 212 | "metadata": {}, 213 | "execution_count": 9 214 | } 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "source": [ 220 | "tools = [search_duckduckgo, add, multiply]\n", 221 | "\n", 222 | "llm_with_tools = llm.bind_tools(tools)" 223 | ], 224 | "metadata": { 225 | "id": "_SiCnEY4JWzE" 226 | }, 227 | "execution_count": 10, 228 | "outputs": [] 229 | }, 230 | { 231 | "cell_type": "code", 232 | "source": [ 233 | "def chatbot(state: State):\n", 234 | " return {\"messages\": [llm_with_tools.invoke(state[\"messages\"])]}\n", 235 | "\n" 236 | ], 237 | "metadata": { 238 | "id": "eqF4x1I7JsYA" 239 | }, 240 | "execution_count": 11, 241 | "outputs": [] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "source": [ 246 | "from langgraph.prebuilt import ToolNode, tools_condition\n", 247 | "\n", 248 | "graph_builder = StateGraph(State)\n", 249 | "\n", 250 | "# Define nodes\n", 251 | "graph_builder.add_node(\"assistant\",chatbot)\n", 252 | "graph_builder.add_node(\"tools\",ToolNode(tools))\n", 253 | "\n", 254 | "#define edges\n", 255 | "graph_builder.add_edge(START,\"assistant\")\n", 256 | "graph_builder.add_conditional_edges(\"assistant\",tools_condition)\n", 257 | "graph_builder.add_edge(\"tools\",\"assistant\")\n", 258 | "\n", 259 | "react_graph=graph_builder.compile()" 260 | ], 261 | "metadata": { 262 | "id": "Pvn_gwVlKAll" 263 | }, 264 | "execution_count": 14, 265 | "outputs": [] 266 | }, 267 | { 268 | "cell_type": "code", 269 | "source": [ 270 | "# To see the graph’s connection visually\n", 271 | "\n", 272 | "display(Image(react_graph.get_graph().draw_mermaid_png()))" 273 | ], 274 | "metadata": { 275 | "colab": { 276 | "base_uri": "https://localhost:8080/", 277 | "height": 266 278 | }, 279 | "id": "HaRoh_yFKl1R", 280 | "outputId": "c5384d80-b98b-447d-9ca4-7c274b8d3849" 281 | }, 282 | "execution_count": 13, 283 | "outputs": [ 284 | { 285 | "output_type": "display_data", 286 | "data": { 287 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAANYAAAD5CAIAAADUe1yaAAAAAXNSR0IArs4c6QAAIABJREFUeJztnXdcU1fj/89NQnYChD1kiQgIjooTXFXqI44fUKt11Grr86271tX66GPt0Nplfdo+1rb6WBXrnlgVrKsuXBUVEESmjEBISEJCxk1yf3/EF6UYhpp7zw0571f/sMnNOZ/Am3PvPfcMjCAIgEDAgwE7AMLZQQoiIIMUREAGKYiADFIQARmkIAIyLNgBnge1AlfL8Ua1WdtgMhkdo1uJ5YIxWRhfxOSLWR5+bC6fCTsRXcAc4xcIAABAVqkvuqstydUKxCyzieCLmQIRi81jAEf4BiwOpqk3NTaYG9UmrcoscGWGxgi69RYK3V1gR4OMYyiokuNXj9cxXTB3b3ZoD4FnAAd2ohelskhXkqNVSA1uXuzB4z1YLs57ReQACl4/JS+41TB4gmd4LyHsLPbn7h/Kq+nyISmeMYNdYWeBA90VPPifiph4cWScGHYQcrmRoWhQ4COn+MAOAgH6KkgQxE8riye84+8XyoOdhQryrqtLc7VJb/nBDkI19FXwhxWPZqwOEYgd8p79+ci/qc65qp74biDsIJRCUwUPbqqIT/bwC3GK9q8596+o5FWG4a95ww5CHXS8Ecs6KY8dInZC/wAAsfGufBHzwQ017CDUQTsF62uNj7I13ft28vuPNnhppPuFAzLYKaiDdgpeTZcPHu8BOwVMWC6MvqPcr5+Sww5CEfRSUFqq5/AYYbGdsP/vmeg/WiIt1eNGC+wgVEAvBYvuaSS+bMqqy8nJMRgMsD7eNlwBsyRHS1LhtIJeCpbkakN7CKipKz09febMmTqdDsrH2yU0RoAUpJr6WqNYwnL3oagVfO4GzNqNRV77ZyUsVqCS46RWQRNopKCqDscwjIySy8rK5syZk5CQkJSUtH79eovFkp6evmHDBgDAqFGj4uLi0tPTAQDZ2dkLFixISEhISEh45513Hjx4YP24UqmMi4vbtWvX6tWrExIS/vnPf9r8uH1huTA0SpNWZbJ7yXSDRs8eGtVmvpiUUXSffPJJaWnp0qVLtVrtrVu3GAxGfHz89OnT09LSNm3aJBQKg4KCAABVVVUGg2H27NkMBuPAgQOLFi1KT0/ncrnWQrZt2/baa69t2bKFyWT6+Pg8/XG7IxCztGqTwJVGvyMyoNHX06pNJD2Oq6qqioyMTElJAQBMnz4dACCRSAIDAwEAMTExbm5u1sPGjBmTlJRk/Xd0dPScOXOys7MHDhxofSU2Nnb+/PlNZT79cbsjcGVqVWbQhaTi6QKNFASAYHFIOREnJSX98ssvX3zxxezZsyUSSWuHYRh2/vz5tLS0kpISPp8PAJDL/+qc69+/PxnZ2oDDZRIWOj4+tS80uhbkCVgNClIufebPn79kyZLMzMwJEybs37+/tcO2bt26fPny6OjojRs3Ll68GABgsfzVM8fjUf3AUFln5DvBKA0aKcgXMxvVZjJKxjBs6tSpx44dGzZs2BdffJGdnd30VtMoDYPBsH379uTk5KVLl/bu3Ts2NrYjJZM6yIO8i2NaQSMFRRIXF3JOxNYOFIFAMGfOHABAfn5+U6smkz15GqvT6QwGQ1RUlPV/lUpli1awBS0+TgYiCUvk1vlbQRp9Q68ATuUjnUZpEtr75/7+++8LhcKBAwdevnwZAGD1rFevXkwm86uvvpowYYLBYHj11VfDw8P37t3r4eGh0Wh++uknBoPx6NGj1sp8+uP2zVyap3VhMzAGKX+TtIK5du1a2Bn+QinDcb3FO4hr32IrKiouX758+vRpnU63cOHC4cOHAwDEYrGPj8+ZM2cuXbqkVqvHjRv30ksvXblyZf/+/WVlZQsXLgwODj506NC0adNwHN+5c2dCQkJ0dHRTmU9/3L6Z75xXBoTzvLvY+UdBQ+g1ZLU8X1ucox0+0YkGbLZG+k9VIyZ5Cd06/xRPGp2IAQBBkYLrpxTSMr1vsO2/fqVSmZycbPOtwMDAioqKp18fNmzYRx99ZO+kLZk9e7bNs3ZUVFTTU5bm9O3b9+uvv26ttJyrKqEbyxn8o10rCACofKS7flqeusD2/Amz2VxTU2PzLQyz/V14PJ67u7u9Y7ZEJpPhuI1Huq2l4nA4Hh6tDov8aWXxm2uCObzOfztMRwUBAOf313brIwzsxocdBA73r6iMekvfkaT/2dAEGnXKNDFikvfpHVKdhpQ+QppTXtBYfE/jPP7RVEEAwJQVQb9+Xg47BdU01ONn0mr+39wA2EEohY4nYisGnXn3hvJpHwQ5ySVRTZk+M61m2soghhP0BTaHvgpaW4U9Xzye8I6fb2ef0FlwW333D9Wk9zr7qBhb0FpBK2f31Oi05vjxnpQNqKaSisLGK+nywHBe/ARP2Fng4AAKAgBKcrRX0uvCYgU+QdzQGEEnOFXpteaSXG11iV5Vh8eP97D7AyEHwjEUtFJ4p6HwjqYkRxs1QMxiYwIxS+DK5HCZDvEFmExMqzY1qk0alUmtMNWU6UN7CCL6ioK6O2nfUxOOpGATpQ+0qlpcqzZpVWaTyWKxa+8NjuN5eXm9evWyZ6EA8IRMwkLwxSyhK8vDj+3ftZNf3XYch1SQVORy+ZQpUzIzM2EHcRZo2i+IcB6QggjIIAVbgmFYREQE7BROBFKwJQRBPHz4EHYKJwIp2BIMw1xdnXTxeyggBVtCEIRKpYKdwolACtrA19cXdgQnAiloA6lUCjuCE4EUbAmGYc1nyiHIBinYEoIg8vLyYKdwIpCCCMggBVuCYVgbq28h7A5SsCUEQSgUCtgpnAikoA08PZ10ADMUkII2qKurgx3BiUAKIiCDFGwJhmFdu3aFncKJQAq2hCCIoqIi2CmcCKQgAjJIQRs0LfeLoACkoA1srgiIIAmkIAIySMGWoJEyFIMUbAkaKUMxSEEEZJCCLUGTOCkGKdgSNImTYpCCCMggBVuC5hFTDFKwJWgeMcUgBVuCRspQDFKwJWikDMUgBRGQQQrawMfHB3YEJwIpaIPWdlpEkAFS0AZovCCVIAVtgMYLUglSsCVosBbFIAVbggZrUQxS0AaBgbb3hEeQAdr65glvv/22VCplMpkWi6W+vl4ikWAYZjKZTp48CTtaJwe1gk+YNGlSQ0NDVVWVVCo1GAzV1dVVVVUY5vD7LdIfpOATRo8eHRYW1vwVgiD69u0LL5GzgBT8iylTpvD5f+2L6evrO3XqVKiJnAKk4F+MHj06ODjY+m9rExgZGQk7VOcHKfg3ZsyYIRAIrE3glClTYMdxCpCCfyMxMTE4OJggiD59+qDHdNTAgh3ABhYLoZTh6jrcAqO/KPmVd0Dj0X8MfbM4R0t97UwmcPdmiz1cqK8aFrTrF8y/pc69qm7UmP3D+FqVCXYcqhG6s8rzte5e7H6j3f3DnGLndnop+OC6uvCudthrvgyGU3fI6XXmzB2ViVO9vbtwYWchHRpdCxZmawr+1IyY7Ofk/gEAuDzmhDlBp36RKmVG2FlIh0YK3rukjE9Gw5X/YtB471uZ9bBTkA
5dFNRpzYpqI5fPhB2ERrh6sssLGmGnIB26KNigwH2CnOLqu+PwRSwun2kyWmAHIRe6KAgApm1wuvvfdlHJ8U4/VII+CiKcFKQgAjJIQQRkkIIIyCAFEZBBCiIggxREQAYpiIAMUhABGaQgAjJIQQRknFrBk6eOJaeOqqmRtnaA2Wy+fz/7xSuSSqurpVUvXk6nxKkVZLM5AoGQwWj1h/Dl159s3LT+BWuprKqYOn1CQQFaKsk2dJy+RBmjRv5j1Mh/tHGA0WB48VrMJhOtZkfQDQdW8P797F1pW+/nZAMAIrv3mDNncfeIKACAXq/f9O2Gq1f/AAD07Nlnwbxlvr5+WVmXf9r6XVVVha+v/4TxE1NTJm/4Ym1GxgkAwJmMLBaLZfOA8xfOAABGjIwDAPy6+7ifr/+p08ePHt1fXPKIx+P37zdowfxlbm7uAICDh349dz7ztYnTtm37r1xR161b5LIlq4OCQqqlVW/OmggA+OjjDz4CYPTocR+sWAv7J0cvHFhBqbTKYDS8MX02g8E4duzABysX7dmdzuVyf92zPSPjxKyZczw8PDMyT/B4vMbGxrUfvx8SHLZ0yeqSkkdyuQwAkJryusViOXPmJADA5gHTp74lq62prq5c+cHHAAAPiScAIC/vflBQSGJiUn294vCRvdpG7WfrNlnzPHiQs3//rqVLV5tMpo0b1332+Yc//HeHh8Rz1b8+Xbd+9ayZc/r0jnN3l8D+sdEOB1Zw1KgxiYlJ1n937x69ZOmc+znZ/eIGVkureDze1CkzWSzW2KRk69WYwWAYMuTlxFFjmj4e0S0yJPjJOkb1SsXTBwQGBrm6uinq5bGxvZteXPLev5rGkLJYrLTd/zMYDBwOx/rKuk+/kUg8AACpqa9v/uEblVrlKnaN6BYJAAgKCmleDqIJB1YQw7BLl8/vP5BWVlZiXY6oXiEHAIwaOebs2dPvf7Bw/rylYWHhAAB/v4AePXqm7d7G5fLGj0tls9ktimr3gCZwHD98ZO+Z30/W1ko5HK7FYlEq6318fK3vcrlP5h74+PgBAOR1Mlcx2s6uHRz4jnjnrq1rPlzePSJ63Scb57yzGABgISwAgAH9B3+2/j+Kevnb/3z9q68/NZlMGIZtWP/t6FfGbflx04yZqXfv/tmiqHYPsEIQxL9WLd796//G/GPC5xu+TxyV1FRpC1xYLgAAs8VMzlfvVDiqgjiO/7pn+9ik5AXzl8bG9o6Oim3+7oD+g7f9vHfe3Pd+O3l0z94dAAChULj43Q92/HJIIBCu/veSxsaWM9NaO6D5zezdu3/e/vPGu4s+mPjq1OiomLDQcEq+ayfHURU0Go0GgyEi4snKQyq1EgBgsVisbwEAGAzGaxOneXp6FRbmAwAMBoP1hJua8rpGq5E+1VFs8wAul6dQyK3FNtVivbZrUWkbcDhc60mZhB9DZ8BRrwUFAkFYWPjhI3slEg+tRrNj508MBqO4+BEA4PCRvVeuXkwclSSXy+rqZN27R+M4/uasV4cPSwwN6Xrs2AGhQOjv/7cFzVs7oFfPl06dPr7xm/WxMb1FInF0VCybzf556/djx6YUFxf+umc7AKCk+FGAf1vLo3t7+/j7Bew/mMbl8dRq1eRJb7TRGe6EOPDP4t+r1vO4vI8/WbnvwK65c997Y/rbGRnpOI77+wfiRuMPW7757eTR1NTXJ096Q6fX9end7/ezpzZ9u4Hl4rJ+3SYu929rtbR2QGJiUkrypAsXz/y09bvcvHteXt6rV60rfJS/9qMVt29f3/j1jwMHJhw+srftnBiGrV69ns8XfP/fr05npFsbaUQTdFnWqPax4eze2nH/1wV2EHqR9mnR/60PY7p05qnEDtwKIjoHSEEEZJCCCMggBRGQQQoiIIMUREAGKYiADFIQARmkIAIySEEEZJCCCMggBRGQQQoiIEMXBRlMTCxx1MGL5OEVyGEwO/MwGRop6OnPLsnV0mTkGE1QSA24wYLR5VdEFjT6fpH9RNUlOtgpaERNua5bHyHsFKRDIwVHTPK+fLhGp0Ub4AAAQGluQ2lOQ1xi55/6TpdR01YMOvOudeW9R0iEbi5u3mxAo2gUQQCgqNY3yPHyfM1r7wV2+q2XaKeglVu/KyoKdQSBqVrZCtVsNuM43mL+h70gCEKv1/N4FG2Ip9PpOBxO04QmzwAOACA4kheb4EZNAPgQDsjChQvJK3zTpk0JCQnHjx8nr4rm1NbWrlmzhpq66AkdW8E2OHfu3Msvv0xe+dXV1QsXLiwtLY2Kitq1axd5FT3Nzp07R44cGRAQQGWldIBGtyPtMnnyZLJ/QwcOHCgtLQUAlJeXnzhxgtS6WpCUlDR37lyDPVY0dCwcoxWUSqWurq6VlZXh4SSuoVFZWblo0aKysjLr/1LfEFovDe/duxcdHS0SiSiuGhYO0AoeOHAgKyuLx+OR6h8A4MiRI03+AQDKysqOHTtGao1Pw+PxunXrNn78eI1GQ3HVsHAABcvKypKTk8mupaqq6vz5881f0Wq1u3fvJrvep5FIJBcuXNDr9VJpq+uwdyZoreDVq1cBAMuWLaOgrr1791qbwKZlijAMe/z4MQVV28TT01MoFMbHxzdvmDsnsG/JbWM0GgcPHlxfX0991TKZ7JVXXqG+XpvodLrt27fDTkEudGwFlUplWVnZ2bNn3dwgdM+azebIyEjq67UJl8udOXMmAGDVqlVmc+dcMJN2Ch4/fry0tDQ8PJykhx/tguO4tV+GVsyaNWvx4sWwU5ACvRSUyWR37tzp3RvmsuA6nc7HxwdiAJuEh4d/9913AIALFy7AzmJnaKRgaWkphmEffvgh3BhyudzFxQVuhjbAcXzFihWwU9gTuii4Zs0aHo/n6ekJOwior68PCgqCnaJVEhMTx44dCwAwmTrJqDZaKFhRUTFgwACanP5KSkro8JfQBsOGDQMA7Nu37+HDh7Cz2AH4Cup0OqFQaP3LpgMGg6Fr166wU7TPtGnTPvzww05wmwxZweXLl1+7dg1K50trnDt3LiIiAnaKDrFnzx6TyVRQUAA7yAsBU8Hbt28vWrSI1MFXz4pSqRSLxf7+/rCDdBQOh6NQKHbu3Ak7yPMDTUGFQtGtW7cuXei1vnlWVlZISAjsFM/GoEGD6uvrYad4fuAoePDgwR9//FEsFkOpvQ3++OOPoUOHwk7xzLz77rvWvYBgB3keICgolUrd3NxWrlxJfdXtolKpHFFBAACbzd68eXNaWhrsIM+MYwxZpYaMjIyLFy+uX78edpDn5/r1656eng5xR98E1a3gggULcnJyKK60gxw5ciQlJQV2ihdiwIABwcHB7W6LRysoVfDixYvjx4+PiYmhstIOUlJSwmKx+vXrBzvIi8JisRITE5VKJewgHQWdiJ+wbNmysWPHjhgxAnYQO6BSqU6cODFt2jTYQToEda3gvn37aHsKzs/Pr66u7hz+AQBcXV0dxT/qFCwtLd2/fz89T8EAgG+++Yaa6QFUsnz58rt378JO0T4UKYhh2NatW6mp61k5evRoYGBgnz59YAexM8uXL
//2229hp2gfZ78WNJlMo0ePPnv2LOwgzgsVreC5c+c+/vhjCip6DpYsWULbbHYhMzMTdoR2oELBrKysQYMGUVDRs7Jr166wsLD4+HjYQUjk4cOH27dvh52iLZz3RFxYWPjdd985xNXSi2AymdLT0+nc5U6Fgkajkc1mk13Ls9K/f/9r164xmUzYQZwd0k/Eubm5s2fPJruWZ2X69Ok7duxwEv9ycnI2b94MO0WrkK6gRqMhezmiZ+X777+fNm1aVFQU7CAUERMTs3v3br1eDzuIbZzuWnDr1q04js+dOxd2EEqpqKgQCATu7u6wg9iA9FbQZDIZjbaXjKae48ePV1ZWOpt/AIDAwEB6+keFgufOnYM+O93KzZs3c3NzaRKGYmpra+fNmwc7hW1I33PLw8ODDsPX7t27t3nzZpr3kJGHt7d3QUGBUqmk1WRFK05xLVhUVLRy5cr9+/fDDgITi8WCYRgNNzLp/P2CFRUVixYtOnz4MKwAiLah4gFdSkoKrDVrCwsL582bh/yz3or98MMPsFPYgIr9V4cPH/7mm2+azWa1Wu3t7U3ZZgr5+fl79+49fvw4NdXRHJFIVFRUBDuFDUhUcOjQoY2Njda1hK2XIARBREdHk1djc4qKilatWnXo0CFqqqM/Q4YM6dWrF+wUNiDxRPzyyy9bt1ZrugTmcDgDBgwgr8YmcnJyfv75Z+Rfc1gslkRCx309SVRw7dq10dHRzW93vLy8KPhDzM7O/vLLLzds2EB2RY6FTCYbN24c7BQ2IPd25PPPP29aooUgCD6fT/bz4kuXLp04cWLHjh2k1uKIsNls63UR3SBXQR8fn/fee8+6YiSGYWQ3gRkZGYcOHVq9ejWptTgoYrGYntN3SO+USUhISE1NFQgEQqGQ1AvBo0ePXrx4cdOmTeRV4dBgGBYWFgY7hQ06dEdswi06zfM/ZJvy2ltlRbVFRUVhQT0a6klZIfn8+fO594sdejkYssFxfOLEidTvqtcu7TwdeXBDfe+SSiE18oQvNLqzqV+GJIxGo3eAsKqoMaynsF+iu4c/h7y6HIvly5efPXu2qVPM2hwSBPHnn3/CjvaEtlrBG5mKuip8SKqvSELfTRCaYzETSpnx5C/SUVN9/ELg7JxDN+bOnZuXl1dTU9O8d4xWy3i2ei14/bRCJTMNSfFxFP8AAAwmJvHlJM8PPruntqacpoOEKSYsLKxv377Nz3UYhtFqDUXbCtbXGusqDQPHeVOexz68PMXvVqYDr31rX2bMmNF8Q43AwMDXX38daqK/YVvBukoDQdBuVE/HEbm7PC5sNBrgj1OkA+Hh4f3797f+myCIIUOG0GSLFyu2FdSozF5dHPtaKjhaoKh2yLWXyeCNN97w9vYGAAQEBNBt0S3bCuIGC6537CZELTcB4MANuX3p2rXrgAEDCIIYNmwYrZpAigZrIZ4Vi4Uoz2/U1Ju0apMJJ3RaO2yx1Mt/ur5Pt+6S+N/31Lx4aVwek81j8MVMsbtLUCT/RYpCCtKLBzfUBbc1FYWN/hFik5FgujAZLiyA2aNTgsHtP2gsbgG4PR4UN2gIM24ym3AXF8PxH6uCowURfYTd40TPURRSkC7kXVdfPlbnFSRiCUQxifQ6V7aNe7CkobYx97b+Srp8SLJHtz7PJiJSED46jfnk9hrczAgbEMhiO94aIxiGiX0EAAiEXuJb5xQPbmrGvu3LZHb0Qhz+TpxOTnmBdue6MmGAxLe7lyP61xw2j+UX7c12d9uyoqj2cUcfDSAFYVLzWH/xsKL70GAOz2EeQbULV8juMSr05PYatbxDq2ggBaFRkqvJTJN16e0wu34+EyH9Ag9vlkrL2m8LkYJw0ChNZ/d0Wv+shMQFHP6u0oS308GMFITD6Z01If0DYKcgna4D/X/7XzvdkEhBCNw6U28GbJaLY998dASOgK3VYrnXVG0cgxSEQNZJuXc4TZdaszveYZIr6Yo2DrCngnkPcl5wV+YLF38fMTKuvLzUfqFox+3fFQHREhouLwQA+PiLcQeP2XnyK4vD9AgS5VxttSG0m4KnM9LnL5ip1+vsVWBn5cFNDdfVsUchPSscITf/lqa1d+2moIPuSk8xagWu11p4Iuea2iL04Mke6/FWhm/a5wHd6Yz0Tf/ZAABITh0FAHh/xYf/GD0eAJCZ+dvuPdurqio8PDzHJqVMmzrLusSHyWTa/suWjMwTKpUyODh05pvvJMQPf7rYrKzLP239rqqqwtfXf8L4iakpk+2SFiKPCxrdA4UkFf6o+PbJM5urpA9FQkl4aNyYxLlikScAYPW6ka+Ofz/nwYW8gis8rnBgv5RXRjzZA8FsNv9+YVvWraNGo65rWF8cJ2u2g2eIqOxBY3hvG9/dPq3ggP7xk16bDgD4bN2mbzdtHdA/HgCQkXHis88/7NYt8t+r1w8flvi/7T/s/vXJIqdfff3pvv27xo1NWfWvT319/f+9Ztm9e3dalNnY2Lj24/fZLuylS1YPHjRULpfZJSpc6qpxgiDlFrCw6ObPOxf5eIdOSl41dPDU4tI7W7bPNxqfKLX38Ef+vhHz3t7yUq8xmed+ziu4Yn39yIkvz1zYFhkxOGXcMrYLV6dvICMbAMBsxuplth+W2KcVdHeX+PsHAgCiomJcXd2sA8S3/u+/sbG9V//rUwDA0CEvNzSo9+7b8WrqlLq62ozMEzPemD3zzXcAAMOGjpw+I+WXHT9u/HpL8zLrlQqDwTBkyMuJo8bYJSQd0KpMLA6PjJKP/vb1wLiUlHFPtrSNCB/w5beTCx5lxUYPBwD0f2nCyGEzAQD+vhE3bh97+Cgrunt8RVV+1q0jI4fNGjNqDgAgrs/YohKyZna6cFiaVqaQkzVSpqKivK5ONnnSG02v9Os36OSpYxWV5QUFeQCAhIQn+09jGNYvbuCZ30+2KMHfL6BHj55pu7dxubzx41JpuH/Tc6DTmDnu9u8OVNRX18hK6hSPs24dbf66UvWkW5jNfuI9k8l0FXur1DIAwP28CwCAoYOnNB2PYWR10rE4jEY1tQpqtBoAgJvbX6uJiURiAECdrFar1QAA3Ju9JRa7NjY2arXa5iVgGLZh/bdbt32/5cdNBw6mrXz/4169XiIpLWWQtKpyg0YOAEgcMbtn9N82lheJPJ8+mMFgWSxmAIBSKeVyhQK+KymZWkBglla+u52tb5qv6u3lAwBQqZRNb9XXK6wienp6AwDU6r86ihQKOYvF4nJbdlUIhcLF736w45dDAoFw9b+X0HNhqGdC4Mo0GewwCr8FPK4IAIDjBm+vkOb/8bht3foIBO56vQY3UbErjMlgErnbbu/spiCPywMA1NU9uWnw8PD09fG7ceNK0wEXL/7O5XLDw7tHRcVgGJZ1/bL1daPRmHX9co8ePZlMJtuF3dxOa0ePv19AasrrGq1GKq2yV1pYiFxZJqP9FfTyDHJz9b35Z7rB+KRf1mw2mUx4258KDIgEANy5l2H3PE9jMppFbrYVZK5du/bpVyuLdGYT8A15hgtnLo9/7PiB0rJiDGB5D+537x4tEor3HUiT
yWpwHD98ZO/vZ09Nm/pWv7iBYpFYKq0+cnQfAFhdneyHH74pKS1avmyNn18Ay8XlyNF9+QW5QUEhnh5eM2am1tXJ5PK6I0f3GQ2Gt9+ax2J19Mqh8I46JIovbOVrw0KjwuVSE8/NznckGIa5u/nduH08L/8SAYiyx/ePnPjabDYGd4kFAJy7tDPQP7J7+JNlzbJuHuVyBX16vuLtGXov9+ztOyd1eo1GW3/t5pGikluB/lHRkQn2jQcA0Ku0odFciY+NC3q7KSgWib28fC5cOHPt2qWGBvXo0ePCwyPc3SXnzmeeOn1cWa+YOnXW9GlvWR9M9YsbpNU8IWSvAAADj0lEQVRqTp0+du5choAvWLZ0db9+gwAAIqHIz9f/zzs3GRgjKjq2oqL88pXzly6f8/Dw+mDF2oCAwI7noaeCfDHrxm91HsH2v/zy8QoJDIguLs2+nX2yvCLXzy+8b+8x1n7B1hRkMBhREQmyurJ7uWeLS7N9vcMU9VU+XqFkKFhyu2bUNB8Gw8ZjSdsra93IUBj1oNdwOi5N3EFObqsYlurpS7/FjX794rFbkAff1YkekDTUNZrUDSnzbQ+OpFcj4QxEDxQ+ytW1oeDDRzd27lv59Os8rqi1ruNxoxcOjEu2V8IHBVd2H1zz9OsEQQBA2Oy4mTPrv4H+ka0VaNAYevQXtPYuUpBqeg91v3aiyD1QzGTZvhcMCeq5ZN6up18nCNDa8Bo+z55n9q6hfW0GsFgsBEHY3EdcLPJqrTSjDldLNVH9Wl1ODikIgfjxHnm3Fb7dbXTaAQDYbK6EDXNAv30D1BXXD0n2aOMANGQVAj2HuPG4ZoOunU6TToC+weDmgbU9uR0pCIcxs3yLsyphpyAXi4UovlGVNMu37cOQgnBgcxjJc/1LbnRmC4uzKqasCGr3MKQgNPxCeakLfEtuVMAOYn/MJkvhlfKp7we6e7c/uAQpCBNXD/b42b45mSU6dedZGVtbry+8XD55SSBf2KGbXaQgZDwDOPM3drVo1JU5NQYtFSMGyEOnNjy+W+1i0cz5vKu4w6vko04Z+GAYNvZtv5Ic7R9HavluXBafI/biMx1nlrHJYFbLtGaDEdcahqd6dol4thUvkYJ0ITRGEBojKLqvKbyjfXRFIQnk4wYLk81icVg0XLGYIAizwWTGTS5sRr1UFxoj6BYvDIl+nmURkYL0omussGusEABQXaLTqsxalclosOjtsdCvfeHwGVw+my/mi9yZPkHtdLu0DVKQpviFkjLFhIbYVpDNxSz0a/yfCVcvF9ImQiDsie3fksjdRVbm2OsilNzTePh1hhlPnR7bCnp34dByzZOOopQZQ3rwWS6oGXQAWm0FA8K5fxySUp7HPpzdXTUwqa3RGQj60NZ+xLnXVIXZml7DPNx92K0NbqMVOo1JVYf/cVD66sIAtw48GkLQgXa2xC7J1WZfVEpL9EwW3U/MEj+OSmYMi+H3H+MhEKM7fYehHQWbMOjoviUdQQAu3wGaakQLOqogAkESqNlAQAYpiIAMUhABGaQgAjJIQQRkkIIIyPx/ohlWIXXfCHUAAAAASUVORK5CYII=\n", 288 | "text/plain": [ 289 | "" 290 | ] 291 | }, 292 | "metadata": {} 293 | } 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "source": [ 299 | "response = react_graph.invoke({\"messages\": [HumanMessage(content=\"what is the weather in delhi. Multiply it by 2 and add 5.\")]})\n", 300 | "print(response[\"messages\"])" 301 | ], 302 | "metadata": { 303 | "colab": { 304 | "base_uri": "https://localhost:8080/" 305 | }, 306 | "id": "uDxvi3DhKmdL", 307 | "outputId": "bee07501-229a-4a46-85f5-522393c7f80e" 308 | }, 309 | "execution_count": 15, 310 | "outputs": [ 311 | { 312 | "output_type": "stream", 313 | "name": "stdout", 314 | "text": [ 315 | "[HumanMessage(content='what is the weather in delhi. 
Multiply it by 2 and add 5.', additional_kwargs={}, response_metadata={}, id='93dac111-0fcc-43df-a776-63f2ed10bbc1'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_UE8iSp1kzj5jwRaSvvuJOPne', 'function': {'arguments': '{\"query\":\"current weather in Delhi\"}', 'name': 'search_duckduckgo'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 117, 'total_tokens': 138, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_72ed7ab54c', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-c35cf1e8-c380-44f8-bb90-fdd28f4bf9ec-0', tool_calls=[{'name': 'search_duckduckgo', 'args': {'query': 'current weather in Delhi'}, 'id': 'call_UE8iSp1kzj5jwRaSvvuJOPne', 'type': 'tool_call'}], usage_metadata={'input_tokens': 117, 'output_tokens': 21, 'total_tokens': 138, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}), ToolMessage(content=\"Get today's real-time weather updates in New Delhi with hourly temperatures and a weekly forecast. Find out about New Delhi's temperature trends, rain chances, air quality (AQI), and humidity ... New Delhi Weather Forecasts. Weather Underground provides local & long-range weather forecasts, weatherreports, maps & tropical weather conditions for the New Delhi area. Current weather in New delhi is 16°C. Get today's New delhi weather report along with accurate forecast including hourly, weekly and monthly reports at Oneindia. Weather In Delhi The minimum temperature in Delhi today is likely to hover around 16 degrees Celsius, while the maximum temperature might reach 28 degrees Celsius. The mercury level is expected to hover around 20 degrees Celsius throughout the day, with the wind speed around 3.23. Current New Delhi weather condition is Mist with real-time temperature (26°C), humidity 32%, wind 7.9km/h, pressure (1012mb), UV (0), visibility (4.5km) in Delhi. ... The monthly weather averages in New Delhi consist of 23 sunny days, 1 cloudy days, 4 rainy days, and 0 snowy days. New Delhi - Weather Conditions. India. Locations. Temp. 
Condition.\", name='search_duckduckgo', id='c44d480f-48d1-433a-b24b-a1e2dab06eb0', tool_call_id='call_UE8iSp1kzj5jwRaSvvuJOPne'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_YNhb0BUlz0tCKMPEIbkvsLR9', 'function': {'arguments': '{\"a\": 26, \"b\": 2}', 'name': 'multiply'}, 'type': 'function'}, {'id': 'call_X23UW288LFmdl5ZNHiu7ZhUR', 'function': {'arguments': '{\"a\": 52, \"b\": 5}', 'name': 'add'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 51, 'prompt_tokens': 397, 'total_tokens': 448, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_72ed7ab54c', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-b9f64c73-15d9-4f13-9971-a1eaac3da45a-0', tool_calls=[{'name': 'multiply', 'args': {'a': 26, 'b': 2}, 'id': 'call_YNhb0BUlz0tCKMPEIbkvsLR9', 'type': 'tool_call'}, {'name': 'add', 'args': {'a': 52, 'b': 5}, 'id': 'call_X23UW288LFmdl5ZNHiu7ZhUR', 'type': 'tool_call'}], usage_metadata={'input_tokens': 397, 'output_tokens': 51, 'total_tokens': 448, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}), ToolMessage(content='52', name='multiply', id='243384f9-7fbe-4056-b75f-e5eab4725f5f', tool_call_id='call_YNhb0BUlz0tCKMPEIbkvsLR9'), ToolMessage(content='57', name='add', id='d17c52ef-859e-455b-b8f0-1c7c91d25ea6', tool_call_id='call_X23UW288LFmdl5ZNHiu7ZhUR'), AIMessage(content='The current temperature in Delhi is 26°C. \\n\\n- When multiplied by 2, it equals 52.\\n- When you add 5 to that result, you get 57.\\n\\nSo, the final result is 57.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 49, 'prompt_tokens': 463, 'total_tokens': 512, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_72ed7ab54c', 'finish_reason': 'stop', 'logprobs': None}, id='run-4ce48b11-832d-4d7d-a4e9-6f6d564de14a-0', usage_metadata={'input_tokens': 463, 'output_tokens': 49, 'total_tokens': 512, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]\n" 316 | ] 317 | } 318 | ] 319 | } 320 | ] 321 | } -------------------------------------------------------------------------------- /multi_agents_langgraph.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "colab": { 8 | "base_uri": "https://localhost:8080/" 9 | }, 10 | "id": "uSd1beaS6prz", 11 | "outputId": "970c9863-e41e-41c9-c87c-8ef49027d5f1" 12 | }, 13 | "outputs": [], 14 | "source": [ 15 | "!pip install langgraph-supervisor langchain-openai" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 2, 21 | "metadata": { 22 | "id": "UK3now8z7Gjb" 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "from langchain_openai import ChatOpenAI\n", 27 | "\n", 28 | "from langgraph_supervisor import create_supervisor\n", 29 | "from langgraph.prebuilt import create_react_agent\n", 30 | "\n", 31 | "\n", 32 | "# Initialize OpenAI model\n", 33 | "model = 
ChatOpenAI(temperature=0, api_key=\"sk-pro*********************QHrtvM7\", model=\"gpt-4o-mini\")\n" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 2, 39 | "metadata": { 40 | "colab": { 41 | "base_uri": "https://localhost:8080/" 42 | }, 43 | "id": "xXDIM9fC72Ca", 44 | "outputId": "593e1207-520f-4b52-d2ce-2de34de267de" 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "!pip install -U duckduckgo-search" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 3, 54 | "metadata": { 55 | "colab": { 56 | "base_uri": "https://localhost:8080/" 57 | }, 58 | "id": "xYYADL3G8WLQ", 59 | "outputId": "b53dac69-2b38-4987-801a-ac3c860c0d03" 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "!pip install langchain langchain_community langgraph" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 7, 69 | "metadata": { 70 | "id": "optBAxTG7996" 71 | }, 72 | "outputs": [], 73 | "source": [ 74 | "from langchain_community.tools import DuckDuckGoSearchRun\n", 75 | "\n", 76 | "def search_duckduckgo(query: str):\n", 77 | " \"\"\"Searches DuckDuckGo using LangChain's DuckDuckGoSearchRun tool.\"\"\"\n", 78 | " search = DuckDuckGoSearchRun()\n", 79 | " return search.invoke(query)\n", 80 | "\n", 81 | "# Example usage\n", 82 | "# result = search_duckduckgo(\"what are AI agent\")\n", 83 | "# print(result)\n", 84 | "\n", 85 | "def add(a: float, b: float) -> float:\n", 86 | " \"\"\"Add two numbers.\"\"\"\n", 87 | " return a + b\n", 88 | "\n", 89 | "def multiply(a: float, b: float) -> float:\n", 90 | " \"\"\"Multiply two numbers.\"\"\"\n", 91 | " return a * b" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 8, 97 | "metadata": { 98 | "id": "ysJZqDKJ8xE0" 99 | }, 100 | "outputs": [], 101 | "source": [ 102 | "\n", 103 | "from langgraph_supervisor import create_supervisor\n", 104 | "from langgraph.prebuilt import create_react_agent\n", 105 | "\n", 106 | "math_agent = create_react_agent(\n", 107 | " model=model,\n", 108 | " tools=[add, multiply],\n", 109 | " name=\"math_expert\",\n", 110 | " prompt=\"You are a math expert. Always use one tool at a time.\"\n", 111 | ")\n" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 10, 117 | "metadata": { 118 | "id": "WA15T4mh9KWo" 119 | }, 120 | "outputs": [], 121 | "source": [ 122 | "research_agent = create_react_agent(\n", 123 | " model=model,\n", 124 | " tools=[search_duckduckgo],\n", 125 | " name=\"research_expert\",\n", 126 | " prompt=\"You are a world class researcher with access to web search. Do not do any math.\"\n", 127 | ")\n" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": 11, 133 | "metadata": { 134 | "id": "ggOCjGEe9hEO" 135 | }, 136 | "outputs": [], 137 | "source": [ 138 | "# Create supervisor workflow\n", 139 | "workflow = create_supervisor(\n", 140 | " [research_agent, math_agent],\n", 141 | " model=model,\n", 142 | " prompt=(\n", 143 | " \"You are a team supervisor managing a research expert and a math expert. \"\n", 144 | " \"For current events, use research_agent. 
\"\n", 145 | " \"For math problems, use math_agent.\"\n", 146 | " )\n", 147 | ")" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 12, 153 | "metadata": { 154 | "id": "vxHZG5-x90yk" 155 | }, 156 | "outputs": [], 157 | "source": [ 158 | "# Compile and run\n", 159 | "app = workflow.compile()\n", 160 | "\n", 161 | "result = app.invoke({\n", 162 | " \"messages\": [\n", 163 | " {\n", 164 | " \"role\": \"user\",\n", 165 | " \"content\": \"what is quantum computing?\"\n", 166 | " }\n", 167 | " ]\n", 168 | "})" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": 13, 174 | "metadata": { 175 | "colab": { 176 | "base_uri": "https://localhost:8080/" 177 | }, 178 | "id": "JmW5TiI4-Az5", 179 | "outputId": "46273eed-f458-4469-fe97-ceb3ce4b091b" 180 | }, 181 | "outputs": [ 182 | { 183 | "data": { 184 | "text/plain": [ 185 | "{'messages': [HumanMessage(content='what is quantum computing?', additional_kwargs={}, response_metadata={}, id='df4517a2-0964-47e2-924d-fc7aed61e5ee'),\n", 186 | " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_wansmCLuYn85BZ0UPabmpLmV', 'function': {'arguments': '{}', 'name': 'transfer_to_research_expert'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 93, 'total_tokens': 108, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_00428b782a', 'finish_reason': 'tool_calls', 'logprobs': None}, name='supervisor', id='run-082eb49c-ac8b-4719-9cdf-f257e7b54ef6-0', tool_calls=[{'name': 'transfer_to_research_expert', 'args': {}, 'id': 'call_wansmCLuYn85BZ0UPabmpLmV', 'type': 'tool_call'}], usage_metadata={'input_tokens': 93, 'output_tokens': 15, 'total_tokens': 108, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}),\n", 187 | " ToolMessage(content='Successfully transferred to research_expert', name='transfer_to_research_expert', id='4d77575d-0cde-43cc-b2e7-fb9b2a535a40', tool_call_id='call_wansmCLuYn85BZ0UPabmpLmV'),\n", 188 | " AIMessage(content='Quantum computing is a type of computing that utilizes the principles of quantum mechanics to process information. Unlike classical computers, which use bits as the smallest unit of data (where each bit can be either 0 or 1), quantum computers use qubits. A qubit can exist in multiple states simultaneously, thanks to phenomena such as superposition and entanglement.\\n\\nThis unique capability allows quantum computers to solve certain complex problems much faster than classical computers. Quantum computing is particularly promising for tasks that involve large datasets, optimization problems, and simulations of quantum systems. 
The field is still developing, with ongoing research into quantum algorithms, hardware, and applications across various industries.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 133, 'prompt_tokens': 339, 'total_tokens': 472, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_13eed4fce1', 'finish_reason': 'stop', 'logprobs': None}, name='research_expert', id='run-03bc2b26-4ad4-4663-bd54-c037365633af-0', usage_metadata={'input_tokens': 339, 'output_tokens': 133, 'total_tokens': 472, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}),\n", 189 | " AIMessage(content='Transferring back to supervisor', additional_kwargs={}, response_metadata={}, name='research_expert', id='53dd4288-9f04-4394-a76b-f17833035c8f', tool_calls=[{'name': 'transfer_back_to_supervisor', 'args': {}, 'id': 'f7ba8fd4-ff8e-4196-b561-19b32627eea4', 'type': 'tool_call'}]),\n", 190 | " ToolMessage(content='Successfully transferred back to supervisor', name='transfer_back_to_supervisor', id='4dc35b14-162c-4c52-b0ed-dca7eb03b860', tool_call_id='f7ba8fd4-ff8e-4196-b561-19b32627eea4'),\n", 191 | " AIMessage(content='I have provided an overview of quantum computing. If you have any further questions or need more details, feel free to ask!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 314, 'total_tokens': 341, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_13eed4fce1', 'finish_reason': 'stop', 'logprobs': None}, name='supervisor', id='run-37a6d93c-9705-4781-b8b8-e10971488edb-0', usage_metadata={'input_tokens': 314, 'output_tokens': 27, 'total_tokens': 341, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]}" 192 | ] 193 | }, 194 | "execution_count": 13, 195 | "metadata": {}, 196 | "output_type": "execute_result" 197 | } 198 | ], 199 | "source": [ 200 | "result" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 14, 206 | "metadata": { 207 | "colab": { 208 | "base_uri": "https://localhost:8080/" 209 | }, 210 | "id": "75iM3duu-Cvt", 211 | "outputId": "748bcfdc-1b79-4a0a-c443-3843e09adda0" 212 | }, 213 | "outputs": [ 214 | { 215 | "name": "stdout", 216 | "output_type": "stream", 217 | "text": [ 218 | "================================\u001b[1m Human Message \u001b[0m=================================\n", 219 | "\n", 220 | "what is quantum computing?\n", 221 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 222 | "Name: supervisor\n", 223 | "Tool Calls:\n", 224 | " transfer_to_research_expert (call_wansmCLuYn85BZ0UPabmpLmV)\n", 225 | " Call ID: call_wansmCLuYn85BZ0UPabmpLmV\n", 226 | " Args:\n", 227 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 228 | "Name: transfer_to_research_expert\n", 229 | "\n", 230 | "Successfully transferred to research_expert\n", 231 | "==================================\u001b[1m Ai Message 
\u001b[0m==================================\n", 232 | "Name: research_expert\n", 233 | "\n", 234 | "Quantum computing is a type of computing that utilizes the principles of quantum mechanics to process information. Unlike classical computers, which use bits as the smallest unit of data (where each bit can be either 0 or 1), quantum computers use qubits. A qubit can exist in multiple states simultaneously, thanks to phenomena such as superposition and entanglement.\n", 235 | "\n", 236 | "This unique capability allows quantum computers to solve certain complex problems much faster than classical computers. Quantum computing is particularly promising for tasks that involve large datasets, optimization problems, and simulations of quantum systems. The field is still developing, with ongoing research into quantum algorithms, hardware, and applications across various industries.\n", 237 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 238 | "Name: research_expert\n", 239 | "\n", 240 | "Transferring back to supervisor\n", 241 | "Tool Calls:\n", 242 | " transfer_back_to_supervisor (f7ba8fd4-ff8e-4196-b561-19b32627eea4)\n", 243 | " Call ID: f7ba8fd4-ff8e-4196-b561-19b32627eea4\n", 244 | " Args:\n", 245 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 246 | "Name: transfer_back_to_supervisor\n", 247 | "\n", 248 | "Successfully transferred back to supervisor\n", 249 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 250 | "Name: supervisor\n", 251 | "\n", 252 | "I have provided an overview of quantum computing. If you have any further questions or need more details, feel free to ask!\n" 253 | ] 254 | } 255 | ], 256 | "source": [ 257 | "for m in result[\"messages\"]:\n", 258 | " m.pretty_print()" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": 15, 264 | "metadata": { 265 | "colab": { 266 | "base_uri": "https://localhost:8080/" 267 | }, 268 | "id": "caLWpUN9-hrO", 269 | "outputId": "effff469-9802-4b28-f7d1-60421b414cce" 270 | }, 271 | "outputs": [ 272 | { 273 | "name": "stdout", 274 | "output_type": "stream", 275 | "text": [ 276 | "================================\u001b[1m Human Message \u001b[0m=================================\n", 277 | "\n", 278 | "what is the weather in delhi today. Multiply it by 2 and add 5\n", 279 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 280 | "Name: supervisor\n", 281 | "Tool Calls:\n", 282 | " transfer_to_research_expert (call_AxW3KuprH0hwdYXz0bnH3T52)\n", 283 | " Call ID: call_AxW3KuprH0hwdYXz0bnH3T52\n", 284 | " Args:\n", 285 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 286 | "Name: transfer_to_research_expert\n", 287 | "\n", 288 | "Successfully transferred to research_expert\n", 289 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 290 | "Name: research_expert\n", 291 | "\n", 292 | "Today in Delhi, the weather is warm and sunny with a current temperature of 13°C. The maximum temperature is expected to reach 26°C, while the minimum may drop to 10°C. 
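When reading these multi-agent traces, the `name` attribute on each message records which node produced it (`supervisor`, `research_expert`, `math_expert`); that is what labels the pretty-printed banners. A compact per-agent view of the same `result` (a sketch; `m.type` falls back to human/ai/tool for unnamed messages):

```python
# Sketch: one line per message, attributed to the agent that produced it.
for m in result["messages"]:
    who = getattr(m, "name", None) or m.type
    print(f"{who:>16}: {str(m.content)[:60]}")
```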
There is no chance of rain in the morning, and winds are blowing at 5 km/h.\n", 293 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 294 | "Name: research_expert\n", 295 | "\n", 296 | "Transferring back to supervisor\n", 297 | "Tool Calls:\n", 298 | " transfer_back_to_supervisor (df935c74-341d-41ab-826d-05fc0f69b588)\n", 299 | " Call ID: df935c74-341d-41ab-826d-05fc0f69b588\n", 300 | " Args:\n", 301 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 302 | "Name: transfer_back_to_supervisor\n", 303 | "\n", 304 | "Successfully transferred back to supervisor\n", 305 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 306 | "Name: supervisor\n", 307 | "Tool Calls:\n", 308 | " transfer_to_math_expert (call_BwHusv28pciiOrFlE6QouFvK)\n", 309 | " Call ID: call_BwHusv28pciiOrFlE6QouFvK\n", 310 | " Args:\n", 311 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 312 | "Name: transfer_to_math_expert\n", 313 | "\n", 314 | "Successfully transferred to math_expert\n", 315 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 316 | "Name: math_expert\n", 317 | "\n", 318 | "The results are as follows:\n", 319 | "\n", 320 | "- Multiplying the temperature (13°C) by 2 gives you 26.\n", 321 | "- Adding 5 to the temperature (13°C) gives you 18.\n", 322 | "\n", 323 | "So, the final results are 26 and 18.\n", 324 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 325 | "Name: math_expert\n", 326 | "\n", 327 | "Transferring back to supervisor\n", 328 | "Tool Calls:\n", 329 | " transfer_back_to_supervisor (ee1461a5-7156-49d8-9805-3c6d3f60a7e8)\n", 330 | " Call ID: ee1461a5-7156-49d8-9805-3c6d3f60a7e8\n", 331 | " Args:\n", 332 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 333 | "Name: transfer_back_to_supervisor\n", 334 | "\n", 335 | "Successfully transferred back to supervisor\n", 336 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 337 | "Name: supervisor\n", 338 | "\n", 339 | "The current temperature in Delhi is 13°C. When you multiply it by 2, you get 26, and when you add 5, the result is 18.\n" 340 | ] 341 | } 342 | ], 343 | "source": [ 344 | "# Compile and run\n", 345 | "app = workflow.compile()\n", 346 | "result = app.invoke({\n", 347 | " \"messages\": [\n", 348 | " {\n", 349 | " \"role\": \"user\",\n", 350 | " \"content\": \"what is the weather in delhi today. 
Multiply it by 2 and add 5\"\n", 351 | " }\n", 352 | " ]\n", 353 | "})\n", 354 | "\n", 355 | "for m in result[\"messages\"]:\n", 356 | " m.pretty_print()" 357 | ] 358 | } 359 | ], 360 | "metadata": { 361 | "accelerator": "GPU", 362 | "colab": { 363 | "gpuType": "T4", 364 | "provenance": [] 365 | }, 366 | "kernelspec": { 367 | "display_name": "Python 3 (ipykernel)", 368 | "language": "python", 369 | "name": "python3" 370 | }, 371 | "language_info": { 372 | "codemirror_mode": { 373 | "name": "ipython", 374 | "version": 3 375 | }, 376 | "file_extension": ".py", 377 | "mimetype": "text/x-python", 378 | "name": "python", 379 | "nbconvert_exporter": "python", 380 | "pygments_lexer": "ipython3", 381 | "version": "3.11.6" 382 | } 383 | }, 384 | "nbformat": 4, 385 | "nbformat_minor": 4 386 | } 387 | -------------------------------------------------------------------------------- /qwen_implementation/README.md: -------------------------------------------------------------------------------- 1 | ### Video Tutorial (Hindi): https://youtu.be/6zb2zU4eoYU 2 | 3 | ### Video Tutorial (English): https://youtu.be/iiMq8Mvm3tk 4 | 5 | ## Environment setup: 6 | 7 | Install packages 8 | 9 | pip install --upgrade langchain langchain-community langgraph 10 | 11 | 12 | #### Provides access to the Ollama models. 13 | pip install langchain-ollama 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /qwen_implementation/README.md.bak: -------------------------------------------------------------------------------- 1 | ### Video Tutorial (Hindi): https://youtu.be/VL9PFXqpf9Q 2 | 3 | ## Environment setup: 4 | 5 | Install packages 6 | 7 | pip install --upgrade langchain langchain-community langgraph 8 | 9 | 10 | #### Provides access to the Ollama models.
11 | pip install langchain-ollama 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /qwen_implementation/demo_huggingface.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "8772fc8a-65dc-4813-a440-f1ed0f65a563", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "data": { 11 | "text/plain": [ 12 | "'4.48.1'" 13 | ] 14 | }, 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "output_type": "execute_result" 18 | } 19 | ], 20 | "source": [ 21 | "import transformers\n", 22 | "transformers.__version__" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 2, 28 | "id": "440cb064-dc02-4742-a4a7-e298609061c3", 29 | "metadata": {}, 30 | "outputs": [ 31 | { 32 | "data": { 33 | "text/plain": [ 34 | "'2.5.1+cu121'" 35 | ] 36 | }, 37 | "execution_count": 2, 38 | "metadata": {}, 39 | "output_type": "execute_result" 40 | } 41 | ], 42 | "source": [ 43 | "import torch\n", 44 | "torch.__version__" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 3, 50 | "id": "e0ce015e-625d-4c88-92f6-dd330697a8fa", 51 | "metadata": {}, 52 | "outputs": [ 53 | { 54 | "data": { 55 | "text/plain": [ 56 | "'NVIDIA GeForce RTX 3090'" 57 | ] 58 | }, 59 | "execution_count": 3, 60 | "metadata": {}, 61 | "output_type": "execute_result" 62 | } 63 | ], 64 | "source": [ 65 | "torch.cuda.get_device_name(0)" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 4, 71 | "id": "4700f8e5-4095-4900-a7e5-1a618d2811bc", 72 | "metadata": {}, 73 | "outputs": [ 74 | { 75 | "data": { 76 | "application/json": { 77 | "ascii": false, 78 | "bar_format": null, 79 | "colour": null, 80 | "elapsed": 0.003001689910888672, 81 | "initial": 0, 82 | "n": 0, 83 | "ncols": null, 84 | "nrows": 29, 85 | "postfix": null, 86 | "prefix": "Loading checkpoint shards", 87 | "rate": null, 88 | "total": 4, 89 | "unit": "it", 90 | "unit_divisor": 1000, 91 | "unit_scale": false 92 | }, 93 | "application/vnd.jupyter.widget-view+json": { 94 | "model_id": "a6c2366a19834052925f1e9966077e38", 95 | "version_major": 2, 96 | "version_minor": 0 97 | }, 98 | "text/plain": [ 99 | "Loading checkpoint shards: 0%| | 0/4 [00:00=3.9 and <=3.12 VRAM Requirement for processing 1 million-token sequences: Qwen2.5-7B-Instruct-1M: At least 120GB VRAM (total across GPUs). Qwen2.5-14B-Instruct-1M: At least 320GB VRAM (total across GPUs). If your GPUs do not have sufficient VRAM, you can still use Qwen2.5-1M models for shorter tasks.\n" 308 | ] 309 | }, 310 | { 311 | "name": "stdout", 312 | "output_type": "stream", 313 | "text": [ 314 | "Assistant: system\n", 315 | "You are a helpful AI assistant.\n", 316 | "user\n", 317 | "Summarise this content: Introduction Two months after upgrading Qwen2.5-Turbo to support context length up to one million tokens, we are back with the open-source Qwen2.5-1M models and the corresponding inference framework support. Here’s what you can expect from this release: Opensource Models: We’re releasing two new checkpoints, Qwen2.5-7B-Instruct-1M and Qwen2.5-14B-Instruct-1M, marking the first time we’ve upgraded our opensource Qwen models to handle 1M-token contexts. Inference Framework: To help developers deploy the Qwen2.5-1M series models more efficiently, we’ve fully open-sourced our inference framework based on vLLM. 
With integration with sparse attention methods, our framework can process 1M-token inputs 3x to 7x faster. Technical Report: We’re also sharing the technical details behind the Qwen2.5-1M series, including design insights for training and inference frameworks, as well as ablation experiments. You can experience Qwen2.5-1M models online by visiting our demo on Huggingface and Modelscope. Additionally, we recently introduced Qwen Chat, an advanced AI assistant from the Qwen series. With Qwen Chat, you can engage in conversations, write code, perform searches, generate images and videos, and utilize various tools. Notably, Qwen Chat also features the Qwen2.5-Turbo model, which supports long-context processing with a context length of up to 1M tokens. Model Performance Let’s start by diving into the performance of the Qwen2.5-1M series models, covering both long-context and short text tasks. Long-Context Tasks First off, we evaluate the Qwen2.5-1M models on the Passkey Retrieval task with a context length of 1 million tokens. The results show that these models can accurately retrieve hidden information from documents containing up to 1M tokens, with only minor errors observed in the 7B model. For more complex long-context understanding tasks, we select RULER, LV-Eval, LongbenchChat used in this blog. From these results, we can draw a few key conclusions: Significantly Superior to the 128k Version: The Qwen2.5-1M series models significantly outperform their 128K counterparts in most long-context tasks, especially for sequences exceeding 64K in length. Notable Performance Advantage: The Qwen2.5-14B-Instruct-1M model not only beats Qwen2.5-Turbo but also consistently outperforms GPT-4o-mini across multiple datasets, offering a robust open-source alternative for long-context tasks. Short-Context Tasks Besides performance on long sequences, we’re equally interested in how these models handle short sequences. So, we compare the Qwen2.5-1M models and their 128K versions on widely used academic benchmarks, throwing in GPT-4o-mini for comparison. Here’s what we find: Both Qwen2.5-7B-Instruct-1M and Qwen2.5-14B-Instruct-1M maintain performance on short text tasks that is similar to their 128K versions, ensuring the fundamental capabilities haven’t been compromised by the addition of long-sequence processing abilities. Compared to GPT-4o-mini, both Qwen2.5-14B-Instruct-1M and Qwen2.5-Turbo achieve similar performance on short text tasks while supporting a context length that’s eight times longer. Key Techniques Here, we’ll briefly introduce the key techniques behind building Qwen2.5-1M. For more details, please check out our technical report. Long-Context Training Training with long sequences demands substantial computational resources, so we adopt a progressive approach to expand the context length for Qwen2.5-1M through multiple stages: We begin with an intermediate checkpoint of pre-trained Qwen2.5, which had a 4K token context length. In Pretraining, we gradually increase the context length from 4K to 256K tokens while using Adjusted Base Frequency, raising the RoPE base from 10,000 to 10,000,000. In Supervised Fine-tuning, we split this into two stages to preserve performance on shorter sequences: Stage 1: Fine-tuned only on short instructions (up to 32K tokens) using the same data and steps as the 128K versions of Qwen2.5. Stage 2: Mixed short (up to 32K) and long (up to 256K) instructions to enhance long-context task performance while maintaining short-task quality. 
In Reinforcement Learning, we train models on short texts up to 8K tokens, which sufficiently improves alignment with human preferences and generalizes well to long-context tasks. The final instruction-tuned models are capable of handling sequences up to 256K tokens. Length Extrapolation During training, we develop an instruction-tuned model with a context length of 256K tokens. To extend this to 1M tokens, we employ length extrapolation techniques. The degradation of LLMs based on RoPE in long-context tasks is mainly due to unseen, large relative positional distances between queries and keys in computing attention weight. We employ Dual Chunk Attention (DCA), which addresses this issue by remapping relative positions to smaller values, avoiding the large distances not seen during training. We evaluat the Qwen2.5-1M models and their 128K counterparts with and without the length extrapolation method. We can find: Even models trained on just 32K tokens, such as the Qwen2.5-7B-Instruct, achieve nearly perfect accuracy in passkey retrieval tasks with 1M-token contexts. This underscores the remarkable ability of DCA to extend supported context lengths, without any training required. Sparse Attention For long-context language models, inference speed is crucial for user experience. We introduce a sparse attention mechanism based on MInference to accelerate the prefill phase. Furthermore, we propose several improvements: Integrating with Chunked Prefill: Directly processing sequences of 1M tokens results in substantial memory overhead to store the activations in MLP layers, consuming 71GB of VRAM in Qwen2.5-7B. By integrating with chunk prefill with a chunk length of 32,768 tokens, activation VRAM usage is reduced by 96.7%, leading to a significant decrease in memory consumption. Integrating with Length Extrapolation: We integrate DCA with MInference in long-context processing, thereby enhancing inference efficiency and achieving greater accuracy. Sparsity Refinement on Long Sequences: MInference requires an offline search to determine the optimal sparsification configuration for each attention head. Due to the computational demand of full attention weights, this search is typically conducted on short sequences, which may not generalize well to longer sequences. We developed a method to refine the sparsification configuration specifically for sequences up to 1M tokens, which significantly reduces the accuracy loss brought by sparse attention. More Optimizations: We introduce additional optimizations, such as enhanced kernel efficiency and dynamic chunked pipeline parallelism, to fully unlock the potential of the entire framework. With these enhancements, our inference framework results in a 3.2x to 6.7x acceleration in the prefill speed across different model sizes and GPU devices for sequences of 1M token length. Deploy Qwen2.5-1M Models Locally Here we provide step-by-step instructions for deploying the Qwen2.5-1M models on your local devices. 1. System Preparation To achieve the best performance, we recommend using GPUs with Ampere or Hopper architecture, which support optimized kernels. Ensure your system meets the following requirements: CUDA Version: 12.1 or 12.3 Python Version: >=3.9 and <=3.12 VRAM Requirement for processing 1 million-token sequences: Qwen2.5-7B-Instruct-1M: At least 120GB VRAM (total across GPUs). Qwen2.5-14B-Instruct-1M: At least 320GB VRAM (total across GPUs). 
If your GPUs do not have sufficient VRAM, you can still use Qwen2.5-1M models for shorter tasks.\n", 318 | "assistant\n", 319 | "### Summary of Qwen2.5-1M Release\n", 320 | "\n", 321 | "**Introduction:**\n", 322 | "Two months after upgrading Qwen2.5-Turbo to support one million token contexts, the Qwen2.5-1M models and their inference framework are now open-sourced. This update includes:\n", 323 | "- Two new checkpoints: Qwen2.5-7B-Instruct-1M and Qwen2.5-14B-Instruct-1M.\n", 324 | "- An open-sourced inference framework based on vLLM, integrating sparse attention methods for faster processing.\n", 325 | "\n", 326 | "**Performance:**\n", 327 | "- **Long-Context Tasks:** The models excel at tasks requiring up to 1 million token contexts, like document retrieval, showing minimal errors in the 7B model. The Qwen2.5-14B-Instruct-1M outperforms both Qwen2.5-Turbo and GPT-4o-mini across multiple datasets.\n", 328 | "- **Short-Context Tasks:** The models maintain comparable performance to their 128K versions, ensuring no degradation in short text tasks despite added long-sequence capabilities.\n", 329 | "\n", 330 | "**Key Techniques:**\n", 331 | "- **Long-Context Training:** A progressive approach was used, starting from a 4K token context and gradually increasing it to 256K tokens, with techniques like Adjusted Base Frequency and Dual Chunk Attention (DCA).\n", 332 | "- **Length Extrapolation:** DCA was employed to extend the context length from 256K to 1M tokens, addressing the issue of large relative positional distances.\n", 333 | "- **Sparse Attention:** Improvements include chunked prefill, integration with length extrapolation, and refined sparsification configurations for long sequences, resulting in up to 6.7x faster prefill speeds.\n", 334 | "\n", 335 | "**Deployment:**\n", 336 | "For local deployment:\n", 337 | "- Use GPUs with Ampere or Hopper architecture.\n", 338 | "- Ensure the system meets specific VRAM requirements: at least 120GB for Qwen2.5-7B-Instruct-1M and 320GB for Qwen2.5-14B-Instruct-1M.\n", 339 | "\n", 340 | "**Additional Features:**\n", 341 | "Qwen Chat, an advanced AI assistant from the Qwen series, incorporates the Qwen2.5-Turbo model, supporting long-context processing with a context length of up to 1M tokens.\n", 342 | "\n", 343 | "This release provides significant advancements in handling long-context tasks while maintaining performance on short tasks, along with detailed technical insights shared in the technical report.\n" 344 | ] 345 | } 346 | ], 347 | "source": [ 348 | "# # Chat Loop\n", 349 | "while True:\n", 350 | " user_input = input(\"User: \") # Get user input\n", 351 | " if user_input.lower() in [\"exit\", \"quit\"]:\n", 352 | " print(\"Goodbye!\")\n", 353 | " break\n", 354 | "\n", 355 | " # Format the input for the model\n", 356 | " messages = [\n", 357 | " {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"},\n", 358 | " {\"role\": \"user\", \"content\": user_input}\n", 359 | " ]\n", 360 | "\n", 361 | " # Tokenize and process input\n", 362 | " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", 363 | " model_inputs = tokenizer([text], return_tensors=\"pt\").to(model.device)\n", 364 | "\n", 365 | " # Generate response\n", 366 | " generated_ids = model.generate(**model_inputs, max_new_tokens=512)\n", 367 | " response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]\n", 368 | "\n", 369 | " # Display response\n", 370 | " print(\"Assistant:\", response)" 371 | ] 372 | }, 373 | 
{ 374 | "cell_type": "code", 375 | "execution_count": null, 376 | "id": "a267fa62-e15d-4ea6-ad36-c56f845f0f9f", 377 | "metadata": {}, 378 | "outputs": [], 379 | "source": [] 380 | }, 381 | { 382 | "cell_type": "code", 383 | "execution_count": null, 384 | "id": "b32b88b1-dff3-4ec8-baad-c56886e0743a", 385 | "metadata": {}, 386 | "outputs": [], 387 | "source": [] 388 | } 389 | ], 390 | "metadata": { 391 | "kernelspec": { 392 | "display_name": "Python 3 (ipykernel)", 393 | "language": "python", 394 | "name": "python3" 395 | }, 396 | "language_info": { 397 | "codemirror_mode": { 398 | "name": "ipython", 399 | "version": 3 400 | }, 401 | "file_extension": ".py", 402 | "mimetype": "text/x-python", 403 | "name": "python", 404 | "nbconvert_exporter": "python", 405 | "pygments_lexer": "ipython3", 406 | "version": "3.10.11" 407 | } 408 | }, 409 | "nbformat": 4, 410 | "nbformat_minor": 5 411 | } 412 | -------------------------------------------------------------------------------- /qwen_implementation/demo_langgraph_ollama.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict 2 | from langgraph.graph import StateGraph, START, END 3 | from langchain_ollama.llms import OllamaLLM 4 | 5 | # Step 1: Define State 6 | class State(Dict): 7 | messages: List[Dict[str, str]] 8 | 9 | 10 | # Step 2: Initialize StateGraph 11 | graph_builder = StateGraph(State) 12 | 13 | llm = OllamaLLM(model="qwen2.5:7B") 14 | 15 | 16 | # Define chatbot function 17 | def chatbot(state: State): 18 | response = llm.invoke(state["messages"]) 19 | state["messages"].append({"role": "assistant", "content": response}) # Treat response as a string 20 | return {"messages": state["messages"]} 21 | 22 | 23 | 24 | # Add nodes and edges 25 | graph_builder.add_node("chatbot", chatbot) 26 | graph_builder.add_edge(START, "chatbot") 27 | graph_builder.add_edge("chatbot", END) 28 | 29 | # Compile the graph 30 | graph = graph_builder.compile() 31 | 32 | 33 | 34 | # Stream updates 35 | def stream_graph_updates(user_input: str): 36 | state = {"messages": [{"role": "user", "content": user_input}]} 37 | for event in graph.stream(state): 38 | for value in event.values(): 39 | print("Assistant:", value["messages"][-1]["content"]) 40 | 41 | 42 | 43 | # Run chatbot in a loop 44 | if __name__ == "__main__": 45 | while True: 46 | try: 47 | user_input = input("User: ") 48 | if user_input.lower() in ["quit", "exit", "q"]: 49 | print("Goodbye!") 50 | break 51 | 52 | stream_graph_updates(user_input) 53 | except Exception as e: 54 | print(f"An error occurred: {e}") 55 | break --------------------------------------------------------------------------------
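A note on the script above: `demo_langgraph_ollama.py` drives `qwen2.5:7B` through the completion-style `OllamaLLM` class, which returns a bare string, so the chatbot node has to wrap that string back into an assistant message itself. The same `langchain-ollama` package also ships the chat-style `ChatOllama` class, which accepts the role/content dictionaries directly and returns a message object. Below is a minimal sketch of that variant, not part of the original repo; it assumes a local Ollama server with the `qwen2.5:7B` model already pulled (`ollama pull qwen2.5:7b`), and the single-turn `graph.invoke` call at the bottom is illustrative only.

from typing import List, Dict

from langchain_ollama import ChatOllama
from langgraph.graph import StateGraph, START, END


# Same plain-dict state shape as demo_langgraph_ollama.py
class State(Dict):
    messages: List[Dict[str, str]]


# ChatOllama speaks Ollama's chat endpoint, so role/content dicts pass through as-is
llm = ChatOllama(model="qwen2.5:7B")


def chatbot(state: State):
    # invoke() returns an AIMessage here, not a plain string as with OllamaLLM
    response = llm.invoke(state["messages"])
    state["messages"].append({"role": "assistant", "content": response.content})
    return {"messages": state["messages"]}


graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("chatbot", END)
graph = graph_builder.compile()

if __name__ == "__main__":
    # Single illustrative turn; a loop like the one in the original script works the same way
    result = graph.invoke({"messages": [{"role": "user", "content": "Hello, who are you?"}]})
    print("Assistant:", result["messages"][-1]["content"])

The trade-off is small: `ChatOllama` returns an `AIMessage` whose `.content` field must be unwrapped, but in exchange the model sees a properly formatted chat history on multi-turn inputs instead of a stringified list of dicts.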