# tools.py
# NOTE(review): these imports require the `langchain-community` package,
# which is missing from requirements.txt — add it there.
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_community.tools import DuckDuckGoSearchRun
from langchain.tools import Tool
from datetime import datetime

# Web search tool: wraps DuckDuckGo so the agent can query the live web.
search = DuckDuckGoSearchRun()
search_tool = Tool(
    name="search",
    func=search.run,
    description="Search the web for information"
)

# Wikipedia tool: limited to 1 result and 100 characters of content to keep
# the agent's context small (raise doc_content_chars_max for richer answers).
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
wiki_tool = WikipediaQueryRun(api_wrapper=api_wrapper)


def save_to_txt(data: str, filename: str = "research_output.txt") -> str:
    """Save research output to a text file with a timestamp header.

    Args:
        data: The text content to persist.
        filename: Target file path (defaults to ``research_output.txt``).

    Returns:
        A confirmation message the agent can report back to the user.
    """
    # encoding="utf-8" so non-ASCII research text doesn't crash on platforms
    # whose locale default encoding is narrower (e.g. cp1252 on Windows).
    # NOTE: mode "w" overwrites any previous output; switch to "a" if a
    # running log of research results is desired.
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"Research Output\nTimestamp: {datetime.now()}\n\n{data}")
    return "File saved successfully"


save_tool = Tool(
    name="save_text_to_file",
    func=save_to_txt,
    description="Save structured research data to a text file"
)
# main.py
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import PydanticOutputParser
from langchain.agents import create_tool_calling_agent, AgentExecutor
from tools import search_tool, wiki_tool, save_tool

# Load environment variables (API keys) from the .env file.
load_dotenv()

# Set up the LLM (choose one based on your API key).
# Uncomment the one you want to use.
# llm = ChatOpenAI(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"))
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022", api_key=os.getenv("ANTHROPIC_API_KEY"))


class ResearchResponse(BaseModel):
    """Structured research result the agent is instructed to produce."""
    topic: str
    summary: str
    sources: list[str]
    tools_used: list[str]


# Parser that converts the LLM's JSON output into a ResearchResponse object.
parser = PydanticOutputParser(pydantic_object=ResearchResponse)

# Prompt template: system instructions (with the Pydantic format spec baked
# in via .partial), the user's query, and LangChain-managed placeholders.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a research assistant that will help generate a research paper. Answer the user query and use the necessary tools. Wrap the output in this format and provide no other text:\n{format_instructions}"),
    ("human", "{query}"),
    ("placeholder", "{chat_history}"),
    ("placeholder", "{agent_scratchpad}")
]).partial(format_instructions=parser.get_format_instructions())

# Tools the agent may call (defined in tools.py).
tools = [search_tool, wiki_tool, save_tool]

# Agent = LLM + prompt + tools; the executor runs it
# (verbose=True prints the agent's thought process).
agent = create_tool_calling_agent(llm=llm, prompt=prompt, tools=tools)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

if __name__ == "__main__":
    query = input("What can I help you research? ")
    raw_response = agent_executor.invoke({"query": query})

    # Parse the raw response into structured output.
    try:
        # Anthropic models return a list of content blocks, while OpenAI
        # models return a plain string. The original
        # `raw_response["output"][0]["text"]` assumed the Anthropic shape
        # and raised a TypeError for string outputs (string[0] is a single
        # character, which has no ["text"]); handle both shapes instead.
        output = raw_response["output"]
        if isinstance(output, list):
            output = output[0]["text"]
        structured_response = parser.parse(output)
        print("\nStructured Response:")
        print(structured_response)
    except Exception as e:
        print(f"\nError parsing response: {e}")
        print(f"Raw response: {raw_response}")
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 
107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
168 | #.idea/ 169 | 170 | # Ruff stuff: 171 | .ruff_cache/ 172 | 173 | # PyPI configuration file 174 | .pypirc 175 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Building an AI Agent from Scratch in Python 2 | 3 | ## A Beginner's Guide to Creating a Research Assistant with LangChain 4 | 5 | This guide walks you through building an AI agent from scratch using Python. We'll use popular frameworks like LangChain to integrate large language models (LLMs) such as Claude or GPT, give the agent access to tools like Wikipedia and web search, and structure its output for use in your code. By the end, you'll have a functional research assistant that can save its findings to a file. Let's dive in! 6 | 7 | --- 8 | 9 | ## Table of Contents 10 | 11 | 1. [Introduction](#chapter-1-introduction) 12 | 2. [Prerequisites](#chapter-2-prerequisites) 13 | 3. [Setting Up the Environment](#chapter-3-setting-up-the-environment) 14 | 4. [Writing the Core Code](#chapter-4-writing-the-core-code) 15 | 5. [Adding Tools](#chapter-5-adding-tools) 16 | 6. [Running the Agent](#chapter-6-running-the-agent) 17 | 7. [Sample Outputs](#chapter-7-sample-outputs) 18 | 8. [Conclusion](#chapter-8-conclusion) 19 | 20 | --- 21 | 22 | ## Chapter 1: Introduction 23 | 24 | In this tutorial, you'll learn how to build an AI agent step-by-step in Python. The agent will act as a research assistant, capable of answering queries, using tools like Wikipedia and DuckDuckGo search, and saving results to a text file. We'll use LangChain to integrate LLMs and structure the output predictably. 25 | 26 | ### Demo of the Finished Project 27 | 28 | The agent asks: "What can I help you research?" For example, if you input "Tell me about LangChain and its applications" and request it to save to a file, it: 29 | 30 | - Searches Wikipedia and the web. 
31 | - Provides a structured response with a topic, summary, sources, and tools used. 32 | - Saves the output to a text file with a timestamp. 33 | 34 | --- 35 | 36 | ## Chapter 2: Prerequisites 37 | 38 | Before starting, ensure you have the following: 39 | 40 | 1. **Python**: Version 3.10 or higher recommended. Install from [python.org](https://www.python.org/). 41 | 2. **Code Editor**: Visual Studio Code (VS Code) is recommended. 42 | 3. **API Keys**: You'll need API keys for an LLM provider (e.g., OpenAI or Anthropic). Instructions provided later. 43 | 44 | --- 45 | 46 | ## Chapter 3: Setting Up the Environment 47 | 48 | ### Step 1: Create a Project Folder 49 | 50 | - Open VS Code. 51 | - Go to `File > Open Folder`. 52 | - Create a new folder (e.g., `AI_Agent_Tutorial`) and open it. 53 | 54 | ### Step 2: Create a Requirements File 55 | 56 | Create a file named `requirements.txt` with these dependencies: 57 | 58 | ```plaintext 59 | langchain 60 | langchain-openai 61 | langchain-anthropic 62 | wikipedia 63 | duckduckgo-search 64 | python-dotenv 65 | pydantic 66 | ``` 67 | 68 | ### Step 3: Set Up a Virtual Environment 69 | 70 | 1. Open a terminal in VS Code. 71 | 2. Run: 72 | - Windows: `python -m venv venv` 73 | - Mac/Linux: `python3 -m venv venv` 74 | 3. Activate it: 75 | - Windows: `venv\Scripts\activate` 76 | - Mac/Linux: `source venv/bin/activate` 77 | 4. Install dependencies: 78 | ```bash 79 | pip install -r requirements.txt 80 | ``` 81 | 82 | ### Step 4: Create Additional Files 83 | 84 | - `main.py`: Main logic for the agent. 85 | - `tools.py`: Custom and external tools. 86 | - `.env`: Environment variables for API keys. 
87 | 88 | --- 89 | 90 | ## Chapter 4: Writing the Core Code 91 | 92 | ### Step 1: Set Up API Keys in `.env` 93 | 94 | Create a `.env` file and add your API key(s): 95 | 96 | ```bash 97 | # For OpenAI 98 | OPENAI_API_KEY="your-openai-api-key" 99 | 100 | # For Anthropic (Claude) 101 | ANTHROPIC_API_KEY="your-anthropic-api-key" 102 | ``` 103 | 104 | - **Get OpenAI Key**: Go to [platform.openai.com/api-keys](https://platform.openai.com/api-keys), generate a key, and paste it. 105 | - **Get Anthropic Key**: Go to [console.anthropic.com/settings/keys](https://console.anthropic.com/settings/keys), create a key, and paste it. 106 | 107 | ### Step 2: Write `main.py` 108 | 109 | This file sets up the LLM, prompt, and agent. 110 | 111 | ```python 112 | # main.py 113 | from dotenv import load_dotenv 114 | from pydantic import BaseModel 115 | from langchain_openai import ChatOpenAI 116 | from langchain_anthropic import ChatAnthropic 117 | from langchain_core.prompts import ChatPromptTemplate 118 | from langchain_core.output_parsers import PydanticOutputParser 119 | from langchain.agents import create_tool_calling_agent, AgentExecutor 120 | from tools import search_tool, wiki_tool, save_tool 121 | 122 | # Load environment variables 123 | load_dotenv() 124 | 125 | # Set up the LLM (choose one) 126 | llm = ChatAnthropic(model="claude-3-5-sonnet-20241022") # or ChatOpenAI(model="gpt-4o-mini") 127 | 128 | # Define the output structure 129 | class ResearchResponse(BaseModel): 130 | topic: str 131 | summary: str 132 | sources: list[str] 133 | tools_used: list[str] 134 | 135 | # Create parser 136 | parser = PydanticOutputParser(pydantic_object=ResearchResponse) 137 | 138 | # Set up prompt template 139 | prompt = ChatPromptTemplate.from_messages([ 140 | ("system", "You are a research assistant that will help generate a research paper. Answer the user query and use the necessary tools. 
Wrap the output in this format and provide no other text:\n{format_instructions}"), 141 | ("human", "{query}"), 142 | ("placeholder", "{chat_history}"), 143 | ("placeholder", "{agent_scratchpad}") 144 | ]).partial(format_instructions=parser.get_format_instructions()) 145 | 146 | # Define tools 147 | tools = [search_tool, wiki_tool, save_tool] 148 | 149 | # Create agent 150 | agent = create_tool_calling_agent(llm=llm, prompt=prompt, tools=tools) 151 | 152 | # Create executor 153 | agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) 154 | 155 | # Get user input and run agent 156 | query = input("What can I help you research? ") 157 | raw_response = agent_executor.invoke({"query": query}) 158 | 159 | # Parse and display structured output 160 | try: 161 | structured_response = parser.parse(raw_response["output"][0]["text"]) 162 | print(structured_response) 163 | except Exception as e: 164 | print(f"Error parsing response: {e}") 165 | print(f"Raw response: {raw_response}") 166 | ``` 167 | 168 | ### Explanation 169 | 170 | - **LLM**: Configures Claude or GPT with an API key loaded from `.env`. 171 | - **ResearchResponse**: Defines the structured output format using Pydantic. 172 | - **Prompt**: Instructs the agent on its role and output format. 173 | - **Agent**: Combines LLM, prompt, and tools. 174 | - **Executor**: Runs the agent and shows its thought process (`verbose=True`). 175 | 176 | --- 177 | 178 | ## Chapter 5: Adding Tools 179 | 180 | ### Step 1: Write `tools.py` 181 | 182 | This file defines tools for web search, Wikipedia, and saving to a file. 
183 | 184 | ```python 185 | # tools.py 186 | from langchain_community.tools import WikipediaQueryRun 187 | from langchain_community.utilities import WikipediaAPIWrapper 188 | from langchain_community.tools import DuckDuckGoSearchRun 189 | from langchain.tools import Tool 190 | from datetime import datetime 191 | 192 | # Web search tool (DuckDuckGo) 193 | search = DuckDuckGoSearchRun() 194 | search_tool = Tool( 195 | name="search", 196 | func=search.run, 197 | description="Search the web for information" 198 | ) 199 | 200 | # Wikipedia tool 201 | api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100) 202 | wiki_tool = WikipediaQueryRun(api_wrapper=api_wrapper) 203 | 204 | # Custom save-to-file tool 205 | def save_to_txt(data: str, filename: str = "research_output.txt"): 206 | with open(filename, "w") as f: 207 | f.write(f"Research Output\nTimestamp: {datetime.now()}\n\n{data}") 208 | 209 | save_tool = Tool( 210 | name="save_text_to_file", 211 | func=save_to_txt, 212 | description="Save structured research data to a text file" 213 | ) 214 | ``` 215 | 216 | ### Explanation 217 | 218 | - **Search Tool**: Uses DuckDuckGo to search the web. 219 | - **Wikipedia Tool**: Queries Wikipedia with a limit of 1 result and 100 characters. 220 | - **Save Tool**: Custom function to save output to a text file with a timestamp. 221 | 222 | --- 223 | 224 | ## Chapter 6: Running the Agent 225 | 226 | 1. Activate the virtual environment: 227 | - Windows: `venv\Scripts\activate` 228 | - Mac/Linux: `source venv/bin/activate` 229 | 2. Run the script: 230 | ```bash 231 | python main.py 232 | ``` 233 | 3. Enter a query, e.g., "South East Asia population save to a file". 234 | 235 | --- 236 | 237 | ## Chapter 7: Sample Outputs 238 | 239 | ### Example 1: Query - "Tell me about sharks" 240 | 241 | **Terminal Output**: 242 | 243 | ```bash 244 | > What can I help you research? 
Tell me about sharks 245 | [AgentExecutor Chain] 246 | Invoking tool: search with query "shark biology habitat behavior research" 247 | [Output] 248 | topic='Sharks' 249 | summary='Sharks are a group of elasmobranch fish characterized by a cartilaginous skeleton, five to seven gill slits, and pectoral fins not fused to the head.' 250 | sources=['https://en.wikipedia.org/wiki/Shark'] 251 | tools_used=['search'] 252 | ``` 253 | 254 | ### Example 2: Query - "Hammerhead sharks" 255 | 256 | **Terminal Output**: 257 | 258 | ```bash 259 | > What can I help you research? Hammerhead sharks 260 | [AgentExecutor Chain] 261 | Invoking tool: wiki_tool with query "Hammerhead shark" 262 | Invoking tool: search with query "hammerhead shark research latest findings" 263 | [Output] 264 | topic='Hammerhead Sharks' 265 | summary='Hammerhead sharks are known for their distinctive hammer-shaped heads, which enhance their sensory capabilities.' 266 | sources=['Wikipedia: Hammerhead shark', 'https://www.sharkresearch.org'] 267 | tools_used=['wiki_tool', 'search'] 268 | ``` 269 | 270 | ### Example 3: Query - "South East Asia population save to a file" 271 | 272 | **Terminal Output**: 273 | 274 | ```bash 275 | > What can I help you research? South East Asia population save to a file 276 | [AgentExecutor Chain] 277 | Invoking tool: wiki_tool with query "Southeast Asia" 278 | Invoking tool: save_text_to_file with data... 279 | [Output] 280 | topic='South East Asia Population' 281 | summary='Southeast Asia has a population of over 650 million, with a relatively young demographic.' 282 | sources=['Wikipedia: Southeast Asia'] 283 | tools_used=['wiki_tool', 'save_text_to_file'] 284 | ``` 285 | 286 | **File Output (`research_output.txt`)**: 287 | 288 | ```bash 289 | Research Output 290 | Timestamp: 2025-03-15 10:00:00 291 | topic='South East Asia Population' 292 | summary='Southeast Asia has a population of over 650 million, with a relatively young demographic.' 
293 | sources=['Wikipedia: Southeast Asia'] 294 | tools_used=['wiki_tool', 'save_text_to_file'] 295 | ``` 296 | 297 | --- 298 | 299 | ## Chapter 8: Conclusion 300 | 301 | Congratulations! You've built an AI research assistant from scratch using Python and LangChain. It can: 302 | 303 | - Answer queries using LLMs like Claude or GPT. 304 | - Use tools like Wikipedia, web search, and custom file-saving. 305 | - Structure output predictably with Pydantic. 306 | 307 | ### Next Steps 308 | 309 | - Add more tools (e.g., API calls, databases). 310 | - Improve the prompt for better responses. 311 | - Experiment with different LLMs or models. 312 | -------------------------------------------------------------------------------- /Document.md: -------------------------------------------------------------------------------- 1 | # AI Agent Example: Line-by-Line Explanation 2 | 3 | ## A Detailed Guide to the Python Code and Its Outputs 4 | 5 | This document explains the Python example files (`main.py` and `tools.py`) for building an AI research assistant using LangChain. It covers every line of code, its purpose, and all possible outputs and scenarios, including success cases, errors, and edge cases. By the end, you'll understand how the agent works and what to expect when running it. 6 | 7 | --- 8 | 9 | ## Table of Contents 10 | 11 | 1. [Overview](#chapter-1-overview) 12 | 2. [File Structure](#chapter-2-file-structure) 13 | 3. [Line-by-Line Explanation: `main.py`](#chapter-3-line-by-line-explanation-mainpy) 14 | 4. [Line-by-Line Explanation: `tools.py`](#chapter-4-line-by-line-explanation-toolspy) 15 | 5. [Sample Outputs and Scenarios](#chapter-5-sample-outputs-and-scenarios) 16 | 6. [Troubleshooting and Edge Cases](#chapter-6-troubleshooting-and-edge-cases) 17 | 18 | --- 19 | 20 | ## Chapter 1: Overview 21 | 22 | The example creates an AI agent that acts as a research assistant. It: 23 | 24 | - Uses a large language model (LLM) like Claude or GPT. 
25 | - Integrates tools (web search, Wikipedia, file saving). 26 | - Structures output predictably using Pydantic. 27 | - Accepts user queries and processes them with tools. 28 | 29 | Key files: 30 | 31 | - `main.py`: Core logic for the agent. 32 | - `tools.py`: Tool definitions. 33 | - `.env`: API key storage. 34 | - `requirements.txt`: Dependencies. 35 | 36 | --- 37 | 38 | ## Chapter 2: File Structure 39 | 40 | - **`main.py`**: Sets up the LLM, prompt, agent, and executor; handles user input and output parsing. 41 | - **`tools.py`**: Defines three tools: DuckDuckGo search, Wikipedia query, and a custom save-to-file tool. 42 | - **`.env`**: Stores API keys (e.g., `OPENAI_API_KEY` or `ANTHROPIC_API_KEY`). 43 | - **`requirements.txt`**: Lists required packages (`langchain`, `pydantic`, etc.). 44 | 45 | --- 46 | 47 | ## Chapter 3: Line-by-Line Explanation: `main.py` 48 | 49 | ```python 50 | # main.py 51 | import os 52 | from dotenv import load_dotenv 53 | from pydantic import BaseModel 54 | from langchain_openai import ChatOpenAI 55 | from langchain_anthropic import ChatAnthropic 56 | from langchain_core.prompts import ChatPromptTemplate 57 | from langchain_core.output_parsers import PydanticOutputParser 58 | from langchain.agents import create_tool_calling_agent, AgentExecutor 59 | from tools import search_tool, wiki_tool, save_tool 60 | ``` 61 | 62 | - **`import os`**: Imports the `os` module to access environment variables. 63 | - **`from dotenv import load_dotenv`**: Imports a function to load API keys from `.env`. 64 | - **`from pydantic import BaseModel`**: Imports `BaseModel` to define structured output. 65 | - **`from langchain_openai import ChatOpenAI`**: Imports OpenAI LLM integration. 66 | - **`from langchain_anthropic import ChatAnthropic`**: Imports Anthropic (Claude) LLM integration. 67 | - **`from langchain_core.prompts import ChatPromptTemplate`**: Imports a template for agent prompts. 
68 | - **`from langchain_core.output_parsers import PydanticOutputParser`**: Imports a parser for structured output. 69 | - **`from langchain.agents import ...`**: Imports functions to create and run the agent. 70 | - **`from tools import ...`**: Imports tools defined in `tools.py`. 71 | 72 | ```python 73 | load_dotenv() 74 | ``` 75 | 76 | - Loads API keys from `.env` into the environment (e.g., `os.getenv("OPENAI_API_KEY")`). 77 | 78 | ```python 79 | # llm = ChatOpenAI(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY")) 80 | llm = ChatAnthropic(model="claude-3-5-sonnet-20241022", api_key=os.getenv("ANTHROPIC_API_KEY")) 81 | ``` 82 | 83 | - Defines the LLM. Two options: 84 | - OpenAI (`ChatOpenAI`): Uses `gpt-4o-mini` model (commented out). 85 | - Anthropic (`ChatAnthropic`): Uses `claude-3-5-sonnet-20241022` model (active). 86 | - `api_key=os.getenv(...)`: Fetches the key from `.env`. 87 | 88 | ```python 89 | class ResearchResponse(BaseModel): 90 | topic: str 91 | summary: str 92 | sources: list[str] 93 | tools_used: list[str] 94 | ``` 95 | 96 | - Defines a Pydantic class for structured output with four fields: 97 | - `topic`: Research subject (string). 98 | - `summary`: Brief explanation (string). 99 | - `sources`: List of references (list of strings). 100 | - `tools_used`: Tools invoked (list of strings). 101 | 102 | ```python 103 | parser = PydanticOutputParser(pydantic_object=ResearchResponse) 104 | ``` 105 | 106 | - Creates a parser to convert LLM output into a `ResearchResponse` object. 107 | 108 | ```python 109 | prompt = ChatPromptTemplate.from_messages([ 110 | ("system", "You are a research assistant that will help generate a research paper. Answer the user query and use the necessary tools. 
Wrap the output in this format and provide no other text:\n{format_instructions}"), 111 | ("human", "{query}"), 112 | ("placeholder", "{chat_history}"), 113 | ("placeholder", "{agent_scratchpad}") 114 | ]).partial(format_instructions=parser.get_format_instructions()) 115 | ``` 116 | 117 | - Defines a prompt template: 118 | - `system`: Instructs the agent on its role and output format. 119 | - `human`: Placeholder for the user’s query. 120 | - `chat_history` and `agent_scratchpad`: Placeholders for LangChain (auto-filled). 121 | - `.partial(...)`: Inserts the Pydantic format instructions into the system message. 122 | 123 | ```python 124 | tools = [search_tool, wiki_tool, save_tool] 125 | ``` 126 | 127 | - Lists the tools imported from `tools.py` for the agent to use. 128 | 129 | ```python 130 | agent = create_tool_calling_agent(llm=llm, prompt=prompt, tools=tools) 131 | ``` 132 | 133 | - Creates an agent combining the LLM, prompt, and tools. 134 | 135 | ```python 136 | agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) 137 | ``` 138 | 139 | - Sets up an executor to run the agent. `verbose=True` shows the agent’s thought process. 140 | 141 | ```python 142 | if __name__ == "__main__": 143 | query = input("What can I help you research? ") 144 | raw_response = agent_executor.invoke({"query": query}) 145 | ``` 146 | 147 | - Runs the script only if executed directly (not imported). 148 | - Prompts the user for input and invokes the agent with the query. 149 | 150 | ```python 151 | try: 152 | structured_response = parser.parse(raw_response["output"][0]["text"]) 153 | print("\nStructured Response:") 154 | print(structured_response) 155 | except Exception as e: 156 | print(f"\nError parsing response: {e}") 157 | print(f"Raw response: {raw_response}") 158 | ``` 159 | 160 | - Tries to parse the raw LLM output into a `ResearchResponse` object. 161 | - Success: Prints the structured response. 162 | - Failure: Prints the error and raw response for debugging. 
163 | 164 | --- 165 | 166 | ## Chapter 4: Line-by-Line Explanation: `tools.py` 167 | 168 | ```python 169 | # tools.py 170 | from langchain_community.tools import WikipediaQueryRun 171 | from langchain_community.utilities import WikipediaAPIWrapper 172 | from langchain_community.tools import DuckDuckGoSearchRun 173 | from langchain.tools import Tool 174 | from datetime import datetime 175 | ``` 176 | 177 | - Imports tools and utilities from LangChain and Python’s `datetime`. 178 | 179 | ```python 180 | search = DuckDuckGoSearchRun() 181 | search_tool = Tool( 182 | name="search", 183 | func=search.run, 184 | description="Search the web for information" 185 | ) 186 | ``` 187 | 188 | - Creates a web search tool using DuckDuckGo: 189 | - `search`: Instance of the search runner. 190 | - `search_tool`: Wraps it as a LangChain tool with a name, function, and description. 191 | 192 | ```python 193 | api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100) 194 | wiki_tool = WikipediaQueryRun(api_wrapper=api_wrapper) 195 | ``` 196 | 197 | - Sets up a Wikipedia tool: 198 | - `api_wrapper`: Limits results to 1 and content to 100 characters. 199 | - `wiki_tool`: Creates a query runner with the wrapper. 200 | 201 | ```python 202 | def save_to_txt(data: str, filename: str = "research_output.txt"): 203 | with open(filename, "w") as f: 204 | f.write(f"Research Output\nTimestamp: {datetime.now()}\n\n{data}") 205 | return "File saved successfully" 206 | ``` 207 | 208 | - Defines a custom function to save data to a file: 209 | - `data`: The content to save (string). 210 | - `filename`: Output file (defaults to `research_output.txt`). 211 | - Writes a header, timestamp, and data; returns a success message. 
212 | 213 | ```python 214 | save_tool = Tool( 215 | name="save_text_to_file", 216 | func=save_to_txt, 217 | description="Save structured research data to a text file" 218 | ) 219 | ``` 220 | 221 | - Wraps `save_to_txt` as a tool with a name, function, and description. 222 | 223 | --- 224 | 225 | ## Chapter 5: Sample Outputs and Scenarios 226 | 227 | ### Scenario 1: Simple Query ("Tell me about sharks") 228 | 229 | **Input**: `Tell me about sharks` 230 | **Terminal Output**: 231 | 232 | ```bash 233 | What can I help you research? Tell me about sharks 234 | 235 | > Entering new AgentExecutor chain... 236 | Invoking tool: search with query "shark biology habitat behavior research" 237 | > Finished chain. 238 | 239 | Structured Response: 240 | topic='Sharks' 241 | summary='Sharks are a group of elasmobranch fish characterized by a cartilaginous skeleton, five to seven gill slits, and pectoral fins not fused to the head.' 242 | sources=['https://en.wikipedia.org/wiki/Shark'] 243 | tools_used=['search'] 244 | ``` 245 | 246 | - **Explanation**: The agent uses the `search` tool, retrieves info from the web, and structures it. 247 | 248 | ### Scenario 2: Multi-Tool Query ("Hammerhead sharks") 249 | 250 | **Input**: `Hammerhead sharks` 251 | **Terminal Output**: 252 | 253 | ```bash 254 | What can I help you research? Hammerhead sharks 255 | 256 | > Entering new AgentExecutor chain... 257 | Invoking tool: wiki_tool with query "Hammerhead shark" 258 | Invoking tool: search with query "hammerhead shark research latest findings" 259 | > Finished chain. 260 | 261 | Structured Response: 262 | topic='Hammerhead Sharks' 263 | summary='Hammerhead sharks are known for their distinctive hammer-shaped heads, which enhance their sensory capabilities.' 264 | sources=['Wikipedia: Hammerhead shark', 'https://www.sharkresearch.org'] 265 | tools_used=['wiki_tool', 'search'] 266 | ``` 267 | 268 | - **Explanation**: The agent uses both `wiki_tool` and `search` for a richer response. 
269 | 270 | ### Scenario 3: Save to File ("South East Asia population save to a file") 271 | 272 | **Input**: `South East Asia population save to a file` 273 | **Terminal Output**: 274 | 275 | ```bash 276 | What can I help you research? South East Asia population save to a file 277 | 278 | > Entering new AgentExecutor chain... 279 | Invoking tool: wiki_tool with query "Southeast Asia" 280 | Invoking tool: save_text_to_file with data... 281 | > Finished chain. 282 | 283 | Structured Response: 284 | topic='South East Asia Population' 285 | summary='Southeast Asia has a population of over 650 million, with a relatively young demographic.' 286 | sources=['Wikipedia: Southeast Asia'] 287 | tools_used=['wiki_tool', 'save_text_to_file'] 288 | ``` 289 | 290 | **File Output (`research_output.txt`)**: 291 | 292 | ```bash 293 | Research Output 294 | Timestamp: 2025-03-15 12:34:56.789012 295 | 296 | topic='South East Asia Population' 297 | summary='Southeast Asia has a population of over 650 million, with a relatively young demographic.' 298 | sources=['Wikipedia: Southeast Asia'] 299 | tools_used=['wiki_tool', 'save_text_to_file'] 300 | ``` 301 | 302 | - **Explanation**: The agent uses `wiki_tool` for data and `save_text_to_file` to save it. 303 | 304 | --- 305 | 306 | ## Chapter 6: Troubleshooting and Edge Cases 307 | 308 | ### 1. Missing API Key 309 | 310 | **Scenario**: `.env` lacks a valid key. 311 | **Output**: 312 | 313 | ```bash 314 | Error: Missing API key for Anthropic/OpenAI 315 | ``` 316 | 317 | **Fix**: Add a valid key to `.env` and ensure the correct LLM is uncommented. 318 | 319 | ### 2. Parsing Error 320 | 321 | **Scenario**: LLM outputs malformed data (e.g., missing fields). 
322 | **Output**: 323 | 324 | ```bash 325 | Error parsing response: ValidationError: 1 validation error for ResearchResponse 326 | summary: field required 327 | Raw response: {'output': [{'text': '{"topic": "Sharks", "sources": ["Wikipedia"], "tools_used": ["search"]}'}]} 328 | ``` 329 | 330 | **Fix**: Adjust the prompt to enforce all fields or handle missing data gracefully. 331 | 332 | ### 3. Rate Limiting 333 | 334 | **Scenario**: Too many requests to DuckDuckGo or Wikipedia. 335 | **Output**: 336 | 337 | ```bash 338 | > Entering new AgentExecutor chain... 339 | Error: Rate limit exceeded for DuckDuckGo search 340 | > Finished chain. 341 | ``` 342 | 343 | **Fix**: Wait, reduce tool usage, or use an API key-based service. 344 | 345 | ### 4. No Tools Used 346 | 347 | **Scenario**: Query doesn’t trigger tools (e.g., "What is 2+2?"). 348 | **Output**: 349 | 350 | ```bash 351 | Structured Response: 352 | topic='2+2' 353 | summary='2+2 equals 4.' 354 | sources=[] 355 | tools_used=[] 356 | ``` 357 | 358 | **Explanation**: The LLM answers directly without tools. 359 | 360 | ### 5. Invalid Query 361 | 362 | **Scenario**: Empty or nonsensical input (e.g., ""). 363 | **Output**: 364 | 365 | ```bash 366 | Structured Response: 367 | topic='Unknown' 368 | summary='Please provide a valid query.' 369 | sources=[] 370 | tools_used=[] 371 | ``` 372 | 373 | **Explanation**: The LLM may handle it gracefully or return an error depending on the model. 374 | 375 | --- 376 | 377 | This guide covers the code and its behavior comprehensively. If you need further clarification or additional scenarios, let me know! 378 | --------------------------------------------------------------------------------