├── requirements.txt ├── tutorial ├── screenshots │ └── deepseek-r1_azure.jpg ├── .env ├── tutorial_Bigtool_DeepSeek_R1.ipynb ├── tutorial_McpAdapters_DeepSeek_R1.ipynb ├── taot_tutorial_ChatOpenAI_QwQ32B.ipynb ├── taot_tutorial_ChatOpenAI.ipynb ├── taot_tutorial_ChatOpenAI_DeepSeek_R1_0528.ipynb ├── taot_tutorial_ChatOpenAI_DeepSeek_R1_Distill-Qwen-14b.ipynb ├── taot_tutorial_ChatOpenAI_Qwen3.ipynb └── taot_tutorial_ChatBedrockConverse.ipynb ├── MANIFEST.in.txt ├── src └── taot │ ├── models.py │ ├── __init__.py │ ├── message.py │ └── agent.py ├── .gitignore ├── pyproject.toml ├── LICENSE ├── setup.py └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | pydantic 3 | langchain-core 4 | langchain-openai 5 | langgraph 6 | -------------------------------------------------------------------------------- /tutorial/screenshots/deepseek-r1_azure.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leockl/tool-ahead-of-time/HEAD/tutorial/screenshots/deepseek-r1_azure.jpg -------------------------------------------------------------------------------- /MANIFEST.in.txt: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | include pyproject.toml 4 | 5 | recursive-include src/taot *.py 6 | recursive-exclude * __pycache__ 7 | recursive-exclude * *.py[cod] -------------------------------------------------------------------------------- /src/taot/models.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field 2 | 3 | class ToolCall(BaseModel): 4 | tool: str = Field(..., description="Name of the tool to call") 5 | args: dict = Field(..., description="Arguments to pass to the tool") -------------------------------------------------------------------------------- /src/taot/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import ToolCall 2 | from .message import create_system_message_taot 3 | from .agent import ManualToolAgent, create_react_agent_taot 4 | 5 | __all__ = ['ToolCall', 'create_system_message_taot', 'create_react_agent_taot'] 6 | __version__ = '0.1.3' -------------------------------------------------------------------------------- /tutorial/.env: -------------------------------------------------------------------------------- 1 | OPENROUTER_API_KEY="MY_OPENROUTER_API_KEY" 2 | AZURE_ENDPOINT_URL="MY_AZURE_ENDPOINT_URL" 3 | AZURE_API_KEY="MY_AZURE_API_KEY" 4 | AWS_ACCESS_KEY_ID="MY_AWS_ACCESS_KEY_ID" 5 | AWS_SECRET_ACCESS_KEY="MY_AWS_SECRET_ACCESS_KEY" 6 | BRAVE_API_KEY="MY_BRAVE_API_KEY" 7 | ACCUWEATHER_API_KEY="MY_ACCUWEATHER_API_KEY" 8 | OPENAI_API_KEY="MY_OPENAI_API_KEY" 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python build artifacts 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | *.egg-info/ 20 | .installed.cfg 21 | *.egg 22 | 23 | # Environment directories 24 | .env 25 | venv/ 26 | ENV/ 27 | env/ 28 | .venv/ 29 | 30 | # IDE specific files 31 | .idea/ 32 | .vscode/ 33 | *.swp 34 | *.swo 35 | *~ 
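[Editor's note] The package's public API (exported in `src/taot/__init__.py` above) is exercised end-to-end in the tutorial notebooks further below. As a quick orientation before the remaining files, here is a minimal usage sketch distilled from those notebooks. It is not a file in this repository; the OpenRouter model name, the `OPENROUTER_API_KEY` variable, and the `calculator` tool simply mirror the choices made in `taot_tutorial_ChatOpenAI_QwQ32B.ipynb`.

```python
# Hypothetical quick-start sketch (not part of the repo) condensing the tutorial pattern.
import os
from dotenv import load_dotenv
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from taot import create_system_message_taot, create_react_agent_taot

load_dotenv()  # expects OPENROUTER_API_KEY in a .env file, as in tutorial/.env

@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression."""
    try:
        return str(eval(expression))  # simplified; the tutorials validate the input first
    except Exception as e:
        return f"Error: {e}"

# Any model reachable through LangChain's ChatOpenAI class works here;
# the model name below follows the QwQ-32B tutorial on OpenRouter.
model = ChatOpenAI(
    model="qwen/qwq-32b",
    api_key=os.environ["OPENROUTER_API_KEY"],
    base_url="https://openrouter.ai/api/v1",
)

# The system message names each tool and its arguments, then
# create_system_message_taot appends the JSON-schema instructions.
system_message = (
    "You are a math expert. You are an assistant with access to specific tools. "
    "When the user's question requires a calculation, use the 'calculator' tool. "
    "For the 'calculator' tool, provide the user provided math expression as a string "
    "into the 'expression' argument in the tool."
)
messages = [
    {"role": "system", "content": create_system_message_taot(system_message)},
    {"role": "user", "content": "What is 123 * 456?"},
]

# Mirrors LangGraph's create_react_agent call shape, but with manual tool handling.
agent = create_react_agent_taot(model, tools=[calculator])
response = agent.invoke({"messages": messages})
print(response["messages"][0]["content"])
```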
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "taot" 7 | version = "0.1.3" 8 | authors = [ 9 | { name="Leo Chow", email="leo.chow11@gmail.com" }, 10 | ] 11 | description = "Tool Ahead of Time (TAoT)" 12 | readme = "README.md" 13 | requires-python = ">=3.8" 14 | classifiers = [ 15 | "Programming Language :: Python :: 3", 16 | "License :: OSI Approved :: MIT License", 17 | "Operating System :: OS Independent", 18 | ] 19 | 20 | [project.urls] 21 | "Homepage" = "https://github.com/leockl/tool-ahead-of-time" 22 | "Bug Tracker" = "https://github.com/leockl/tool-ahead-of-time/issues" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Leo Chow 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/taot/message.py: -------------------------------------------------------------------------------- 1 | from langchain_core.output_parsers import JsonOutputParser 2 | from .models import ToolCall 3 | 4 | def create_system_message_taot(system_message: str) -> str: 5 | """ 6 | Create a system message with tool instructions and JSON schema. 7 | 8 | Args: 9 | system_message (str): The specific system message for tools 10 | 11 | Returns: 12 | str: Formatted system message with JSON schema instructions 13 | """ 14 | json_parser = JsonOutputParser(pydantic_object=ToolCall) 15 | 16 | sys_msg_taot = (f"{system_message}\n" 17 | f"When a user's question matches a tool's capability, you MUST use that tool. 
" 18 | f"Do not try to solve problems manually if a tool exists for that purpose.\n" 19 | f"Output ONLY a JSON object (with no extra text) that adheres EXACTLY to the following schema:\n\n" 20 | f"{json_parser.get_format_instructions()}\n\n" 21 | f"If the user's question doesn't require any tool, answer directly in plain text with no JSON.") 22 | 23 | return sys_msg_taot -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="taot", 5 | version="0.1.3", 6 | package_dir={"": "src"}, 7 | packages=find_packages(where="src"), 8 | python_requires=">=3.8", 9 | install_requires=[ 10 | "pydantic>=2.0.0", 11 | "langchain-core>=0.1.0", 12 | "langchain-openai>=0.0.5", 13 | "langgraph>=0.0.1", 14 | ], 15 | author="Leo Chow", 16 | author_email="leo.chow11@gmail.com", 17 | description="Tool Ahead of Time (TAoT)", 18 | long_description=open("README.md").read(), 19 | long_description_content_type="text/markdown", 20 | url="https://github.com/leockl/tool-ahead-of-time", 21 | classifiers=[ 22 | "Development Status :: 3 - Alpha", 23 | "Intended Audience :: Developers", 24 | "License :: OSI Approved :: MIT License", 25 | "Programming Language :: Python :: 3", 26 | "Programming Language :: Python :: 3.8", 27 | "Programming Language :: Python :: 3.9", 28 | "Programming Language :: Python :: 3.10", 29 | "Programming Language :: Python :: 3.11", 30 | ], 31 | ) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tool-Ahead-of-Time (TAoT): Because Why Wait? 🕒 2 | Ever found yourself staring at a shiny new LLM through LangChain's window, but can't use tool calling because it's "not supported yet"? 3 | 4 | *Sad agent noises* 😢 5 | 6 | Well, hold my JSON parser, because this repo says "NOT TODAY!" 🦾 7 | 8 | ## What is this sorcery? 🧙‍♂️ 9 | 10 | This is a Python package that enables tool calling for any model available through LangChain's ChatOpenAI class (and by extension, any model available through OpenAI's class), any model available through LangChain's AzureAIChatCompletionsModel class and any model available through LangChain's ChatBedrockConverse class, even before LangChain and LangGraph officially supports it! 11 | 12 | Yes, you read that right. We're living in the age of AI and things move fast 🏎️💨 13 | 14 | It essentially works by reformatting the output response of the model into a JSON parser and passing this on to the relevant tools. 15 | 16 | This repo showcases an example with DeepSeek-R1 671B, which isn't currently supported with tool calling by LangChain and LangGraph (as of 16th Feb 2025). 17 | 18 | ## Features 🌟 19 | 20 | - Tool calling support for OpenAI and non-OpenAI models available on: 21 | - LangChain's ChatOpenAI class (and by extension, OpenAI and non-OpenAI models available on the base OpenAI's class). 22 | - LangChain's AzureAIChatCompletionsModel class. 23 | - LangChain's ChatBedrockConverse class. 24 | - This package follows a similar method to LangChain's and LangGraph's `create_react_agent` method for tool calling, so makes it easy for you to read the syntax. 😊 25 | - Zero waiting for official support required. 26 | - More robust than a caffeinated developer at 3 AM. 
☕ 27 | 28 | ## Quick Start 🚀 29 | 30 | Follow the notebook tutorials in the "tutorial" folder in this repo for a fast and practical guide: 31 | - "taot_tutorial_ChatOpenAI.ipynb" file for example notebook tutorial on LangChain's ChatOpenAI class (using DeepSeek-R1 671B on OpenRouter). 32 | - "taot_tutorial_AzureAIChatCompletionsModel.ipynb" file for example notebook tutorial on LangChain's AzureAIChatCompletionsModel class (using DeepSeek-R1 671B on Microsoft Azure). 33 | - "taot_tutorial_ChatOpenAI_QwQ32B.ipynb" file for example notebook tutorial on LangChain's ChatOpenAI class (using QwQ-32B on OpenRouter). 34 | - "taot_tutorial_ChatBedrockConverse.ipynb" file for example notebook tutorial on LangChain's ChatBedrockConverse class (using DeepSeek-R1 671B on Amazon Bedrock). 35 | - "tutorial_McpAdapters_DeepSeek_R1.ipynb" file for example notebook tutorial on using LangChain's MCP Adapters library with DeepSeek-R1 671B (via LangChain's ChatOpenAI class on OpenRouter). 36 | - "tutorial_Bigtool_DeepSeek_R1.ipynb" file for example notebook tutorial on using LangGraph's Bigtool library with DeepSeek-R1 671B (via LangChain's ChatOpenAI class on OpenRouter). 37 | - "taot_tutorial_ChatOpenAI_Qwen3.ipynb" file for example notebook tutorial on LangChain's ChatOpenAI class (using Qwen3 models on OpenRouter). 38 | - "taot_tutorial_ChatOpenAI_DeepSeek_R1_0528.ipynb" file for example notebook tutorial on LangChain's ChatOpenAI class (using DeepSeek-R1-0528 685B on OpenRouter). 39 | 40 | ## Changelog 📖 41 | 42 | 20th Feb 2025: 43 | - Package now available on PyPI! Just "pip install taot" and you're ready to go. 44 | - Completely redesigned to follow LangChain's and LangGraph's intuitive `create_react_agent` tool calling methods. 45 | - Produces natural language responses when tool calling is performed. 46 | 47 | 1st Mar 2025: 48 | - Package now available in TypeScript on npm! Just "npm install taot-ts" and you're ready to go. (https://github.com/leockl/tool-ahead-of-time-ts) 49 | 50 | 8th Mar 2025: 51 | - Updated repo to include implementation support for Microsoft Azure via LangChain's AzureAIChatCompletionsModel class. 52 | 53 | 16th Mar 2025: 54 | - Updated repo to include example tutorial for tool calling support for QwQ-32B using Langchain's ChatOpenAI class (hosted on OpenRouter). See "taot_tutorial_ChatOpenAI_QwQ32B.ipynb" file under the "tutorial" folder in this repo. While doing this, I noticed OpenRouter's API for QwQ-32B is unstable and returning empty responses (likely because QwQ-32B is a new model added on OpenRouter only about a week ago). Due to this, I have updated the taot package to keep looping until a non-empty response is returned. If you have previously downloaded the package, please update the package via `pip install --upgrade taot`. 55 | - Checked out OpenAI Agents SDK framework for tool calling support for non-OpenAI providers/models (https://openai.github.io/openai-agents-python/models/) and they don't support tool calling for DeepSeek-R1 (or models available through OpenRouter) yet (as of 16th Mar 2025), so there you go! 😉 56 | 57 | 28th Mar 2025: 58 | - Updated repo to include implementation support for Amazon Bedrock via LangChain's ChatBedrockConverse class. 59 | 60 | 6th April 2025: 61 | - Special Update: Updated repo to include implementation support for using LangChain's MCP Adapters library with DeepSeek-R1 671B (via LangChain's ChatOpenAI class on OpenRouter). 
62 | - Special Update: Updated repo to include implementation support for using LangGraph's Bigtool library with DeepSeek-R1 671B (via LangChain's ChatOpenAI class on OpenRouter). 63 | 64 | 7th May 2025: 65 | - Updated repo to include example tutorial for tool calling support for all the Qwen3 models using Langchain's ChatOpenAI class (hosted on OpenRouter), with the exception of the Qwen3 0.6B model. My observation is that the Qwen 0.6B model is just not "smart" or performant enough to understand when tool use is required. 66 | 67 | 4th Jun 2025: 68 | - Updated repo to include example tutorial for tool calling support for DeepSeek-R1-0528 685B model using Langchain's ChatOpenAI class (hosted on OpenRouter). 69 | 70 | ## Contributions 🤝 71 | 72 | Feel free to contribute! Whether it's adding features, fixing bugs, adding comments in the code or any suggestions to improve this repo, all are welcomed 😄 73 | 74 | ## Disclaimer ⚠️ 75 | 76 | This package is like that friend who shows up to the party early - technically not invited yet, but hopes to bring such good vibes that everyone's glad they came. 77 | 78 | ## License 📜 79 | 80 | MIT License - Because sharing is caring, and we care about you having tool calling RIGHT NOW. 81 | 82 | --- 83 | 84 | Made with ❤️ and a healthy dose of impatience. 85 | 86 | Please give my GitHub repo a ⭐ if this was helpful. Thank you! 87 | -------------------------------------------------------------------------------- /src/taot/agent.py: -------------------------------------------------------------------------------- 1 | from typing import List, Callable 2 | from langchain_core.output_parsers import JsonOutputParser 3 | from langchain_core.messages import SystemMessage, HumanMessage, AIMessage 4 | from langchain_openai import ChatOpenAI 5 | from langgraph.prebuilt import create_react_agent 6 | from langchain_core.runnables import Runnable 7 | import re 8 | from pydantic import TypeAdapter 9 | from .models import ToolCall 10 | 11 | class ManualToolAgent(Runnable): 12 | """ 13 | A custom agent that handles tools manually. 14 | """ 15 | def __init__(self, model, tools): 16 | self.model = model 17 | self.tools = tools 18 | self.json_parser = JsonOutputParser(pydantic_object=ToolCall) 19 | self.base_executor = create_react_agent(model, tools=[]) 20 | self.max_retries = 100 21 | 22 | def convert_messages(self, messages: List[dict]) -> List[SystemMessage | HumanMessage | AIMessage]: 23 | """ 24 | Convert dictionary-based messages to LangChain message objects. 25 | """ 26 | converted_messages = [] 27 | 28 | message_types = { 29 | "system": SystemMessage, 30 | "user": HumanMessage, 31 | "assistant": AIMessage 32 | } 33 | 34 | for message in messages: 35 | role = message["role"] 36 | content = message["content"] 37 | 38 | if role in message_types: 39 | MessageClass = message_types[role] 40 | converted_message = MessageClass(content=content) 41 | converted_messages.append(converted_message) 42 | 43 | return converted_messages 44 | 45 | def is_empty_response(self, response_text: str) -> bool: 46 | """ 47 | Check if the response is empty or contains only whitespace. 
48 | 49 | Args: 50 | response_text (str): The response text to check 51 | 52 | Returns: 53 | bool: True if response is empty, False otherwise 54 | """ 55 | if response_text is None: 56 | return True 57 | if not response_text.strip(): 58 | return True 59 | return False 60 | 61 | def format_tool_result(self, tool_name: str, tool_result: str, user_query: str) -> str: 62 | """ 63 | Format tool result using LLM to create natural language response. 64 | """ 65 | prompt = f"""Given the following: 66 | User query: {user_query} 67 | Tool used: {tool_name} 68 | Tool result: {tool_result} 69 | 70 | Create a natural language response to the user query that incorporates the result from the tool. Do not mention anything about using the tool used. 71 | Keep it concise and direct.""" 72 | 73 | retry_count = 0 74 | while retry_count < self.max_retries: 75 | response = self.model.invoke([HumanMessage(content=prompt)]) 76 | if not self.is_empty_response(response.content): 77 | return response.content 78 | retry_count += 1 79 | 80 | # If we've reached here, we've exceeded max retries with empty responses 81 | # Return a default response with the raw tool result 82 | return f"The result is: {tool_result}" 83 | 84 | def invoke(self, inputs: dict) -> dict: 85 | """ 86 | Execute the agent with manual tool handling. 87 | 88 | Args: 89 | inputs (dict): Dictionary containing messages 90 | 91 | Returns: 92 | dict: Response containing processed message 93 | """ 94 | # Get messages 95 | messages = inputs["messages"] 96 | user_query = messages[-1]["content"] # Get the last user message 97 | 98 | # Convert messages to LangChain format 99 | converted_formatted_messages = self.convert_messages(messages) 100 | 101 | # Get response from base executor with retry logic for empty responses 102 | last_response = None 103 | retry_count = 0 104 | while retry_count < self.max_retries: 105 | response = self.base_executor.invoke({"messages": converted_formatted_messages}) 106 | last_response = response["messages"][-1].content 107 | 108 | if not self.is_empty_response(last_response): 109 | break 110 | 111 | retry_count += 1 112 | 113 | # If we still have an empty response after all retries, return an error message 114 | if self.is_empty_response(last_response): 115 | return {"messages": [{"content": "I'm having trouble generating a response. Please try again."}]} 116 | 117 | # Process JSON response 118 | matches = re.findall(r'(\{.*?\})', last_response, re.DOTALL) 119 | json_text = None 120 | for m in matches: 121 | if '"tool"' in m and '"args"' in m: 122 | json_text = m 123 | break 124 | 125 | if json_text: 126 | try: 127 | adapter = TypeAdapter(ToolCall) 128 | parsed = self.json_parser.parse(json_text) 129 | 130 | if isinstance(parsed, dict): 131 | tool_call = adapter.validate_python(parsed) 132 | else: 133 | tool_call = parsed 134 | 135 | # Find the matching tool 136 | tool_dict = {tool.name: tool for tool in self.tools} 137 | 138 | if tool_call.tool in tool_dict: 139 | raw_result = tool_dict[tool_call.tool].invoke(tool_call.args) 140 | # Format the result using LLM 141 | result = self.format_tool_result(tool_call.tool, raw_result, user_query) 142 | else: 143 | result = "Error: Unknown tool" 144 | except Exception as e: 145 | result = f"Error processing tool call: {str(e)}" 146 | else: 147 | result = last_response 148 | 149 | return {"messages": [{"content": result}]} 150 | 151 | def create_react_agent_taot(model, tools) -> ManualToolAgent: 152 | """ 153 | Create a React agent with manual tool handling. 
154 | 155 | Args: 156 | model (ChatOpenAI): The language model to use 157 | tools (List[Callable]): List of tool functions 158 | 159 | Returns: 160 | ManualToolAgent: Agent with manual tool handling 161 | """ 162 | return ManualToolAgent(model, tools) 163 | -------------------------------------------------------------------------------- /tutorial/tutorial_Bigtool_DeepSeek_R1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# LangGraph's Bigtool with DeepSeek-R1 Tutorial\n", 8 | "\n", 9 | "This notebook tutorial showcases a guide on how to implement LangGraph's Bigtool method using DeepSeek-R1 671B.\n", 10 | "\n", 11 | "**Takeaway:** This notebook tutorial demonstrates that even without having DeepSeek-R1 fine-tuned for tool calling or even without using my Tool-Ahead-of-Time package, LangGraph's Bigtool method still works with DeepSeek-R1 671B. This is likely because DeepSeek-R1 671B is a reasoning model and also how the prompts are written within LangGraph's Bigtool class." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": {}, 18 | "outputs": [ 19 | { 20 | "name": "stderr", 21 | "output_type": "stream", 22 | "text": [ 23 | "C:\\Users\\leo_c\\AppData\\Local\\Temp\\ipykernel_55696\\1597647674.py:29: LangChainBetaWarning: The function `convert_positional_only_function_to_tool` is in beta. It is actively being worked on, so the API may change.\n", 24 | " if tool := convert_positional_only_function_to_tool(\n", 25 | "C:\\Users\\leo_c\\AppData\\Local\\Temp\\ipykernel_55696\\1597647674.py:43: LangChainBetaWarning: The function `init_embeddings` is in beta. It is actively being worked on, so the API may change.\n", 26 | " embeddings = init_embeddings(\"openai:text-embedding-3-small\")\n" 27 | ] 28 | }, 29 | { 30 | "name": "stdout", 31 | "output_type": "stream", 32 | "text": [ 33 | "\n" 34 | ] 35 | } 36 | ], 37 | "source": [ 38 | "import math\n", 39 | "import types\n", 40 | "import uuid\n", 41 | "from dotenv import load_dotenv\n", 42 | "import os\n", 43 | "\n", 44 | "from langchain.embeddings import init_embeddings\n", 45 | "from langgraph.store.memory import InMemoryStore\n", 46 | "\n", 47 | "from langgraph_bigtool import create_agent\n", 48 | "from langgraph_bigtool.utils import (\n", 49 | " convert_positional_only_function_to_tool\n", 50 | ")\n", 51 | "from langchain_openai import ChatOpenAI\n", 52 | "\n", 53 | "# Load environment variables (ie. API keys) from .env file\n", 54 | "load_dotenv()\n", 55 | "\n", 56 | "# Collect functions from `math` built-in\n", 57 | "all_tools = []\n", 58 | "for function_name in dir(math):\n", 59 | " function = getattr(math, function_name)\n", 60 | " if not isinstance(\n", 61 | " function, types.BuiltinFunctionType\n", 62 | " ):\n", 63 | " continue\n", 64 | " # This is an idiosyncrasy of the `math` library\n", 65 | " if tool := convert_positional_only_function_to_tool(\n", 66 | " function\n", 67 | " ):\n", 68 | " all_tools.append(tool)\n", 69 | "\n", 70 | "# Create registry of tools. 
This is a dict mapping\n", 71 | "# identifiers to tool instances.\n", 72 | "tool_registry = {\n", 73 | " str(uuid.uuid4()): tool\n", 74 | " for tool in all_tools\n", 75 | "}\n", 76 | "\n", 77 | "# Index tool names and descriptions in the LangGraph Store\n", 78 | "# Here we use a simple in-memory store, using OpenAI's text-embedding-3-small embedding model (make sure you have your OPENAI_API_KEY saved in your .env file)\n", 79 | "embeddings = init_embeddings(\"openai:text-embedding-3-small\")\n", 80 | "\n", 81 | "store = InMemoryStore(\n", 82 | " index={\n", 83 | " \"embed\": embeddings,\n", 84 | " \"dims\": 1536,\n", 85 | " \"fields\": [\"description\"],\n", 86 | " }\n", 87 | ")\n", 88 | "for tool_id, tool in tool_registry.items():\n", 89 | " store.put(\n", 90 | " (\"tools\",),\n", 91 | " tool_id,\n", 92 | " {\n", 93 | " \"description\": f\"{tool.name}: {tool.description}\",\n", 94 | " },\n", 95 | " )\n", 96 | "\n", 97 | "# Initialize agent\n", 98 | "llm = ChatOpenAI(\n", 99 | " model=\"deepseek/deepseek-r1\",\n", 100 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 101 | " base_url=\"https://openrouter.ai/api/v1\"\n", 102 | ")\n", 103 | "\n", 104 | "builder = create_agent(llm, tool_registry)\n", 105 | "agent = builder.compile(store=store)\n", 106 | "print(agent)" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 2, 112 | "metadata": {}, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 119 | "Tool Calls:\n", 120 | " retrieve_tools (call_1krw21tnqubf094g5v2lnmqd)\n", 121 | " Call ID: call_1krw21tnqubf094g5v2lnmqd\n", 122 | " Args:\n", 123 | " query: arc cosine calculator\n", 124 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 125 | "\n", 126 | "Available tools: ['cos', 'acos']\n", 127 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 128 | "Tool Calls:\n", 129 | " acos (call_qti8comwhr2gjk21rj85xxrc)\n", 130 | " Call ID: call_qti8comwhr2gjk21rj85xxrc\n", 131 | " Args:\n", 132 | " x: 0.5\n", 133 | "=================================\u001b[1m Tool Message \u001b[0m=================================\n", 134 | "Name: acos\n", 135 | "\n", 136 | "1.0471975511965979\n", 137 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n", 138 | "\n", 139 | "The arc cosine of 0.5 is **1.0472 radians** (which equals **60 degrees**). \n", 140 | "\n", 141 | "This is because \\(\\cos(60^\\circ) = 0.5\\), so \\(\\arccos(0.5) = 60^\\circ\\) or \\(\\frac{\\pi}{3}\\) radians. 
😊\n" 142 | ] 143 | } 144 | ], 145 | "source": [ 146 | "query = \"Use available tools to calculate arc cosine of 0.5.\"\n", 147 | "\n", 148 | "# Test it out\n", 149 | "for step in agent.stream(\n", 150 | " {\"messages\": query},\n", 151 | " stream_mode=\"updates\",\n", 152 | "):\n", 153 | " for _, update in step.items():\n", 154 | " for message in update.get(\"messages\", []):\n", 155 | " message.pretty_print()" 156 | ] 157 | } 158 | ], 159 | "metadata": { 160 | "kernelspec": { 161 | "display_name": "base", 162 | "language": "python", 163 | "name": "python3" 164 | }, 165 | "language_info": { 166 | "codemirror_mode": { 167 | "name": "ipython", 168 | "version": 3 169 | }, 170 | "file_extension": ".py", 171 | "mimetype": "text/x-python", 172 | "name": "python", 173 | "nbconvert_exporter": "python", 174 | "pygments_lexer": "ipython3", 175 | "version": "3.11.11" 176 | } 177 | }, 178 | "nbformat": 4, 179 | "nbformat_minor": 2 180 | } 181 | -------------------------------------------------------------------------------- /tutorial/tutorial_McpAdapters_DeepSeek_R1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# MCP with DeepSeek-R1 Tutorial\n", 8 | "\n", 9 | "This notebook tutorial showcases a step-by-step guide on how to implement DeepSeek-R1 connected to tools in MCP servers, using LangChain's MCP Adapters library (here: https://github.com/langchain-ai/langchain-mcp-adapters).\n", 10 | "\n", 11 | "I am using MCP servers from an MPC server registry/depository called MCP Server Cloud (here: https://mcpserver.cloud/, or their GitHub repo here: https://github.com/modelcontextprotocol).\n", 12 | "\n", 13 | "I will be connecting DeepSeek-R1 to 2 MCP servers, with 1 tool in each MCP server. Namely, I will be using the Brave Search MCP Server (here: https://mcpserver.cloud/server/server-brave-search) and the AccuWeather MCP Server (here: https://mcpserver.cloud/server/mcp-weather-server).\n", 14 | "\n", 15 | "To use the Brave Search MCP Server and the AccuWeather MCP Server, you will need to create a Brave Browser API key (here: https://brave.com/search/api/) and an AccuWeather API key (here: https://developer.accuweather.com/getting-started), respectively. They are both free and it's fairly straight forward to do this (but note creating a Brave Browser API key require a credit card even for the free subscription). Just ask any AI for the step-by-step guide to do this.\n", 16 | "\n", 17 | "Once you have your Brave Browser and AccuWeather API keys, save them in a .env file, along with an OpenRouter API key (for this notebook tutorial I will be using DeepSeek-R1 hosted on OpenRouter). This .env file is saved in the same folder as where this Jupyter Notebook will be saved.\n", 18 | "\n", 19 | "Now that we have all the above setup, let's get into the more technical part of this notebook tutorial. How LangChain's MCP Adapters library works is it convert tools in MCP servers into LangChain tools, so then these LangChain tools can be used within the LangChain/LangGraph framework. Yes, it's as simple as that!\n", 20 | "\n", 21 | "Currently MCP servers are still in it's early development stages and so MCP servers doesn't yet have a direct SSE (Server-Sent Events) connection. To fix this, I have used a package called Supergateway (here: https://github.com/supercorp-ai/supergateway) which establishes a SSE connection for MCP servers. 
[Note: Currently there are several other ways to connect to MCP servers including downloading MCP servers into your local device and then connecting with the MCP server locally in your device using a Python package called langchain-mcp-tools (here: https://github.com/hideya/langchain-mcp-tools-py, where support for remote MCP server connection is currently experimental) or using the docker approach (here: https://www.youtube.com/watch?v=rdvt1qBZJtI), but I have chosen to use the Supergateway package approach as it is more realistic to connect to remote servers via SSE connections. The Supergateway package is run using npx (which is available in Node.js) which means if you haven't already, you will need to download Node.js (from here: https://nodejs.org/en/download) in order to use the Supergateway package via npx.]\n", 22 | "\n", 23 | "Referring to the instructions in the README file in the Supergateway's GitHub repo, in particular the \"stdio → SSE\" section (\"Expose an MCP stdio server as an SSE server:\"):\n", 24 | "- To establish a SSE connection for the Brave Search MCP Server using Supergateway, run the following command below in your IDE's (for eg. Cursor or VS Code) Terminal window (where this will use port 8001):\n", 25 | "`npx -y supergateway --stdio \"npx -y @modelcontextprotocol/server-brave-search\" --port 8001 --baseUrl http://localhost:8001 --ssePath /sse --messagePath /message`\n", 26 | "- To establish a SSE connection for the AccuWeather MCP Server using Supergateway, open a 2nd Terminal window in your IDE and run the following command below in this 2nd Terminal window (where this will use port 8002):\n", 27 | "`npx -y supergateway --stdio \"uvx --from git+https://github.com/adhikasp/mcp-weather.git mcp-weather\" --port 8002 --baseUrl http://localhost:8002 --ssePath /sse --messagePath /message`\n", 28 | "\n", 29 | "**Tip:** If you are unsure how to write the commands above for other MCP servers, just copy and paste the entire README file instructions in Supergateway's GitHub repo and the entire content of the MCP server page in the MCP Server Cloud registry/depository wesbite (for eg. for the Brave Search MCP Server, copy and paste the entire content from this page from the MCP Server Cloud registry/depository website: https://mcpserver.cloud/server/server-brave-search) into an AI and ask the AI to give you the \"stdio → SSE\" command.\n", 30 | "\n", 31 | "Now that you have both the Brave Search MCP Server and AccuWeather MCP Server SSE connections running, you can now run the Python script below which uses `http://localhost:8001/sse` for the Brave Search MCP Server and `http://localhost:8002/sse` for the AccuWeather MCP Server.\n", 32 | "\n", 33 | "Remember once you are done with using the MCP Servers, you can close off or disconnect the MCP Server's SSE connections by typing \"CTRL\" + \"C\" keys in your IDE's Terminal window.\n", 34 | "\n", 35 | "**Takeaway:** This notebook tutorial demonstrates that even without having DeepSeek-R1 fine-tuned for tool calling or even without using my Tool-Ahead-of-Time package (since LangChain's MCP Adapters library works by converting tools in MCP servers into LangChain tools), MCP (via LangChain's MCP Adapters library) still works with DeepSeek-R1. This is likely because DeepSeek-R1 671B is a reasoning model and also how the prompts are written within LangChain's MCP Adapters library." 
36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [ 43 | { 44 | "data": { 45 | "text/plain": [ 46 | ">" 47 | ] 48 | }, 49 | "execution_count": 2, 50 | "metadata": {}, 51 | "output_type": "execute_result" 52 | }, 53 | { 54 | "name": "stdout", 55 | "output_type": "stream", 56 | "text": [ 57 | "Here are the latest AI news highlights from my search:\n", 58 | "\n", 59 | "1. **Apple's Health AI Development** \n", 60 | " Apple is reportedly working on an AI-powered health coach called \"AI Doctors\" that could analyze user data from Apple Watch and iPhone to provide personalized health advice. This aligns with their broader health tech ambitions (NYT, PCMag).\n", 61 | "\n", 62 | "2. **Google's New AI Model** \n", 63 | " Google released Gemini 2.5 Pro (experimental), now available to free users. This advanced model improves reasoning and multimodal capabilities, expanding access to cutting-edge AI tools (Engadget).\n", 64 | "\n", 65 | "3. **Creative AI Tools** \n", 66 | " Free ChatGPT users can now generate Studio Ghibli-style images using DALL-E 3, though this has sparked debate. Prominent animators criticize AI art as \"an insult to life itself\" (Hindustan Times).\n", 67 | "\n", 68 | "4. **AI News Reliability** \n", 69 | " The New York Times has corrected dozens of AI-generated news summaries in 2024, highlighting ongoing challenges with accuracy in automated content creation.\n", 70 | "\n", 71 | "---\n", 72 | "\n", 73 | "**For deeper dives, check these trusted sources:** \n", 74 | "- Dedicated AI News: [Artificial Intelligence-News](https://www.artificialintelligence-news.com/) \n", 75 | "- Tech Updates: [NBC News AI Section](https://www.nbcnews.com/artificial-intelligence) \n", 76 | "- Research Frontiers: [Google AI Blog](https://ai.google/latest-news/) \n", 77 | "\n", 78 | "Would you like details on any specific story? 🤖\n", 79 | "Here's the weather forecast for Sydney tomorrow based on the latest information:\n", 80 | "\n", 81 | "**Sydney Weather Forecast - Tomorrow:**\n", 82 | "- **Temperature:** High of 72°F (22°C), Low of 62°F (17°C)\n", 83 | "- **Conditions:** Windy with a mix of clouds and sunshine\n", 84 | "- **Rain:** 100% chance of rain tonight easing to showers tomorrow\n", 85 | "- **Wind:** Southwesterly winds at 15-25 mph (24-40 km/h)\n", 86 | "\n", 87 | "**Key Details:**\n", 88 | "1. Morning showers likely, becoming partly cloudy by afternoon.\n", 89 | "2. Potential rainfall: Up to 0.25 inches (6mm) overnight.\n", 90 | "3. UV Index: Moderate (sun protection recommended during peak hours).\n", 91 | "\n", 92 | "For official updates, check Australia's Bureau of Meteorology: http://www.bom.gov.au/nsw/forecasts/sydney.shtml\n", 93 | "\n", 94 | "Would you like clarification on any specific aspect of the forecast?\n" 95 | ] 96 | } 97 | ], 98 | "source": [ 99 | "from dotenv import load_dotenv\n", 100 | "import os\n", 101 | "from langchain_openai import ChatOpenAI\n", 102 | "from langchain_mcp_adapters.client import MultiServerMCPClient\n", 103 | "from langgraph.prebuilt import create_react_agent\n", 104 | "import nest_asyncio\n", 105 | "import asyncio\n", 106 | "\n", 107 | "# Apply the nest_asyncio patch\n", 108 | "nest_asyncio.apply()\n", 109 | "\n", 110 | "# Load environment variable (ie. 
API key) from .env file\n", 111 | "load_dotenv()\n", 112 | "\n", 113 | "# Initialize model\n", 114 | "model = ChatOpenAI(\n", 115 | " model=\"deepseek/deepseek-r1\",\n", 116 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 117 | " base_url=\"https://openrouter.ai/api/v1\"\n", 118 | ")\n", 119 | "\n", 120 | "# Define the main asynchronous function\n", 121 | "async def main():\n", 122 | " # Configure the MCP clients for Brave Search and Weather servers\n", 123 | " async with MultiServerMCPClient(\n", 124 | " {\n", 125 | " \"brave-search\": {\n", 126 | " \"url\": \"http://localhost:8001/sse\",\n", 127 | " \"transport\": \"sse\",\n", 128 | " \"headers\": {\n", 129 | " \"Authorization\": os.environ[\"BRAVE_API_KEY\"] # Replace with your Brave Search API key\n", 130 | " }\n", 131 | " },\n", 132 | " \"weather\": {\n", 133 | " \"url\": \"http://localhost:8002/sse\",\n", 134 | " \"transport\": \"sse\",\n", 135 | " \"headers\": {\n", 136 | " \"Authorization\": os.environ[\"ACCUWEATHER_API_KEY\"] # Replace with your AccuWeather API key\n", 137 | " }\n", 138 | " }\n", 139 | " }\n", 140 | " ) as client:\n", 141 | " # Create the agent with access to the tools provided by the MCP servers\n", 142 | " agent = create_react_agent(model, client.get_tools())\n", 143 | "\n", 144 | " # Example usage: Perform a web search using Brave Search\n", 145 | " search_response = await agent.ainvoke({\n", 146 | " \"messages\": \"Search for the latest news on AI.\"\n", 147 | " })\n", 148 | " print(search_response['messages'][-1].content)\n", 149 | "\n", 150 | " # Example usage: Get the weather forecast using the Weather MCP Server\n", 151 | " weather_response = await agent.ainvoke({\n", 152 | " \"messages\": \"What's the weather forecast for Sydney tomorrow?\"\n", 153 | " })\n", 154 | " print(weather_response['messages'][-1].content)\n", 155 | "\n", 156 | "# Run the main function using asyncio.ensure_future\n", 157 | "asyncio.ensure_future(main())\n" 158 | ] 159 | } 160 | ], 161 | "metadata": { 162 | "kernelspec": { 163 | "display_name": "base", 164 | "language": "python", 165 | "name": "python3" 166 | }, 167 | "language_info": { 168 | "codemirror_mode": { 169 | "name": "ipython", 170 | "version": 3 171 | }, 172 | "file_extension": ".py", 173 | "mimetype": "text/x-python", 174 | "name": "python", 175 | "nbconvert_exporter": "python", 176 | "pygments_lexer": "ipython3", 177 | "version": "3.11.11" 178 | } 179 | }, 180 | "nbformat": 4, 181 | "nbformat_minor": 2 182 | } 183 | -------------------------------------------------------------------------------- /tutorial/taot_tutorial_ChatOpenAI_QwQ32B.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tool Ahead of Time (TAoT) Tutorial\n", 8 | "\n", 9 | "Lets jump straight into this tutorial, as time waits for no one 😊\n", 10 | "\n", 11 | "This tutorial uses the QwQ-32B model, but this tutorial can also be applied to other models available through Langchain's ChatOpenAI class.\n", 12 | "\n", 13 | "First, pip install the taot package, as below:" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "%pip install taot" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "If you haven't already, also pip install the other dependencies required in this tutorial:" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | 
"execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "%pip install langchain-core langchain-openai" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Creating Tools\n", 46 | "\n", 47 | "Next, we create tool functions using LangChain's `@tool` decorator.\n", 48 | "\n", 49 | "This is just any function (with inputs and outputs) and `@tool` added at the top of the function.\n", 50 | "\n", 51 | "I have created two tool functions 'calculator' and 'text_analyzer' below:" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "from langchain_core.tools import tool\n", 61 | "\n", 62 | "@tool\n", 63 | "def calculator(expression: str) -> str:\n", 64 | " \"\"\"Evaluate a math expression.\"\"\"\n", 65 | " try:\n", 66 | " expression = expression.strip()\n", 67 | " if not expression:\n", 68 | " return \"Error: Empty expression\"\n", 69 | " \n", 70 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 71 | " if not all(c in allowed_chars for c in expression):\n", 72 | " return \"Error: Invalid characters in expression\"\n", 73 | " \n", 74 | " result = eval(expression)\n", 75 | " return str(result)\n", 76 | " except Exception as e:\n", 77 | " return f\"Error: {str(e)}\"\n", 78 | "\n", 79 | "@tool\n", 80 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 81 | " \"\"\"\n", 82 | " Analyze text to count either words or characters.\n", 83 | " \n", 84 | " Args:\n", 85 | " text (str): The text to analyze\n", 86 | " analysis_type (str): Either 'words' or 'chars'\n", 87 | " \"\"\"\n", 88 | " try:\n", 89 | " text = text.strip()\n", 90 | " if not text:\n", 91 | " return \"Error: Empty text\"\n", 92 | " \n", 93 | " if analysis_type.lower() == 'words':\n", 94 | " word_count = len(text.split())\n", 95 | " return f\"{word_count}\"\n", 96 | " elif analysis_type.lower() == 'chars':\n", 97 | " char_count = len(text)\n", 98 | " return f\"{char_count}\"\n", 99 | " else:\n", 100 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 101 | " except Exception as e:\n", 102 | " return f\"Error: {str(e)}\"" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Initialize Model\n", 110 | "\n", 111 | "Now, initialize a model instance using the format below. \n", 112 | "\n", 113 | "In this tutorial, I am using the QwQ 32B model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class.\n", 114 | "\n", 115 | "If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain's ChatOpenAI class, and then change the values of the parameters `model`, `api_key` and `base_url` below according to which model and platform you have chosen." 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 2, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "from dotenv import load_dotenv\n", 125 | "from langchain_openai import ChatOpenAI\n", 126 | "import os\n", 127 | "\n", 128 | "# Load environment variable (ie. 
API key) from .env file\n", 129 | "load_dotenv()\n", 130 | "\n", 131 | "# Initialize model\n", 132 | "model = ChatOpenAI(\n", 133 | " model=\"qwen/qwq-32b\",\n", 134 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 135 | " base_url=\"https://openrouter.ai/api/v1\"\n", 136 | ")" 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "## Previous Messages\n", 144 | "\n", 145 | "Next, if you already have a history of previous messages between the user and the chatbot, store them in the format below.\n", 146 | "\n", 147 | "Note: The format of the previous messages does not include the system message (which we will define later further down in this notebook). This design is chosen according to current best practices in chatbot design where we isolate the system message from previous messages." 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 3, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "# Example previous messages\n", 157 | "previous_messages = [\n", 158 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 159 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 160 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 161 | "]" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "## Getting Model Response\n", 169 | "\n", 170 | "Finally, now the fun part where we get to see the response of the model using tool calling! 🛠️\n", 171 | "\n", 172 | "For ease of use, I have designed the taot package to mimic LangChain's and LangGraph's `create_react_agent` method with tool calling, ie. the taot package follows a similar method to LangChain's and LangGraph's:\n", 173 | "\n", 174 | "```\n", 175 | "from langgraph.prebuilt import create_react_agent\n", 176 | "\n", 177 | "agent_executor = create_react_agent(model, tools=[])\n", 178 | "response = agent_executor.invoke({\"messages\": all_messages})\n", 179 | "print(response[\"messages\"][-1].content)\n", 180 | "```\n", 181 | "\n", 182 | "First, the `system_message` variable below can start with any customized system message as per usual, for eg. \"You are a helpful assistant. \", \"You are an expert programmer in Python. \", \"You are a world class expert in SEO optimization. \" etc.\n", 183 | "\n", 184 | "Then, the `system_message` variable below needs to **STRICTLY** include the following: \"You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool.\"\n", 185 | "\n", 186 | "For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the `system_message` variable below would need to look like \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. 
For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 187 | "\n", 188 | "For the 'text_analyze' tool, since the function for the 'text_analyze' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the `system_message` variable below would need to look like \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 189 | "\n", 190 | "Below are five examples of different combinations of user questions and tools used:" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": 4, 196 | "metadata": {}, 197 | "outputs": [ 198 | { 199 | "name": "stdout", 200 | "output_type": "stream", 201 | "text": [ 202 | "The product of 123 and 456 is 56,088.\n", 203 | "There are 7 words in the sentence.\n", 204 | "The result of 123 multiplied by 456 is **56,088**.\n", 205 | "There are 7 words in the sentence.\n", 206 | "The number of languages in the world is difficult to pinpoint exactly, but the most commonly cited estimate from Ethnologue is **over 7,000 living languages**. This number can vary due to differing definitions of \"language\" and ongoing documentation efforts.\n" 207 | ] 208 | } 209 | ], 210 | "source": [ 211 | "from taot import create_system_message_taot, create_react_agent_taot\n", 212 | "\n", 213 | "# Example for calculator tool only\n", 214 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 215 | "system_message_taot = create_system_message_taot(system_message)\n", 216 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 217 | "# Add previous messages (if available)\n", 218 | "all_messages.extend(previous_messages)\n", 219 | "# Add current user prompt\n", 220 | "user_message = \"What is 123 * 456?\"\n", 221 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 222 | "# Get model response\n", 223 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 224 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 225 | "print(response['messages'][0]['content'])\n", 226 | "\n", 227 | "# Example for text analyzer tool only\n", 228 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 229 | "system_message_taot = create_system_message_taot(system_message)\n", 230 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 231 | "# Add previous messages (if available)\n", 232 | "all_messages.extend(previous_messages)\n", 233 | "# Add current user prompt\n", 234 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 235 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 236 | "# Get model response\n", 237 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 238 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 239 | "print(response['messages'][0]['content'])\n", 240 | "\n", 241 | "# Example for both tools with user question requiring math calculation\n", 242 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 243 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 244 | "system_message_taot = create_system_message_taot(system_message)\n", 245 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 246 | "# Add previous messages (if available)\n", 247 | "all_messages.extend(previous_messages)\n", 248 | "# Add current user prompt\n", 249 | "user_message = \"What is 123 * 456?\"\n", 250 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 251 | "# Get model response\n", 252 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 253 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 254 | "print(response['messages'][0]['content'])\n", 255 | "\n", 256 | "# Example for both tools with user question requiring analysis the text\n", 257 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 258 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 259 | "system_message_taot = create_system_message_taot(system_message)\n", 260 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 261 | "# Add previous messages (if available)\n", 262 | "all_messages.extend(previous_messages)\n", 263 | "# Add current user prompt\n", 264 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 265 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 266 | "# Get model response\n", 267 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 268 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 269 | "print(response['messages'][0]['content'])\n", 270 | "\n", 271 | "# Example for both tools with user question not requiring any tools\n", 272 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 273 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 274 | "system_message_taot = create_system_message_taot(system_message)\n", 275 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 276 | "# Add previous messages (if available)\n", 277 | "all_messages.extend(previous_messages)\n", 278 | "# Add current user prompt\n", 279 | "user_message = \"How many languages are there in the world?\"\n", 280 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 281 | "# Get model response\n", 282 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 283 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 284 | "print(response['messages'][0]['content'])\n" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "## Summary\n", 292 | "\n", 293 | "Putting all the scripts above together:" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": null, 299 | "metadata": {}, 300 | "outputs": [ 301 | { 302 | "name": "stdout", 303 | "output_type": "stream", 304 | "text": [ 305 | "The result of 123 multiplied by 456 is 56,088.\n", 306 | "There are 7 words in the sentence.\n", 307 | "123 multiplied by 456 equals 56,088.\n", 308 | "There are 7 words in the sentence.\n", 309 | "The number of languages in the world is Estimated to be over 7,000, based on sources like Ethnologue. 
However, this number can vary depending on classification criteria and ongoing documentation efforts.\n" 310 | ] 311 | } 312 | ], 313 | "source": [ 314 | "from langchain_core.tools import tool\n", 315 | "from dotenv import load_dotenv\n", 316 | "from langchain_openai import ChatOpenAI\n", 317 | "import os\n", 318 | "from taot import create_system_message_taot, create_react_agent_taot\n", 319 | "\n", 320 | "@tool\n", 321 | "def calculator(expression: str) -> str:\n", 322 | " \"\"\"Evaluate a math expression.\"\"\"\n", 323 | " try:\n", 324 | " expression = expression.strip()\n", 325 | " if not expression:\n", 326 | " return \"Error: Empty expression\"\n", 327 | " \n", 328 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 329 | " if not all(c in allowed_chars for c in expression):\n", 330 | " return \"Error: Invalid characters in expression\"\n", 331 | " \n", 332 | " result = eval(expression)\n", 333 | " return str(result)\n", 334 | " except Exception as e:\n", 335 | " return f\"Error: {str(e)}\"\n", 336 | "\n", 337 | "@tool\n", 338 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 339 | " \"\"\"\n", 340 | " Analyze text to count either words or characters.\n", 341 | " \n", 342 | " Args:\n", 343 | " text (str): The text to analyze\n", 344 | " analysis_type (str): Either 'words' or 'chars'\n", 345 | " \"\"\"\n", 346 | " try:\n", 347 | " text = text.strip()\n", 348 | " if not text:\n", 349 | " return \"Error: Empty text\"\n", 350 | " \n", 351 | " if analysis_type.lower() == 'words':\n", 352 | " word_count = len(text.split())\n", 353 | " return f\"{word_count}\"\n", 354 | " elif analysis_type.lower() == 'chars':\n", 355 | " char_count = len(text)\n", 356 | " return f\"{char_count}\"\n", 357 | " else:\n", 358 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 359 | " except Exception as e:\n", 360 | " return f\"Error: {str(e)}\"\n", 361 | " \n", 362 | "# Load environment variable (ie. API key) from .env file\n", 363 | "load_dotenv()\n", 364 | "\n", 365 | "# Initialize model\n", 366 | "model = ChatOpenAI(\n", 367 | " model=\"qwen/qwq-32b\",\n", 368 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 369 | " base_url=\"https://openrouter.ai/api/v1\"\n", 370 | ")\n", 371 | "\n", 372 | "# Example previous messages\n", 373 | "previous_messages = [\n", 374 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 375 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 376 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 377 | "]\n", 378 | "\n", 379 | "# Example for calculator tool only\n", 380 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. 
For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 381 | "system_message_taot = create_system_message_taot(system_message)\n", 382 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 383 | "# Add previous messages (if available)\n", 384 | "all_messages.extend(previous_messages)\n", 385 | "# Add current user prompt\n", 386 | "user_message = \"What is 123 * 456?\"\n", 387 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 388 | "# Get model response\n", 389 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 390 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 391 | "print(response['messages'][0]['content'])\n", 392 | "\n", 393 | "# Example for text analyzer tool only\n", 394 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 395 | "system_message_taot = create_system_message_taot(system_message)\n", 396 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 397 | "# Add previous messages (if available)\n", 398 | "all_messages.extend(previous_messages)\n", 399 | "# Add current user prompt\n", 400 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 401 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 402 | "# Get model response\n", 403 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 404 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 405 | "print(response['messages'][0]['content'])\n", 406 | "\n", 407 | "# Example for both tools with user question requiring math calculation\n", 408 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 409 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 410 | "system_message_taot = create_system_message_taot(system_message)\n", 411 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 412 | "# Add previous messages (if available)\n", 413 | "all_messages.extend(previous_messages)\n", 414 | "# Add current user prompt\n", 415 | "user_message = \"What is 123 * 456?\"\n", 416 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 417 | "# Get model response\n", 418 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 419 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 420 | "print(response['messages'][0]['content'])\n", 421 | "\n", 422 | "# Example for both tools with user question requiring analysis the text\n", 423 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 424 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 425 | "system_message_taot = create_system_message_taot(system_message)\n", 426 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 427 | "# Add previous messages (if available)\n", 428 | "all_messages.extend(previous_messages)\n", 429 | "# Add current user prompt\n", 430 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 431 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 432 | "# Get model response\n", 433 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 434 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 435 | "print(response['messages'][0]['content'])\n", 436 | "\n", 437 | "# Example for both tools with user question not requiring any tools\n", 438 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 439 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 440 | "system_message_taot = create_system_message_taot(system_message)\n", 441 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 442 | "# Add previous messages (if available)\n", 443 | "all_messages.extend(previous_messages)\n", 444 | "# Add current user prompt\n", 445 | "user_message = \"How many languages are there in the world?\"\n", 446 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 447 | "# Get model response\n", 448 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 449 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 450 | "print(response['messages'][0]['content'])\n", 451 | "\n" 452 | ] 453 | } 454 | ], 455 | "metadata": { 456 | "kernelspec": { 457 | "display_name": "base", 458 | "language": "python", 459 | "name": "python3" 460 | }, 461 | "language_info": { 462 | "codemirror_mode": { 463 | "name": "ipython", 464 | "version": 3 465 | }, 466 | "file_extension": ".py", 467 | "mimetype": "text/x-python", 468 | "name": "python", 469 | "nbconvert_exporter": "python", 470 | "pygments_lexer": "ipython3", 471 | "version": "3.10.16" 472 | } 473 | }, 474 | "nbformat": 4, 475 | "nbformat_minor": 2 476 | } 477 | -------------------------------------------------------------------------------- /tutorial/taot_tutorial_ChatOpenAI.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tool Ahead of Time (TAoT) Tutorial\n", 8 | "\n", 9 | "Lets jump straight into this tutorial, as time waits for no one 😊\n", 10 | "\n", 11 | "This tutorial uses the DeepSeek-R1 671B model, but this tutorial can also be applied to other models available through Langchain's ChatOpenAI class.\n", 12 | "\n", 13 | "First, pip install the taot package, as below:" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "%pip install taot" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "If you haven't already, also pip install the other dependencies required in this tutorial:" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "%pip install langchain-core langchain-openai" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Creating Tools\n", 46 | "\n", 47 | "Next, we create tool functions using LangChain's `@tool` decorator.\n", 48 | "\n", 49 | "This is just any function (with inputs and outputs) and `@tool` added at the top of the function.\n", 50 | "\n", 51 | "I have created two tool functions 'calculator' and 'text_analyzer' below:" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "from langchain_core.tools import tool\n", 61 | "\n", 62 | "@tool\n", 63 | "def calculator(expression: str) -> str:\n", 64 | " \"\"\"Evaluate a math expression.\"\"\"\n", 65 | " try:\n", 66 | " expression = expression.strip()\n", 67 | " if not expression:\n", 68 | " return \"Error: Empty expression\"\n", 69 | " \n", 70 | " 
allowed_chars = set(\"0123456789+-*/(). \")\n", 71 | " if not all(c in allowed_chars for c in expression):\n", 72 | " return \"Error: Invalid characters in expression\"\n", 73 | " \n", 74 | " result = eval(expression)\n", 75 | " return str(result)\n", 76 | " except Exception as e:\n", 77 | " return f\"Error: {str(e)}\"\n", 78 | "\n", 79 | "@tool\n", 80 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 81 | " \"\"\"\n", 82 | " Analyze text to count either words or characters.\n", 83 | " \n", 84 | " Args:\n", 85 | " text (str): The text to analyze\n", 86 | " analysis_type (str): Either 'words' or 'chars'\n", 87 | " \"\"\"\n", 88 | " try:\n", 89 | " text = text.strip()\n", 90 | " if not text:\n", 91 | " return \"Error: Empty text\"\n", 92 | " \n", 93 | " if analysis_type.lower() == 'words':\n", 94 | " word_count = len(text.split())\n", 95 | " return f\"{word_count}\"\n", 96 | " elif analysis_type.lower() == 'chars':\n", 97 | " char_count = len(text)\n", 98 | " return f\"{char_count}\"\n", 99 | " else:\n", 100 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 101 | " except Exception as e:\n", 102 | " return f\"Error: {str(e)}\"" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Initialize Model\n", 110 | "\n", 111 | "Now, initialize a model instance using the format below. \n", 112 | "\n", 113 | "In this tutorial, I am using the DeepSeek-R1 model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class.\n", 114 | "\n", 115 | "If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain's ChatOpenAI class, and then change the values of the parameters `model`, `api_key` and `base_url` below according to which model and platform you have chosen." 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 2, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "from dotenv import load_dotenv\n", 125 | "from langchain_openai import ChatOpenAI\n", 126 | "import os\n", 127 | "\n", 128 | "# Load environment variable (ie. API key) from .env file\n", 129 | "load_dotenv()\n", 130 | "\n", 131 | "# Initialize model\n", 132 | "model = ChatOpenAI(\n", 133 | " model=\"deepseek/deepseek-r1\",\n", 134 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 135 | " base_url=\"https://openrouter.ai/api/v1\"\n", 136 | ")" 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "## Previous Messages\n", 144 | "\n", 145 | "Next, if you already have a history of previous messages between the user and the chatbot, store them in the format below.\n", 146 | "\n", 147 | "Note: The format of the previous messages does not include the system message (which we will define later further down in this notebook). This design is chosen according to current best practices in chatbot design where we isolate the system message from previous messages." 
148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 3, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "# Example previous messages\n", 157 | "previous_messages = [\n", 158 | "    # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 159 | "    {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 160 | "    {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 161 | "]" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "## Getting Model Response\n", 169 | "\n", 170 | "Finally, now the fun part where we get to see the response of the model using tool calling! 🛠️\n", 171 | "\n", 172 | "For ease of use, I have designed the taot package to mimic LangChain's and LangGraph's `create_react_agent` method with tool calling, ie. the taot package follows a similar method to LangChain's and LangGraph's:\n", 173 | "\n", 174 | "```\n", 175 | "from langgraph.prebuilt import create_react_agent\n", 176 | "\n", 177 | "agent_executor = create_react_agent(model, tools=[])\n", 178 | "response = agent_executor.invoke({\"messages\": all_messages})\n", 179 | "print(response[\"messages\"][-1].content)\n", 180 | "```\n", 181 | "\n", 182 | "First, the `system_message` variable below can start with any customized system message as per usual, for eg. \"You are a helpful assistant. \", \"You are an expert programmer in Python. \", \"You are a world class expert in SEO optimization. \" etc.\n", 183 | "\n", 184 | "Then, the `system_message` variable below needs to **STRICTLY** include the following: \"You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool.\"\n", 185 | "\n", 186 | "For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the `system_message` variable below would need to look like \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 187 | "\n", 188 | "For the 'text_analyzer' tool, since the function for the 'text_analyzer' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the `system_message` variable below would need to look like \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool.
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 189 | "\n", 190 | "Below are five examples of different combinations of user questions and tools used:" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "metadata": {}, 197 | "outputs": [ 198 | { 199 | "name": "stdout", 200 | "output_type": "stream", 201 | "text": [ 202 | "The product of 123 multiplied by 456 is **56,088**.\n", 203 | "Your sentence, \"I built my 1st Hello World program,\" contains **7 words**.\n", 204 | "The product of 123 multiplied by 456 is **56,088**.\n", 205 | "There are 7 words in that sentence.\n", 206 | "The exact number varies, but it is estimated there are around 7,000 languages spoken worldwide. However, this number can fluctuate due to factors like language endangerment and classification debates.\n" 207 | ] 208 | } 209 | ], 210 | "source": [ 211 | "from taot import create_system_message_taot, create_react_agent_taot\n", 212 | "\n", 213 | "# Example for calculator tool only\n", 214 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 215 | "system_message_taot = create_system_message_taot(system_message)\n", 216 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 217 | "# Add previous messages (if available)\n", 218 | "all_messages.extend(previous_messages)\n", 219 | "# Add current user prompt\n", 220 | "user_message = \"What is 123 * 456?\"\n", 221 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 222 | "# Get model response\n", 223 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 224 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 225 | "print(response['messages'][0]['content'])\n", 226 | "\n", 227 | "# Example for text analyzer tool only\n", 228 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 229 | "system_message_taot = create_system_message_taot(system_message)\n", 230 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 231 | "# Add previous messages (if available)\n", 232 | "all_messages.extend(previous_messages)\n", 233 | "# Add current user prompt\n", 234 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 235 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 236 | "# Get model response\n", 237 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 238 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 239 | "print(response['messages'][0]['content'])\n", 240 | "\n", 241 | "# Example for both tools with user question requiring math calculation\n", 242 | "system_message = \"\"\"You are an expert in math and linguitics. 
You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 243 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 244 | "system_message_taot = create_system_message_taot(system_message)\n", 245 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 246 | "# Add previous messages (if available)\n", 247 | "all_messages.extend(previous_messages)\n", 248 | "# Add current user prompt\n", 249 | "user_message = \"What is 123 * 456?\"\n", 250 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 251 | "# Get model response\n", 252 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 253 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 254 | "print(response['messages'][0]['content'])\n", 255 | "\n", 256 | "# Example for both tools with user question requiring analysis the text\n", 257 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 258 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 259 | "system_message_taot = create_system_message_taot(system_message)\n", 260 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 261 | "# Add previous messages (if available)\n", 262 | "all_messages.extend(previous_messages)\n", 263 | "# Add current user prompt\n", 264 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 265 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 266 | "# Get model response\n", 267 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 268 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 269 | "print(response['messages'][0]['content'])\n", 270 | "\n", 271 | "# Example for both tools with user question not requiring any tools\n", 272 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 273 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 274 | "system_message_taot = create_system_message_taot(system_message)\n", 275 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 276 | "# Add previous messages (if available)\n", 277 | "all_messages.extend(previous_messages)\n", 278 | "# Add current user prompt\n", 279 | "user_message = \"How many languages are there in the world?\"\n", 280 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 281 | "# Get model response\n", 282 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 283 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 284 | "print(response['messages'][0]['content'])\n" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "## Summary\n", 292 | "\n", 293 | "Putting all the scripts above together:" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": null, 299 | "metadata": {}, 300 | "outputs": [ 301 | { 302 | "name": "stdout", 303 | "output_type": "stream", 304 | "text": [ 305 | "The result of 123 multiplied by 456 is **56,088**.\n", 306 | "There are **7 words** in the sentence: \"I built my 1st Hello World program.\"\n", 307 | "The product of 123 multiplied by 456 is **56,088**.\n", 308 | "Your sentence \"I built my 1st Hello World program\" contains **7 words**.\n", 309 | "The exact number of languages in the world is challenging to determine, but current estimates suggest there are between **6,000 to 7,000 living languages** globally. This number fluctuates due to factors like language endangerment and evolution.\n" 310 | ] 311 | } 312 | ], 313 | "source": [ 314 | "from langchain_core.tools import tool\n", 315 | "from dotenv import load_dotenv\n", 316 | "from langchain_openai import ChatOpenAI\n", 317 | "import os\n", 318 | "from taot import create_system_message_taot, create_react_agent_taot\n", 319 | "\n", 320 | "@tool\n", 321 | "def calculator(expression: str) -> str:\n", 322 | " \"\"\"Evaluate a math expression.\"\"\"\n", 323 | " try:\n", 324 | " expression = expression.strip()\n", 325 | " if not expression:\n", 326 | " return \"Error: Empty expression\"\n", 327 | " \n", 328 | " allowed_chars = set(\"0123456789+-*/(). 
\")\n", 329 | " if not all(c in allowed_chars for c in expression):\n", 330 | " return \"Error: Invalid characters in expression\"\n", 331 | " \n", 332 | " result = eval(expression)\n", 333 | " return str(result)\n", 334 | " except Exception as e:\n", 335 | " return f\"Error: {str(e)}\"\n", 336 | "\n", 337 | "@tool\n", 338 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 339 | " \"\"\"\n", 340 | " Analyze text to count either words or characters.\n", 341 | " \n", 342 | " Args:\n", 343 | " text (str): The text to analyze\n", 344 | " analysis_type (str): Either 'words' or 'chars'\n", 345 | " \"\"\"\n", 346 | " try:\n", 347 | " text = text.strip()\n", 348 | " if not text:\n", 349 | " return \"Error: Empty text\"\n", 350 | " \n", 351 | " if analysis_type.lower() == 'words':\n", 352 | " word_count = len(text.split())\n", 353 | " return f\"{word_count}\"\n", 354 | " elif analysis_type.lower() == 'chars':\n", 355 | " char_count = len(text)\n", 356 | " return f\"{char_count}\"\n", 357 | " else:\n", 358 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 359 | " except Exception as e:\n", 360 | " return f\"Error: {str(e)}\"\n", 361 | " \n", 362 | "# Load environment variable (ie. API key) from .env file\n", 363 | "load_dotenv()\n", 364 | "\n", 365 | "# Initialize model\n", 366 | "model = ChatOpenAI(\n", 367 | " model=\"deepseek/deepseek-r1\",\n", 368 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 369 | " base_url=\"https://openrouter.ai/api/v1\"\n", 370 | ")\n", 371 | "\n", 372 | "# Example previous messages\n", 373 | "previous_messages = [\n", 374 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 375 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 376 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 377 | "]\n", 378 | "\n", 379 | "# Example for calculator tool only\n", 380 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 381 | "system_message_taot = create_system_message_taot(system_message)\n", 382 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 383 | "# Add previous messages (if available)\n", 384 | "all_messages.extend(previous_messages)\n", 385 | "# Add current user prompt\n", 386 | "user_message = \"What is 123 * 456?\"\n", 387 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 388 | "# Get model response\n", 389 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 390 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 391 | "print(response['messages'][0]['content'])\n", 392 | "\n", 393 | "# Example for text analyzer tool only\n", 394 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 395 | "system_message_taot = create_system_message_taot(system_message)\n", 396 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 397 | "# Add previous messages (if available)\n", 398 | "all_messages.extend(previous_messages)\n", 399 | "# Add current user prompt\n", 400 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 401 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 402 | "# Get model response\n", 403 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 404 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 405 | "print(response['messages'][0]['content'])\n", 406 | "\n", 407 | "# Example for both tools with user question requiring math calculation\n", 408 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 409 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 410 | "system_message_taot = create_system_message_taot(system_message)\n", 411 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 412 | "# Add previous messages (if available)\n", 413 | "all_messages.extend(previous_messages)\n", 414 | "# Add current user prompt\n", 415 | "user_message = \"What is 123 * 456?\"\n", 416 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 417 | "# Get model response\n", 418 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 419 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 420 | "print(response['messages'][0]['content'])\n", 421 | "\n", 422 | "# Example for both tools with user question requiring analysis the text\n", 423 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 424 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 425 | "system_message_taot = create_system_message_taot(system_message)\n", 426 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 427 | "# Add previous messages (if available)\n", 428 | "all_messages.extend(previous_messages)\n", 429 | "# Add current user prompt\n", 430 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 431 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 432 | "# Get model response\n", 433 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 434 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 435 | "print(response['messages'][0]['content'])\n", 436 | "\n", 437 | "# Example for both tools with user question not requiring any tools\n", 438 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 439 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 440 | "system_message_taot = create_system_message_taot(system_message)\n", 441 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 442 | "# Add previous messages (if available)\n", 443 | "all_messages.extend(previous_messages)\n", 444 | "# Add current user prompt\n", 445 | "user_message = \"How many languages are there in the world?\"\n", 446 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 447 | "# Get model response\n", 448 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 449 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 450 | "print(response['messages'][0]['content'])\n", 451 | "\n" 452 | ] 453 | } 454 | ], 455 | "metadata": { 456 | "kernelspec": { 457 | "display_name": "base", 458 | "language": "python", 459 | "name": "python3" 460 | }, 461 | "language_info": { 462 | "codemirror_mode": { 463 | "name": "ipython", 464 | "version": 3 465 | }, 466 | "file_extension": ".py", 467 | "mimetype": "text/x-python", 468 | "name": "python", 469 | "nbconvert_exporter": "python", 470 | "pygments_lexer": "ipython3", 471 | "version": "3.10.16" 472 | } 473 | }, 474 | "nbformat": 4, 475 | "nbformat_minor": 2 476 | } 477 | -------------------------------------------------------------------------------- /tutorial/taot_tutorial_ChatOpenAI_DeepSeek_R1_0528.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tool Ahead of Time (TAoT) Tutorial\n", 8 | "\n", 9 | "Lets jump straight into this tutorial, as time waits for no one 😊\n", 10 | "\n", 11 | "This tutorial uses the DeepSeek-R1-0528 685B model, but this tutorial can also be applied to other models available through Langchain's 
ChatOpenAI class.\n", 12 | "\n", 13 | "First, pip install the taot package, as below:" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "%pip install taot" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "If you haven't already, also pip install the other dependencies required in this tutorial:" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "%pip install langchain-core langchain-openai" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Creating Tools\n", 46 | "\n", 47 | "Next, we create tool functions using LangChain's `@tool` decorator.\n", 48 | "\n", 49 | "This is just any function (with inputs and outputs) and `@tool` added at the top of the function.\n", 50 | "\n", 51 | "I have created two tool functions 'calculator' and 'text_analyzer' below:" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "from langchain_core.tools import tool\n", 61 | "\n", 62 | "@tool\n", 63 | "def calculator(expression: str) -> str:\n", 64 | " \"\"\"Evaluate a math expression.\"\"\"\n", 65 | " try:\n", 66 | " expression = expression.strip()\n", 67 | " if not expression:\n", 68 | " return \"Error: Empty expression\"\n", 69 | " \n", 70 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 71 | " if not all(c in allowed_chars for c in expression):\n", 72 | " return \"Error: Invalid characters in expression\"\n", 73 | " \n", 74 | " result = eval(expression)\n", 75 | " return str(result)\n", 76 | " except Exception as e:\n", 77 | " return f\"Error: {str(e)}\"\n", 78 | "\n", 79 | "@tool\n", 80 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 81 | " \"\"\"\n", 82 | " Analyze text to count either words or characters.\n", 83 | " \n", 84 | " Args:\n", 85 | " text (str): The text to analyze\n", 86 | " analysis_type (str): Either 'words' or 'chars'\n", 87 | " \"\"\"\n", 88 | " try:\n", 89 | " text = text.strip()\n", 90 | " if not text:\n", 91 | " return \"Error: Empty text\"\n", 92 | " \n", 93 | " if analysis_type.lower() == 'words':\n", 94 | " word_count = len(text.split())\n", 95 | " return f\"{word_count}\"\n", 96 | " elif analysis_type.lower() == 'chars':\n", 97 | " char_count = len(text)\n", 98 | " return f\"{char_count}\"\n", 99 | " else:\n", 100 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 101 | " except Exception as e:\n", 102 | " return f\"Error: {str(e)}\"" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Initialize Model\n", 110 | "\n", 111 | "Now, initialize a model instance using the format below. \n", 112 | "\n", 113 | "In this tutorial, I am using the DeepSeek-R1-0528 685B model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class.\n", 114 | "\n", 115 | "If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain's ChatOpenAI class, and then change the values of the parameters `model`, `api_key` and `base_url` below according to which model and platform you have chosen." 
116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 2, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "from dotenv import load_dotenv\n", 125 | "from langchain_openai import ChatOpenAI\n", 126 | "import os\n", 127 | "\n", 128 | "# Load environment variable (ie. API key) from .env file\n", 129 | "load_dotenv()\n", 130 | "\n", 131 | "# Initialize model\n", 132 | "model = ChatOpenAI(\n", 133 | " model=\"deepseek/deepseek-r1-0528\",\n", 134 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 135 | " base_url=\"https://openrouter.ai/api/v1\"\n", 136 | ")" 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "## Previous Messages\n", 144 | "\n", 145 | "Next, if you already have a history of previous messages between the user and the chatbot, store them in the format below.\n", 146 | "\n", 147 | "Note: The format of the previous messages does not include the system message (which we will define later further down in this notebook). This design is chosen according to current best practices in chatbot design where we isolate the system message from previous messages." 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 3, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "# Example previous messages\n", 157 | "previous_messages = [\n", 158 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 159 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 160 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 161 | "]" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "## Getting Model Response\n", 169 | "\n", 170 | "Finally, now the fun part where we get to see the response of the model using tool calling! 🛠️\n", 171 | "\n", 172 | "For ease of use, I have designed the taot package to mimic LangChain's and LangGraph's `create_react_agent` method with tool calling, ie. the taot package follows a similar method to LangChain's and LangGraph's:\n", 173 | "\n", 174 | "```\n", 175 | "from langgraph.prebuilt import create_react_agent\n", 176 | "\n", 177 | "agent_executor = create_react_agent(model, tools=[])\n", 178 | "response = agent_executor.invoke({\"messages\": all_messages})\n", 179 | "print(response[\"messages\"][-1].content)\n", 180 | "```\n", 181 | "\n", 182 | "First, the `system_message` variable below can start with any customized system message as per usual, for eg. \"You are a helpful assistant. \", \"You are an expert programmer in Python. \", \"You are a world class expert in SEO optimization. \" etc.\n", 183 | "\n", 184 | "Then, the `system_message` variable below needs to **STRICTLY** include the following: \"You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool.\"\n", 185 | "\n", 186 | "For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the `system_message` variable below would need to look like \"You are a math expert. You are an assistant with access to specific tools. 
When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 187 | "\n", 188 | "For the 'text_analyzer' tool, since the function for the 'text_analyzer' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the `system_message` variable below would need to look like \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 189 | "\n", 190 | "Below are five examples of different combinations of user questions and tools used:" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": 4, 196 | "metadata": {}, 197 | "outputs": [ 198 | { 199 | "name": "stdout", 200 | "output_type": "stream", 201 | "text": [ 202 | "The product of 123 and 456 is 56,088.\n", 203 | "The sentence \"I built my 1st Hello World program\" contains **7 words**.\n", 204 | "The product of 123 and 456 is 56,088.\n", 205 | "The sentence \"I built my 1st Hello World program\" contains 7 words.\n", 206 | "It's difficult to provide an exact number because the definition of what constitutes a distinct language versus a dialect is subjective. However, according to Ethnologue (the most comprehensive catalog of world languages), there are approximately 7,168 living languages as of 2024. This number constantly changes as languages evolve or become extinct.\n" 207 | ] 208 | } 209 | ], 210 | "source": [ 211 | "from taot import create_system_message_taot, create_react_agent_taot\n", 212 | "\n", 213 | "# Example for calculator tool only\n", 214 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 215 | "system_message_taot = create_system_message_taot(system_message)\n", 216 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 217 | "# Add previous messages (if available)\n", 218 | "all_messages.extend(previous_messages)\n", 219 | "# Add current user prompt\n", 220 | "user_message = \"What is 123 * 456?\"\n", 221 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 222 | "# Get model response\n", 223 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 224 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 225 | "print(response['messages'][0]['content'])\n", 226 | "\n", 227 | "# Example for text analyzer tool only\n", 228 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 229 | "system_message_taot = create_system_message_taot(system_message)\n", 230 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 231 | "# Add previous messages (if available)\n", 232 | "all_messages.extend(previous_messages)\n", 233 | "# Add current user prompt\n", 234 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 235 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 236 | "# Get model response\n", 237 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 238 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 239 | "print(response['messages'][0]['content'])\n", 240 | "\n", 241 | "# Example for both tools with user question requiring math calculation\n", 242 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 243 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 244 | "system_message_taot = create_system_message_taot(system_message)\n", 245 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 246 | "# Add previous messages (if available)\n", 247 | "all_messages.extend(previous_messages)\n", 248 | "# Add current user prompt\n", 249 | "user_message = \"What is 123 * 456?\"\n", 250 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 251 | "# Get model response\n", 252 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 253 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 254 | "print(response['messages'][0]['content'])\n", 255 | "\n", 256 | "# Example for both tools with user question requiring analysis the text\n", 257 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 258 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 259 | "system_message_taot = create_system_message_taot(system_message)\n", 260 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 261 | "# Add previous messages (if available)\n", 262 | "all_messages.extend(previous_messages)\n", 263 | "# Add current user prompt\n", 264 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 265 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 266 | "# Get model response\n", 267 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 268 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 269 | "print(response['messages'][0]['content'])\n", 270 | "\n", 271 | "# Example for both tools with user question not requiring any tools\n", 272 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 273 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 274 | "system_message_taot = create_system_message_taot(system_message)\n", 275 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 276 | "# Add previous messages (if available)\n", 277 | "all_messages.extend(previous_messages)\n", 278 | "# Add current user prompt\n", 279 | "user_message = \"How many languages are there in the world?\"\n", 280 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 281 | "# Get model response\n", 282 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 283 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 284 | "print(response['messages'][0]['content'])\n" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "## Summary\n", 292 | "\n", 293 | "Putting all the scripts above together:" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": 5, 299 | "metadata": {}, 300 | "outputs": [ 301 | { 302 | "name": "stdout", 303 | "output_type": "stream", 304 | "text": [ 305 | "The product of 123 and 456 is 56,088.\n", 306 | "The sentence \"I built my 1st Hello World program\" contains **7 words**.\n", 307 | "The product of 123 and 456 is 56,088.\n", 308 | "The sentence \"I built my 1st Hello World program\" contains **7 words**.\n", 309 | "Estimating the exact number of languages in the world is challenging due to factors like dialects, endangered languages, and regional variations. However, linguistic databases such as _Ethnologue_ currently document approximately **7,100–7,400 living languages** worldwide. 
This number fluctuates as languages evolve or become extinct.\n" 310 | ] 311 | } 312 | ], 313 | "source": [ 314 | "from langchain_core.tools import tool\n", 315 | "from dotenv import load_dotenv\n", 316 | "from langchain_openai import ChatOpenAI\n", 317 | "import os\n", 318 | "from taot import create_system_message_taot, create_react_agent_taot\n", 319 | "\n", 320 | "@tool\n", 321 | "def calculator(expression: str) -> str:\n", 322 | " \"\"\"Evaluate a math expression.\"\"\"\n", 323 | " try:\n", 324 | " expression = expression.strip()\n", 325 | " if not expression:\n", 326 | " return \"Error: Empty expression\"\n", 327 | " \n", 328 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 329 | " if not all(c in allowed_chars for c in expression):\n", 330 | " return \"Error: Invalid characters in expression\"\n", 331 | " \n", 332 | " result = eval(expression)\n", 333 | " return str(result)\n", 334 | " except Exception as e:\n", 335 | " return f\"Error: {str(e)}\"\n", 336 | "\n", 337 | "@tool\n", 338 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 339 | " \"\"\"\n", 340 | " Analyze text to count either words or characters.\n", 341 | " \n", 342 | " Args:\n", 343 | " text (str): The text to analyze\n", 344 | " analysis_type (str): Either 'words' or 'chars'\n", 345 | " \"\"\"\n", 346 | " try:\n", 347 | " text = text.strip()\n", 348 | " if not text:\n", 349 | " return \"Error: Empty text\"\n", 350 | " \n", 351 | " if analysis_type.lower() == 'words':\n", 352 | " word_count = len(text.split())\n", 353 | " return f\"{word_count}\"\n", 354 | " elif analysis_type.lower() == 'chars':\n", 355 | " char_count = len(text)\n", 356 | " return f\"{char_count}\"\n", 357 | " else:\n", 358 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 359 | " except Exception as e:\n", 360 | " return f\"Error: {str(e)}\"\n", 361 | " \n", 362 | "# Load environment variable (ie. API key) from .env file\n", 363 | "load_dotenv()\n", 364 | "\n", 365 | "# Initialize model\n", 366 | "model = ChatOpenAI(\n", 367 | " model=\"deepseek/deepseek-r1-0528\",\n", 368 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 369 | " base_url=\"https://openrouter.ai/api/v1\"\n", 370 | ")\n", 371 | "\n", 372 | "# Example previous messages\n", 373 | "previous_messages = [\n", 374 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 375 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 376 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 377 | "]\n", 378 | "\n", 379 | "# Example for calculator tool only\n", 380 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. 
For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 381 | "system_message_taot = create_system_message_taot(system_message)\n", 382 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 383 | "# Add previous messages (if available)\n", 384 | "all_messages.extend(previous_messages)\n", 385 | "# Add current user prompt\n", 386 | "user_message = \"What is 123 * 456?\"\n", 387 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 388 | "# Get model response\n", 389 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 390 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 391 | "print(response['messages'][0]['content'])\n", 392 | "\n", 393 | "# Example for text analyzer tool only\n", 394 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 395 | "system_message_taot = create_system_message_taot(system_message)\n", 396 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 397 | "# Add previous messages (if available)\n", 398 | "all_messages.extend(previous_messages)\n", 399 | "# Add current user prompt\n", 400 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 401 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 402 | "# Get model response\n", 403 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 404 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 405 | "print(response['messages'][0]['content'])\n", 406 | "\n", 407 | "# Example for both tools with user question requiring math calculation\n", 408 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 409 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 410 | "system_message_taot = create_system_message_taot(system_message)\n", 411 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 412 | "# Add previous messages (if available)\n", 413 | "all_messages.extend(previous_messages)\n", 414 | "# Add current user prompt\n", 415 | "user_message = \"What is 123 * 456?\"\n", 416 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 417 | "# Get model response\n", 418 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 419 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 420 | "print(response['messages'][0]['content'])\n", 421 | "\n", 422 | "# Example for both tools with user question requiring analysis the text\n", 423 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 424 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 425 | "system_message_taot = create_system_message_taot(system_message)\n", 426 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 427 | "# Add previous messages (if available)\n", 428 | "all_messages.extend(previous_messages)\n", 429 | "# Add current user prompt\n", 430 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 431 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 432 | "# Get model response\n", 433 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 434 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 435 | "print(response['messages'][0]['content'])\n", 436 | "\n", 437 | "# Example for both tools with user question not requiring any tools\n", 438 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 439 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 440 | "system_message_taot = create_system_message_taot(system_message)\n", 441 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 442 | "# Add previous messages (if available)\n", 443 | "all_messages.extend(previous_messages)\n", 444 | "# Add current user prompt\n", 445 | "user_message = \"How many languages are there in the world?\"\n", 446 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 447 | "# Get model response\n", 448 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 449 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 450 | "print(response['messages'][0]['content'])\n", 451 | "\n" 452 | ] 453 | } 454 | ], 455 | "metadata": { 456 | "kernelspec": { 457 | "display_name": "base", 458 | "language": "python", 459 | "name": "python3" 460 | }, 461 | "language_info": { 462 | "codemirror_mode": { 463 | "name": "ipython", 464 | "version": 3 465 | }, 466 | "file_extension": ".py", 467 | "mimetype": "text/x-python", 468 | "name": "python", 469 | "nbconvert_exporter": "python", 470 | "pygments_lexer": "ipython3", 471 | "version": "3.11.11" 472 | } 473 | }, 474 | "nbformat": 4, 475 | "nbformat_minor": 2 476 | } 477 | -------------------------------------------------------------------------------- /tutorial/taot_tutorial_ChatOpenAI_DeepSeek_R1_Distill-Qwen-14b.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tool Ahead of Time (TAoT) Tutorial\n", 8 | "\n", 9 | "Let's jump straight into this tutorial, as time waits for no one 😊\n", 10 | "\n", 11 | "This tutorial uses the DeepSeek-R1-Distill-Qwen-14B model, but it can also be applied to other models available through Langchain's ChatOpenAI class.\n", 12 | "\n", 13 | "First, pip install the taot package, as below:" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "%pip install taot" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "If you haven't already, also pip install the other dependencies required in this tutorial:" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "%pip install langchain-core langchain-openai" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Creating Tools\n", 46 | "\n", 47 | "Next, we create tool functions using LangChain's `@tool` decorator.\n", 48 | "\n", 49 | "This is just any function (with inputs and outputs) and `@tool` added at the top of the function.\n", 50 | "\n", 51 | "I have created two tool functions 'calculator' and 'text_analyzer' below:" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "from langchain_core.tools import tool\n", 61 | "\n", 62 | "@tool\n", 63 | "def calculator(expression: str) -> str:\n", 64 | " \"\"\"Evaluate a math expression.\"\"\"\n", 65 | " try:\n", 66 | " expression = expression.strip()\n", 67 | " if not expression:\n", 68 | " return \"Error: Empty 
expression\"\n", 69 | " \n", 70 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 71 | " if not all(c in allowed_chars for c in expression):\n", 72 | " return \"Error: Invalid characters in expression\"\n", 73 | " \n", 74 | " result = eval(expression)\n", 75 | " return str(result)\n", 76 | " except Exception as e:\n", 77 | " return f\"Error: {str(e)}\"\n", 78 | "\n", 79 | "@tool\n", 80 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 81 | " \"\"\"\n", 82 | " Analyze text to count either words or characters.\n", 83 | " \n", 84 | " Args:\n", 85 | " text (str): The text to analyze\n", 86 | " analysis_type (str): Either 'words' or 'chars'\n", 87 | " \"\"\"\n", 88 | " try:\n", 89 | " text = text.strip()\n", 90 | " if not text:\n", 91 | " return \"Error: Empty text\"\n", 92 | " \n", 93 | " if analysis_type.lower() == 'words':\n", 94 | " word_count = len(text.split())\n", 95 | " return f\"{word_count}\"\n", 96 | " elif analysis_type.lower() == 'chars':\n", 97 | " char_count = len(text)\n", 98 | " return f\"{char_count}\"\n", 99 | " else:\n", 100 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 101 | " except Exception as e:\n", 102 | " return f\"Error: {str(e)}\"" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Initialize Model\n", 110 | "\n", 111 | "Now, initialize a model instance using the format below. \n", 112 | "\n", 113 | "In this tutorial, I am using the DeepSeek-R1 model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class.\n", 114 | "\n", 115 | "If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain's ChatOpenAI class, and then change the values of the parameters `model`, `api_key` and `base_url` below according to which model and platform you have chosen." 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 2, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "from dotenv import load_dotenv\n", 125 | "from langchain_openai import ChatOpenAI\n", 126 | "import os\n", 127 | "\n", 128 | "# Load environment variable (ie. API key) from .env file\n", 129 | "load_dotenv()\n", 130 | "\n", 131 | "# Initialize model\n", 132 | "model = ChatOpenAI(\n", 133 | " model=\"deepseek/deepseek-r1-distill-qwen-14b\",\n", 134 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 135 | " base_url=\"https://openrouter.ai/api/v1\"\n", 136 | ")" 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "## Previous Messages\n", 144 | "\n", 145 | "Next, if you already have a history of previous messages between the user and the chatbot, store them in the format below.\n", 146 | "\n", 147 | "Note: The format of the previous messages does not include the system message (which we will define later further down in this notebook). This design is chosen according to current best practices in chatbot design where we isolate the system message from previous messages." 
148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 3, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "# Example previous messages\n", 157 | "previous_messages = [\n", 158 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 159 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 160 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 161 | "]" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "## Getting Model Response\n", 169 | "\n", 170 | "Finally, now the fun part where we get to see the response of the model using tool calling! 🛠️\n", 171 | "\n", 172 | "For ease of use, I have designed the taot package to mimic LangChain's and LangGraph's `create_react_agent` method with tool calling, i.e. the taot package follows a similar method to LangChain's and LangGraph's:\n", 173 | "\n", 174 | "```\n", 175 | "from langgraph.prebuilt import create_react_agent\n", 176 | "\n", 177 | "agent_executor = create_react_agent(model, tools=[])\n", 178 | "response = agent_executor.invoke({\"messages\": all_messages})\n", 179 | "print(response[\"messages\"][-1].content)\n", 180 | "```\n", 181 | "\n", 182 | "First, the `system_message` variable below can start with any customized system message as per usual, e.g. \"You are a helpful assistant. \", \"You are an expert programmer in Python. \", \"You are a world class expert in SEO optimization. \" etc.\n", 183 | "\n", 184 | "Then, the `system_message` variable below needs to **STRICTLY** include the following: \"You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool.\"\n", 185 | "\n", 186 | "For example, for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the `system_message` variable below would need to look like \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 187 | "\n", 188 | "For the 'text_analyzer' tool, since the function for the 'text_analyzer' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the `system_message` variable below would need to look like \"You are an expert in linguistics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 189 | "\n", 190 | "Below are five examples of different combinations of user questions and tools used:" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "metadata": {}, 197 | "outputs": [ 198 | { 199 | "name": "stdout", 200 | "output_type": "stream", 201 | "text": [ 202 | "123 multiplied by 456 is 56088.\n", 203 | "The sentence \"I built my 1st Hello World program\" contains 7 words.\n", 204 | "123 multiplied by 456 equals 56,088.\n", 205 | "The sentence contains 7 words.\n", 206 | "There are approximately 7,000 languages spoken in the world today. This number can vary slightly depending on the specific criteria used for language classification, as some languages may be considered dialects of others.\n" 207 | ] 208 | } 209 | ], 210 | "source": [ 211 | "from taot import create_system_message_taot, create_react_agent_taot\n", 212 | "import warnings\n", 213 | "\n", 214 | "# Suppress Pydantic serializer warnings in model response output\n", 215 | "warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"pydantic.main\")\n", 216 | "\n", 217 | "# Example for calculator tool only\n", 218 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 219 | "system_message_taot = create_system_message_taot(system_message)\n", 220 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 221 | "# Add previous messages (if available)\n", 222 | "all_messages.extend(previous_messages)\n", 223 | "# Add current user prompt\n", 224 | "user_message = \"What is 123 * 456?\"\n", 225 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 226 | "# Get model response\n", 227 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 228 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 229 | "print(response['messages'][0]['content'])\n", 230 | "\n", 231 | "# Example for text analyzer tool only\n", 232 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 233 | "system_message_taot = create_system_message_taot(system_message)\n", 234 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 235 | "# Add previous messages (if available)\n", 236 | "all_messages.extend(previous_messages)\n", 237 | "# Add current user prompt\n", 238 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 239 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 240 | "# Get model response\n", 241 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 242 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 243 | "print(response['messages'][0]['content'])\n", 244 | "\n", 245 | "# Example for both tools with user question requiring math calculation\n", 246 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 247 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 248 | "system_message_taot = create_system_message_taot(system_message)\n", 249 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 250 | "# Add previous messages (if available)\n", 251 | "all_messages.extend(previous_messages)\n", 252 | "# Add current user prompt\n", 253 | "user_message = \"What is 123 * 456?\"\n", 254 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 255 | "# Get model response\n", 256 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 257 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 258 | "print(response['messages'][0]['content'])\n", 259 | "\n", 260 | "# Example for both tools with user question requiring analysis the text\n", 261 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 262 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 263 | "system_message_taot = create_system_message_taot(system_message)\n", 264 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 265 | "# Add previous messages (if available)\n", 266 | "all_messages.extend(previous_messages)\n", 267 | "# Add current user prompt\n", 268 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 269 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 270 | "# Get model response\n", 271 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 272 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 273 | "print(response['messages'][0]['content'])\n", 274 | "\n", 275 | "# Example for both tools with user question not requiring any tools\n", 276 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 277 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 278 | "system_message_taot = create_system_message_taot(system_message)\n", 279 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 280 | "# Add previous messages (if available)\n", 281 | "all_messages.extend(previous_messages)\n", 282 | "# Add current user prompt\n", 283 | "user_message = \"How many languages are there in the world?\"\n", 284 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 285 | "# Get model response\n", 286 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 287 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 288 | "print(response['messages'][0]['content'])\n" 289 | ] 290 | }, 291 | { 292 | "cell_type": "markdown", 293 | "metadata": {}, 294 | "source": [ 295 | "## Summary\n", 296 | "\n", 297 | "Putting all the scripts above together:" 298 | ] 299 | }, 300 | { 301 | "cell_type": "code", 302 | "execution_count": 36, 303 | "metadata": {}, 304 | "outputs": [ 305 | { 306 | "name": "stdout", 307 | "output_type": "stream", 308 | "text": [ 309 | "The result of 123 multiplied by 456 is 56088.\n", 310 | "The sentence contains 7 words.\n", 311 | "The product of 123 multiplied by 456 is 56,088.\n", 312 | "The sentence \"I built my 1st Hello World program\" contains 7 words.\n", 313 | "The world is estimated to have approximately 7,000 languages. 
However, this number is approximate and can vary depending on the source and criteria used to count languages.\n" 314 | ] 315 | } 316 | ], 317 | "source": [ 318 | "from langchain_core.tools import tool\n", 319 | "from dotenv import load_dotenv\n", 320 | "from langchain_openai import ChatOpenAI\n", 321 | "import os\n", 322 | "from taot import create_system_message_taot, create_react_agent_taot\n", 323 | "import warnings\n", 324 | "\n", 325 | "# Suppress Pydantic serializer warnings in model response output\n", 326 | "warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"pydantic.main\")\n", 327 | "\n", 328 | "@tool\n", 329 | "def calculator(expression: str) -> str:\n", 330 | " \"\"\"Evaluate a math expression.\"\"\"\n", 331 | " try:\n", 332 | " expression = expression.strip()\n", 333 | " if not expression:\n", 334 | " return \"Error: Empty expression\"\n", 335 | " \n", 336 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 337 | " if not all(c in allowed_chars for c in expression):\n", 338 | " return \"Error: Invalid characters in expression\"\n", 339 | " \n", 340 | " result = eval(expression)\n", 341 | " return str(result)\n", 342 | " except Exception as e:\n", 343 | " return f\"Error: {str(e)}\"\n", 344 | "\n", 345 | "@tool\n", 346 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 347 | " \"\"\"\n", 348 | " Analyze text to count either words or characters.\n", 349 | " \n", 350 | " Args:\n", 351 | " text (str): The text to analyze\n", 352 | " analysis_type (str): Either 'words' or 'chars'\n", 353 | " \"\"\"\n", 354 | " try:\n", 355 | " text = text.strip()\n", 356 | " if not text:\n", 357 | " return \"Error: Empty text\"\n", 358 | " \n", 359 | " if analysis_type.lower() == 'words':\n", 360 | " word_count = len(text.split())\n", 361 | " return f\"{word_count}\"\n", 362 | " elif analysis_type.lower() == 'chars':\n", 363 | " char_count = len(text)\n", 364 | " return f\"{char_count}\"\n", 365 | " else:\n", 366 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 367 | " except Exception as e:\n", 368 | " return f\"Error: {str(e)}\"\n", 369 | " \n", 370 | "# Load environment variable (ie. API key) from .env file\n", 371 | "load_dotenv()\n", 372 | "\n", 373 | "# Initialize model\n", 374 | "model = ChatOpenAI(\n", 375 | " model=\"deepseek/deepseek-r1-distill-qwen-14b\",\n", 376 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 377 | " base_url=\"https://openrouter.ai/api/v1\"\n", 378 | ")\n", 379 | "\n", 380 | "# Example previous messages\n", 381 | "previous_messages = [\n", 382 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 383 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 384 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 385 | "]\n", 386 | "\n", 387 | "# Example for calculator tool only\n", 388 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. 
For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 389 | "system_message_taot = create_system_message_taot(system_message)\n", 390 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 391 | "# Add previous messages (if available)\n", 392 | "all_messages.extend(previous_messages)\n", 393 | "# Add current user prompt\n", 394 | "user_message = \"What is 123 * 456?\"\n", 395 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 396 | "# Get model response\n", 397 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 398 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 399 | "print(response['messages'][0]['content'])\n", 400 | "\n", 401 | "# Example for text analyzer tool only\n", 402 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 403 | "system_message_taot = create_system_message_taot(system_message)\n", 404 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 405 | "# Add previous messages (if available)\n", 406 | "all_messages.extend(previous_messages)\n", 407 | "# Add current user prompt\n", 408 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 409 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 410 | "# Get model response\n", 411 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 412 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 413 | "print(response['messages'][0]['content'])\n", 414 | "\n", 415 | "# Example for both tools with user question requiring math calculation\n", 416 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 417 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 418 | "system_message_taot = create_system_message_taot(system_message)\n", 419 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 420 | "# Add previous messages (if available)\n", 421 | "all_messages.extend(previous_messages)\n", 422 | "# Add current user prompt\n", 423 | "user_message = \"What is 123 * 456?\"\n", 424 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 425 | "# Get model response\n", 426 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 427 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 428 | "print(response['messages'][0]['content'])\n", 429 | "\n", 430 | "# Example for both tools with user question requiring analysis the text\n", 431 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 432 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 433 | "system_message_taot = create_system_message_taot(system_message)\n", 434 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 435 | "# Add previous messages (if available)\n", 436 | "all_messages.extend(previous_messages)\n", 437 | "# Add current user prompt\n", 438 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 439 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 440 | "# Get model response\n", 441 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 442 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 443 | "print(response['messages'][0]['content'])\n", 444 | "\n", 445 | "# Example for both tools with user question not requiring any tools\n", 446 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 447 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 448 | "system_message_taot = create_system_message_taot(system_message)\n", 449 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 450 | "# Add previous messages (if available)\n", 451 | "all_messages.extend(previous_messages)\n", 452 | "# Add current user prompt\n", 453 | "user_message = \"How many languages are there in the world?\"\n", 454 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 455 | "# Get model response\n", 456 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 457 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 458 | "print(response['messages'][0]['content'])\n" 459 | ] 460 | } 461 | ], 462 | "metadata": { 463 | "kernelspec": { 464 | "display_name": "base", 465 | "language": "python", 466 | "name": "python3" 467 | }, 468 | "language_info": { 469 | "codemirror_mode": { 470 | "name": "ipython", 471 | "version": 3 472 | }, 473 | "file_extension": ".py", 474 | "mimetype": "text/x-python", 475 | "name": "python", 476 | "nbconvert_exporter": "python", 477 | "pygments_lexer": "ipython3", 478 | "version": "3.11.11" 479 | } 480 | }, 481 | "nbformat": 4, 482 | "nbformat_minor": 2 483 | } 484 | -------------------------------------------------------------------------------- /tutorial/taot_tutorial_ChatOpenAI_Qwen3.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tool Ahead of Time (TAoT) Tutorial\n", 8 | "\n", 9 | "Lets jump straight into this tutorial, as time waits for no one 😊\n", 10 | "\n", 11 | "This tutorial uses the Qwen3 235B-A22B model, but this tutorial using the TAoT package can also be applied to all the other Qwen3 models (with the exception of the Qwen3 0.6B model) and also to any other models available through Langchain's ChatOpenAI class.\n", 12 | "\n", 13 | "First, pip install the taot package, as below:" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "%pip install taot" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "If you haven't already, also pip install the other dependencies required in this tutorial:" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "%pip install langchain-core langchain-openai" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Creating Tools\n", 46 | "\n", 47 | "Next, we create tool functions using LangChain's `@tool` decorator.\n", 48 | "\n", 49 | "This is just any function (with inputs and outputs) and `@tool` added at the top of the function.\n", 50 | "\n", 51 | "I have created two tool functions 'calculator' and 'text_analyzer' below:" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "from langchain_core.tools import tool\n", 61 | "\n", 62 | "@tool\n", 63 | "def calculator(expression: str) -> str:\n", 64 | " \"\"\"Evaluate a math expression.\"\"\"\n", 65 | " try:\n", 66 | " expression = expression.strip()\n", 
67 | " if not expression:\n", 68 | " return \"Error: Empty expression\"\n", 69 | " \n", 70 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 71 | " if not all(c in allowed_chars for c in expression):\n", 72 | " return \"Error: Invalid characters in expression\"\n", 73 | " \n", 74 | " result = eval(expression)\n", 75 | " return str(result)\n", 76 | " except Exception as e:\n", 77 | " return f\"Error: {str(e)}\"\n", 78 | "\n", 79 | "@tool\n", 80 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 81 | " \"\"\"\n", 82 | " Analyze text to count either words or characters.\n", 83 | " \n", 84 | " Args:\n", 85 | " text (str): The text to analyze\n", 86 | " analysis_type (str): Either 'words' or 'chars'\n", 87 | " \"\"\"\n", 88 | " try:\n", 89 | " text = text.strip()\n", 90 | " if not text:\n", 91 | " return \"Error: Empty text\"\n", 92 | " \n", 93 | " if analysis_type.lower() == 'words':\n", 94 | " word_count = len(text.split())\n", 95 | " return f\"{word_count}\"\n", 96 | " elif analysis_type.lower() == 'chars':\n", 97 | " char_count = len(text)\n", 98 | " return f\"{char_count}\"\n", 99 | " else:\n", 100 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 101 | " except Exception as e:\n", 102 | " return f\"Error: {str(e)}\"" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Initialize Model\n", 110 | "\n", 111 | "Now, initialize a model instance using the format below. \n", 112 | "\n", 113 | "In this tutorial, I am using the Qwen3 235B-A22B model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class.\n", 114 | "\n", 115 | "If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain's ChatOpenAI class, and then change the values of the parameters `model`, `api_key` and `base_url` below according to which model and platform you have chosen." 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "from dotenv import load_dotenv\n", 125 | "from langchain_openai import ChatOpenAI\n", 126 | "import os\n", 127 | "\n", 128 | "# Load environment variable (ie. API key) from .env file\n", 129 | "load_dotenv()\n", 130 | "\n", 131 | "# Initialize Qwen3 235B-A22B model\n", 132 | "# This notebook tutorial using the TAot package would also work for all other Qwen3 models, with the exception of the Qwen3 0.6B model\n", 133 | "model = ChatOpenAI(\n", 134 | " model=\"qwen/qwen3-235b-a22b\",\n", 135 | " # model=\"qwen/qwen3-30b-a3b\",\n", 136 | " # model=\"qwen/qwen3-32b\",\n", 137 | " # model=\"qwen/qwen3-14b\",\n", 138 | " # model=\"qwen/qwen3-8b\",\n", 139 | " # model=\"qwen/qwen3-4b:free\",\n", 140 | " # model=\"qwen/qwen3-1.7b:free\",\n", 141 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 142 | " base_url=\"https://openrouter.ai/api/v1\"\n", 143 | ")" 144 | ] 145 | }, 146 | { 147 | "cell_type": "markdown", 148 | "metadata": {}, 149 | "source": [ 150 | "## Previous Messages\n", 151 | "\n", 152 | "Next, if you already have a history of previous messages between the user and the chatbot, store them in the format below.\n", 153 | "\n", 154 | "Note: The format of the previous messages does not include the system message (which we will define later further down in this notebook). 
This design is chosen according to current best practices in chatbot design where we isolate the system message from previous messages." 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 3, 160 | "metadata": {}, 161 | "outputs": [], 162 | "source": [ 163 | "# Example previous messages\n", 164 | "previous_messages = [\n", 165 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 166 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 167 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 168 | "]" 169 | ] 170 | }, 171 | { 172 | "cell_type": "markdown", 173 | "metadata": {}, 174 | "source": [ 175 | "## Getting Model Response\n", 176 | "\n", 177 | "Finally, now the fun part where we get to see the response of the model using tool calling! 🛠️\n", 178 | "\n", 179 | "For ease of use, I have designed the taot package to mimic LangChain's and LangGraph's `create_react_agent` method with tool calling, ie. the taot package follows a similar method to LangChain's and LangGraph's:\n", 180 | "\n", 181 | "```\n", 182 | "from langgraph.prebuilt import create_react_agent\n", 183 | "\n", 184 | "agent_executor = create_react_agent(model, tools=[])\n", 185 | "response = agent_executor.invoke({\"messages\": all_messages})\n", 186 | "print(response[\"messages\"][-1].content)\n", 187 | "```\n", 188 | "\n", 189 | "First, the `system_message` variable below can start with any customized system message as per usual, for eg. \"You are a helpful assistant. \", \"You are an expert programmer in Python. \", \"You are a world class expert in SEO optimization. \" etc.\n", 190 | "\n", 191 | "Then, the `system_message` variable below needs to **STRICTLY** include the following: \"You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool.\"\n", 192 | "\n", 193 | "For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the `system_message` variable below would need to look like \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 194 | "\n", 195 | "For the 'text_analyze' tool, since the function for the 'text_analyze' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the `system_message` variable below would need to look like \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 196 | "\n", 197 | "Below are five examples of different combinations of user questions and tools used:" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 8, 203 | "metadata": {}, 204 | "outputs": [ 205 | { 206 | "name": "stdout", 207 | "output_type": "stream", 208 | "text": [ 209 | "The product of 123 and 456 is 56,088.\n", 210 | "The sentence contains 7 words.\n", 211 | "123 multiplied by 456 equals 56,088.\n", 212 | "The sentence \"I built my 1st Hello World program\" contains **7 words**.\n", 213 | "There are approximately 7,000 languages spoken in the world today, though the exact number varies depending on classification criteria.\n" 214 | ] 215 | } 216 | ], 217 | "source": [ 218 | "from taot import create_system_message_taot, create_react_agent_taot\n", 219 | "\n", 220 | "# Example for calculator tool only\n", 221 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 222 | "system_message_taot = create_system_message_taot(system_message)\n", 223 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 224 | "# Add previous messages (if available)\n", 225 | "all_messages.extend(previous_messages)\n", 226 | "# Add current user prompt\n", 227 | "user_message = \"What is 123 * 456?\"\n", 228 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 229 | "# Get model response\n", 230 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 231 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 232 | "print(response['messages'][0]['content'])\n", 233 | "\n", 234 | "# Example for text analyzer tool only\n", 235 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 236 | "system_message_taot = create_system_message_taot(system_message)\n", 237 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 238 | "# Add previous messages (if available)\n", 239 | "all_messages.extend(previous_messages)\n", 240 | "# Add current user prompt\n", 241 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 242 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 243 | "# Get model response\n", 244 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 245 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 246 | "print(response['messages'][0]['content'])\n", 247 | "\n", 248 | "# Example for both tools with user question requiring math calculation\n", 249 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. 
For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 250 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 251 | "system_message_taot = create_system_message_taot(system_message)\n", 252 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 253 | "# Add previous messages (if available)\n", 254 | "all_messages.extend(previous_messages)\n", 255 | "# Add current user prompt\n", 256 | "user_message = \"What is 123 * 456?\"\n", 257 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 258 | "# Get model response\n", 259 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 260 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 261 | "print(response['messages'][0]['content'])\n", 262 | "\n", 263 | "# Example for both tools with user question requiring analysis the text\n", 264 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 265 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 266 | "system_message_taot = create_system_message_taot(system_message)\n", 267 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 268 | "# Add previous messages (if available)\n", 269 | "all_messages.extend(previous_messages)\n", 270 | "# Add current user prompt\n", 271 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 272 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 273 | "# Get model response\n", 274 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 275 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 276 | "print(response['messages'][0]['content'])\n", 277 | "\n", 278 | "# Example for both tools with user question not requiring any tools\n", 279 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 280 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 281 | "system_message_taot = create_system_message_taot(system_message)\n", 282 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 283 | "# Add previous messages (if available)\n", 284 | "all_messages.extend(previous_messages)\n", 285 | "# Add current user prompt\n", 286 | "user_message = \"How many languages are there in the world?\"\n", 287 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 288 | "# Get model response\n", 289 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 290 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 291 | "print(response['messages'][0]['content'])\n" 292 | ] 293 | }, 294 | { 295 | "cell_type": "markdown", 296 | "metadata": {}, 297 | "source": [ 298 | "## Summary\n", 299 | "\n", 300 | "Putting all the scripts above together:" 301 | ] 302 | }, 303 | { 304 | "cell_type": "code", 305 | "execution_count": null, 306 | "metadata": {}, 307 | "outputs": [ 308 | { 309 | "name": "stdout", 310 | "output_type": "stream", 311 | "text": [ 312 | "123 multiplied by 456 equals 56,088.\n", 313 | "The sentence \"I built my 1st Hello World program\" contains 7 words.\n", 314 | "The product of 123 multiplied by 456 is **56,088**. Let me know if you need help with anything else! 🧮😊\n", 315 | "The sentence contains 7 words: \"I,\" \"built,\" \"my,\" \"1st,\" \"Hello,\" \"World,\" and \"program.\"\n", 316 | "The exact number of languages in the world is difficult to determine precisely, but estimates range around **7,000** living languages. This number varies due to factors like language extinction, dialect classification, and ongoing linguistic research. No tool is needed for this factual question.\n" 317 | ] 318 | } 319 | ], 320 | "source": [ 321 | "from langchain_core.tools import tool\n", 322 | "from dotenv import load_dotenv\n", 323 | "from langchain_openai import ChatOpenAI\n", 324 | "import os\n", 325 | "from taot import create_system_message_taot, create_react_agent_taot\n", 326 | "\n", 327 | "@tool\n", 328 | "def calculator(expression: str) -> str:\n", 329 | " \"\"\"Evaluate a math expression.\"\"\"\n", 330 | " try:\n", 331 | " expression = expression.strip()\n", 332 | " if not expression:\n", 333 | " return \"Error: Empty expression\"\n", 334 | " \n", 335 | " allowed_chars = set(\"0123456789+-*/(). 
\")\n", 336 | " if not all(c in allowed_chars for c in expression):\n", 337 | " return \"Error: Invalid characters in expression\"\n", 338 | " \n", 339 | " result = eval(expression)\n", 340 | " return str(result)\n", 341 | " except Exception as e:\n", 342 | " return f\"Error: {str(e)}\"\n", 343 | "\n", 344 | "@tool\n", 345 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 346 | " \"\"\"\n", 347 | " Analyze text to count either words or characters.\n", 348 | " \n", 349 | " Args:\n", 350 | " text (str): The text to analyze\n", 351 | " analysis_type (str): Either 'words' or 'chars'\n", 352 | " \"\"\"\n", 353 | " try:\n", 354 | " text = text.strip()\n", 355 | " if not text:\n", 356 | " return \"Error: Empty text\"\n", 357 | " \n", 358 | " if analysis_type.lower() == 'words':\n", 359 | " word_count = len(text.split())\n", 360 | " return f\"{word_count}\"\n", 361 | " elif analysis_type.lower() == 'chars':\n", 362 | " char_count = len(text)\n", 363 | " return f\"{char_count}\"\n", 364 | " else:\n", 365 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 366 | " except Exception as e:\n", 367 | " return f\"Error: {str(e)}\"\n", 368 | " \n", 369 | "# Load environment variable (ie. API key) from .env file\n", 370 | "load_dotenv()\n", 371 | "\n", 372 | "# Initialize Qwen3 235B-A22B model\n", 373 | "# This notebook tutorial using the TAot package would also work for all other Qwen3 models, with the exception of the Qwen3 0.6B model\n", 374 | "model = ChatOpenAI(\n", 375 | " model=\"qwen/qwen3-235b-a22b\",\n", 376 | " # model=\"qwen/qwen3-30b-a3b\",\n", 377 | " # model=\"qwen/qwen3-32b\",\n", 378 | " # model=\"qwen/qwen3-14b\",\n", 379 | " # model=\"qwen/qwen3-8b\",\n", 380 | " # model=\"qwen/qwen3-4b:free\",\n", 381 | " # model=\"qwen/qwen3-1.7b:free\",\n", 382 | " api_key=os.environ[\"OPENROUTER_API_KEY\"],\n", 383 | " base_url=\"https://openrouter.ai/api/v1\"\n", 384 | ")\n", 385 | "\n", 386 | "# Example previous messages\n", 387 | "previous_messages = [\n", 388 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 389 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 390 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 391 | "]\n", 392 | "\n", 393 | "# Example for calculator tool only\n", 394 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 395 | "system_message_taot = create_system_message_taot(system_message)\n", 396 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 397 | "# Add previous messages (if available)\n", 398 | "all_messages.extend(previous_messages)\n", 399 | "# Add current user prompt\n", 400 | "user_message = \"What is 123 * 456?\"\n", 401 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 402 | "# Get model response\n", 403 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 404 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 405 | "print(response['messages'][0]['content'])\n", 406 | "\n", 407 | "# Example for text analyzer tool only\n", 408 | "system_message = \"You are an expert in linguitics. 
You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 409 | "system_message_taot = create_system_message_taot(system_message)\n", 410 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 411 | "# Add previous messages (if available)\n", 412 | "all_messages.extend(previous_messages)\n", 413 | "# Add current user prompt\n", 414 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 415 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 416 | "# Get model response\n", 417 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 418 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 419 | "print(response['messages'][0]['content'])\n", 420 | "\n", 421 | "# Example for both tools with user question requiring math calculation\n", 422 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 423 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 424 | "system_message_taot = create_system_message_taot(system_message)\n", 425 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 426 | "# Add previous messages (if available)\n", 427 | "all_messages.extend(previous_messages)\n", 428 | "# Add current user prompt\n", 429 | "user_message = \"What is 123 * 456?\"\n", 430 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 431 | "# Get model response\n", 432 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 433 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 434 | "print(response['messages'][0]['content'])\n", 435 | "\n", 436 | "# Example for both tools with user question requiring analysis the text\n", 437 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 438 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 439 | "system_message_taot = create_system_message_taot(system_message)\n", 440 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 441 | "# Add previous messages (if available)\n", 442 | "all_messages.extend(previous_messages)\n", 443 | "# Add current user prompt\n", 444 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 445 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 446 | "# Get model response\n", 447 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 448 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 449 | "print(response['messages'][0]['content'])\n", 450 | "\n", 451 | "# Example for both tools with user question not requiring any tools\n", 452 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 453 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 454 | "system_message_taot = create_system_message_taot(system_message)\n", 455 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 456 | "# Add previous messages (if available)\n", 457 | "all_messages.extend(previous_messages)\n", 458 | "# Add current user prompt\n", 459 | "user_message = \"How many languages are there in the world?\"\n", 460 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 461 | "# Get model response\n", 462 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 463 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 464 | "print(response['messages'][0]['content'])\n", 465 | "\n" 466 | ] 467 | } 468 | ], 469 | "metadata": { 470 | "kernelspec": { 471 | "display_name": "base", 472 | "language": "python", 473 | "name": "python3" 474 | }, 475 | "language_info": { 476 | "codemirror_mode": { 477 | "name": "ipython", 478 | "version": 3 479 | }, 480 | "file_extension": ".py", 481 | "mimetype": "text/x-python", 482 | "name": "python", 483 | "nbconvert_exporter": "python", 484 | "pygments_lexer": "ipython3", 485 | "version": "3.11.11" 486 | } 487 | }, 488 | "nbformat": 4, 489 | "nbformat_minor": 2 490 | } 491 | -------------------------------------------------------------------------------- /tutorial/taot_tutorial_ChatBedrockConverse.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tool Ahead of Time (TAoT) Tutorial\n", 8 | "\n", 9 | "Lets jump straight into this tutorial, as time waits for no one 😊\n", 10 | "\n", 11 | "This tutorial uses the DeepSeek-R1 671B model, but this tutorial can also be applied to other models available through Langchain's 
ChatBedrockConverse class.\n", 12 | "\n", 13 | "First, pip install the taot package, as below:" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "%pip install taot" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "If you haven't already, also pip install the other dependencies required in this tutorial:" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "%pip install langchain-core langchain-aws" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Creating Tools\n", 46 | "\n", 47 | "Next, we create tool functions using LangChain's `@tool` decorator.\n", 48 | "\n", 49 | "This is just any function (with inputs and outputs) and `@tool` added at the top of the function.\n", 50 | "\n", 51 | "I have created two tool functions 'calculator' and 'text_analyzer' below:" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "from langchain_core.tools import tool\n", 61 | "\n", 62 | "@tool\n", 63 | "def calculator(expression: str) -> str:\n", 64 | " \"\"\"Evaluate a math expression.\"\"\"\n", 65 | " try:\n", 66 | " expression = expression.strip()\n", 67 | " if not expression:\n", 68 | " return \"Error: Empty expression\"\n", 69 | " \n", 70 | " allowed_chars = set(\"0123456789+-*/(). \")\n", 71 | " if not all(c in allowed_chars for c in expression):\n", 72 | " return \"Error: Invalid characters in expression\"\n", 73 | " \n", 74 | " result = eval(expression)\n", 75 | " return str(result)\n", 76 | " except Exception as e:\n", 77 | " return f\"Error: {str(e)}\"\n", 78 | "\n", 79 | "@tool\n", 80 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 81 | " \"\"\"\n", 82 | " Analyze text to count either words or characters.\n", 83 | " \n", 84 | " Args:\n", 85 | " text (str): The text to analyze\n", 86 | " analysis_type (str): Either 'words' or 'chars'\n", 87 | " \"\"\"\n", 88 | " try:\n", 89 | " text = text.strip()\n", 90 | " if not text:\n", 91 | " return \"Error: Empty text\"\n", 92 | " \n", 93 | " if analysis_type.lower() == 'words':\n", 94 | " word_count = len(text.split())\n", 95 | " return f\"{word_count}\"\n", 96 | " elif analysis_type.lower() == 'chars':\n", 97 | " char_count = len(text)\n", 98 | " return f\"{char_count}\"\n", 99 | " else:\n", 100 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 101 | " except Exception as e:\n", 102 | " return f\"Error: {str(e)}\"" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Initialize Model\n", 110 | "\n", 111 | "Now, initialize a model instance using the format below. \n", 112 | "\n", 113 | "In this tutorial, I am using the DeepSeek-R1 671B model hosted on Amazon Bedrock.\n", 114 | "\n", 115 | "If you want to use another model, you will need to first check if your model is available on Amazon Bedrock, and then change the value of the parameter `model_id` below to the model ID of your chosen model.\n", 116 | "\n", 117 | "**Tip:** To setup Amazon Bedrock just ask any AI with internet access (so that you get the latest up to date steps) the following question: \"You are an expert in Amazon Bedrock. 
Can you tell me the latest step-by-step guide on how to set up an AWS account and deploy a model on the Amazon Bedrock platform?\". Note that this process will involve requesting model access to DeepSeek-R1 671B (this will usually be instantaneous), and you will also likely need to request an increase to the quotas for tokens-per-minute (TPM) and requests-per-minute (RPM), which by default are set at low levels (this will usually take 1-3 business days). When doing these steps, you will get to choose your own region to deploy the model (i.e. `region_name`), and you will obtain the values for the parameters `aws_access_key_id` and `aws_secret_access_key` below from the IAM console on your AWS account."
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": 2,
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "from dotenv import load_dotenv\n",
127 | "from langchain_aws import ChatBedrockConverse\n",
128 | "import os\n",
129 | "\n",
130 | "# Load environment variable (ie. API key) from .env file\n",
131 | "load_dotenv()\n",
132 | "\n",
133 | "# Initialize model\n",
134 | "model = ChatBedrockConverse(\n",
135 | "    model_id=\"us.deepseek.r1-v1:0\",\n",
136 | "    region_name=\"us-east-1\",\n",
137 | "    aws_access_key_id=os.environ[\"AWS_ACCESS_KEY_ID\"],\n",
138 | "    aws_secret_access_key=os.environ[\"AWS_SECRET_ACCESS_KEY\"]\n",
139 | ")"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "## Previous Messages\n",
147 | "\n",
148 | "Next, if you already have a history of previous messages between the user and the chatbot, store them in the format below.\n",
149 | "\n",
150 | "Note: The previous messages do not include the system message (which we will define further down in this notebook). This design follows current best practice in chatbot design, where the system message is kept separate from the previous messages."
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 3,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "# Example previous messages\n",
160 | "previous_messages = [\n",
161 | "    # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n",
162 | "    {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n",
163 | "    {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n",
164 | "]"
165 | ]
166 | },
167 | {
168 | "cell_type": "markdown",
169 | "metadata": {},
170 | "source": [
171 | "## Getting Model Response\n",
172 | "\n",
173 | "Finally, the fun part, where we get to see the model's response using tool calling! 🛠️\n",
174 | "\n",
175 | "For ease of use, I have designed the taot package to mimic LangChain's and LangGraph's `create_react_agent` method with tool calling, i.e. the taot package follows a pattern similar to LangChain's and LangGraph's:\n",
176 | "\n",
177 | "```\n",
178 | "from langgraph.prebuilt import create_react_agent\n",
179 | "\n",
180 | "agent_executor = create_react_agent(model, tools=[])\n",
181 | "response = agent_executor.invoke({\"messages\": all_messages})\n",
182 | "print(response[\"messages\"][-1].content)\n",
183 | "```\n",
184 | "\n",
185 | "First, the `system_message` variable below can start with any customized system message as usual, e.g. \"You are a helpful assistant. \", \"You are an expert programmer in Python. \", \"You are a world class expert in SEO optimization. \" etc.\n",
186 | "\n",
187 | "Then, the `system_message` variable below needs to **STRICTLY** include the following: \"You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool.\"\n",
188 | "\n",
189 | "For example, for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the `system_message` variable below would need to look like \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n",
190 | "\n",
191 | "For the 'text_analyzer' tool, since the function for the 'text_analyzer' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the `system_message` variable below would need to look like \"You are an expert in linguistics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n",
192 | "\n",
193 | "Below are five examples of different combinations of user questions and tools used:"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "metadata": {},
200 | "outputs": [
201 | {
202 | "name": "stdout",
203 | "output_type": "stream",
204 | "text": [
205 | "The result of 123 multiplied by 456 is **56,088**.\n",
206 | "There are 7 words in the sentence: \"I built my 1st Hello World program.\"\n",
207 | "The result of multiplying 123 by 456 is **56,088**.\n",
208 | "There are **7 words** in the sentence: *\"I built my 1st Hello World program\"*.\n",
209 | "The exact number of languages in the world is difficult to determine, but estimates suggest there are between 6,000 to 7,000 distinct languages spoken globally, with many considered endangered.\n"
210 | ]
211 | }
212 | ],
213 | "source": [
214 | "from taot import create_system_message_taot, create_react_agent_taot\n",
215 | "\n",
216 | "# Example for calculator tool only\n",
217 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. 
For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 218 | "system_message_taot = create_system_message_taot(system_message)\n", 219 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 220 | "# Add previous messages (if available)\n", 221 | "all_messages.extend(previous_messages)\n", 222 | "# Add current user prompt\n", 223 | "user_message = \"What is 123 * 456?\"\n", 224 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 225 | "# Get model response\n", 226 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 227 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 228 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 229 | "\n", 230 | "# Example for text analyzer tool only\n", 231 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 232 | "system_message_taot = create_system_message_taot(system_message)\n", 233 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 234 | "# Add previous messages (if available)\n", 235 | "all_messages.extend(previous_messages)\n", 236 | "# Add current user prompt\n", 237 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 238 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 239 | "# Get model response\n", 240 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 241 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 242 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 243 | "\n", 244 | "# Example for both tools with user question requiring math calculation\n", 245 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 246 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 247 | "system_message_taot = create_system_message_taot(system_message)\n", 248 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 249 | "# Add previous messages (if available)\n", 250 | "all_messages.extend(previous_messages)\n", 251 | "# Add current user prompt\n", 252 | "user_message = \"What is 123 * 456?\"\n", 253 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 254 | "# Get model response\n", 255 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 256 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 257 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 258 | "\n", 259 | "# Example for both tools with user question requiring analysis the text\n", 260 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 261 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 262 | "system_message_taot = create_system_message_taot(system_message)\n", 263 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 264 | "# Add previous messages (if available)\n", 265 | "all_messages.extend(previous_messages)\n", 266 | "# Add current user prompt\n", 267 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 268 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 269 | "# Get model response\n", 270 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 271 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 272 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 273 | "\n", 274 | "# Example for both tools with user question not requiring any tools\n", 275 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 276 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 277 | "system_message_taot = create_system_message_taot(system_message)\n", 278 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 279 | "# Add previous messages (if available)\n", 280 | "all_messages.extend(previous_messages)\n", 281 | "# Add current user prompt\n", 282 | "user_message = \"How many languages are there in the world?\"\n", 283 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 284 | "# Get model response\n", 285 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 286 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 287 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "metadata": {}, 293 | "source": [ 294 | "## Summary\n", 295 | "\n", 296 | "Putting all the scripts above together:" 297 | ] 298 | }, 299 | { 300 | "cell_type": "code", 301 | "execution_count": null, 302 | "metadata": {}, 303 | "outputs": [ 304 | { 305 | "name": "stdout", 306 | "output_type": "stream", 307 | "text": [ 308 | "The result of 123 multiplied by 456 is **56,088**.\n", 309 | "There are **7 words** in the sentence: *\"I built my 1st Hello World program\"*.\n", 310 | "The product of 123 and 456 is **56,088**.\n", 311 | "There are 7 words in the sentence: \"I built my 1st Hello World program.\"\n", 312 | "The exact number of languages in the world is debated, but estimates typically range between 6,000 to 7,000 distinct languages. This number fluctuates due to factors like language endangerment, dialect classification debates, and ongoing documentation efforts.\n" 313 | ] 314 | } 315 | ], 316 | "source": [ 317 | "from langchain_core.tools import tool\n", 318 | "from dotenv import load_dotenv\n", 319 | "from langchain_aws import ChatBedrockConverse\n", 320 | "import os\n", 321 | "from taot import create_system_message_taot, create_react_agent_taot\n", 322 | "\n", 323 | "@tool\n", 324 | "def calculator(expression: str) -> str:\n", 325 | " \"\"\"Evaluate a math expression.\"\"\"\n", 326 | " try:\n", 327 | " expression = expression.strip()\n", 328 | " if not expression:\n", 329 | " return \"Error: Empty expression\"\n", 330 | " \n", 331 | " allowed_chars = set(\"0123456789+-*/(). 
\")\n", 332 | " if not all(c in allowed_chars for c in expression):\n", 333 | " return \"Error: Invalid characters in expression\"\n", 334 | " \n", 335 | " result = eval(expression)\n", 336 | " return str(result)\n", 337 | " except Exception as e:\n", 338 | " return f\"Error: {str(e)}\"\n", 339 | "\n", 340 | "@tool\n", 341 | "def text_analyzer(text: str, analysis_type: str) -> str:\n", 342 | " \"\"\"\n", 343 | " Analyze text to count either words or characters.\n", 344 | " \n", 345 | " Args:\n", 346 | " text (str): The text to analyze\n", 347 | " analysis_type (str): Either 'words' or 'chars'\n", 348 | " \"\"\"\n", 349 | " try:\n", 350 | " text = text.strip()\n", 351 | " if not text:\n", 352 | " return \"Error: Empty text\"\n", 353 | " \n", 354 | " if analysis_type.lower() == 'words':\n", 355 | " word_count = len(text.split())\n", 356 | " return f\"{word_count}\"\n", 357 | " elif analysis_type.lower() == 'chars':\n", 358 | " char_count = len(text)\n", 359 | " return f\"{char_count}\"\n", 360 | " else:\n", 361 | " return \"Error: analysis_type must be either 'words' or 'chars'\"\n", 362 | " except Exception as e:\n", 363 | " return f\"Error: {str(e)}\"\n", 364 | " \n", 365 | "# Load environment variable (ie. API key) from .env file\n", 366 | "load_dotenv()\n", 367 | "\n", 368 | "# Initialize model\n", 369 | "model = ChatBedrockConverse(\n", 370 | " model_id=\"us.deepseek.r1-v1:0\",\n", 371 | " region_name=\"us-east-1\",\n", 372 | " aws_access_key_id=os.environ[\"AWS_ACCESS_KEY_ID\"],\n", 373 | " aws_secret_access_key=os.environ[\"AWS_SECRET_ACCESS_KEY\"]\n", 374 | ")\n", 375 | "\n", 376 | "# Example previous messages\n", 377 | "previous_messages = [\n", 378 | " # {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, # Commented out as we do not include system message\n", 379 | " {\"role\": \"user\", \"content\": \"What is the capital of Australia?\"},\n", 380 | " {\"role\": \"assistant\", \"content\": \"The capital of Australia is Canberra.\"}\n", 381 | "]\n", 382 | "\n", 383 | "# Example for calculator tool only\n", 384 | "system_message = \"You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\"\n", 385 | "system_message_taot = create_system_message_taot(system_message)\n", 386 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 387 | "# Add previous messages (if available)\n", 388 | "all_messages.extend(previous_messages)\n", 389 | "# Add current user prompt\n", 390 | "user_message = \"What is 123 * 456?\"\n", 391 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 392 | "# Get model response\n", 393 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator])\n", 394 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 395 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 396 | "\n", 397 | "# Example for text analyzer tool only\n", 398 | "system_message = \"You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\n", 399 | "system_message_taot = create_system_message_taot(system_message)\n", 400 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 401 | "# Add previous messages (if available)\n", 402 | "all_messages.extend(previous_messages)\n", 403 | "# Add current user prompt\n", 404 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 405 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 406 | "# Get model response\n", 407 | "agent_executor_taot = create_react_agent_taot(model, tools=[text_analyzer])\n", 408 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 409 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 410 | "\n", 411 | "# Example for both tools with user question requiring math calculation\n", 412 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 413 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 414 | "system_message_taot = create_system_message_taot(system_message)\n", 415 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 416 | "# Add previous messages (if available)\n", 417 | "all_messages.extend(previous_messages)\n", 418 | "# Add current user prompt\n", 419 | "user_message = \"What is 123 * 456?\"\n", 420 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 421 | "# Get model response\n", 422 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 423 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 424 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 425 | "\n", 426 | "# Example for both tools with user question requiring analysis the text\n", 427 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 428 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 429 | "system_message_taot = create_system_message_taot(system_message)\n", 430 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 431 | "# Add previous messages (if available)\n", 432 | "all_messages.extend(previous_messages)\n", 433 | "# Add current user prompt\n", 434 | "user_message = \"How many words are in this sentence?: I built my 1st Hello World program\"\n", 435 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 436 | "# Get model response\n", 437 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 438 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 439 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse\n", 440 | "\n", 441 | "# Example for both tools with user question not requiring any tools\n", 442 | "system_message = \"\"\"You are an expert in math and linguitics. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool.\n", 443 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.\"\"\"\n", 444 | "system_message_taot = create_system_message_taot(system_message)\n", 445 | "all_messages = [{\"role\": \"system\", \"content\": system_message_taot}]\n", 446 | "# Add previous messages (if available)\n", 447 | "all_messages.extend(previous_messages)\n", 448 | "# Add current user prompt\n", 449 | "user_message = \"How many languages are there in the world?\"\n", 450 | "all_messages.append({\"role\": \"user\", \"content\": user_message})\n", 451 | "# Get model response\n", 452 | "agent_executor_taot = create_react_agent_taot(model, tools=[calculator, text_analyzer])\n", 453 | "response = agent_executor_taot.invoke({\"messages\": all_messages})\n", 454 | "print(response['messages'][0]['content'].strip()) # Added .strip() to remove preceeding new lines created from using LangChain's ChatBedrockConverse" 455 | ] 456 | } 457 | ], 458 | "metadata": { 459 | "kernelspec": { 460 | "display_name": "base", 461 | "language": "python", 462 | "name": "python3" 463 | }, 464 | "language_info": { 465 | "codemirror_mode": { 466 | "name": "ipython", 467 | "version": 3 468 | }, 469 | "file_extension": ".py", 470 | "mimetype": "text/x-python", 471 | "name": "python", 472 | "nbconvert_exporter": "python", 473 | "pygments_lexer": "ipython3", 474 | "version": "3.11.11" 475 | } 476 | }, 477 | "nbformat": 4, 478 | "nbformat_minor": 2 479 | } 480 | --------------------------------------------------------------------------------