├── tutorial ├── ChatOpenAI │ ├── .env │ ├── package.json │ ├── taotTutorialChatOpenAI.js │ └── package-lock.json ├── ChatOpenAI_Qwen3 │ ├── .env │ ├── package.json │ └── taotTutorialChatOpenAI_Qwen3.js ├── ChatOpenAI_QwQ32B │ ├── .env │ ├── package.json │ └── taotTutorialChatOpenAI_QwQ32B.js ├── ChatOpenAI_DeepSeekR10528 │ ├── .env │ ├── package.json │ └── taotTutorialChatOpenAIDeepSeekR10528.js ├── BaseChatModel │ ├── azure │ │ ├── .env │ │ ├── package.json │ │ ├── taotTutorialBaseChatModelAzure.js │ │ └── package-lock.json │ └── azureNoThink │ │ ├── .env │ │ ├── package.json │ │ └── taotTutorialBaseChatModelAzureNoThink.js └── McpAdapters_DeepSeekR1 │ ├── .env │ ├── package.json │ └── TutorialMcpAdaptersDeepSeekR1.mjs ├── jest.config.js ├── src ├── index.ts ├── models.ts ├── message.ts └── agent.ts ├── tsconfig.json ├── __tests__ └── message.test.ts ├── LICENSE ├── package.json └── README.md /tutorial/ChatOpenAI/.env: -------------------------------------------------------------------------------- 1 | OPENROUTER_API_KEY=MY_OPENROUTER_API_KEY -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_Qwen3/.env: -------------------------------------------------------------------------------- 1 | OPENROUTER_API_KEY=MY_OPENROUTER_API_KEY -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_QwQ32B/.env: -------------------------------------------------------------------------------- 1 | OPENROUTER_API_KEY=MY_OPENROUTER_API_KEY 2 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_DeepSeekR10528/.env: -------------------------------------------------------------------------------- 1 | OPENROUTER_API_KEY=MY_OPENROUTER_API_KEY -------------------------------------------------------------------------------- /tutorial/BaseChatModel/azure/.env: -------------------------------------------------------------------------------- 1 | 
AZURE_API_KEY=MY_AZURE_API_KEY 2 | AZURE_ENDPOINT_BASE_URL=MY_AZURE_ENDPOINT_BASE_URL -------------------------------------------------------------------------------- /tutorial/BaseChatModel/azureNoThink/.env: -------------------------------------------------------------------------------- 1 | AZURE_API_KEY=MY_AZURE_API_KEY 2 | AZURE_ENDPOINT_BASE_URL=MY_AZURE_ENDPOINT_BASE_URL -------------------------------------------------------------------------------- /tutorial/McpAdapters_DeepSeekR1/.env: -------------------------------------------------------------------------------- 1 | OPENROUTER_API_KEY="MY_OPENROUTER_API_KEY" 2 | BRAVE_API_KEY="MY_BRAVE_API_KEY" 3 | ACCUWEATHER_API_KEY="MY_ACCUWEATHER_API_KEY" -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | preset: 'ts-jest', 3 | testEnvironment: 'node', 4 | testMatch: ['**/__tests__/**/*.ts?(x)', '**/?(*.)+(spec|test).ts?(x)'], 5 | collectCoverageFrom: [ 6 | 'src/**/*.ts', 7 | '!src/**/*.d.ts', 8 | ], 9 | }; -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | export { ToolCall } from './models'; 2 | export { createSystemMessageTaot } from './message'; 3 | export { ManualToolAgent, createReactAgentTaot, Tool } from './agent'; 4 | 5 | /** 6 | * Package version 7 | */ 8 | export const version = '0.1.3'; -------------------------------------------------------------------------------- /src/models.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Defines the core model for tool calls. 
3 | */ 4 | export interface ToolCall { 5 | /** 6 | * Name of the tool to call 7 | */ 8 | tool: string; 9 | 10 | /** 11 | * Arguments to pass to the tool 12 | */ 13 | args: Record; 14 | } -------------------------------------------------------------------------------- /tutorial/ChatOpenAI/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chatopenai", 3 | "version": "1.0.0", 4 | "main": "taotTutorialChatOpenAI.js", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@langchain/openai": "^0.4.4", 14 | "dotenv": "^16.4.7", 15 | "taot-ts": "^0.1.6" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_Qwen3/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chatopenai_qwen3", 3 | "version": "1.0.0", 4 | "main": "taotTutorialChatOpenAI_Qwen3.js", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@langchain/openai": "^0.5.10", 14 | "dotenv": "^16.5.0", 15 | "taot-ts": "^0.1.7" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_QwQ32B/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chatopenai_qwq32b", 3 | "version": "1.0.0", 4 | "main": "taotTutorialChatOpenAI_QwQ32B.js", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@langchain/openai": "^0.4.4", 14 | "dotenv": "^16.4.7", 15 | "taot-ts": "^0.1.7" 16 | } 17 | } 18 | 
-------------------------------------------------------------------------------- /tutorial/BaseChatModel/azure/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "azure", 3 | "version": "1.0.0", 4 | "main": "taotTutorialBaseChatModelAzure.js", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@langchain/core": "^0.3.42", 14 | "dotenv": "^16.4.7", 15 | "node-fetch": "^2.7.0", 16 | "taot-ts": "^0.1.6" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_DeepSeekR10528/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chatopenai_deepseekr10528", 3 | "version": "1.0.0", 4 | "main": "taotTutorialChatOpenAIDeepSeekR10528.js", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@langchain/openai": "^0.5.11", 14 | "dotenv": "^16.5.0", 15 | "taot-ts": "^0.2.4" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /tutorial/McpAdapters_DeepSeekR1/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "javascript", 3 | "version": "1.0.0", 4 | "main": "index.js", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@langchain/langgraph": "^0.2.62", 14 | "@langchain/mcp-adapters": "^0.3.4", 15 | "@langchain/openai": "^0.5.2", 16 | "dotenv": "^16.4.7" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- 
/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "CommonJS", 5 | "moduleResolution": "node", 6 | "declaration": true, 7 | "outDir": "./dist", 8 | "strict": true, 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "resolveJsonModule": true, 13 | "rootDir": "./src" 14 | }, 15 | "include": ["src/**/*"], 16 | "exclude": ["node_modules", "**/*.test.ts", "dist"] 17 | } -------------------------------------------------------------------------------- /tutorial/BaseChatModel/azureNoThink/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "azurenothink", 3 | "version": "1.0.0", 4 | "main": "taotTutorialBaseChatModelAzureNoThink.js", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@langchain/core": "^0.3.42", 14 | "dotenv": "^16.4.7", 15 | "node-fetch": "^2.7.0", 16 | "taot-ts": "^0.1.6" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /__tests__/message.test.ts: -------------------------------------------------------------------------------- 1 | import { createSystemMessageTaot } from '../src/message'; 2 | 3 | describe('createSystemMessageTaot', () => { 4 | it('should include the original system message', () => { 5 | const originalMessage = 'You are a helpful assistant.'; 6 | const result = createSystemMessageTaot(originalMessage); 7 | 8 | expect(result).toContain(originalMessage); 9 | }); 10 | 11 | it('should include instructions for tools', () => { 12 | const result = createSystemMessageTaot('Test'); 13 | 14 | expect(result).toContain('user\'s question matches a tool'); 15 | expect(result).toContain('JSON object'); 16 | }); 17 | 18 | it('should 
include the JSON schema', () => { 19 | const result = createSystemMessageTaot('Test'); 20 | 21 | expect(result).toContain('"tool"'); 22 | expect(result).toContain('"args"'); 23 | expect(result).toContain('"required"'); 24 | }); 25 | }); -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /src/message.ts: -------------------------------------------------------------------------------- 1 | import { JsonOutputParser } from "@langchain/core/output_parsers"; 2 | import { ToolCall } from "./models"; 3 | 4 | /** 5 | * Create a system message with tool instructions and JSON schema. 
6 | * 7 | * @param systemMessage - The specific system message for tools 8 | * @returns Formatted system message with JSON schema instructions 9 | */ 10 | export function createSystemMessageTaot(systemMessage: string): string { 11 | const jsonParser = new JsonOutputParser(); 12 | 13 | // Get format instructions for the ToolCall schema 14 | const formatInstructions = `{ 15 | "properties": { 16 | "tool": { 17 | "type": "string", 18 | "description": "Name of the tool to call" 19 | }, 20 | "args": { 21 | "type": "object", 22 | "description": "Arguments to pass to the tool" 23 | } 24 | }, 25 | "required": ["tool", "args"] 26 | }`; 27 | 28 | return `${systemMessage}\n 29 | When a user's question matches a tool's capability, you MUST use that tool. 30 | Do not try to solve problems manually if a tool exists for that purpose. 31 | 32 | Output ONLY a JSON object (with no extra text) that adheres EXACTLY to the following schema: 33 | 34 | ${formatInstructions} 35 | 36 | If the user's question doesn't require any tool, answer directly in plain text with no JSON.`; 37 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "taot-ts", 3 | "version": "0.1.4", 4 | "description": "Tools as Operational Transforms for LangChain - TypeScript version", 5 | "main": "dist/index.js", 6 | "types": "dist/index.d.ts", 7 | "files": [ 8 | "dist/**/*" 9 | ], 10 | "scripts": { 11 | "build": "tsc", 12 | "clean": "rimraf dist", 13 | "lint": "eslint --ext .ts src/", 14 | "test": "jest", 15 | "prepublishOnly": "npm run clean && npm run build" 16 | }, 17 | "keywords": [ 18 | "langchain", 19 | "ai", 20 | "llm", 21 | "agents", 22 | "tools" 23 | ], 24 | "author": "Leo Chow", 25 | "license": "MIT", 26 | "repository": { 27 | "type": "git", 28 | "url": "https://github.com/leockl/tool-ahead-of-time-ts.git" 29 | }, 30 | "homepage": 
"https://github.com/leockl/tool-ahead-of-time-ts", 31 | "bugs": { 32 | "url": "https://github.com/leockl/tool-ahead-of-time-ts/issues" 33 | }, 34 | "dependencies": { 35 | "@langchain/core": "^0.1.7", 36 | "taot-ts": "^0.1.3" 37 | }, 38 | "peerDependencies": { 39 | "@langchain/openai": ">=0.0.10", 40 | "dotenv": ">=8.0.0" 41 | }, 42 | "devDependencies": { 43 | "@langchain/openai": "^0.0.14", 44 | "@types/jest": "^29.5.0", 45 | "@types/node": "^18.15.0", 46 | "@typescript-eslint/eslint-plugin": "^5.56.0", 47 | "@typescript-eslint/parser": "^5.56.0", 48 | "dotenv": "^16.4.7", 49 | "eslint": "^8.36.0", 50 | "jest": "^29.5.0", 51 | "rimraf": "^4.4.0", 52 | "ts-jest": "^29.1.0", 53 | "typescript": "^5.3.3" 54 | }, 55 | "engines": { 56 | "node": ">=16.0.0" 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /tutorial/McpAdapters_DeepSeekR1/TutorialMcpAdaptersDeepSeekR1.mjs: -------------------------------------------------------------------------------- 1 | import dotenv from 'dotenv'; 2 | dotenv.config(); 3 | 4 | import { MultiServerMCPClient } from '@langchain/mcp-adapters'; 5 | import { ChatOpenAI } from '@langchain/openai'; 6 | import { createReactAgent } from '@langchain/langgraph/prebuilt'; 7 | 8 | const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY; 9 | const BRAVE_API_KEY = process.env.BRAVE_API_KEY; 10 | const ACCUWEATHER_API_KEY = process.env.ACCUWEATHER_API_KEY; 11 | 12 | // Initialize the language model using "openAIApiKey" 13 | const model = new ChatOpenAI({ 14 | modelName: 'deepseek/deepseek-r1', 15 | apiKey: process.env.OPENROUTER_API_KEY, 16 | configuration: { 17 | baseURL: "https://openrouter.ai/api/v1" 18 | } 19 | }); 20 | 21 | async function main() { 22 | const client = new MultiServerMCPClient(); 23 | 24 | // Connect to the Brave Search MCP server via SSE with custom header. 
25 | await client.connectToServerViaSSE('brave-search', 'http://localhost:8001/sse', { 26 | Authorization: BRAVE_API_KEY, 27 | }); 28 | 29 | // Connect to the Weather MCP server via SSE with custom header. 30 | await client.connectToServerViaSSE('weather', 'http://localhost:8002/sse', { 31 | Authorization: ACCUWEATHER_API_KEY, 32 | }); 33 | 34 | // Retrieve all tools from the connected servers. 35 | const tools = client.getTools(); 36 | 37 | // Create the agent with the language model and loaded tools. 38 | const agent = createReactAgent({ llm: model, tools }); 39 | 40 | // Example usage: Perform a web search using Brave Search. 41 | const searchResponse = await agent.invoke({ 42 | messages: [{ role: 'user', content: 'Search for the latest news on AI.' }], 43 | }); 44 | // console.log('Search Response:', searchResponse); 45 | console.log(searchResponse.messages[searchResponse.messages.length - 1].content); 46 | 47 | // Example usage: Get the weather forecast using the Weather MCP server. 48 | const weatherResponse = await agent.invoke({ 49 | messages: [{ role: 'user', content: "What's the weather forecast for Sydney tomorrow?" }], 50 | }); 51 | // console.log('Weather Response:', weatherResponse); 52 | console.log(weatherResponse.messages[weatherResponse.messages.length - 1].content); 53 | 54 | await client.close(); 55 | } 56 | 57 | // Run the main function using top-level await. 
58 | await main(); 59 | -------------------------------------------------------------------------------- /src/agent.ts: -------------------------------------------------------------------------------- 1 | import { SystemMessage, HumanMessage, AIMessage, BaseMessage } from "@langchain/core/messages"; 2 | import { Runnable } from "@langchain/core/runnables"; 3 | import { ToolCall } from "./models"; 4 | 5 | /** 6 | * Interface for messages with role and content 7 | */ 8 | export interface Message { 9 | role: string; 10 | content: string; 11 | } 12 | 13 | /** 14 | * Interface defining the structure of a tool 15 | */ 16 | export interface Tool { 17 | name: string; 18 | invoke: (args: Record) => Promise; 19 | } 20 | 21 | /** 22 | * A custom agent that handles tools manually. 23 | */ 24 | export class ManualToolAgent extends Runnable<{ messages: Message[] }, { messages: { content: string }[] }> { 25 | lc_namespace = ["taot_ts", "agents"]; 26 | 27 | private model: any; 28 | private tools: any[]; 29 | private maxRetries: number = 100; 30 | 31 | /** 32 | * Create a new ManualToolAgent instance. 33 | * 34 | * @param model - The language model to use 35 | * @param tools - List of tool functions 36 | */ 37 | constructor(model: any, tools: any[]) { 38 | super({}); 39 | this.model = model; 40 | this.tools = tools; 41 | } 42 | 43 | /** 44 | * Invokes the agent with the provided inputs. 45 | * 46 | * @param inputs - The input messages 47 | * @returns The output messages 48 | */ 49 | async invoke( 50 | inputs: { messages: Message[] } 51 | ): Promise<{ messages: { content: string }[] }> { 52 | return this._call(inputs); 53 | } 54 | 55 | /** 56 | * Check if the response is empty or contains only whitespace. 
57 | * 58 | * @param responseText - The response text to check 59 | * @returns True if response is empty, False otherwise 60 | */ 61 | private isEmptyResponse(responseText: string | null | undefined): boolean { 62 | if (responseText === null || responseText === undefined) { 63 | return true; 64 | } 65 | if (!responseText.trim()) { 66 | return true; 67 | } 68 | return false; 69 | } 70 | 71 | /** 72 | * Convert dictionary-based messages to LangChain message objects. 73 | * 74 | * @param messages - List of messages to convert 75 | * @returns Converted LangChain message objects 76 | */ 77 | private convertMessages(messages: Message[]): BaseMessage[] { 78 | const convertedMessages: BaseMessage[] = []; 79 | 80 | for (const message of messages) { 81 | const { role, content } = message; 82 | 83 | if (role === "system") { 84 | convertedMessages.push(new SystemMessage({ content })); 85 | } else if (role === "user") { 86 | convertedMessages.push(new HumanMessage({ content })); 87 | } else if (role === "assistant") { 88 | convertedMessages.push(new AIMessage({ content })); 89 | } 90 | } 91 | 92 | return convertedMessages; 93 | } 94 | 95 | /** 96 | * Format tool result using LLM to create natural language response. 97 | * 98 | * @param toolName - Name of the tool used 99 | * @param toolResult - Result from the tool 100 | * @param userQuery - Original user query 101 | * @returns Formatted natural language response 102 | */ 103 | private async formatToolResult( 104 | toolName: string, 105 | toolResult: string, 106 | userQuery: string 107 | ): Promise { 108 | const prompt = `Given the following: 109 | User query: ${userQuery} 110 | Tool used: ${toolName} 111 | Tool result: ${toolResult} 112 | 113 | Create a natural language response to the user query that incorporates the result from the tool. Do not mention anything about using the tool used. 
114 | Keep it concise and direct.`; 115 | 116 | let retryCount = 0; 117 | while (retryCount < this.maxRetries) { 118 | const response = await this.model.invoke([new HumanMessage({ content: prompt })]); 119 | const responseContent = response.content as string; 120 | 121 | if (!this.isEmptyResponse(responseContent)) { 122 | return responseContent; 123 | } 124 | retryCount++; 125 | } 126 | 127 | // If we've reached here, we've exceeded max retries with empty responses 128 | // Return a default response with the raw tool result 129 | return `The result is: ${toolResult}`; 130 | } 131 | 132 | /** 133 | * Parse a JSON string to extract a ToolCall object. 134 | * 135 | * @param text - Text potentially containing JSON 136 | * @returns Parsed ToolCall object or null if parsing fails 137 | */ 138 | private parseToolCall(text: string): ToolCall | null { 139 | try { 140 | // Try to parse the entire response as JSON first 141 | try { 142 | const parsed = JSON.parse(text); 143 | if (parsed.tool && parsed.args) { 144 | return parsed as ToolCall; 145 | } 146 | } catch { 147 | // Not a valid JSON object, continue to regex matching 148 | } 149 | 150 | // Find JSON objects in the text 151 | const jsonRegex = /\{(?:[^{}]|(\{(?:[^{}]|(?:\{[^{}]*\}))*\}))*\}/g; 152 | const matches = text.match(jsonRegex); 153 | 154 | if (!matches) return null; 155 | 156 | for (const match of matches) { 157 | try { 158 | const parsed = JSON.parse(match); 159 | if (parsed.tool && parsed.args) { 160 | return parsed as ToolCall; 161 | } 162 | } catch (e) { 163 | // Continue to next match on parse error 164 | continue; 165 | } 166 | } 167 | 168 | return null; 169 | } catch (e) { 170 | console.error("Error parsing tool call:", e); 171 | return null; 172 | } 173 | } 174 | 175 | /** 176 | * Execute the agent with manual tool handling (internal implementation). 
177 | * 178 | * @param inputs - Dictionary containing messages 179 | * @returns Response containing processed message 180 | */ 181 | async _call( 182 | inputs: { messages: Message[] } 183 | ): Promise<{ messages: { content: string }[] }> { 184 | // Get messages 185 | const { messages } = inputs; 186 | const userQuery = messages[messages.length - 1].content; // Get the last user message 187 | 188 | // Convert messages to LangChain format 189 | const convertedMessages = this.convertMessages(messages); 190 | 191 | // Implement a simplified version of ReAct reasoning 192 | const thinkingPrompt = new HumanMessage({ 193 | content: "Think through this step by step. If the user's request requires using a tool, respond with a JSON object containing 'tool' and 'args' properties." 194 | }); 195 | const augmentedMessages = [...convertedMessages, thinkingPrompt]; 196 | 197 | // Get response from the model with retry logic for empty responses 198 | let lastResponse: string | null = null; 199 | let retryCount = 0; 200 | 201 | while (retryCount < this.maxRetries) { 202 | const response = await this.model.invoke(augmentedMessages); 203 | lastResponse = response.content as string; 204 | 205 | if (!this.isEmptyResponse(lastResponse)) { 206 | break; 207 | } 208 | 209 | retryCount++; 210 | } 211 | 212 | // If we still have an empty response after all retries, return an error message 213 | if (this.isEmptyResponse(lastResponse)) { 214 | return { messages: [{ content: "I'm having trouble generating a response. Please try again." 
}] }; 215 | } 216 | 217 | // Try to parse as a tool call 218 | const toolCall = this.parseToolCall(lastResponse as string); 219 | 220 | if (toolCall) { 221 | try { 222 | // Find the matching tool 223 | const tool = this.tools.find(t => t.name === toolCall.tool); 224 | 225 | if (tool) { 226 | const rawResult = await tool.invoke(toolCall.args); 227 | // Format the result using LLM 228 | const result = await this.formatToolResult(toolCall.tool, rawResult, userQuery); 229 | return { messages: [{ content: result }] }; 230 | } else { 231 | return { messages: [{ content: "Error: Unknown tool" }] }; 232 | } 233 | } catch (e) { 234 | return { 235 | messages: [{ 236 | content: `Error processing tool call: ${e instanceof Error ? e.message : String(e)}` 237 | }] 238 | }; 239 | } 240 | } else { 241 | return { messages: [{ content: lastResponse as string }] }; 242 | } 243 | } 244 | } 245 | 246 | /** 247 | * Create a React agent with manual tool handling. 248 | * 249 | * @param model - The language model to use 250 | * @param tools - List of tool functions 251 | * @returns Agent with manual tool handling 252 | */ 253 | export function createReactAgentTaot( 254 | model: any, 255 | tools: any[] 256 | ): ManualToolAgent { 257 | return new ManualToolAgent(model, tools); 258 | } 259 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI/taotTutorialChatOpenAI.js: -------------------------------------------------------------------------------- 1 | // Load environment variable (ie. 
API key) from the .env file 2 | require('dotenv').config(); 3 | 4 | // Import from the taot-ts package and ChatOpenAI package 5 | const { createSystemMessageTaot, createReactAgentTaot } = require('taot-ts'); 6 | const { ChatOpenAI } = require('@langchain/openai'); 7 | 8 | // Define calculator tool 9 | const calculatorTool = { 10 | name: 'calculator', 11 | invoke: async (args) => { 12 | try { 13 | const expression = args.expression.trim(); 14 | if (!expression) { 15 | return "Error: Empty expression"; 16 | } 17 | 18 | const allowedChars = "0123456789+-*/() ."; 19 | for (const char of expression) { 20 | if (!allowedChars.includes(char)) { 21 | return "Error: Invalid characters in expression"; 22 | } 23 | } 24 | 25 | // Use Function instead of eval for better safety 26 | const result = new Function('return ' + expression)(); 27 | return String(result); 28 | } catch (e) { 29 | return `Error: ${e.message}`; 30 | } 31 | } 32 | }; 33 | 34 | // Define text analyzer tool 35 | const textAnalyzerTool = { 36 | name: 'text_analyzer', 37 | invoke: async (args) => { 38 | try { 39 | const text = args.text.trim(); 40 | const analysisType = args.analysis_type; 41 | 42 | if (!text) { 43 | return "Error: Empty text"; 44 | } 45 | 46 | if (analysisType.toLowerCase() === 'words') { 47 | const wordCount = text.split(/\s+/).filter(Boolean).length; 48 | return `${wordCount}`; 49 | } else if (analysisType.toLowerCase() === 'chars') { 50 | const charCount = text.length; 51 | return `${charCount}`; 52 | } else { 53 | return "Error: analysis_type must be either 'words' or 'chars'"; 54 | } 55 | } catch (e) { 56 | return `Error: ${e.message}`; 57 | } 58 | } 59 | }; 60 | 61 | // Initialize model - reused across examples 62 | // In this tutorial, I am using the DeepSeek-R1 671B model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class. 
63 | // If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain.js's ChatOpenAI class, and then change the values of the parameters "model", "api_key" and "base_url" below according to which model and platform you have chosen. 64 | const createModel = () => { 65 | return new ChatOpenAI({ 66 | modelName: "deepseek/deepseek-r1", 67 | apiKey: process.env.OPENROUTER_API_KEY, 68 | configuration: { 69 | baseURL: "https://openrouter.ai/api/v1" 70 | } 71 | }); 72 | }; 73 | 74 | // Example previous messages 75 | // Note: Based on current best practices in chatbot design, we do not include system message in previous_messages as it's handled separately further down the script 76 | const previous_messages = [ 77 | // { role: "system", content: "You are a helpful AI assistant." }, // Commented out as we do not include system message 78 | { role: "user", content: "What is the capital of Australia?" }, 79 | { role: "assistant", content: "The capital of Australia is Canberra." } 80 | ]; 81 | 82 | // Getting Model Response 83 | // For ease of use, I have designed the taot-ts package to mimic LangChain.js's and LangGraph.js's "createReactAgent" method with tool calling. 84 | // First, the systemMessage variable below can start with any customized system message as per usual, for eg. "You are a helpful assistant. ", "You are an expert programmer in Python. ", "You are a world class expert in SEO optimization. " etc. 85 | // Then, the systemMessage variable below needs to STRICTLY include the following: "You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool." 
86 | // For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the systemMessage variable below would need to look like "You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool." 87 | // For the 'text analyze' tool, since the function for the 'text analyze' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the systemMessage variable below would need to look like "You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool." 88 | // Below are five examples of different combinations of user questions and tools used: 89 | 90 | // Example for calculator tool only 91 | async function runCalculatorExample() { 92 | try { 93 | // Initialize model 94 | const model = createModel(); 95 | 96 | // Create system message 97 | const systemMessage = "You are a math expert. You are an assistant with access to specific tools. " + 98 | "When the user's question requires a calculation, use the 'calculator' tool. 
" + 99 | "For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool."; 100 | 101 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 102 | 103 | // Prepare all messages 104 | const allMessages = [ 105 | { role: "system", content: systemMessageTaot } 106 | ]; 107 | 108 | // Add previous messages 109 | allMessages.push(...previous_messages); 110 | 111 | // Add current user query 112 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 113 | 114 | // Create agent and invoke 115 | const agent = createReactAgentTaot(model, [calculatorTool]); 116 | const response = await agent.invoke({ 117 | messages: allMessages 118 | }); 119 | 120 | // Print result 121 | console.log(response.messages[0].content); 122 | } catch (error) { 123 | console.error("Error occurred:", error); 124 | } 125 | } 126 | 127 | // Example for text analyzer tool only 128 | async function runTextAnalyzerExample() { 129 | try { 130 | // Initialize model 131 | const model = createModel(); 132 | 133 | // Create system message 134 | const systemMessage = "You are an expert in linguistics. You are an assistant with access to specific tools. " + 135 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
" + 136 | "For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and " + 137 | "either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool."; 138 | 139 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 140 | 141 | // Prepare all messages 142 | const allMessages = [ 143 | { role: "system", content: systemMessageTaot } 144 | ]; 145 | 146 | // Add previous messages 147 | allMessages.push(...previous_messages); 148 | 149 | // Add current user query 150 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 151 | 152 | // Create agent and invoke 153 | const agent = createReactAgentTaot(model, [textAnalyzerTool]); 154 | const response = await agent.invoke({ 155 | messages: allMessages 156 | }); 157 | 158 | // Print result 159 | console.log(response.messages[0].content); 160 | } catch (error) { 161 | console.error("Error occurred:", error); 162 | } 163 | } 164 | 165 | // Example for both tools with user question requiring math calculation 166 | async function runBothToolsMathExample() { 167 | try { 168 | // Initialize model 169 | const model = createModel(); 170 | 171 | // Create system message 172 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 173 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 174 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 175 | 176 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 177 | 178 | // Prepare all messages 179 | const allMessages = [ 180 | { role: "system", content: systemMessageTaot } 181 | ]; 182 | 183 | // Add previous messages 184 | allMessages.push(...previous_messages); 185 | 186 | // Add current user query 187 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 188 | 189 | // Create agent and invoke 190 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 191 | const response = await agent.invoke({ 192 | messages: allMessages 193 | }); 194 | 195 | // Print result 196 | console.log(response.messages[0].content); 197 | } catch (error) { 198 | console.error("Error occurred:", error); 199 | } 200 | } 201 | 202 | // Example for both tools with user question requiring text analysis 203 | async function runBothToolsTextExample() { 204 | try { 205 | // Initialize model 206 | const model = createModel(); 207 | 208 | // Create system message 209 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 210 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 211 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 212 | 213 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 214 | 215 | // Prepare all messages 216 | const allMessages = [ 217 | { role: "system", content: systemMessageTaot } 218 | ]; 219 | 220 | // Add previous messages 221 | allMessages.push(...previous_messages); 222 | 223 | // Add current user query 224 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 225 | 226 | // Create agent and invoke 227 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 228 | const response = await agent.invoke({ 229 | messages: allMessages 230 | }); 231 | 232 | // Print result 233 | console.log(response.messages[0].content); 234 | } catch (error) { 235 | console.error("Error occurred:", error); 236 | } 237 | } 238 | 239 | // Example for both tools with user question not requiring any tools 240 | async function runBothToolsNoToolsExample() { 241 | try { 242 | // Initialize model 243 | const model = createModel(); 244 | 245 | // Create system message 246 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 247 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 248 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 249 | 250 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 251 | 252 | // Prepare all messages 253 | const allMessages = [ 254 | { role: "system", content: systemMessageTaot } 255 | ]; 256 | 257 | // Add previous messages 258 | allMessages.push(...previous_messages); 259 | 260 | // Add current user query 261 | allMessages.push({ role: "user", content: "How many languages are there in the world?" }); 262 | 263 | // Create agent and invoke 264 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 265 | const response = await agent.invoke({ 266 | messages: allMessages 267 | }); 268 | 269 | // Print result 270 | console.log(response.messages[0].content); 271 | } catch (error) { 272 | console.error("Error occurred:", error); 273 | } 274 | } 275 | 276 | // Run all examples 277 | async function runAllExamples() { 278 | try { 279 | await runCalculatorExample(); 280 | await runTextAnalyzerExample(); 281 | await runBothToolsMathExample(); 282 | await runBothToolsTextExample(); 283 | await runBothToolsNoToolsExample(); 284 | } catch (error) { 285 | console.error("Error running examples:", error); 286 | } 287 | } 288 | 289 | // Run all examples 290 | runAllExamples(); 291 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_QwQ32B/taotTutorialChatOpenAI_QwQ32B.js: -------------------------------------------------------------------------------- 1 | // Load environment variable (ie. 
API key) from the .env file 2 | require('dotenv').config(); 3 | 4 | // Import from the taot-ts package and ChatOpenAI package 5 | const { createSystemMessageTaot, createReactAgentTaot } = require('taot-ts'); 6 | const { ChatOpenAI } = require('@langchain/openai'); 7 | 8 | // Define calculator tool 9 | const calculatorTool = { 10 | name: 'calculator', 11 | invoke: async (args) => { 12 | try { 13 | const expression = args.expression.trim(); 14 | if (!expression) { 15 | return "Error: Empty expression"; 16 | } 17 | 18 | const allowedChars = "0123456789+-*/() ."; 19 | for (const char of expression) { 20 | if (!allowedChars.includes(char)) { 21 | return "Error: Invalid characters in expression"; 22 | } 23 | } 24 | 25 | // Use Function instead of eval for better safety 26 | const result = new Function('return ' + expression)(); 27 | return String(result); 28 | } catch (e) { 29 | return `Error: ${e.message}`; 30 | } 31 | } 32 | }; 33 | 34 | // Define text analyzer tool 35 | const textAnalyzerTool = { 36 | name: 'text_analyzer', 37 | invoke: async (args) => { 38 | try { 39 | const text = args.text.trim(); 40 | const analysisType = args.analysis_type; 41 | 42 | if (!text) { 43 | return "Error: Empty text"; 44 | } 45 | 46 | if (analysisType.toLowerCase() === 'words') { 47 | const wordCount = text.split(/\s+/).filter(Boolean).length; 48 | return `${wordCount}`; 49 | } else if (analysisType.toLowerCase() === 'chars') { 50 | const charCount = text.length; 51 | return `${charCount}`; 52 | } else { 53 | return "Error: analysis_type must be either 'words' or 'chars'"; 54 | } 55 | } catch (e) { 56 | return `Error: ${e.message}`; 57 | } 58 | } 59 | }; 60 | 61 | // Initialize model - reused across examples 62 | // In this tutorial, I am using the DeepSeek-R1 model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class. 
63 | // If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain.js's ChatOpenAI class, and then change the values of the parameters "model", "api_key" and "base_url" below according to which model and platform you have chosen. 64 | const createModel = () => { 65 | return new ChatOpenAI({ 66 | modelName: "qwen/qwq-32b", 67 | apiKey: process.env.OPENROUTER_API_KEY, 68 | configuration: { 69 | baseURL: "https://openrouter.ai/api/v1" 70 | } 71 | }); 72 | }; 73 | 74 | // Example previous messages 75 | // Note: Based on current best practices in chatbot design, we do not include system message in previous_messages as it's handled separately further down the script 76 | const previous_messages = [ 77 | // { role: "system", content: "You are a helpful AI assistant." }, // Commented out as we do not include system message 78 | { role: "user", content: "What is the capital of Australia?" }, 79 | { role: "assistant", content: "The capital of Australia is Canberra." } 80 | ]; 81 | 82 | // Getting Model Response 83 | // For ease of use, I have designed the taot-ts package to mimic LangChain.js's and LangGraph.js's "createReactAgent" method with tool calling. 84 | // First, the systemMessage variable below can start with any customized system message as per usual, for eg. "You are a helpful assistant. ", "You are an expert programmer in Python. ", "You are a world class expert in SEO optimization. " etc. 85 | // Then, the systemMessage variable below needs to STRICTLY include the following: "You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool." 86 | // For eg. 
for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the systemMessage variable below would need to look like "You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool." 87 | // For the 'text_analyzer' tool, since the function for the 'text_analyzer' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the systemMessage variable below would need to look like "You are an expert in linguistics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool." 88 | // Below are five examples of different combinations of user questions and tools used: 89 | 90 | // Example for calculator tool only 91 | async function runCalculatorExample() { 92 | try { 93 | // Initialize model 94 | const model = createModel(); 95 | 96 | // Create system message 97 | const systemMessage = "You are a math expert. You are an assistant with access to specific tools. " + 98 | "When the user's question requires a calculation, use the 'calculator' tool. 
" + 99 | "For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool."; 100 | 101 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 102 | 103 | // Prepare all messages 104 | const allMessages = [ 105 | { role: "system", content: systemMessageTaot } 106 | ]; 107 | 108 | // Add previous messages 109 | allMessages.push(...previous_messages); 110 | 111 | // Add current user query 112 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 113 | 114 | // Create agent and invoke 115 | const agent = createReactAgentTaot(model, [calculatorTool]); 116 | const response = await agent.invoke({ 117 | messages: allMessages 118 | }); 119 | 120 | // Print result 121 | console.log(response.messages[0].content); 122 | } catch (error) { 123 | console.error("Error occurred:", error); 124 | } 125 | } 126 | 127 | // Example for text analyzer tool only 128 | async function runTextAnalyzerExample() { 129 | try { 130 | // Initialize model 131 | const model = createModel(); 132 | 133 | // Create system message 134 | const systemMessage = "You are an expert in linguistics. You are an assistant with access to specific tools. " + 135 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
" + 136 | "For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and " + 137 | "either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool."; 138 | 139 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 140 | 141 | // Prepare all messages 142 | const allMessages = [ 143 | { role: "system", content: systemMessageTaot } 144 | ]; 145 | 146 | // Add previous messages 147 | allMessages.push(...previous_messages); 148 | 149 | // Add current user query 150 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 151 | 152 | // Create agent and invoke 153 | const agent = createReactAgentTaot(model, [textAnalyzerTool]); 154 | const response = await agent.invoke({ 155 | messages: allMessages 156 | }); 157 | 158 | // Print result 159 | console.log(response.messages[0].content); 160 | } catch (error) { 161 | console.error("Error occurred:", error); 162 | } 163 | } 164 | 165 | // Example for both tools with user question requiring math calculation 166 | async function runBothToolsMathExample() { 167 | try { 168 | // Initialize model 169 | const model = createModel(); 170 | 171 | // Create system message 172 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 173 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 174 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 175 | 176 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 177 | 178 | // Prepare all messages 179 | const allMessages = [ 180 | { role: "system", content: systemMessageTaot } 181 | ]; 182 | 183 | // Add previous messages 184 | allMessages.push(...previous_messages); 185 | 186 | // Add current user query 187 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 188 | 189 | // Create agent and invoke 190 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 191 | const response = await agent.invoke({ 192 | messages: allMessages 193 | }); 194 | 195 | // Print result 196 | console.log(response.messages[0].content); 197 | } catch (error) { 198 | console.error("Error occurred:", error); 199 | } 200 | } 201 | 202 | // Example for both tools with user question requiring text analysis 203 | async function runBothToolsTextExample() { 204 | try { 205 | // Initialize model 206 | const model = createModel(); 207 | 208 | // Create system message 209 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 210 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 211 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 212 | 213 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 214 | 215 | // Prepare all messages 216 | const allMessages = [ 217 | { role: "system", content: systemMessageTaot } 218 | ]; 219 | 220 | // Add previous messages 221 | allMessages.push(...previous_messages); 222 | 223 | // Add current user query 224 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 225 | 226 | // Create agent and invoke 227 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 228 | const response = await agent.invoke({ 229 | messages: allMessages 230 | }); 231 | 232 | // Print result 233 | console.log(response.messages[0].content); 234 | } catch (error) { 235 | console.error("Error occurred:", error); 236 | } 237 | } 238 | 239 | // Example for both tools with user question not requiring any tools 240 | async function runBothToolsNoToolsExample() { 241 | try { 242 | // Initialize model 243 | const model = createModel(); 244 | 245 | // Create system message 246 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 247 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 248 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 249 | 250 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 251 | 252 | // Prepare all messages 253 | const allMessages = [ 254 | { role: "system", content: systemMessageTaot } 255 | ]; 256 | 257 | // Add previous messages 258 | allMessages.push(...previous_messages); 259 | 260 | // Add current user query 261 | allMessages.push({ role: "user", content: "How many languages are there in the world?" }); 262 | 263 | // Create agent and invoke 264 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 265 | const response = await agent.invoke({ 266 | messages: allMessages 267 | }); 268 | 269 | // Print result 270 | console.log(response.messages[0].content); 271 | } catch (error) { 272 | console.error("Error occurred:", error); 273 | } 274 | } 275 | 276 | // Run all examples 277 | async function runAllExamples() { 278 | try { 279 | await runCalculatorExample(); 280 | await runTextAnalyzerExample(); 281 | await runBothToolsMathExample(); 282 | await runBothToolsTextExample(); 283 | await runBothToolsNoToolsExample(); 284 | } catch (error) { 285 | console.error("Error running examples:", error); 286 | } 287 | } 288 | 289 | // Run all examples 290 | runAllExamples(); 291 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_DeepSeekR10528/taotTutorialChatOpenAIDeepSeekR10528.js: -------------------------------------------------------------------------------- 1 | // Load environment variable (ie. 
API key) from the .env file 2 | require('dotenv').config(); 3 | 4 | // Import from the taot-ts package and ChatOpenAI package 5 | const { createSystemMessageTaot, createReactAgentTaot } = require('taot-ts'); 6 | const { ChatOpenAI } = require('@langchain/openai'); 7 | 8 | // Define calculator tool 9 | const calculatorTool = { 10 | name: 'calculator', 11 | invoke: async (args) => { 12 | try { 13 | const expression = args.expression.trim(); 14 | if (!expression) { 15 | return "Error: Empty expression"; 16 | } 17 | 18 | const allowedChars = "0123456789+-*/() ."; 19 | for (const char of expression) { 20 | if (!allowedChars.includes(char)) { 21 | return "Error: Invalid characters in expression"; 22 | } 23 | } 24 | 25 | // Use Function instead of eval for better safety 26 | const result = new Function('return ' + expression)(); 27 | return String(result); 28 | } catch (e) { 29 | return `Error: ${e.message}`; 30 | } 31 | } 32 | }; 33 | 34 | // Define text analyzer tool 35 | const textAnalyzerTool = { 36 | name: 'text_analyzer', 37 | invoke: async (args) => { 38 | try { 39 | const text = args.text.trim(); 40 | const analysisType = args.analysis_type; 41 | 42 | if (!text) { 43 | return "Error: Empty text"; 44 | } 45 | 46 | if (analysisType.toLowerCase() === 'words') { 47 | const wordCount = text.split(/\s+/).filter(Boolean).length; 48 | return `${wordCount}`; 49 | } else if (analysisType.toLowerCase() === 'chars') { 50 | const charCount = text.length; 51 | return `${charCount}`; 52 | } else { 53 | return "Error: analysis_type must be either 'words' or 'chars'"; 54 | } 55 | } catch (e) { 56 | return `Error: ${e.message}`; 57 | } 58 | } 59 | }; 60 | 61 | // Initialize model - reused across examples 62 | // In this tutorial, I am using the DeepSeek-R1-0528 685B model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class. 
63 | // If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain.js's ChatOpenAI class, and then change the values of the parameters "model", "api_key" and "base_url" below according to which model and platform you have chosen. 64 | const createModel = () => { 65 | return new ChatOpenAI({ 66 | modelName: "deepseek/deepseek-r1-0528", 67 | apiKey: process.env.OPENROUTER_API_KEY, 68 | configuration: { 69 | baseURL: "https://openrouter.ai/api/v1" 70 | } 71 | }); 72 | }; 73 | 74 | // Example previous messages 75 | // Note: Based on current best practices in chatbot design, we do not include system message in previous_messages as it's handled separately further down the script 76 | const previous_messages = [ 77 | // { role: "system", content: "You are a helpful AI assistant." }, // Commented out as we do not include system message 78 | { role: "user", content: "What is the capital of Australia?" }, 79 | { role: "assistant", content: "The capital of Australia is Canberra." } 80 | ]; 81 | 82 | // Getting Model Response 83 | // For ease of use, I have designed the taot-ts package to mimic LangChain.js's and LangGraph.js's "createReactAgent" method with tool calling. 84 | // First, the systemMessage variable below can start with any customized system message as per usual, for eg. "You are a helpful assistant. ", "You are an expert programmer in Python. ", "You are a world class expert in SEO optimization. " etc. 85 | // Then, the systemMessage variable below needs to STRICTLY include the following: "You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool." 
86 | // For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the systemMessage variable below would need to look like "You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool." 87 | // For the 'text_analyzer' tool, since the function for the 'text_analyzer' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the systemMessage variable below would need to look like "You are an expert in linguistics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool." 88 | // Below are five examples of different combinations of user questions and tools used: 89 | 90 | // Example for calculator tool only 91 | async function runCalculatorExample() { 92 | try { 93 | // Initialize model 94 | const model = createModel(); 95 | 96 | // Create system message 97 | const systemMessage = "You are a math expert. You are an assistant with access to specific tools. " + 98 | "When the user's question requires a calculation, use the 'calculator' tool. 
" + 99 | "For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool."; 100 | 101 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 102 | 103 | // Prepare all messages 104 | const allMessages = [ 105 | { role: "system", content: systemMessageTaot } 106 | ]; 107 | 108 | // Add previous messages 109 | allMessages.push(...previous_messages); 110 | 111 | // Add current user query 112 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 113 | 114 | // Create agent and invoke 115 | const agent = createReactAgentTaot(model, [calculatorTool]); 116 | const response = await agent.invoke({ 117 | messages: allMessages 118 | }); 119 | 120 | // Print result 121 | console.log(response.messages[0].content); 122 | } catch (error) { 123 | console.error("Error occurred:", error); 124 | } 125 | } 126 | 127 | // Example for text analyzer tool only 128 | async function runTextAnalyzerExample() { 129 | try { 130 | // Initialize model 131 | const model = createModel(); 132 | 133 | // Create system message 134 | const systemMessage = "You are an expert in linguistics. You are an assistant with access to specific tools. " + 135 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
" + 136 | "For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and " + 137 | "either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool."; 138 | 139 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 140 | 141 | // Prepare all messages 142 | const allMessages = [ 143 | { role: "system", content: systemMessageTaot } 144 | ]; 145 | 146 | // Add previous messages 147 | allMessages.push(...previous_messages); 148 | 149 | // Add current user query 150 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 151 | 152 | // Create agent and invoke 153 | const agent = createReactAgentTaot(model, [textAnalyzerTool]); 154 | const response = await agent.invoke({ 155 | messages: allMessages 156 | }); 157 | 158 | // Print result 159 | console.log(response.messages[0].content); 160 | } catch (error) { 161 | console.error("Error occurred:", error); 162 | } 163 | } 164 | 165 | // Example for both tools with user question requiring math calculation 166 | async function runBothToolsMathExample() { 167 | try { 168 | // Initialize model 169 | const model = createModel(); 170 | 171 | // Create system message 172 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 173 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 174 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 175 | 176 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 177 | 178 | // Prepare all messages 179 | const allMessages = [ 180 | { role: "system", content: systemMessageTaot } 181 | ]; 182 | 183 | // Add previous messages 184 | allMessages.push(...previous_messages); 185 | 186 | // Add current user query 187 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 188 | 189 | // Create agent and invoke 190 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 191 | const response = await agent.invoke({ 192 | messages: allMessages 193 | }); 194 | 195 | // Print result 196 | console.log(response.messages[0].content); 197 | } catch (error) { 198 | console.error("Error occurred:", error); 199 | } 200 | } 201 | 202 | // Example for both tools with user question requiring text analysis 203 | async function runBothToolsTextExample() { 204 | try { 205 | // Initialize model 206 | const model = createModel(); 207 | 208 | // Create system message 209 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 210 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 211 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 212 | 213 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 214 | 215 | // Prepare all messages 216 | const allMessages = [ 217 | { role: "system", content: systemMessageTaot } 218 | ]; 219 | 220 | // Add previous messages 221 | allMessages.push(...previous_messages); 222 | 223 | // Add current user query 224 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 225 | 226 | // Create agent and invoke 227 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 228 | const response = await agent.invoke({ 229 | messages: allMessages 230 | }); 231 | 232 | // Print result 233 | console.log(response.messages[0].content); 234 | } catch (error) { 235 | console.error("Error occurred:", error); 236 | } 237 | } 238 | 239 | // Example for both tools with user question not requiring any tools 240 | async function runBothToolsNoToolsExample() { 241 | try { 242 | // Initialize model 243 | const model = createModel(); 244 | 245 | // Create system message 246 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 247 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 248 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 249 | 250 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 251 | 252 | // Prepare all messages 253 | const allMessages = [ 254 | { role: "system", content: systemMessageTaot } 255 | ]; 256 | 257 | // Add previous messages 258 | allMessages.push(...previous_messages); 259 | 260 | // Add current user query 261 | allMessages.push({ role: "user", content: "How many languages are there in the world?" }); 262 | 263 | // Create agent and invoke 264 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 265 | const response = await agent.invoke({ 266 | messages: allMessages 267 | }); 268 | 269 | // Print result 270 | console.log(response.messages[0].content); 271 | } catch (error) { 272 | console.error("Error occurred:", error); 273 | } 274 | } 275 | 276 | // Run all examples 277 | async function runAllExamples() { 278 | try { 279 | await runCalculatorExample(); 280 | await runTextAnalyzerExample(); 281 | await runBothToolsMathExample(); 282 | await runBothToolsTextExample(); 283 | await runBothToolsNoToolsExample(); 284 | } catch (error) { 285 | console.error("Error running examples:", error); 286 | } 287 | } 288 | 289 | // Run all examples 290 | runAllExamples(); 291 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI_Qwen3/taotTutorialChatOpenAI_Qwen3.js: -------------------------------------------------------------------------------- 1 | // Load environment variable (ie. 
API key) from the .env file 2 | require('dotenv').config(); 3 | 4 | // Import from the taot-ts package and ChatOpenAI package 5 | const { createSystemMessageTaot, createReactAgentTaot } = require('taot-ts'); 6 | const { ChatOpenAI } = require('@langchain/openai'); 7 | 8 | // Define calculator tool 9 | const calculatorTool = { 10 | name: 'calculator', 11 | invoke: async (args) => { 12 | try { 13 | const expression = args.expression.trim(); 14 | if (!expression) { 15 | return "Error: Empty expression"; 16 | } 17 | 18 | const allowedChars = "0123456789+-*/() ."; 19 | for (const char of expression) { 20 | if (!allowedChars.includes(char)) { 21 | return "Error: Invalid characters in expression"; 22 | } 23 | } 24 | 25 | // Use Function instead of eval for better safety 26 | const result = new Function('return ' + expression)(); 27 | return String(result); 28 | } catch (e) { 29 | return `Error: ${e.message}`; 30 | } 31 | } 32 | }; 33 | 34 | // Define text analyzer tool 35 | const textAnalyzerTool = { 36 | name: 'text_analyzer', 37 | invoke: async (args) => { 38 | try { 39 | const text = args.text.trim(); 40 | const analysisType = args.analysis_type; 41 | 42 | if (!text) { 43 | return "Error: Empty text"; 44 | } 45 | 46 | if (analysisType.toLowerCase() === 'words') { 47 | const wordCount = text.split(/\s+/).filter(Boolean).length; 48 | return `${wordCount}`; 49 | } else if (analysisType.toLowerCase() === 'chars') { 50 | const charCount = text.length; 51 | return `${charCount}`; 52 | } else { 53 | return "Error: analysis_type must be either 'words' or 'chars'"; 54 | } 55 | } catch (e) { 56 | return `Error: ${e.message}`; 57 | } 58 | } 59 | }; 60 | 61 | // Initialize model - reused across examples 62 | // In this tutorial, I am using the DeepSeek-R1 model hosted on the platform OpenRouter. This model hosted on OpenRouter is available on Langchain's ChatOpenAI class. 
63 | // If you want to use another model, you will need to check if your model (hosted on whichever platform you have chosen, for eg. Azure, Together AI or DeepSeek's own platform etc.) is first available on Langchain.js's ChatOpenAI class, and then change the values of the parameters "model", "api_key" and "base_url" below according to which model and platform you have chosen. 64 | const createModel = () => { 65 | return new ChatOpenAI({ 66 | modelName: "qwen/qwen3-235b-a22b", 67 | // modelName: "qwen/qwen3-30b-a3b", 68 | // modelName: "qwen/qwen3-32b", 69 | // modelName: "qwen/qwen3-14b", 70 | // modelName: "qwen/qwen3-8b", 71 | // modelName: "qwen/qwen3-4b:free", 72 | // modelName: "qwen/qwen3-1.7b:free", 73 | apiKey: process.env.OPENROUTER_API_KEY, 74 | configuration: { 75 | baseURL: "https://openrouter.ai/api/v1" 76 | } 77 | }); 78 | }; 79 | 80 | // Example previous messages 81 | // Note: Based on current best practices in chatbot design, we do not include system message in previous_messages as it's handled separately further down the script 82 | const previous_messages = [ 83 | // { role: "system", content: "You are a helpful AI assistant." }, // Commented out as we do not include system message 84 | { role: "user", content: "What is the capital of Australia?" }, 85 | { role: "assistant", content: "The capital of Australia is Canberra." } 86 | ]; 87 | 88 | // Getting Model Response 89 | // For ease of use, I have designed the taot-ts package to mimic LangChain.js's and LangGraph.js's "createReactAgent" method with tool calling. 90 | // First, the systemMessage variable below can start with any customized system message as per usual, for eg. "You are a helpful assistant. ", "You are an expert programmer in Python. ", "You are a world class expert in SEO optimization. " etc. 91 | // Then, the systemMessage variable below needs to STRICTLY include the following: "You are an assistant with access to specific tools. 
When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool." 92 | // For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the systemMessage variable below would need to look like "You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool." 93 | // For the 'text analyze' tool, since the function for the 'text analyze' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the systemMessage variable below would need to look like "You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool." 94 | // Below are five examples of different combinations of user questions and tools used: 95 | 96 | // Example for calculator tool only 97 | async function runCalculatorExample() { 98 | try { 99 | // Initialize model 100 | const model = createModel(); 101 | 102 | // Create system message 103 | const systemMessage = "You are a math expert. You are an assistant with access to specific tools. " + 104 | "When the user's question requires a calculation, use the 'calculator' tool. 
" + 105 | "For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool."; 106 | 107 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 108 | 109 | // Prepare all messages 110 | const allMessages = [ 111 | { role: "system", content: systemMessageTaot } 112 | ]; 113 | 114 | // Add previous messages 115 | allMessages.push(...previous_messages); 116 | 117 | // Add current user query 118 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 119 | 120 | // Create agent and invoke 121 | const agent = createReactAgentTaot(model, [calculatorTool]); 122 | const response = await agent.invoke({ 123 | messages: allMessages 124 | }); 125 | 126 | // Print result 127 | console.log(response.messages[0].content); 128 | } catch (error) { 129 | console.error("Error occurred:", error); 130 | } 131 | } 132 | 133 | // Example for text analyzer tool only 134 | async function runTextAnalyzerExample() { 135 | try { 136 | // Initialize model 137 | const model = createModel(); 138 | 139 | // Create system message 140 | const systemMessage = "You are an expert in linguistics. You are an assistant with access to specific tools. " + 141 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
" + 142 | "For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and " + 143 | "either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool."; 144 | 145 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 146 | 147 | // Prepare all messages 148 | const allMessages = [ 149 | { role: "system", content: systemMessageTaot } 150 | ]; 151 | 152 | // Add previous messages 153 | allMessages.push(...previous_messages); 154 | 155 | // Add current user query 156 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 157 | 158 | // Create agent and invoke 159 | const agent = createReactAgentTaot(model, [textAnalyzerTool]); 160 | const response = await agent.invoke({ 161 | messages: allMessages 162 | }); 163 | 164 | // Print result 165 | console.log(response.messages[0].content); 166 | } catch (error) { 167 | console.error("Error occurred:", error); 168 | } 169 | } 170 | 171 | // Example for both tools with user question requiring math calculation 172 | async function runBothToolsMathExample() { 173 | try { 174 | // Initialize model 175 | const model = createModel(); 176 | 177 | // Create system message 178 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 179 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 180 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 181 | 182 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 183 | 184 | // Prepare all messages 185 | const allMessages = [ 186 | { role: "system", content: systemMessageTaot } 187 | ]; 188 | 189 | // Add previous messages 190 | allMessages.push(...previous_messages); 191 | 192 | // Add current user query 193 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 194 | 195 | // Create agent and invoke 196 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 197 | const response = await agent.invoke({ 198 | messages: allMessages 199 | }); 200 | 201 | // Print result 202 | console.log(response.messages[0].content); 203 | } catch (error) { 204 | console.error("Error occurred:", error); 205 | } 206 | } 207 | 208 | // Example for both tools with user question requiring text analysis 209 | async function runBothToolsTextExample() { 210 | try { 211 | // Initialize model 212 | const model = createModel(); 213 | 214 | // Create system message 215 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 216 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 217 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 218 | 219 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 220 | 221 | // Prepare all messages 222 | const allMessages = [ 223 | { role: "system", content: systemMessageTaot } 224 | ]; 225 | 226 | // Add previous messages 227 | allMessages.push(...previous_messages); 228 | 229 | // Add current user query 230 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 231 | 232 | // Create agent and invoke 233 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 234 | const response = await agent.invoke({ 235 | messages: allMessages 236 | }); 237 | 238 | // Print result 239 | console.log(response.messages[0].content); 240 | } catch (error) { 241 | console.error("Error occurred:", error); 242 | } 243 | } 244 | 245 | // Example for both tools with user question not requiring any tools 246 | async function runBothToolsNoToolsExample() { 247 | try { 248 | // Initialize model 249 | const model = createModel(); 250 | 251 | // Create system message 252 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 253 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 254 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 255 | 256 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 257 | 258 | // Prepare all messages 259 | const allMessages = [ 260 | { role: "system", content: systemMessageTaot } 261 | ]; 262 | 263 | // Add previous messages 264 | allMessages.push(...previous_messages); 265 | 266 | // Add current user query 267 | allMessages.push({ role: "user", content: "How many languages are there in the world?" }); 268 | 269 | // Create agent and invoke 270 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 271 | const response = await agent.invoke({ 272 | messages: allMessages 273 | }); 274 | 275 | // Print result 276 | console.log(response.messages[0].content); 277 | } catch (error) { 278 | console.error("Error occurred:", error); 279 | } 280 | } 281 | 282 | // Run all examples 283 | async function runAllExamples() { 284 | try { 285 | await runCalculatorExample(); 286 | await runTextAnalyzerExample(); 287 | await runBothToolsMathExample(); 288 | await runBothToolsTextExample(); 289 | await runBothToolsNoToolsExample(); 290 | } catch (error) { 291 | console.error("Error running examples:", error); 292 | } 293 | } 294 | 295 | // Run all examples 296 | runAllExamples(); 297 | -------------------------------------------------------------------------------- /tutorial/BaseChatModel/azure/taotTutorialBaseChatModelAzure.js: -------------------------------------------------------------------------------- 1 | // Load environment variable (ie. 
API key) from the .env file 2 | require('dotenv').config(); 3 | 4 | // Import required packages 5 | const { BaseChatModel } = require("@langchain/core/language_models/chat_models"); 6 | const fetch = (...args) => import('node-fetch').then(({default: fetch}) => fetch(...args)); 7 | const { AIMessage } = require("@langchain/core/messages"); 8 | const { createSystemMessageTaot, createReactAgentTaot } = require("taot-ts"); 9 | 10 | // Define calculator tool 11 | const calculatorTool = { 12 | name: 'calculator', 13 | invoke: async (args) => { 14 | try { 15 | const expression = args.expression.trim(); 16 | if (!expression) { 17 | return "Error: Empty expression"; 18 | } 19 | 20 | const allowedChars = "0123456789+-*/() ."; 21 | for (const char of expression) { 22 | if (!allowedChars.includes(char)) { 23 | return "Error: Invalid characters in expression"; 24 | } 25 | } 26 | 27 | // Use Function instead of eval for better safety 28 | const result = new Function('return ' + expression)(); 29 | return String(result); 30 | } catch (e) { 31 | return `Error: ${e.message}`; 32 | } 33 | } 34 | }; 35 | 36 | // Define text analyzer tool 37 | const textAnalyzerTool = { 38 | name: 'text_analyzer', 39 | invoke: async (args) => { 40 | try { 41 | const text = args.text.trim(); 42 | const analysisType = args.analysis_type; 43 | 44 | if (!text) { 45 | return "Error: Empty text"; 46 | } 47 | 48 | if (analysisType.toLowerCase() === 'words') { 49 | const wordCount = text.split(/\s+/).filter(Boolean).length; 50 | return `${wordCount}`; 51 | } else if (analysisType.toLowerCase() === 'chars') { 52 | const charCount = text.length; 53 | return `${charCount}`; 54 | } else { 55 | return "Error: analysis_type must be either 'words' or 'chars'"; 56 | } 57 | } catch (e) { 58 | return `Error: ${e.message}`; 59 | } 60 | } 61 | }; 62 | 63 | // Initialize model - reused across examples 64 | // Create a custom chat model that uses direct fetch from LangChain's BaseChatModel implementation 65 | // In this 
tutorial, I am using the DeepSeek-R1 671B model hosted on Azure. This model hosted on Azure is available on Langchain.js's BaseChatModel class 66 | // If you want to use another model, you will need to check if your model (hosted on Azure) is first available on Langchain.js's BaseChatModel class, and then change the values of the parameters "apiKey" and "baseUrl" below according to how you deployed your model on Azure AI Foundry 67 | // Note: If you want to change the default temperature and max output tokens of the model, you can do this from the "temperature" and "max_tokens" paramters below 68 | class CustomAzureAIModel extends BaseChatModel { 69 | constructor() { 70 | super({}); 71 | this.apiKey = process.env.AZURE_API_KEY; 72 | this.baseUrl = process.env.AZURE_ENDPOINT_BASE_URL; 73 | } 74 | 75 | _llmType() { 76 | return "custom-azure-ai"; 77 | } 78 | 79 | async _generate(messages, options = {}) { 80 | // Convert LangChain message format to Azure AI format 81 | const formattedMessages = messages.map(message => { 82 | if (message._getType() === "human") { 83 | return { role: "user", content: message.content }; 84 | } else if (message._getType() === "ai") { 85 | return { role: "assistant", content: message.content }; 86 | } else if (message._getType() === "system") { 87 | return { role: "system", content: message.content }; 88 | } else if (message._getType() === "chat") { 89 | return { role: message.role, content: message.content }; 90 | } 91 | // Default to user role if unknown 92 | return { role: "user", content: String(message.content) }; 93 | }); 94 | 95 | // Prepare request body 96 | const requestBody = { 97 | messages: formattedMessages, 98 | temperature: options.temperature || 0.7, // Set temperature to 0.7 99 | max_tokens: options.maxTokens || 1000, // Set max output tokens to 1000 100 | model: this.modelName 101 | }; 102 | 103 | try { 104 | // Make the API call 105 | const response = await fetch(`${this.baseUrl}/chat/completions`, { 106 | method: 'POST', 
107 | headers: { 108 | 'Authorization': `Bearer ${this.apiKey}`, 109 | 'Content-Type': 'application/json' 110 | }, 111 | body: JSON.stringify(requestBody) 112 | }); 113 | 114 | if (!response.ok) { 115 | const errorText = await response.text(); 116 | console.error(`API error (${response.status}): ${errorText}`); 117 | throw new Error(`API error: ${response.status} ${response.statusText}`); 118 | } 119 | 120 | const data = await response.json(); 121 | 122 | // Extract the response content 123 | const responseContent = data.choices[0].message.content; 124 | 125 | // Create a simplified generation object 126 | const generation = { 127 | message: new AIMessage(responseContent), 128 | text: responseContent 129 | }; 130 | 131 | return { 132 | generations: [generation] 133 | }; 134 | } catch (error) { 135 | console.error("Error in _generate:", error); 136 | throw error; 137 | } 138 | } 139 | } 140 | 141 | // Example previous messages 142 | // Note: Based on current best practices in chatbot design, we do not include system message in previous_messages as it's handled separately further down the script 143 | const previous_messages = [ 144 | // { role: "system", content: "You are a helpful AI assistant." }, // Commented out as we do not include system message 145 | { role: "user", content: "What is the capital of Australia?" }, 146 | { role: "assistant", content: "The capital of Australia is Canberra." } 147 | ]; 148 | 149 | // Getting Model Response 150 | // For ease of use, I have designed the taot-ts package to mimic LangChain.js's and LangGraph.js's "createReactAgent" method with tool calling 151 | // First, the systemMessage variable below can start with any customized system message as per usual, for eg. "You are a helpful assistant. ", "You are an expert programmer in Python. ", "You are a world class expert in SEO optimization. 
" etc 152 | // Then, the systemMessage variable below needs to STRICTLY include the following: "You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool." 153 | // For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the systemMessage variable below would need to look like "You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool." 154 | // For the 'text analyze' tool, since the function for the 'text analyze' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the systemMessage variable below would need to look like "You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool." 155 | // Below are five examples of different combinations of user questions and tools used: 156 | 157 | // Example for calculator tool only 158 | async function runCalculatorExample() { 159 | try { 160 | // Create our custom model 161 | const model = new CustomAzureAIModel(); 162 | 163 | // Create system message for TAOT 164 | const systemMessage = "You are a math expert. You are an assistant with access to specific tools. 
" + 165 | "When the user's question requires a calculation, use the 'calculator' tool. " + 166 | "For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool."; 167 | 168 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 169 | 170 | // Prepare all messages 171 | const allMessages = [ 172 | { role: "system", content: systemMessageTaot } 173 | ]; 174 | 175 | // Add previous messages 176 | allMessages.push(...previous_messages); 177 | 178 | // Add current user query 179 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 180 | 181 | // Create agent and invoke 182 | const agent = createReactAgentTaot(model, [calculatorTool]); 183 | const response = await agent.invoke({ 184 | messages: allMessages 185 | }); 186 | 187 | // Print result 188 | console.log(response.messages[0].content); 189 | } catch (error) { 190 | console.error("Error occurred:", error); 191 | } 192 | } 193 | 194 | // Example for text analyzer tool only 195 | async function runTextAnalyzerExample() { 196 | try { 197 | // Create our custom model 198 | const model = new CustomAzureAIModel(); 199 | 200 | // Create system message 201 | const systemMessage = "You are an expert in linguistics. You are an assistant with access to specific tools. " + 202 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
" + 203 | "For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and " + 204 | "either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool."; 205 | 206 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 207 | 208 | // Prepare all messages 209 | const allMessages = [ 210 | { role: "system", content: systemMessageTaot } 211 | ]; 212 | 213 | // Add previous messages 214 | allMessages.push(...previous_messages); 215 | 216 | // Add current user query 217 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 218 | 219 | // Create agent and invoke 220 | const agent = createReactAgentTaot(model, [textAnalyzerTool]); 221 | const response = await agent.invoke({ 222 | messages: allMessages 223 | }); 224 | 225 | // Print result 226 | console.log(response.messages[0].content); 227 | } catch (error) { 228 | console.error("Error occurred:", error); 229 | } 230 | } 231 | 232 | // Example for both tools with user question requiring math calculation 233 | async function runBothToolsMathExample() { 234 | try { 235 | // Create our custom model 236 | const model = new CustomAzureAIModel(); 237 | 238 | // Create system message 239 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 240 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 241 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 242 | 243 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 244 | 245 | // Prepare all messages 246 | const allMessages = [ 247 | { role: "system", content: systemMessageTaot } 248 | ]; 249 | 250 | // Add previous messages 251 | allMessages.push(...previous_messages); 252 | 253 | // Add current user query 254 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 255 | 256 | // Create agent and invoke 257 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 258 | const response = await agent.invoke({ 259 | messages: allMessages 260 | }); 261 | 262 | // Print result 263 | console.log(response.messages[0].content); 264 | } catch (error) { 265 | console.error("Error occurred:", error); 266 | } 267 | } 268 | 269 | // Example for both tools with user question requiring text analysis 270 | async function runBothToolsTextExample() { 271 | try { 272 | // Create our custom model 273 | const model = new CustomAzureAIModel(); 274 | 275 | // Create system message 276 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 277 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 278 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 279 | 280 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 281 | 282 | // Prepare all messages 283 | const allMessages = [ 284 | { role: "system", content: systemMessageTaot } 285 | ]; 286 | 287 | // Add previous messages 288 | allMessages.push(...previous_messages); 289 | 290 | // Add current user query 291 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 292 | 293 | // Create agent and invoke 294 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 295 | const response = await agent.invoke({ 296 | messages: allMessages 297 | }); 298 | 299 | // Print result 300 | console.log(response.messages[0].content); 301 | } catch (error) { 302 | console.error("Error occurred:", error); 303 | } 304 | } 305 | 306 | // Example for both tools with user question not requiring any tools 307 | async function runBothToolsNoToolsExample() { 308 | try { 309 | // Create our custom model 310 | const model = new CustomAzureAIModel(); 311 | 312 | // Create system message 313 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 314 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 315 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 316 | 317 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 318 | 319 | // Prepare all messages 320 | const allMessages = [ 321 | { role: "system", content: systemMessageTaot } 322 | ]; 323 | 324 | // Add previous messages 325 | allMessages.push(...previous_messages); 326 | 327 | // Add current user query 328 | allMessages.push({ role: "user", content: "How many languages are there in the world?" }); 329 | 330 | // Create agent and invoke 331 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 332 | const response = await agent.invoke({ 333 | messages: allMessages 334 | }); 335 | 336 | // Print result 337 | console.log(response.messages[0].content); 338 | } catch (error) { 339 | console.error("Error occurred:", error); 340 | } 341 | } 342 | 343 | // Run all examples 344 | async function runAllExamples() { 345 | try { 346 | await runCalculatorExample(); 347 | await runTextAnalyzerExample(); 348 | await runBothToolsMathExample(); 349 | await runBothToolsTextExample(); 350 | await runBothToolsNoToolsExample(); 351 | } catch (error) { 352 | console.error("Error running examples:", error); 353 | } 354 | } 355 | 356 | // Run all examples 357 | runAllExamples(); 358 | -------------------------------------------------------------------------------- /tutorial/BaseChatModel/azureNoThink/taotTutorialBaseChatModelAzureNoThink.js: -------------------------------------------------------------------------------- 1 | // Load environment variable (ie. 
API key) from the .env file 2 | require('dotenv').config(); 3 | 4 | // Import required packages 5 | const { BaseChatModel } = require("@langchain/core/language_models/chat_models"); 6 | const fetch = (...args) => import('node-fetch').then(({default: fetch}) => fetch(...args)); 7 | const { AIMessage } = require("@langchain/core/messages"); 8 | const { createSystemMessageTaot, createReactAgentTaot } = require("taot-ts"); 9 | 10 | // Define calculator tool 11 | const calculatorTool = { 12 | name: 'calculator', 13 | invoke: async (args) => { 14 | try { 15 | const expression = args.expression.trim(); 16 | if (!expression) { 17 | return "Error: Empty expression"; 18 | } 19 | 20 | const allowedChars = "0123456789+-*/() ."; 21 | for (const char of expression) { 22 | if (!allowedChars.includes(char)) { 23 | return "Error: Invalid characters in expression"; 24 | } 25 | } 26 | 27 | // Use Function instead of eval for better safety 28 | const result = new Function('return ' + expression)(); 29 | return String(result); 30 | } catch (e) { 31 | return `Error: ${e.message}`; 32 | } 33 | } 34 | }; 35 | 36 | // Define text analyzer tool 37 | const textAnalyzerTool = { 38 | name: 'text_analyzer', 39 | invoke: async (args) => { 40 | try { 41 | const text = args.text.trim(); 42 | const analysisType = args.analysis_type; 43 | 44 | if (!text) { 45 | return "Error: Empty text"; 46 | } 47 | 48 | if (analysisType.toLowerCase() === 'words') { 49 | const wordCount = text.split(/\s+/).filter(Boolean).length; 50 | return `${wordCount}`; 51 | } else if (analysisType.toLowerCase() === 'chars') { 52 | const charCount = text.length; 53 | return `${charCount}`; 54 | } else { 55 | return "Error: analysis_type must be either 'words' or 'chars'"; 56 | } 57 | } catch (e) { 58 | return `Error: ${e.message}`; 59 | } 60 | } 61 | }; 62 | 63 | // Initialize model - reused across examples 64 | // Create a custom chat model that uses direct fetch from LangChain's BaseChatModel implementation 65 | // In this 
tutorial, I am using the DeepSeek-R1 671B model hosted on Azure. This model hosted on Azure is available on Langchain.js's BaseChatModel class 66 | // If you want to use another model, you will need to check if your model (hosted on Azure) is first available on Langchain.js's BaseChatModel class, and then change the values of the parameters "apiKey" and "baseUrl" below according to how you deployed your model on Azure AI Foundry 67 | // Note: If you want to change the default temperature and max output tokens of the model, you can do this from the "temperature" and "max_tokens" paramters below 68 | class CustomAzureAIModel extends BaseChatModel { 69 | constructor() { 70 | super({}); 71 | this.apiKey = process.env.AZURE_API_KEY; 72 | this.baseUrl = process.env.AZURE_ENDPOINT_BASE_URL; 73 | } 74 | 75 | _llmType() { 76 | return "custom-azure-ai"; 77 | } 78 | 79 | async _generate(messages, options = {}) { 80 | // Convert LangChain message format to Azure AI format 81 | const formattedMessages = messages.map(message => { 82 | if (message._getType() === "human") { 83 | return { role: "user", content: message.content }; 84 | } else if (message._getType() === "ai") { 85 | return { role: "assistant", content: message.content }; 86 | } else if (message._getType() === "system") { 87 | return { role: "system", content: message.content }; 88 | } else if (message._getType() === "chat") { 89 | return { role: message.role, content: message.content }; 90 | } 91 | // Default to user role if unknown 92 | return { role: "user", content: String(message.content) }; 93 | }); 94 | 95 | // Prepare request body 96 | const requestBody = { 97 | messages: formattedMessages, 98 | temperature: options.temperature || 0.7, // Set temperature to 0.7 99 | max_tokens: options.maxTokens || 1000, // Set max output tokens to 1000 100 | model: this.modelName 101 | }; 102 | 103 | try { 104 | // Make the API call 105 | const response = await fetch(`${this.baseUrl}/chat/completions`, { 106 | method: 'POST', 
107 | headers: { 108 | 'Authorization': `Bearer ${this.apiKey}`, 109 | 'Content-Type': 'application/json' 110 | }, 111 | body: JSON.stringify(requestBody) 112 | }); 113 | 114 | if (!response.ok) { 115 | const errorText = await response.text(); 116 | console.error(`API error (${response.status}): ${errorText}`); 117 | throw new Error(`API error: ${response.status} ${response.statusText}`); 118 | } 119 | 120 | const data = await response.json(); 121 | 122 | // Extract the response content 123 | const responseContent = data.choices[0].message.content; 124 | 125 | // Create a simplified generation object 126 | const generation = { 127 | message: new AIMessage(responseContent), 128 | text: responseContent 129 | }; 130 | 131 | return { 132 | generations: [generation] 133 | }; 134 | } catch (error) { 135 | console.error("Error in _generate:", error); 136 | throw error; 137 | } 138 | } 139 | } 140 | 141 | // Example previous messages 142 | // Note: Based on current best practices in chatbot design, we do not include system message in previous_messages as it's handled separately further down the script 143 | const previous_messages = [ 144 | // { role: "system", content: "You are a helpful AI assistant." }, // Commented out as we do not include system message 145 | { role: "user", content: "What is the capital of Australia?" }, 146 | { role: "assistant", content: "The capital of Australia is Canberra." } 147 | ]; 148 | 149 | // Getting Model Response 150 | // For ease of use, I have designed the taot-ts package to mimic LangChain.js's and LangGraph.js's "createReactAgent" method with tool calling 151 | // First, the systemMessage variable below can start with any customized system message as per usual, for eg. "You are a helpful assistant. ", "You are an expert programmer in Python. ", "You are a world class expert in SEO optimization. 
" etc 152 | // Then, the systemMessage variable below needs to STRICTLY include the following: "You are an assistant with access to specific tools. When the user's question requires a {tool use}, use the {'corresponding'} tool. For the {'corresponding'} tool, provide the {user message} as a string into the {'user message'} argument in the tool or any {'predefined values'} as a string for other arguments in the tool." 153 | // For eg. for the 'calculator' tool, since the function for the 'calculator' tool above has one argument called 'expression', the systemMessage variable below would need to look like "You are a math expert. You are an assistant with access to specific tools. When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool." 154 | // For the 'text analyze' tool, since the function for the 'text analyze' tool above has two arguments 'text' and 'analysis_type' (where the 'analysis_type' argument has two predefined values 'words' and 'chars'), the systemMessage variable below would need to look like "You are an expert in linguitics. You are an assistant with access to specific tools. When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool." 155 | // Below are five examples of different combinations of user questions and tools used: 156 | 157 | // Example for calculator tool only 158 | async function runCalculatorExample() { 159 | try { 160 | // Create our custom model 161 | const model = new CustomAzureAIModel(); 162 | 163 | // Create system message for TAOT 164 | const systemMessage = "You are a math expert. You are an assistant with access to specific tools. 
" + 165 | "When the user's question requires a calculation, use the 'calculator' tool. " + 166 | "For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool."; 167 | 168 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 169 | 170 | // Prepare all messages 171 | const allMessages = [ 172 | { role: "system", content: systemMessageTaot } 173 | ]; 174 | 175 | // Add previous messages 176 | allMessages.push(...previous_messages); 177 | 178 | // Add current user query 179 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 180 | 181 | // Create agent and invoke 182 | const agent = createReactAgentTaot(model, [calculatorTool]); 183 | const response = await agent.invoke({ 184 | messages: allMessages 185 | }); 186 | 187 | // Print only the final result part without the think block 188 | const content = response.messages[0].content; 189 | const resultNoThink = content.split('')[1].trim(); 190 | console.log(resultNoThink); 191 | } catch (error) { 192 | console.error("Error occurred:", error); 193 | } 194 | } 195 | 196 | // Example for text analyzer tool only 197 | async function runTextAnalyzerExample() { 198 | try { 199 | // Create our custom model 200 | const model = new CustomAzureAIModel(); 201 | 202 | // Create system message 203 | const systemMessage = "You are an expert in linguistics. You are an assistant with access to specific tools. " + 204 | "When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
" + 205 | "For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and " + 206 | "either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool."; 207 | 208 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 209 | 210 | // Prepare all messages 211 | const allMessages = [ 212 | { role: "system", content: systemMessageTaot } 213 | ]; 214 | 215 | // Add previous messages 216 | allMessages.push(...previous_messages); 217 | 218 | // Add current user query 219 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 220 | 221 | // Create agent and invoke 222 | const agent = createReactAgentTaot(model, [textAnalyzerTool]); 223 | const response = await agent.invoke({ 224 | messages: allMessages 225 | }); 226 | 227 | // Print only the final result part without the think block 228 | const content = response.messages[0].content; 229 | const resultNoThink = content.split('')[1].trim(); 230 | console.log(resultNoThink); 231 | } catch (error) { 232 | console.error("Error occurred:", error); 233 | } 234 | } 235 | 236 | // Example for both tools with user question requiring math calculation 237 | async function runBothToolsMathExample() { 238 | try { 239 | // Create our custom model 240 | const model = new CustomAzureAIModel(); 241 | 242 | // Create system message 243 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 244 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 245 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 246 | 247 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 248 | 249 | // Prepare all messages 250 | const allMessages = [ 251 | { role: "system", content: systemMessageTaot } 252 | ]; 253 | 254 | // Add previous messages 255 | allMessages.push(...previous_messages); 256 | 257 | // Add current user query 258 | allMessages.push({ role: "user", content: "What is 123 * 456?" }); 259 | 260 | // Create agent and invoke 261 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 262 | const response = await agent.invoke({ 263 | messages: allMessages 264 | }); 265 | 266 | // Print only the final result part without the think block 267 | const content = response.messages[0].content; 268 | const resultNoThink = content.split('</think>')[1].trim(); 269 | console.log(resultNoThink); 270 | } catch (error) { 271 | console.error("Error occurred:", error); 272 | } 273 | } 274 | 275 | // Example for both tools with user question requiring text analysis 276 | async function runBothToolsTextExample() { 277 | try { 278 | // Create our custom model 279 | const model = new CustomAzureAIModel(); 280 | 281 | // Create system message 282 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 283 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 284 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 285 | 286 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 287 | 288 | // Prepare all messages 289 | const allMessages = [ 290 | { role: "system", content: systemMessageTaot } 291 | ]; 292 | 293 | // Add previous messages 294 | allMessages.push(...previous_messages); 295 | 296 | // Add current user query 297 | allMessages.push({ role: "user", content: "How many words are in this sentence?: I built my 1st Hello World program" }); 298 | 299 | // Create agent and invoke 300 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 301 | const response = await agent.invoke({ 302 | messages: allMessages 303 | }); 304 | 305 | // Print only the final result part without the think block 306 | const content = response.messages[0].content; 307 | const resultNoThink = content.split('</think>')[1].trim(); 308 | console.log(resultNoThink); 309 | } catch (error) { 310 | console.error("Error occurred:", error); 311 | } 312 | } 313 | 314 | // Example for both tools with user question not requiring any tools 315 | async function runBothToolsNoToolsExample() { 316 | try { 317 | // Create our custom model 318 | const model = new CustomAzureAIModel(); 319 | 320 | // Create system message 321 | const systemMessage = `You are an expert in math and linguistics. You are an assistant with access to specific tools. 322 | When the user's question requires a calculation, use the 'calculator' tool. For the 'calculator' tool, provide the user provided math expression as a string into the 'expression' argument in the tool. 323 | When the user's question requires analysis of the text provided by the user, use the 'text_analyzer' tool. 
For the 'text_analyzer' tool, provide the user provided text as a string into the 'text' argument in the tool and either 'words' or 'chars' as a string into the 'analysis_type' argument in the tool.`; 324 | 325 | const systemMessageTaot = createSystemMessageTaot(systemMessage); 326 | 327 | // Prepare all messages 328 | const allMessages = [ 329 | { role: "system", content: systemMessageTaot } 330 | ]; 331 | 332 | // Add previous messages 333 | allMessages.push(...previous_messages); 334 | 335 | // Add current user query 336 | allMessages.push({ role: "user", content: "How many languages are there in the world?" }); 337 | 338 | // Create agent and invoke 339 | const agent = createReactAgentTaot(model, [calculatorTool, textAnalyzerTool]); 340 | const response = await agent.invoke({ 341 | messages: allMessages 342 | }); 343 | 344 | // Print only the final result part without the think block 345 | const content = response.messages[0].content; 346 | const resultNoThink = content.split('</think>')[1].trim(); 347 | console.log(resultNoThink); 348 | } catch (error) { 349 | console.error("Error occurred:", error); 350 | } 351 | } 352 | 353 | // Run all examples 354 | async function runAllExamples() { 355 | try { 356 | await runCalculatorExample(); 357 | await runTextAnalyzerExample(); 358 | await runBothToolsMathExample(); 359 | await runBothToolsTextExample(); 360 | await runBothToolsNoToolsExample(); 361 | } catch (error) { 362 | console.error("Error running examples:", error); 363 | } 364 | } 365 | 366 | // Run all examples 367 | runAllExamples(); 368 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tool-Ahead-of-Time-TypeScript (TAoT-ts): Because Why Wait? 🕒 2 | Ever found yourself staring at a shiny new LLM through LangChain.js's window, but can't use tool calling because it's "not supported yet"? 
3 | 4 | *Sad agent noises* 😢 5 | 6 | Well, hold my JSON parser, because this repo says "NOT TODAY!" 🦾 7 | 8 | ## What is this sorcery? 🧙‍♂️ 9 | 10 | This is a TypeScript package (a package mirrowing the equivalent Python package: https://github.com/leockl/tool-ahead-of-time) that enables tool calling for any model available through LangChain.js's ChatOpenAI class (and by extension, any model available through OpenAI's class), and any model available through LangChain.js's BaseChatModel class, even before LangChain.js and LangGraph.js officially supports it! 11 | 12 | Yes, you read that right. We're living in the age of AI and things move fast 🏎️💨 13 | 14 | It essentially works by reformatting the output response of the model into a JSON parser and passing this on to the relevant tools. 15 | 16 | This repo showcases an example with DeepSeek-R1 671B, which isn't currently supported with tool calling by LangChain.js and LangGraph.js (as of 16th Feb 2025). 17 | 18 | ## Features 🌟 19 | 20 | - Tool calling support for OpenAI and non-OpenAI models available on: 21 | - LangChain.js's ChatOpenAI class (and by extension, OpenAI and non-OpenAI models available on the base OpenAI's class). 22 | - LangChain.js's BaseChatModel class. 23 | - This package follows a similar method to LangChain.js's and LangGraph.js's `createReactAgent` method for tool calling, so makes it easy for you to read the syntax. 😊 24 | - Zero waiting for official support required. 25 | - More robust than a caffeinated developer at 3 AM. ☕ 26 | 27 | ## Quick Start 🚀 28 | 29 | I will show below how to run the tutorials for LangChain.js's ChatOpenAI class (using DeepSeek-R1 671B on OpenRouter), LangChain.js's BaseChatModel class (using DeepSeek-R1 671B on Microsoft Azure) and LangChain.js's ChatOpenAI class (using QwQ-32B on OpenRouter) in this repo which uses the `taot-ts` package: 30 | 31 | ### 1. 
LangChain.js's ChatOpenAI class (using DeepSeek-R1 671B on OpenRouter) 32 | 33 | First, create an empty "tutorial" folder and an empty "ChatOpenAI" sub-folder in your local device. Then copy the "taotTutorialChatOpenAI.js" and ".env" files (under the "tutorial" folder and "ChatOpenAI" sub-folder in this repo) into your empty "ChatOpenAI" sub-folder in your local device. Note you will need to enter your own API key into the ".env" file. 34 | 35 | Then run the following lines of code: 36 | 37 | ```bash 38 | # Navigate to the "ChatOpenAI" sub-directory in your local device 39 | cd tutorial\ChatOpenAI 40 | 41 | # Initialize as npm project 42 | npm init -y 43 | 44 | # Npm install the "taot-ts" package 45 | npm install taot-ts 46 | 47 | # Npm install dependencies required in the "taotTutorialChatOpenAI.js" file 48 | npm install dotenv @langchain/openai 49 | 50 | # Run the "taotTutorialChatOpenAI.js" file 51 | node taotTutorialChatOpenAI.js 52 | ``` 53 | 54 | After running the "taotTutorialChatOpenAI.js" file, you should see a similar results to the below: 55 | 56 | ```bash 57 | 123 multiplied by 456 equals 56,088. 58 | There are 7 words in the sentence "I built my 1st Hello World program." 59 | The product of 123 multiplied by 456 is **56,088**. 60 | There are 7 words in the sentence: *I*, *built*, *my*, *1st*, *Hello*, *World*, and *program*. 61 | The exact number of languages spoken globally is estimated to be around 7,000, though this number can vary due to factors like dialect continuums and language endangerment. Ethnologue (2023) currently documents 7,168 living languages. 62 | ``` 63 | 64 | ### 2. LangChain.js's BaseChatModel class (using DeepSeek-R1 671B on Microsoft Azure) 65 | 66 | First, create an empty "tutorial" folder, an empty "BaseChatModel" sub-folder and an empty "azure" sub-sub-folder in your local device. 
Then copy the "taotTutorialBaseChatModelAzure.js" and ".env" files (under the "tutorial" folder -> "BaseChatModel" sub-folder -> "azure" sub-sub-folder in this repo) into your empty "azure" sub-sub-folder in your local device. Note you will need to enter your own API key and endpoint into the ".env" file. 67 | 68 | **Tip**: To setup Azure (ie. Azure AI Foundry) just ask any AI with internet access (so that you get the latest up to date steps) the following question: "You are an expert in Microsoft Azure. Can you tell me the latest step-by-step guide on how to setup an Azure AI Foundry account and deploy a model in Azure AI Foundry within the Azure AI Foundry platform.". Once you have your model deployed, you can obtain the value for the parameters API key (credential) and endpoint from the "Models + endpoints" tab in Azure AI Foundry as shown in the screenshot below: 69 | 70 | ![deepseek-r1_azure](https://github.com/user-attachments/assets/b9ec1a0d-d509-48e8-8591-4d9af29f392c) 71 | 72 | Then run the following lines of code: 73 | 74 | ```bash 75 | # Navigate to the "azure" sub-sub-directory in your local device 76 | cd tutorial\BaseChatModel\azure 77 | 78 | # Initialize as npm project 79 | npm init -y 80 | 81 | # Npm install the "taot-ts" package 82 | npm install taot-ts 83 | 84 | # Npm install dependencies required in the "taotTutorialBaseChatModelAzure.js" file 85 | npm install dotenv node-fetch@2 @langchain/core 86 | 87 | # Run the "taotTutorialBaseChatModelAzure.js" file 88 | node taotTutorialBaseChatModelAzure.js 89 | ``` 90 | 91 | After running the "taotTutorialBaseChatModelAzure.js" file, you should see a similar results to the below: 92 | 93 | ```bash 94 | 95 | Okay, the user asked for the product of 123 and 456. I need to provide a clear answer using the calculator result, which is 56088. The instructions say not to mention the tool used, so I shouldn't refer to a calculator. Just state the result directly. Let me phrase it concisely. 
Maybe start with "The product of 123 multiplied by 456 is 56,088." That's straightforward and includes the commas for readability. Check if the number is correct: 123*456. Let me do a quick verification. 100*456=45,600, 20*456=9,120, 3*456=1,368. Adding those: 45,600 + 9,120 = 54,720 + 1,368 = 56,088. Yep, that's right. So the answer is correct. Alright, just present the result as the answer without any extra details. That should do it. 96 | 97 | 98 | The product of 123 multiplied by 456 is **56,088**. 99 | 100 | Okay, the user asked, "How many words are in this sentence?: I built my 1st Hello World program". The tool result says 7. Let me check that. 101 | 102 | First, the sentence is "I built my 1st Hello World program". Let's count the words. "I" is 1, "built" 2, "my" 3, "1st" 4, "Hello" 5, "World" 6, "program" 7. So yeah, that's seven words. The number "1st" counts as a single word even though it's a numeral and a suffix. Similarly, "Hello World" is two separate words. The user might wonder if contractions or numbers affect the count, but standard practice is to count each sequence of characters between spaces as a word. So the answer should be straightforward: "There are 7 words in that sentence." 103 | 104 | 105 | There are 7 words in the sentence: "I built my 1st Hello World program." 106 | 107 | Okay, the user asked for the product of 123 and 456. The calculator gave 56,088. I need to answer naturally without mentioning the tool. Let me phrase it simply. Start with the result, maybe state the numbers multiplied equal that. Keep it short and direct. No extra words. Check the numbers again to be sure. Yeah, 123 times 456 is indeed 56,088. Alright, that should do it. 108 | 109 | 110 | The product of 123 multiplied by 456 is **56,088**. 111 | 112 | Okay, the user is asking how many words are in the sentence they provided. The sentence is "I built my 1st Hello World program". The tool result says 7 words. Let me check that. 
113 | 114 | First, I need to split the sentence into words. Words are usually separated by spaces. Let's break it down: 115 | 116 | 1. I 117 | 2. built 118 | 3. my 119 | 4. 1st 120 | 5. Hello 121 | 6. World 122 | 7. program 123 | 124 | That's seven words. Wait, sometimes people might confuse numbers with hyphens or other characters, but "1st" is a single word. "Hello World" here is two separate words because of the space. So yes, the tool is correct. 125 | 126 | The user wants a concise and direct response without mentioning the tool. So the answer should just state the number. Maybe like, "There are 7 words in the sentence." That's straightforward and uses the result without extra details. 127 | 128 | 129 | There are 7 words in the sentence. 130 | 131 | Okay, the user is asking how many languages there are in the world. Let me think. The question is about counting languages. I remember that there's a tool called 'text_analyzer' which can analyze text. But wait, the user didn't provide any text to analyze. The tools I have are calculator and text_analyzer. The calculator is for math expressions, and text_analyzer is for analyzing provided text, counting words or characters. 132 | 133 | Since the user's question isn't about analyzing a given text or performing a calculation, but rather a factual question about the number of languages globally, there's no tool available here. The text_analyzer requires the user to provide text, which they didn't. So the correct response is to answer directly. But the user's previous instruction said to only use tools when applicable and answer in plain text otherwise. The assistant should know that this question can't be addressed with the available tools, hence respond in plain text with the factual answer. 134 | 135 | 136 | The exact number of languages in the world is difficult to determine, but estimates suggest there are roughly 7,000-7,100 living languages globally. 
This number fluctuates due to factors like language endangerment and discovery of undocumented languages. 137 | ``` 138 | 139 | To remove the "think" blocks from the response, use the line of code `content.split('</think>')[1].trim()`. I have done this for you in the "taotTutorialBaseChatModelAzureNoThink.js" file under the "tutorial" folder -> "BaseChatModel" sub-folder -> "azureNoThink" sub-sub-folder in this repo. Just follow the same steps under "**Langchain.js's BaseChatModel class**" above but for the "taotTutorialBaseChatModelAzureNoThink.js" file. After running the "taotTutorialBaseChatModelAzureNoThink.js" file, you should see similar results to the below: 140 | 141 | ```bash 142 | The result of 123 multiplied by 456 is **56,088**. 143 | There are 7 words in the sentence. 144 | The product of 123 and 456 is **56,088**. 145 | The sentence "I built my 1st Hello World program" contains **7 words**. 146 | The exact number of languages in the world is difficult to determine, but estimates suggest there are approximately **7,000 languages** spoken globally. This number fluctuates due to factors like language endangerment, evolution, and documentation efforts. 147 | ``` 148 | 149 | ### 3. LangChain.js's ChatOpenAI class (using QwQ-32B on OpenRouter) 150 | 151 | First, create an empty "tutorial" folder and an empty "ChatOpenAI_QwQ32B" sub-folder in your local device. Then copy the "taotTutorialChatOpenAI_QwQ32B.js" and ".env" files (under the "tutorial" folder and "ChatOpenAI_QwQ32B" sub-folder in this repo) into your empty "ChatOpenAI_QwQ32B" sub-folder in your local device. Note you will need to enter your own API key into the ".env" file. 
152 | 153 | Then run the following lines of code: 154 | 155 | ```bash 156 | # Navigate to the "ChatOpenAI_QwQ32B" sub-directory in your local device 157 | cd tutorial\ChatOpenAI_QwQ32B 158 | 159 | # Initialize as npm project 160 | npm init -y 161 | 162 | # Npm install the "taot-ts" package 163 | npm install taot-ts 164 | 165 | # Npm install dependencies required in the "taotTutorialChatOpenAI.js" file 166 | npm install dotenv @langchain/openai 167 | 168 | # Run the "taotTutorialChatOpenAI.js" file 169 | node taotTutorialChatOpenAI_QwQ32B.js 170 | ``` 171 | 172 | After running the "taotTutorialChatOpenAI_QwQ32B.js" file, you should see a similar results to the below: 173 | 174 | ```bash 175 | The result of 123 multiplied by 456 is 56,088. 176 | There are 7 words in the sentence "I built my 1st Hello World program." Let me know if you need help with anything else! 177 | The product of 123 and 456 is 56,088. 178 | The sentence "I built my 1st Hello World program" contains **7 words**. 179 | The number of languages in the world is a complex and debated figure, but the most commonly cited estimate from Ethnologue is **7,100+ living languages**. This number can vary based on classification criteria and ongoing documentation efforts. 180 | ``` 181 | 182 | ### 4. LangChain's MCP Adapters library with DeepSeek-R1 671B (via LangChain's ChatOpenAI class on OpenRouter) 183 | 184 | For this section, please refer to the "TutorialMcpAdaptersDeepSeekR1.mjs" file under the "tutorial" folder -> "McpAdapters_DeepSeekR1" sub-folder in this repo. 185 | 186 | This notebook tutorial showcases a step-by-step guide on how to implement DeepSeek-R1 connected to tools in MCP servers, using LangChain's MCP Adapters library (here: https://github.com/langchain-ai/langchain-mcp-adapters). 187 | 188 | I am using MCP servers from an MPC server registry/depository called MCP Server Cloud (here: https://mcpserver.cloud/, or their GitHub repo here: https://github.com/modelcontextprotocol). 
189 | 190 | I will be connecting DeepSeek-R1 to 2 MCP servers, with 1 tool in each MCP server. Namely, I will be using the Brave Search MCP Server (here: https://mcpserver.cloud/server/server-brave-search) and the AccuWeather MCP Server (here: https://mcpserver.cloud/server/mcp-weather-server). 191 | 192 | To use the Brave Search MCP Server and the AccuWeather MCP Server, you will need to create a Brave Browser API key (here: https://brave.com/search/api/) and an AccuWeather API key (here: https://developer.accuweather.com/getting-started), respectively. They are both free and it's fairly straight forward to do this (but note creating a Brave Browser API key require a credit card even for the free subscription). Just ask any AI for the step-by-step guide to do this. 193 | 194 | Once you have your Brave Browser and AccuWeather API keys, save them in a .env file, along with an OpenRouter API key (for this notebook tutorial I will be using DeepSeek-R1 hosted on OpenRouter). This .env file is saved in the same folder as where this Jupyter Notebook will be saved. 195 | 196 | Now that we have all the above setup, let's get into the more technical part of this notebook tutorial. How LangChain's MCP Adapters library works is it convert tools in MCP servers into LangChain tools, so then these LangChain tools can be used within the LangChain/LangGraph framework. Yes, it's as simple as that! 197 | 198 | Currently MCP servers are still in it's early development stages and so MCP servers doesn't yet have a direct SSE (Server-Sent Events) connection. To fix this, I have used a package called Supergateway (here: https://github.com/supercorp-ai/supergateway) which establishes a SSE connection for MCP servers. 
[Note: Currently there are several other ways to connect to MCP servers including downloading MCP servers into your local device and then connecting with the MCP server locally in your device using a Python package called langchain-mcp-tools (here: https://github.com/hideya/langchain-mcp-tools-py, where support for remote MCP server connection is currently experimental) or using the docker approach (here: https://www.youtube.com/watch?v=rdvt1qBZJtI), but I have chosen to use the Supergateway package approach as it is more realistic to connect to remote servers via SSE connections. The Supergateway package is run using npx (which is available in Node.js) which means if you haven't already, you will need to download Node.js (from here: https://nodejs.org/en/download) in order to use the Supergateway package via npx.] 199 | 200 | Referring to the instructions in the README file in the Supergateway's GitHub repo, in particular the "stdio → SSE" section ("Expose an MCP stdio server as an SSE server:"): 201 | - To establish a SSE connection for the Brave Search MCP Server using Supergateway, run the following command below in your IDE's (for eg. 
Cursor or VS Code) Terminal window (where this will use port 8001): 202 | `npx -y supergateway --stdio "npx -y @modelcontextprotocol/server-brave-search" --port 8001 --baseUrl http://localhost:8001 --ssePath /sse --messagePath /message` 203 | - To establish a SSE connection for the AccuWeather MCP Server using Supergateway, open a 2nd Terminal window in your IDE and run the following command below in this 2nd Terminal window (where this will use port 8002): 204 | `npx -y supergateway --stdio "uvx --from git+https://github.com/adhikasp/mcp-weather.git mcp-weather" --port 8002 --baseUrl http://localhost:8002 --ssePath /sse --messagePath /message` 205 | 206 | **Tip:** If you are unsure how to write the commands above for other MCP servers, just copy and paste the entire README file instructions in Supergateway's GitHub repo and the entire content of the MCP server page in the MCP Server Cloud registry/depository wesbite (for eg. for the Brave Search MCP Server, copy and paste the entire content from this page from the MCP Server Cloud registry/depository website: https://mcpserver.cloud/server/server-brave-search) into an AI and ask the AI to give you the "stdio → SSE" command. 207 | 208 | Now that you have both the Brave Search MCP Server and AccuWeather MCP Server SSE connections running, you can now run the "TutorialMcpAdaptersDeepSeekR1.mjs" file which uses `http://localhost:8001/sse` for the Brave Search MCP Server and `http://localhost:8002/sse` for the AccuWeather MCP Server. 209 | 210 | First, create an empty "tutorial" folder and an empty "McpAdapters_DeepSeekR1" sub-folder in your local device. Then copy the "TutorialMcpAdaptersDeepSeekR1.mjs" and ".env" files (under the "tutorial" folder and "McpAdapters_DeepSeekR1" sub-folder in this repo) into your empty "McpAdapters_DeepSeekR1" sub-folder in your local device. 
Note the "TutorialMcpAdaptersDeepSeekR1.mjs" file needs to be saved as a .mjs file and you will need to enter your own API keys into the ".env" file. 211 | 212 | Then run the following lines of code in your IDE Terminal: 213 | 214 | ``` 215 | # Navigate to the "McpAdapters_DeepSeekR1" sub-directory in your local device 216 | cd tutorial\McpAdapters_DeepSeekR1 217 | 218 | # Initialize as npm project 219 | npm init -y 220 | 221 | # Npm install dependencies required in the "TutorialMcpAdaptersDeepSeekR1.mjs" file 222 | npm install @langchain/mcp-adapters @langchain/langgraph @langchain/openai dotenv 223 | 224 | # Run the "TutorialMcpAdaptersDeepSeekR1.mjs" file 225 | node TutorialMcpAdaptersDeepSeekR1.mjs 226 | ``` 227 | 228 | After running the "TutorialMcpAdaptersDeepSeekR1.mjs" file, you should see results similar to the below: 229 | 230 | ``` 231 | Here's a summary of the latest AI news from credible sources: 232 | 233 | 1. **Industry Trends** (ArtificialIntelligence-News.com): Dedicated coverage of frontline AI developments, including emerging industry trends and innovations. 234 | 235 | 2. **Corporate AI Adoption** (NY Times): 236 | - H&M is experimenting with AI to create "digital twins" of clothing models. 237 | - News outlets are grappling with accuracy issues in AI-generated content, with some issuing frequent corrections. 238 | 239 | 3. **AI Chatbots** (NBC News): Focus on tools like ChatGPT, Google’s Bard, and Apple’s rumored AI chatbot, highlighting their growing role in tech. 240 | 241 | 4. **Research & Robotics** (ScienceDaily): Updates on AI-driven robotics and computational models aiming to replicate human intelligence. 242 | 243 | 5. **Critical Perspectives** (The Guardian): Ongoing analysis of AI’s societal impact, ethics, and policy challenges. 244 | 245 | For deeper insights, visit these sources directly using the provided links. Let me know if you’d like updates on a specific subtopic! 
🤖 246 | Here's the weather forecast for Sydney tomorrow based on current information: 247 | 248 | **Tomorrow's Weather (Sydney, Australia):** 249 | - **Daytime High:** 72°F (22°C) with windy conditions and a mix of clouds and sun (AccuWeather) 250 | - **Rain:** Morning showers possible, decreasing through the day (The Weather Channel) 251 | - **Evening:** Cooler at 63°F (17°C), with lingering breezy conditions 252 | - **Wind:** SSW winds 15-25 mph (24-40 km/h) 253 | 254 | Key details: Expect a brisk day with morning showers tapering off, followed by partly sunny skies. The breeze will make it feel cooler than the actual temperature. 255 | 256 | Would you like me to check another day or clarify further? 257 | ``` 258 | 259 | Remember once you are done with using the MCP Servers, you can close off or disconnect the MCP Server's SSE connections by typing "CTRL" + "C" keys in your IDE's Terminal window. 260 | 261 | **Takeaway:** This notebook tutorial demonstrates that even without having DeepSeek-R1 fine-tuned for tool calling or even without using my Tool-Ahead-of-Time package (since LangChain's MCP Adapters library works by converting tools in MCP servers into LangChain tools), MCP (via LangChain's MCP Adapters library) still works with DeepSeek-R1. This is likely because DeepSeek-R1 671B is a reasoning model and also how the prompts are written within LangChain's MCP Adapters library. 262 | 263 | ### 5. LangChain.js's ChatOpenAI class (using Qwen3 models on OpenRouter) 264 | 265 | First, create an empty "tutorial" folder and an empty "ChatOpenAI_Qwen3" sub-folder in your local device. Then copy the "taotTutorialChatOpenAI_Qwen3.js" and ".env" files (under the "tutorial" folder and "ChatOpenAI_Qwen3" sub-folder in this repo) into your empty "ChatOpenAI_Qwen3" sub-folder in your local device. Note you will need to enter your own API key into the ".env" file. 
266 | 267 | Then run the following lines of code: 268 | 269 | ```bash 270 | # Navigate to the "ChatOpenAI_Qwen3" sub-directory in your local device 271 | cd tutorial\ChatOpenAI_Qwen3 272 | 273 | # Initialize as npm project 274 | npm init -y 275 | 276 | # Npm install the "taot-ts" package 277 | npm install taot-ts 278 | 279 | # Npm install dependencies required in the "taotTutorialChatOpenAI_Qwen3.js" file 280 | npm install dotenv @langchain/openai 281 | 282 | # Run the "taotTutorialChatOpenAI_Qwen3.js" file 283 | node taotTutorialChatOpenAI_Qwen3.js 284 | ``` 285 | 286 | After running the "taotTutorialChatOpenAI_Qwen3.js" file, you should see results similar to the below: 287 | 288 | ```bash 289 | 123 multiplied by 456 equals 56,088. 290 | The sentence "I built my 1st Hello World program" contains 7 words. 291 | The product of 123 multiplied by 456 is **56,088**. Let me know if you need help with anything else! 🧮😊 292 | The sentence contains 7 words: "I," "built," "my," "1st," "Hello," "World," and "program." 293 | The exact number of languages in the world is difficult to determine precisely, but estimates range around **7,000** living languages. This number varies due to factors like language extinction, dialect classification, and ongoing linguistic research. No tool is needed for this factual question. 294 | ``` 295 | 296 | ### 6. LangChain.js's ChatOpenAI class (using DeepSeek-R1-0528 685B on OpenRouter) 297 | 298 | First, create an empty "tutorial" folder and an empty "ChatOpenAI_DeepSeekR10528" sub-folder in your local device. Then copy the "taotTutorialChatOpenAIDeepSeekR10528.js" and ".env" files (under the "tutorial" folder and "ChatOpenAI_DeepSeekR10528" sub-folder in this repo) into your empty "ChatOpenAI_DeepSeekR10528" sub-folder in your local device. Note you will need to enter your own API key into the ".env" file. 
299 | 300 | Then run the following lines of code: 301 | 302 | ```bash 303 | # Navigate to the "ChatOpenAI_DeepSeekR10528" sub-directory in your local device 304 | cd tutorial\ChatOpenAI_DeepSeekR10528 305 | 306 | # Initialize as npm project 307 | npm init -y 308 | 309 | # Npm install the "taot-ts" package 310 | npm install taot-ts 311 | 312 | # Npm install dependencies required in the "taotTutorialChatOpenAIDeepSeekR10528.js" file 313 | npm install dotenv @langchain/openai 314 | 315 | # Run the "taotTutorialChatOpenAIDeepSeekR10528.js" file 316 | node taotTutorialChatOpenAIDeepSeekR10528.js 317 | ``` 318 | 319 | After running the "taotTutorialChatOpenAIDeepSeekR10528.js" file, you should see results similar to the below: 320 | 321 | ```bash 322 | 123 multiplied by 456 equals 56,088. 323 | There are 7 words in the sentence "I built my 1st Hello World program." 324 | The result of multiplying 123 by 456 is 56,088. 325 | That sentence has 7 words. 326 | The exact number of languages in the world is difficult to determine due to factors like dialects, endangered languages, and ongoing linguistic research. However, according to major linguistic references like Ethnologue, there are approximately **7,000 to 8,000** living languages spoken globally today. This number fluctuates as languages evolve or become extinct. 327 | ``` 328 | 329 | ## Changelog 📖 330 | 331 | 20th Feb 2025: 332 | - Package now available on PyPI! Just "pip install taot" and you're ready to go. 333 | - Completely redesigned to follow LangChain's and LangGraph's intuitive `create_react_agent` tool calling methods. 334 | - Produces natural language responses when tool calling is performed. 335 | 336 | 1st Mar 2025: 337 | - Package now available in TypeScript on npm! Just "npm install taot-ts" and you're ready to go. 
338 | 339 | 8th Mar 2025: 340 | - Updated repo to include implementation support for Microsoft Azure via Langchain.js's BaseChatModel class (Note: As of 8th Mar 2025, Langchain.js's ChatOpenAI, ChatAzureOpenAI (considered to be deprecated) and AzureChatOpenAI does not provide Microsoft Azure support for the DeepSeek-R1 model). 341 | 342 | 16th Mar 2025: 343 | - Updated repo to include example tutorial for tool calling support for QwQ-32B using Langchain.js's ChatOpenAI class (hosted on OpenRouter). See "taotTutorialChatOpenAI_QwQ32B.js" file under the "tutorial" folder -> "ChatOpenAI_QwQ32B" sub-folder in this repo. While doing this, I noticed OpenRouter's API for QwQ-32B is unstable and returning empty responses (likely because QwQ-32B is a new model added on OpenRouter only about a week ago). Due to this, I have updated the taot-ts package to keep looping until a non-empty response is returned. If you have previously downloaded the package, please update the package via `npm update taot-ts`. 344 | - Checked out OpenAI Agents SDK framework for tool calling support for non-OpenAI providers/models (https://openai.github.io/openai-agents-python/models/) and they don't support tool calling for DeepSeek-R1 (or models available through OpenRouter) yet (as of 16th Mar 2025), so there you go! 😉 345 | 346 | 28th Mar 2025: 347 | - Attempted to implement DeepSeek-R1 for Amazon Bedrock via LangChain.js's ChatBedrockConverse and LangChain.js's BedrockChat classes, but DeepSeek-R1's model ID `us.deepseek.r1-v1:0` is not yet supported for these two classes. 348 | - Managed to implement DeepSeek-R1 for Amazon Bedrock directly via JavaScript version of the AWS SDK (ie. `@aws-sdk/client-bedrock-runtime`), but the model responses are not stable where sometimes it will return the reasoning/thinking part of the model with no token tags (for eg. ``, `<|Assistant|>`, `<|end_of_sentence|>` etc.) which will allow us to filter out the reasoning/thinking part. 
Because of this, I have decided not to release this implementation for now. 349 | - The DeepSeek-R1 Python implementation for Amazon Bedrock via LangChain's ChatBedrockConverse class was successful. Please refer to the equivalent Python repo for this package here: https://github.com/leockl/tool-ahead-of-time. 350 | 351 | 6th April 2025: 352 | - Special Update: Updated repo to include implementation support for using LangChain.js's MCP Adapters library with DeepSeek-R1 671B (via LangChain.js's ChatOpenAI class on OpenRouter). 353 | - Special Update: Implementation support for using LangGraph's Bigtool library with DeepSeek-R1 671B (via LangChain's ChatOpenAI class on OpenRouter) was not included as there is currently no JavaScript/TypeScript support for the LangGraph's Bigtool library. The Python implementation for this was successful, see the equivalent Python repo here: https://github.com/leockl/tool-ahead-of-time. 354 | 355 | 7th May 2025: 356 | - Updated repo to include example tutorial for tool calling support for all the Qwen3 models using Langchain's ChatOpenAI class (hosted on OpenRouter), with the exception of the Qwen3 0.6B model. My observation is that the Qwen 0.6B model is just not "smart" or performant enough to understand when tool use is required. 357 | 358 | 4th Jun 2025: 359 | - Updated repo to include example tutorial for tool calling support for DeepSeek-R1-0528 685B model using Langchain's ChatOpenAI class (hosted on OpenRouter). 360 | 361 | ## Contributions 🤝 362 | 363 | Feel free to contribute! Whether it's adding features, fixing bugs, adding comments in the code or any suggestions to improve this repo, all are welcomed 😄 364 | 365 | ## Disclaimer ⚠️ 366 | 367 | This package is like that friend who shows up to the party early - technically not invited yet, but hopes to bring such good vibes that everyone's glad they came. 
368 | 369 | ## License 📜 370 | 371 | MIT License - Because sharing is caring, and we care about you having tool calling RIGHT NOW. 372 | 373 | --- 374 | 375 | Made with ❤️ and a healthy dose of impatience. 376 | 377 | Please give my GitHub repo a ⭐ if this was helpful. Thank you! 378 | -------------------------------------------------------------------------------- /tutorial/ChatOpenAI/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chatopenai", 3 | "version": "1.0.0", 4 | "lockfileVersion": 3, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "chatopenai", 9 | "version": "1.0.0", 10 | "license": "ISC", 11 | "dependencies": { 12 | "@langchain/openai": "^0.4.4", 13 | "dotenv": "^16.4.7", 14 | "taot-ts": "^0.1.6" 15 | } 16 | }, 17 | "node_modules/@cfworker/json-schema": { 18 | "version": "4.1.1", 19 | "resolved": "https://registry.npmjs.org/@cfworker/json-schema/-/json-schema-4.1.1.tgz", 20 | "integrity": "sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==", 21 | "license": "MIT", 22 | "peer": true 23 | }, 24 | "node_modules/@langchain/core": { 25 | "version": "0.3.42", 26 | "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.3.42.tgz", 27 | "integrity": "sha512-pT/jC5lqWK3YGDq8dQwgKoa6anqAhMtG1x5JbnrOj9NdaLeBbCKBDQ+/Ykzk3nZ8o+0UMsaXNZo7IVL83VVjHg==", 28 | "license": "MIT", 29 | "peer": true, 30 | "dependencies": { 31 | "@cfworker/json-schema": "^4.0.2", 32 | "ansi-styles": "^5.0.0", 33 | "camelcase": "6", 34 | "decamelize": "1.2.0", 35 | "js-tiktoken": "^1.0.12", 36 | "langsmith": ">=0.2.8 <0.4.0", 37 | "mustache": "^4.2.0", 38 | "p-queue": "^6.6.2", 39 | "p-retry": "4", 40 | "uuid": "^10.0.0", 41 | "zod": "^3.22.4", 42 | "zod-to-json-schema": "^3.22.3" 43 | }, 44 | "engines": { 45 | "node": ">=18" 46 | } 47 | }, 48 | "node_modules/@langchain/openai": { 49 | "version": "0.4.4", 50 | "resolved": 
"https://registry.npmjs.org/@langchain/openai/-/openai-0.4.4.tgz", 51 | "integrity": "sha512-UZybJeMd8+UX7Kn47kuFYfqKdBCeBUWNqDtmAr6ZUIMMnlsNIb6MkrEEhGgAEjGCpdT4CU8U/DyyddTz+JayOQ==", 52 | "license": "MIT", 53 | "dependencies": { 54 | "js-tiktoken": "^1.0.12", 55 | "openai": "^4.77.0", 56 | "zod": "^3.22.4", 57 | "zod-to-json-schema": "^3.22.3" 58 | }, 59 | "engines": { 60 | "node": ">=18" 61 | }, 62 | "peerDependencies": { 63 | "@langchain/core": ">=0.3.39 <0.4.0" 64 | } 65 | }, 66 | "node_modules/@types/node": { 67 | "version": "18.19.79", 68 | "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.79.tgz", 69 | "integrity": "sha512-90K8Oayimbctc5zTPHPfZloc/lGVs7f3phUAAMcTgEPtg8kKquGZDERC8K4vkBYkQQh48msiYUslYtxTWvqcAg==", 70 | "license": "MIT", 71 | "dependencies": { 72 | "undici-types": "~5.26.4" 73 | } 74 | }, 75 | "node_modules/@types/node-fetch": { 76 | "version": "2.6.12", 77 | "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz", 78 | "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==", 79 | "license": "MIT", 80 | "dependencies": { 81 | "@types/node": "*", 82 | "form-data": "^4.0.0" 83 | } 84 | }, 85 | "node_modules/@types/retry": { 86 | "version": "0.12.0", 87 | "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", 88 | "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", 89 | "license": "MIT" 90 | }, 91 | "node_modules/@types/uuid": { 92 | "version": "10.0.0", 93 | "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", 94 | "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", 95 | "license": "MIT" 96 | }, 97 | "node_modules/abort-controller": { 98 | "version": "3.0.0", 99 | "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", 100 | "integrity": 
"sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", 101 | "license": "MIT", 102 | "dependencies": { 103 | "event-target-shim": "^5.0.0" 104 | }, 105 | "engines": { 106 | "node": ">=6.5" 107 | } 108 | }, 109 | "node_modules/agentkeepalive": { 110 | "version": "4.6.0", 111 | "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", 112 | "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", 113 | "license": "MIT", 114 | "dependencies": { 115 | "humanize-ms": "^1.2.1" 116 | }, 117 | "engines": { 118 | "node": ">= 8.0.0" 119 | } 120 | }, 121 | "node_modules/ansi-styles": { 122 | "version": "5.2.0", 123 | "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", 124 | "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", 125 | "license": "MIT", 126 | "engines": { 127 | "node": ">=10" 128 | }, 129 | "funding": { 130 | "url": "https://github.com/chalk/ansi-styles?sponsor=1" 131 | } 132 | }, 133 | "node_modules/asynckit": { 134 | "version": "0.4.0", 135 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 136 | "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", 137 | "license": "MIT" 138 | }, 139 | "node_modules/base64-js": { 140 | "version": "1.5.1", 141 | "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", 142 | "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", 143 | "funding": [ 144 | { 145 | "type": "github", 146 | "url": "https://github.com/sponsors/feross" 147 | }, 148 | { 149 | "type": "patreon", 150 | "url": "https://www.patreon.com/feross" 151 | }, 152 | { 153 | "type": "consulting", 154 | "url": "https://feross.org/support" 155 | } 156 | ], 157 | "license": "MIT" 158 | }, 159 | 
"node_modules/binary-search": { 160 | "version": "1.3.6", 161 | "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz", 162 | "integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==", 163 | "license": "CC0-1.0" 164 | }, 165 | "node_modules/call-bind-apply-helpers": { 166 | "version": "1.0.2", 167 | "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", 168 | "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", 169 | "license": "MIT", 170 | "dependencies": { 171 | "es-errors": "^1.3.0", 172 | "function-bind": "^1.1.2" 173 | }, 174 | "engines": { 175 | "node": ">= 0.4" 176 | } 177 | }, 178 | "node_modules/camelcase": { 179 | "version": "6.3.0", 180 | "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", 181 | "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", 182 | "license": "MIT", 183 | "engines": { 184 | "node": ">=10" 185 | }, 186 | "funding": { 187 | "url": "https://github.com/sponsors/sindresorhus" 188 | } 189 | }, 190 | "node_modules/chalk": { 191 | "version": "4.1.2", 192 | "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", 193 | "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", 194 | "license": "MIT", 195 | "peer": true, 196 | "dependencies": { 197 | "ansi-styles": "^4.1.0", 198 | "supports-color": "^7.1.0" 199 | }, 200 | "engines": { 201 | "node": ">=10" 202 | }, 203 | "funding": { 204 | "url": "https://github.com/chalk/chalk?sponsor=1" 205 | } 206 | }, 207 | "node_modules/chalk/node_modules/ansi-styles": { 208 | "version": "4.3.0", 209 | "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", 210 | "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", 211 | "license": "MIT", 212 | "peer": true, 213 | "dependencies": { 214 | "color-convert": "^2.0.1" 215 | }, 216 | "engines": { 217 | "node": ">=8" 218 | }, 219 | "funding": { 220 | "url": "https://github.com/chalk/ansi-styles?sponsor=1" 221 | } 222 | }, 223 | "node_modules/color-convert": { 224 | "version": "2.0.1", 225 | "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", 226 | "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", 227 | "license": "MIT", 228 | "peer": true, 229 | "dependencies": { 230 | "color-name": "~1.1.4" 231 | }, 232 | "engines": { 233 | "node": ">=7.0.0" 234 | } 235 | }, 236 | "node_modules/color-name": { 237 | "version": "1.1.4", 238 | "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", 239 | "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", 240 | "license": "MIT", 241 | "peer": true 242 | }, 243 | "node_modules/combined-stream": { 244 | "version": "1.0.8", 245 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", 246 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", 247 | "license": "MIT", 248 | "dependencies": { 249 | "delayed-stream": "~1.0.0" 250 | }, 251 | "engines": { 252 | "node": ">= 0.8" 253 | } 254 | }, 255 | "node_modules/commander": { 256 | "version": "10.0.1", 257 | "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", 258 | "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", 259 | "license": "MIT", 260 | "engines": { 261 | "node": ">=14" 262 | } 263 | }, 264 | "node_modules/console-table-printer": { 265 | "version": "2.12.1", 266 | "resolved": 
"https://registry.npmjs.org/console-table-printer/-/console-table-printer-2.12.1.tgz", 267 | "integrity": "sha512-wKGOQRRvdnd89pCeH96e2Fn4wkbenSP6LMHfjfyNLMbGuHEFbMqQNuxXqd0oXG9caIOQ1FTvc5Uijp9/4jujnQ==", 268 | "license": "MIT", 269 | "peer": true, 270 | "dependencies": { 271 | "simple-wcswidth": "^1.0.1" 272 | } 273 | }, 274 | "node_modules/decamelize": { 275 | "version": "1.2.0", 276 | "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", 277 | "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", 278 | "license": "MIT", 279 | "engines": { 280 | "node": ">=0.10.0" 281 | } 282 | }, 283 | "node_modules/delayed-stream": { 284 | "version": "1.0.0", 285 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 286 | "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", 287 | "license": "MIT", 288 | "engines": { 289 | "node": ">=0.4.0" 290 | } 291 | }, 292 | "node_modules/dotenv": { 293 | "version": "16.4.7", 294 | "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", 295 | "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", 296 | "license": "BSD-2-Clause", 297 | "engines": { 298 | "node": ">=12" 299 | }, 300 | "funding": { 301 | "url": "https://dotenvx.com" 302 | } 303 | }, 304 | "node_modules/dunder-proto": { 305 | "version": "1.0.1", 306 | "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", 307 | "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", 308 | "license": "MIT", 309 | "dependencies": { 310 | "call-bind-apply-helpers": "^1.0.1", 311 | "es-errors": "^1.3.0", 312 | "gopd": "^1.2.0" 313 | }, 314 | "engines": { 315 | "node": ">= 0.4" 316 | } 317 | }, 318 | "node_modules/es-define-property": { 319 | "version": "1.0.1", 320 | "resolved": 
"https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", 321 | "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", 322 | "license": "MIT", 323 | "engines": { 324 | "node": ">= 0.4" 325 | } 326 | }, 327 | "node_modules/es-errors": { 328 | "version": "1.3.0", 329 | "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", 330 | "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", 331 | "license": "MIT", 332 | "engines": { 333 | "node": ">= 0.4" 334 | } 335 | }, 336 | "node_modules/es-object-atoms": { 337 | "version": "1.1.1", 338 | "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", 339 | "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", 340 | "license": "MIT", 341 | "dependencies": { 342 | "es-errors": "^1.3.0" 343 | }, 344 | "engines": { 345 | "node": ">= 0.4" 346 | } 347 | }, 348 | "node_modules/es-set-tostringtag": { 349 | "version": "2.1.0", 350 | "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", 351 | "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", 352 | "license": "MIT", 353 | "dependencies": { 354 | "es-errors": "^1.3.0", 355 | "get-intrinsic": "^1.2.6", 356 | "has-tostringtag": "^1.0.2", 357 | "hasown": "^2.0.2" 358 | }, 359 | "engines": { 360 | "node": ">= 0.4" 361 | } 362 | }, 363 | "node_modules/event-target-shim": { 364 | "version": "5.0.1", 365 | "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", 366 | "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", 367 | "license": "MIT", 368 | "engines": { 369 | "node": ">=6" 370 | } 371 | }, 372 | "node_modules/eventemitter3": { 373 | "version": "4.0.7", 374 | 
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", 375 | "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", 376 | "license": "MIT" 377 | }, 378 | "node_modules/form-data": { 379 | "version": "4.0.2", 380 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", 381 | "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", 382 | "license": "MIT", 383 | "dependencies": { 384 | "asynckit": "^0.4.0", 385 | "combined-stream": "^1.0.8", 386 | "es-set-tostringtag": "^2.1.0", 387 | "mime-types": "^2.1.12" 388 | }, 389 | "engines": { 390 | "node": ">= 6" 391 | } 392 | }, 393 | "node_modules/form-data-encoder": { 394 | "version": "1.7.2", 395 | "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", 396 | "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==", 397 | "license": "MIT" 398 | }, 399 | "node_modules/formdata-node": { 400 | "version": "4.4.1", 401 | "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", 402 | "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", 403 | "license": "MIT", 404 | "dependencies": { 405 | "node-domexception": "1.0.0", 406 | "web-streams-polyfill": "4.0.0-beta.3" 407 | }, 408 | "engines": { 409 | "node": ">= 12.20" 410 | } 411 | }, 412 | "node_modules/function-bind": { 413 | "version": "1.1.2", 414 | "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", 415 | "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", 416 | "license": "MIT", 417 | "funding": { 418 | "url": "https://github.com/sponsors/ljharb" 419 | } 420 | }, 421 | "node_modules/get-intrinsic": { 422 | "version": "1.3.0", 423 | "resolved": 
"https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", 424 | "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", 425 | "license": "MIT", 426 | "dependencies": { 427 | "call-bind-apply-helpers": "^1.0.2", 428 | "es-define-property": "^1.0.1", 429 | "es-errors": "^1.3.0", 430 | "es-object-atoms": "^1.1.1", 431 | "function-bind": "^1.1.2", 432 | "get-proto": "^1.0.1", 433 | "gopd": "^1.2.0", 434 | "has-symbols": "^1.1.0", 435 | "hasown": "^2.0.2", 436 | "math-intrinsics": "^1.1.0" 437 | }, 438 | "engines": { 439 | "node": ">= 0.4" 440 | }, 441 | "funding": { 442 | "url": "https://github.com/sponsors/ljharb" 443 | } 444 | }, 445 | "node_modules/get-proto": { 446 | "version": "1.0.1", 447 | "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", 448 | "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", 449 | "license": "MIT", 450 | "dependencies": { 451 | "dunder-proto": "^1.0.1", 452 | "es-object-atoms": "^1.0.0" 453 | }, 454 | "engines": { 455 | "node": ">= 0.4" 456 | } 457 | }, 458 | "node_modules/gopd": { 459 | "version": "1.2.0", 460 | "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", 461 | "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", 462 | "license": "MIT", 463 | "engines": { 464 | "node": ">= 0.4" 465 | }, 466 | "funding": { 467 | "url": "https://github.com/sponsors/ljharb" 468 | } 469 | }, 470 | "node_modules/has-flag": { 471 | "version": "4.0.0", 472 | "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", 473 | "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", 474 | "license": "MIT", 475 | "peer": true, 476 | "engines": { 477 | "node": ">=8" 478 | } 479 | }, 480 | "node_modules/has-symbols": { 481 | "version": "1.1.0", 482 | "resolved": 
"https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", 483 | "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", 484 | "license": "MIT", 485 | "engines": { 486 | "node": ">= 0.4" 487 | }, 488 | "funding": { 489 | "url": "https://github.com/sponsors/ljharb" 490 | } 491 | }, 492 | "node_modules/has-tostringtag": { 493 | "version": "1.0.2", 494 | "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", 495 | "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", 496 | "license": "MIT", 497 | "dependencies": { 498 | "has-symbols": "^1.0.3" 499 | }, 500 | "engines": { 501 | "node": ">= 0.4" 502 | }, 503 | "funding": { 504 | "url": "https://github.com/sponsors/ljharb" 505 | } 506 | }, 507 | "node_modules/hasown": { 508 | "version": "2.0.2", 509 | "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", 510 | "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", 511 | "license": "MIT", 512 | "dependencies": { 513 | "function-bind": "^1.1.2" 514 | }, 515 | "engines": { 516 | "node": ">= 0.4" 517 | } 518 | }, 519 | "node_modules/humanize-ms": { 520 | "version": "1.2.1", 521 | "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", 522 | "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", 523 | "license": "MIT", 524 | "dependencies": { 525 | "ms": "^2.0.0" 526 | } 527 | }, 528 | "node_modules/is-any-array": { 529 | "version": "2.0.1", 530 | "resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz", 531 | "integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==", 532 | "license": "MIT" 533 | }, 534 | "node_modules/js-tiktoken": { 535 | "version": "1.0.19", 536 | "resolved": 
"https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.19.tgz", 537 | "integrity": "sha512-XC63YQeEcS47Y53gg950xiZ4IWmkfMe4p2V9OSaBt26q+p47WHn18izuXzSclCI73B7yGqtfRsT6jcZQI0y08g==", 538 | "license": "MIT", 539 | "dependencies": { 540 | "base64-js": "^1.5.1" 541 | } 542 | }, 543 | "node_modules/langsmith": { 544 | "version": "0.3.12", 545 | "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.3.12.tgz", 546 | "integrity": "sha512-e4qWM27hxEr8GfO6dgXrc3W8La+wxkX1zEtMhxhqS/Th2ujTt5OH7x0uXfXFDqCv9WaC3nquo1Y2s4vpYmLLtg==", 547 | "license": "MIT", 548 | "peer": true, 549 | "dependencies": { 550 | "@types/uuid": "^10.0.0", 551 | "chalk": "^4.1.2", 552 | "console-table-printer": "^2.12.1", 553 | "p-queue": "^6.6.2", 554 | "p-retry": "4", 555 | "semver": "^7.6.3", 556 | "uuid": "^10.0.0" 557 | }, 558 | "peerDependencies": { 559 | "openai": "*" 560 | }, 561 | "peerDependenciesMeta": { 562 | "openai": { 563 | "optional": true 564 | } 565 | } 566 | }, 567 | "node_modules/math-intrinsics": { 568 | "version": "1.1.0", 569 | "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", 570 | "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", 571 | "license": "MIT", 572 | "engines": { 573 | "node": ">= 0.4" 574 | } 575 | }, 576 | "node_modules/mime-db": { 577 | "version": "1.52.0", 578 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 579 | "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", 580 | "license": "MIT", 581 | "engines": { 582 | "node": ">= 0.6" 583 | } 584 | }, 585 | "node_modules/mime-types": { 586 | "version": "2.1.35", 587 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", 588 | "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", 589 | "license": "MIT", 590 | "dependencies": { 591 | "mime-db": 
"1.52.0" 592 | }, 593 | "engines": { 594 | "node": ">= 0.6" 595 | } 596 | }, 597 | "node_modules/ml-array-mean": { 598 | "version": "1.1.6", 599 | "resolved": "https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz", 600 | "integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==", 601 | "license": "MIT", 602 | "dependencies": { 603 | "ml-array-sum": "^1.1.6" 604 | } 605 | }, 606 | "node_modules/ml-array-sum": { 607 | "version": "1.1.6", 608 | "resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz", 609 | "integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==", 610 | "license": "MIT", 611 | "dependencies": { 612 | "is-any-array": "^2.0.0" 613 | } 614 | }, 615 | "node_modules/ml-distance": { 616 | "version": "4.0.1", 617 | "resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.1.tgz", 618 | "integrity": "sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw==", 619 | "license": "MIT", 620 | "dependencies": { 621 | "ml-array-mean": "^1.1.6", 622 | "ml-distance-euclidean": "^2.0.0", 623 | "ml-tree-similarity": "^1.0.0" 624 | } 625 | }, 626 | "node_modules/ml-distance-euclidean": { 627 | "version": "2.0.0", 628 | "resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz", 629 | "integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q==", 630 | "license": "MIT" 631 | }, 632 | "node_modules/ml-tree-similarity": { 633 | "version": "1.0.0", 634 | "resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz", 635 | "integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==", 636 | "license": "MIT", 637 | "dependencies": { 638 | "binary-search": "^1.3.5", 639 | "num-sort": "^2.0.0" 640 | } 641 | }, 642 | 
"node_modules/ms": { 643 | "version": "2.1.3", 644 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 645 | "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", 646 | "license": "MIT" 647 | }, 648 | "node_modules/mustache": { 649 | "version": "4.2.0", 650 | "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", 651 | "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==", 652 | "license": "MIT", 653 | "bin": { 654 | "mustache": "bin/mustache" 655 | } 656 | }, 657 | "node_modules/node-domexception": { 658 | "version": "1.0.0", 659 | "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", 660 | "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", 661 | "funding": [ 662 | { 663 | "type": "github", 664 | "url": "https://github.com/sponsors/jimmywarting" 665 | }, 666 | { 667 | "type": "github", 668 | "url": "https://paypal.me/jimmywarting" 669 | } 670 | ], 671 | "license": "MIT", 672 | "engines": { 673 | "node": ">=10.5.0" 674 | } 675 | }, 676 | "node_modules/node-fetch": { 677 | "version": "2.7.0", 678 | "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", 679 | "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", 680 | "license": "MIT", 681 | "dependencies": { 682 | "whatwg-url": "^5.0.0" 683 | }, 684 | "engines": { 685 | "node": "4.x || >=6.0.0" 686 | }, 687 | "peerDependencies": { 688 | "encoding": "^0.1.0" 689 | }, 690 | "peerDependenciesMeta": { 691 | "encoding": { 692 | "optional": true 693 | } 694 | } 695 | }, 696 | "node_modules/num-sort": { 697 | "version": "2.1.0", 698 | "resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz", 699 | "integrity": 
"sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==", 700 | "license": "MIT", 701 | "engines": { 702 | "node": ">=8" 703 | }, 704 | "funding": { 705 | "url": "https://github.com/sponsors/sindresorhus" 706 | } 707 | }, 708 | "node_modules/openai": { 709 | "version": "4.86.2", 710 | "resolved": "https://registry.npmjs.org/openai/-/openai-4.86.2.tgz", 711 | "integrity": "sha512-nvYeFjmjdSu6/msld+22JoUlCICNk/lUFpSMmc6KNhpeNLpqL70TqbD/8Vura/tFmYqHKW0trcjgPwUpKSPwaA==", 712 | "license": "Apache-2.0", 713 | "dependencies": { 714 | "@types/node": "^18.11.18", 715 | "@types/node-fetch": "^2.6.4", 716 | "abort-controller": "^3.0.0", 717 | "agentkeepalive": "^4.2.1", 718 | "form-data-encoder": "1.7.2", 719 | "formdata-node": "^4.3.2", 720 | "node-fetch": "^2.6.7" 721 | }, 722 | "bin": { 723 | "openai": "bin/cli" 724 | }, 725 | "peerDependencies": { 726 | "ws": "^8.18.0", 727 | "zod": "^3.23.8" 728 | }, 729 | "peerDependenciesMeta": { 730 | "ws": { 731 | "optional": true 732 | }, 733 | "zod": { 734 | "optional": true 735 | } 736 | } 737 | }, 738 | "node_modules/p-finally": { 739 | "version": "1.0.0", 740 | "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", 741 | "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", 742 | "license": "MIT", 743 | "engines": { 744 | "node": ">=4" 745 | } 746 | }, 747 | "node_modules/p-queue": { 748 | "version": "6.6.2", 749 | "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", 750 | "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", 751 | "license": "MIT", 752 | "dependencies": { 753 | "eventemitter3": "^4.0.4", 754 | "p-timeout": "^3.2.0" 755 | }, 756 | "engines": { 757 | "node": ">=8" 758 | }, 759 | "funding": { 760 | "url": "https://github.com/sponsors/sindresorhus" 761 | } 762 | }, 763 | "node_modules/p-retry": { 764 | "version": "4.6.2", 
765 | "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", 766 | "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", 767 | "license": "MIT", 768 | "dependencies": { 769 | "@types/retry": "0.12.0", 770 | "retry": "^0.13.1" 771 | }, 772 | "engines": { 773 | "node": ">=8" 774 | } 775 | }, 776 | "node_modules/p-timeout": { 777 | "version": "3.2.0", 778 | "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", 779 | "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", 780 | "license": "MIT", 781 | "dependencies": { 782 | "p-finally": "^1.0.0" 783 | }, 784 | "engines": { 785 | "node": ">=8" 786 | } 787 | }, 788 | "node_modules/retry": { 789 | "version": "0.13.1", 790 | "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", 791 | "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", 792 | "license": "MIT", 793 | "engines": { 794 | "node": ">= 4" 795 | } 796 | }, 797 | "node_modules/semver": { 798 | "version": "7.7.1", 799 | "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", 800 | "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", 801 | "license": "ISC", 802 | "bin": { 803 | "semver": "bin/semver.js" 804 | }, 805 | "engines": { 806 | "node": ">=10" 807 | } 808 | }, 809 | "node_modules/simple-wcswidth": { 810 | "version": "1.0.1", 811 | "resolved": "https://registry.npmjs.org/simple-wcswidth/-/simple-wcswidth-1.0.1.tgz", 812 | "integrity": "sha512-xMO/8eNREtaROt7tJvWJqHBDTMFN4eiQ5I4JRMuilwfnFcV5W9u7RUkueNkdw0jPqGMX36iCywelS5yilTuOxg==", 813 | "license": "MIT", 814 | "peer": true 815 | }, 816 | "node_modules/supports-color": { 817 | "version": "7.2.0", 818 | "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", 819 | "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", 820 | "license": "MIT", 821 | "peer": true, 822 | "dependencies": { 823 | "has-flag": "^4.0.0" 824 | }, 825 | "engines": { 826 | "node": ">=8" 827 | } 828 | }, 829 | "node_modules/taot-ts": { 830 | "version": "0.1.6", 831 | "resolved": "https://registry.npmjs.org/taot-ts/-/taot-ts-0.1.6.tgz", 832 | "integrity": "sha512-21hRcxbjWqz3Qq4oHtyaANri9zXBd5GehpHmUVFvnwBQjT87bfScG7xauD2GReGVP5d7pvCvt73kL9uLfaYAKQ==", 833 | "license": "MIT", 834 | "dependencies": { 835 | "@langchain/core": "^0.1.7", 836 | "taot-ts": "^0.1.3" 837 | }, 838 | "engines": { 839 | "node": ">=16.0.0" 840 | }, 841 | "peerDependencies": { 842 | "@langchain/openai": ">=0.0.10", 843 | "dotenv": ">=8.0.0" 844 | } 845 | }, 846 | "node_modules/taot-ts/node_modules/@langchain/core": { 847 | "version": "0.1.63", 848 | "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.1.63.tgz", 849 | "integrity": "sha512-+fjyYi8wy6x1P+Ee1RWfIIEyxd9Ee9jksEwvrggPwwI/p45kIDTdYTblXsM13y4mNWTiACyLSdbwnPaxxdoz+w==", 850 | "license": "MIT", 851 | "dependencies": { 852 | "ansi-styles": "^5.0.0", 853 | "camelcase": "6", 854 | "decamelize": "1.2.0", 855 | "js-tiktoken": "^1.0.12", 856 | "langsmith": "~0.1.7", 857 | "ml-distance": "^4.0.0", 858 | "mustache": "^4.2.0", 859 | "p-queue": "^6.6.2", 860 | "p-retry": "4", 861 | "uuid": "^9.0.0", 862 | "zod": "^3.22.4", 863 | "zod-to-json-schema": "^3.22.3" 864 | }, 865 | "engines": { 866 | "node": ">=18" 867 | } 868 | }, 869 | "node_modules/taot-ts/node_modules/@langchain/core/node_modules/uuid": { 870 | "version": "9.0.1", 871 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", 872 | "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", 873 | "funding": [ 874 | "https://github.com/sponsors/broofa", 875 | "https://github.com/sponsors/ctavan" 876 | ], 877 | "license": "MIT", 878 | "bin": { 879 | "uuid": 
"dist/bin/uuid" 880 | } 881 | }, 882 | "node_modules/taot-ts/node_modules/langsmith": { 883 | "version": "0.1.68", 884 | "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.1.68.tgz", 885 | "integrity": "sha512-otmiysWtVAqzMx3CJ4PrtUBhWRG5Co8Z4o7hSZENPjlit9/j3/vm3TSvbaxpDYakZxtMjhkcJTqrdYFipISEiQ==", 886 | "license": "MIT", 887 | "dependencies": { 888 | "@types/uuid": "^10.0.0", 889 | "commander": "^10.0.1", 890 | "p-queue": "^6.6.2", 891 | "p-retry": "4", 892 | "semver": "^7.6.3", 893 | "uuid": "^10.0.0" 894 | }, 895 | "peerDependencies": { 896 | "openai": "*" 897 | }, 898 | "peerDependenciesMeta": { 899 | "openai": { 900 | "optional": true 901 | } 902 | } 903 | }, 904 | "node_modules/tr46": { 905 | "version": "0.0.3", 906 | "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", 907 | "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", 908 | "license": "MIT" 909 | }, 910 | "node_modules/undici-types": { 911 | "version": "5.26.5", 912 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", 913 | "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", 914 | "license": "MIT" 915 | }, 916 | "node_modules/uuid": { 917 | "version": "10.0.0", 918 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-10.0.0.tgz", 919 | "integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==", 920 | "funding": [ 921 | "https://github.com/sponsors/broofa", 922 | "https://github.com/sponsors/ctavan" 923 | ], 924 | "license": "MIT", 925 | "bin": { 926 | "uuid": "dist/bin/uuid" 927 | } 928 | }, 929 | "node_modules/web-streams-polyfill": { 930 | "version": "4.0.0-beta.3", 931 | "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", 932 | "integrity": 
"sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", 933 | "license": "MIT", 934 | "engines": { 935 | "node": ">= 14" 936 | } 937 | }, 938 | "node_modules/webidl-conversions": { 939 | "version": "3.0.1", 940 | "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", 941 | "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", 942 | "license": "BSD-2-Clause" 943 | }, 944 | "node_modules/whatwg-url": { 945 | "version": "5.0.0", 946 | "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", 947 | "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", 948 | "license": "MIT", 949 | "dependencies": { 950 | "tr46": "~0.0.3", 951 | "webidl-conversions": "^3.0.0" 952 | } 953 | }, 954 | "node_modules/zod": { 955 | "version": "3.24.2", 956 | "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz", 957 | "integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==", 958 | "license": "MIT", 959 | "funding": { 960 | "url": "https://github.com/sponsors/colinhacks" 961 | } 962 | }, 963 | "node_modules/zod-to-json-schema": { 964 | "version": "3.24.3", 965 | "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.3.tgz", 966 | "integrity": "sha512-HIAfWdYIt1sssHfYZFCXp4rU1w2r8hVVXYIlmoa0r0gABLs5di3RCqPU5DDROogVz1pAdYBaz7HK5n9pSUNs3A==", 967 | "license": "ISC", 968 | "peerDependencies": { 969 | "zod": "^3.24.1" 970 | } 971 | } 972 | } 973 | } 974 | -------------------------------------------------------------------------------- /tutorial/BaseChatModel/azure/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "azure", 3 | "version": "1.0.0", 4 | "lockfileVersion": 3, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": 
"azure", 9 | "version": "1.0.0", 10 | "license": "ISC", 11 | "dependencies": { 12 | "@langchain/core": "^0.3.42", 13 | "dotenv": "^16.4.7", 14 | "node-fetch": "^2.7.0", 15 | "taot-ts": "^0.1.6" 16 | } 17 | }, 18 | "node_modules/@cfworker/json-schema": { 19 | "version": "4.1.1", 20 | "resolved": "https://registry.npmjs.org/@cfworker/json-schema/-/json-schema-4.1.1.tgz", 21 | "integrity": "sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==", 22 | "license": "MIT" 23 | }, 24 | "node_modules/@langchain/core": { 25 | "version": "0.3.42", 26 | "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.3.42.tgz", 27 | "integrity": "sha512-pT/jC5lqWK3YGDq8dQwgKoa6anqAhMtG1x5JbnrOj9NdaLeBbCKBDQ+/Ykzk3nZ8o+0UMsaXNZo7IVL83VVjHg==", 28 | "license": "MIT", 29 | "dependencies": { 30 | "@cfworker/json-schema": "^4.0.2", 31 | "ansi-styles": "^5.0.0", 32 | "camelcase": "6", 33 | "decamelize": "1.2.0", 34 | "js-tiktoken": "^1.0.12", 35 | "langsmith": ">=0.2.8 <0.4.0", 36 | "mustache": "^4.2.0", 37 | "p-queue": "^6.6.2", 38 | "p-retry": "4", 39 | "uuid": "^10.0.0", 40 | "zod": "^3.22.4", 41 | "zod-to-json-schema": "^3.22.3" 42 | }, 43 | "engines": { 44 | "node": ">=18" 45 | } 46 | }, 47 | "node_modules/@langchain/openai": { 48 | "version": "0.4.4", 49 | "resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.4.4.tgz", 50 | "integrity": "sha512-UZybJeMd8+UX7Kn47kuFYfqKdBCeBUWNqDtmAr6ZUIMMnlsNIb6MkrEEhGgAEjGCpdT4CU8U/DyyddTz+JayOQ==", 51 | "license": "MIT", 52 | "peer": true, 53 | "dependencies": { 54 | "js-tiktoken": "^1.0.12", 55 | "openai": "^4.77.0", 56 | "zod": "^3.22.4", 57 | "zod-to-json-schema": "^3.22.3" 58 | }, 59 | "engines": { 60 | "node": ">=18" 61 | }, 62 | "peerDependencies": { 63 | "@langchain/core": ">=0.3.39 <0.4.0" 64 | } 65 | }, 66 | "node_modules/@types/node": { 67 | "version": "18.19.79", 68 | "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.79.tgz", 69 | "integrity": 
"sha512-90K8Oayimbctc5zTPHPfZloc/lGVs7f3phUAAMcTgEPtg8kKquGZDERC8K4vkBYkQQh48msiYUslYtxTWvqcAg==", 70 | "license": "MIT", 71 | "peer": true, 72 | "dependencies": { 73 | "undici-types": "~5.26.4" 74 | } 75 | }, 76 | "node_modules/@types/node-fetch": { 77 | "version": "2.6.12", 78 | "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz", 79 | "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==", 80 | "license": "MIT", 81 | "peer": true, 82 | "dependencies": { 83 | "@types/node": "*", 84 | "form-data": "^4.0.0" 85 | } 86 | }, 87 | "node_modules/@types/retry": { 88 | "version": "0.12.0", 89 | "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", 90 | "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", 91 | "license": "MIT" 92 | }, 93 | "node_modules/@types/uuid": { 94 | "version": "10.0.0", 95 | "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", 96 | "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", 97 | "license": "MIT" 98 | }, 99 | "node_modules/abort-controller": { 100 | "version": "3.0.0", 101 | "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", 102 | "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", 103 | "license": "MIT", 104 | "peer": true, 105 | "dependencies": { 106 | "event-target-shim": "^5.0.0" 107 | }, 108 | "engines": { 109 | "node": ">=6.5" 110 | } 111 | }, 112 | "node_modules/agentkeepalive": { 113 | "version": "4.6.0", 114 | "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", 115 | "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", 116 | "license": "MIT", 117 | "peer": true, 118 | "dependencies": { 119 | "humanize-ms": 
"^1.2.1" 120 | }, 121 | "engines": { 122 | "node": ">= 8.0.0" 123 | } 124 | }, 125 | "node_modules/ansi-styles": { 126 | "version": "5.2.0", 127 | "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", 128 | "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", 129 | "license": "MIT", 130 | "engines": { 131 | "node": ">=10" 132 | }, 133 | "funding": { 134 | "url": "https://github.com/chalk/ansi-styles?sponsor=1" 135 | } 136 | }, 137 | "node_modules/asynckit": { 138 | "version": "0.4.0", 139 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 140 | "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", 141 | "license": "MIT", 142 | "peer": true 143 | }, 144 | "node_modules/base64-js": { 145 | "version": "1.5.1", 146 | "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", 147 | "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", 148 | "funding": [ 149 | { 150 | "type": "github", 151 | "url": "https://github.com/sponsors/feross" 152 | }, 153 | { 154 | "type": "patreon", 155 | "url": "https://www.patreon.com/feross" 156 | }, 157 | { 158 | "type": "consulting", 159 | "url": "https://feross.org/support" 160 | } 161 | ], 162 | "license": "MIT" 163 | }, 164 | "node_modules/binary-search": { 165 | "version": "1.3.6", 166 | "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz", 167 | "integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==", 168 | "license": "CC0-1.0" 169 | }, 170 | "node_modules/call-bind-apply-helpers": { 171 | "version": "1.0.2", 172 | "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", 173 | "integrity": 
"sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", 174 | "license": "MIT", 175 | "peer": true, 176 | "dependencies": { 177 | "es-errors": "^1.3.0", 178 | "function-bind": "^1.1.2" 179 | }, 180 | "engines": { 181 | "node": ">= 0.4" 182 | } 183 | }, 184 | "node_modules/camelcase": { 185 | "version": "6.3.0", 186 | "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", 187 | "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", 188 | "license": "MIT", 189 | "engines": { 190 | "node": ">=10" 191 | }, 192 | "funding": { 193 | "url": "https://github.com/sponsors/sindresorhus" 194 | } 195 | }, 196 | "node_modules/chalk": { 197 | "version": "4.1.2", 198 | "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", 199 | "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", 200 | "license": "MIT", 201 | "dependencies": { 202 | "ansi-styles": "^4.1.0", 203 | "supports-color": "^7.1.0" 204 | }, 205 | "engines": { 206 | "node": ">=10" 207 | }, 208 | "funding": { 209 | "url": "https://github.com/chalk/chalk?sponsor=1" 210 | } 211 | }, 212 | "node_modules/chalk/node_modules/ansi-styles": { 213 | "version": "4.3.0", 214 | "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", 215 | "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", 216 | "license": "MIT", 217 | "dependencies": { 218 | "color-convert": "^2.0.1" 219 | }, 220 | "engines": { 221 | "node": ">=8" 222 | }, 223 | "funding": { 224 | "url": "https://github.com/chalk/ansi-styles?sponsor=1" 225 | } 226 | }, 227 | "node_modules/color-convert": { 228 | "version": "2.0.1", 229 | "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", 230 | "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", 231 | "license": "MIT", 232 | "dependencies": { 233 | "color-name": "~1.1.4" 234 | }, 235 | "engines": { 236 | "node": ">=7.0.0" 237 | } 238 | }, 239 | "node_modules/color-name": { 240 | "version": "1.1.4", 241 | "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", 242 | "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", 243 | "license": "MIT" 244 | }, 245 | "node_modules/combined-stream": { 246 | "version": "1.0.8", 247 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", 248 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", 249 | "license": "MIT", 250 | "peer": true, 251 | "dependencies": { 252 | "delayed-stream": "~1.0.0" 253 | }, 254 | "engines": { 255 | "node": ">= 0.8" 256 | } 257 | }, 258 | "node_modules/commander": { 259 | "version": "10.0.1", 260 | "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", 261 | "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", 262 | "license": "MIT", 263 | "engines": { 264 | "node": ">=14" 265 | } 266 | }, 267 | "node_modules/console-table-printer": { 268 | "version": "2.12.1", 269 | "resolved": "https://registry.npmjs.org/console-table-printer/-/console-table-printer-2.12.1.tgz", 270 | "integrity": "sha512-wKGOQRRvdnd89pCeH96e2Fn4wkbenSP6LMHfjfyNLMbGuHEFbMqQNuxXqd0oXG9caIOQ1FTvc5Uijp9/4jujnQ==", 271 | "license": "MIT", 272 | "dependencies": { 273 | "simple-wcswidth": "^1.0.1" 274 | } 275 | }, 276 | "node_modules/decamelize": { 277 | "version": "1.2.0", 278 | "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", 279 | "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", 280 | "license": 
"MIT", 281 | "engines": { 282 | "node": ">=0.10.0" 283 | } 284 | }, 285 | "node_modules/delayed-stream": { 286 | "version": "1.0.0", 287 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 288 | "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", 289 | "license": "MIT", 290 | "peer": true, 291 | "engines": { 292 | "node": ">=0.4.0" 293 | } 294 | }, 295 | "node_modules/dotenv": { 296 | "version": "16.4.7", 297 | "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", 298 | "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", 299 | "license": "BSD-2-Clause", 300 | "engines": { 301 | "node": ">=12" 302 | }, 303 | "funding": { 304 | "url": "https://dotenvx.com" 305 | } 306 | }, 307 | "node_modules/dunder-proto": { 308 | "version": "1.0.1", 309 | "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", 310 | "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", 311 | "license": "MIT", 312 | "peer": true, 313 | "dependencies": { 314 | "call-bind-apply-helpers": "^1.0.1", 315 | "es-errors": "^1.3.0", 316 | "gopd": "^1.2.0" 317 | }, 318 | "engines": { 319 | "node": ">= 0.4" 320 | } 321 | }, 322 | "node_modules/es-define-property": { 323 | "version": "1.0.1", 324 | "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", 325 | "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", 326 | "license": "MIT", 327 | "peer": true, 328 | "engines": { 329 | "node": ">= 0.4" 330 | } 331 | }, 332 | "node_modules/es-errors": { 333 | "version": "1.3.0", 334 | "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", 335 | "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", 336 | 
"license": "MIT", 337 | "peer": true, 338 | "engines": { 339 | "node": ">= 0.4" 340 | } 341 | }, 342 | "node_modules/es-object-atoms": { 343 | "version": "1.1.1", 344 | "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", 345 | "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", 346 | "license": "MIT", 347 | "peer": true, 348 | "dependencies": { 349 | "es-errors": "^1.3.0" 350 | }, 351 | "engines": { 352 | "node": ">= 0.4" 353 | } 354 | }, 355 | "node_modules/es-set-tostringtag": { 356 | "version": "2.1.0", 357 | "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", 358 | "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", 359 | "license": "MIT", 360 | "peer": true, 361 | "dependencies": { 362 | "es-errors": "^1.3.0", 363 | "get-intrinsic": "^1.2.6", 364 | "has-tostringtag": "^1.0.2", 365 | "hasown": "^2.0.2" 366 | }, 367 | "engines": { 368 | "node": ">= 0.4" 369 | } 370 | }, 371 | "node_modules/event-target-shim": { 372 | "version": "5.0.1", 373 | "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", 374 | "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", 375 | "license": "MIT", 376 | "peer": true, 377 | "engines": { 378 | "node": ">=6" 379 | } 380 | }, 381 | "node_modules/eventemitter3": { 382 | "version": "4.0.7", 383 | "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", 384 | "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", 385 | "license": "MIT" 386 | }, 387 | "node_modules/form-data": { 388 | "version": "4.0.2", 389 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", 390 | "integrity": 
"sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", 391 | "license": "MIT", 392 | "peer": true, 393 | "dependencies": { 394 | "asynckit": "^0.4.0", 395 | "combined-stream": "^1.0.8", 396 | "es-set-tostringtag": "^2.1.0", 397 | "mime-types": "^2.1.12" 398 | }, 399 | "engines": { 400 | "node": ">= 6" 401 | } 402 | }, 403 | "node_modules/form-data-encoder": { 404 | "version": "1.7.2", 405 | "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", 406 | "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==", 407 | "license": "MIT", 408 | "peer": true 409 | }, 410 | "node_modules/formdata-node": { 411 | "version": "4.4.1", 412 | "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", 413 | "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", 414 | "license": "MIT", 415 | "peer": true, 416 | "dependencies": { 417 | "node-domexception": "1.0.0", 418 | "web-streams-polyfill": "4.0.0-beta.3" 419 | }, 420 | "engines": { 421 | "node": ">= 12.20" 422 | } 423 | }, 424 | "node_modules/function-bind": { 425 | "version": "1.1.2", 426 | "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", 427 | "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", 428 | "license": "MIT", 429 | "peer": true, 430 | "funding": { 431 | "url": "https://github.com/sponsors/ljharb" 432 | } 433 | }, 434 | "node_modules/get-intrinsic": { 435 | "version": "1.3.0", 436 | "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", 437 | "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", 438 | "license": "MIT", 439 | "peer": true, 440 | "dependencies": { 441 | "call-bind-apply-helpers": "^1.0.2", 442 | "es-define-property": 
"^1.0.1", 443 | "es-errors": "^1.3.0", 444 | "es-object-atoms": "^1.1.1", 445 | "function-bind": "^1.1.2", 446 | "get-proto": "^1.0.1", 447 | "gopd": "^1.2.0", 448 | "has-symbols": "^1.1.0", 449 | "hasown": "^2.0.2", 450 | "math-intrinsics": "^1.1.0" 451 | }, 452 | "engines": { 453 | "node": ">= 0.4" 454 | }, 455 | "funding": { 456 | "url": "https://github.com/sponsors/ljharb" 457 | } 458 | }, 459 | "node_modules/get-proto": { 460 | "version": "1.0.1", 461 | "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", 462 | "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", 463 | "license": "MIT", 464 | "peer": true, 465 | "dependencies": { 466 | "dunder-proto": "^1.0.1", 467 | "es-object-atoms": "^1.0.0" 468 | }, 469 | "engines": { 470 | "node": ">= 0.4" 471 | } 472 | }, 473 | "node_modules/gopd": { 474 | "version": "1.2.0", 475 | "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", 476 | "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", 477 | "license": "MIT", 478 | "peer": true, 479 | "engines": { 480 | "node": ">= 0.4" 481 | }, 482 | "funding": { 483 | "url": "https://github.com/sponsors/ljharb" 484 | } 485 | }, 486 | "node_modules/has-flag": { 487 | "version": "4.0.0", 488 | "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", 489 | "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", 490 | "license": "MIT", 491 | "engines": { 492 | "node": ">=8" 493 | } 494 | }, 495 | "node_modules/has-symbols": { 496 | "version": "1.1.0", 497 | "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", 498 | "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", 499 | "license": "MIT", 500 | "peer": true, 501 | "engines": { 502 | "node": ">= 0.4" 503 | }, 504 | "funding": { 505 | 
"url": "https://github.com/sponsors/ljharb" 506 | } 507 | }, 508 | "node_modules/has-tostringtag": { 509 | "version": "1.0.2", 510 | "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", 511 | "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", 512 | "license": "MIT", 513 | "peer": true, 514 | "dependencies": { 515 | "has-symbols": "^1.0.3" 516 | }, 517 | "engines": { 518 | "node": ">= 0.4" 519 | }, 520 | "funding": { 521 | "url": "https://github.com/sponsors/ljharb" 522 | } 523 | }, 524 | "node_modules/hasown": { 525 | "version": "2.0.2", 526 | "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", 527 | "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", 528 | "license": "MIT", 529 | "peer": true, 530 | "dependencies": { 531 | "function-bind": "^1.1.2" 532 | }, 533 | "engines": { 534 | "node": ">= 0.4" 535 | } 536 | }, 537 | "node_modules/humanize-ms": { 538 | "version": "1.2.1", 539 | "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", 540 | "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", 541 | "license": "MIT", 542 | "peer": true, 543 | "dependencies": { 544 | "ms": "^2.0.0" 545 | } 546 | }, 547 | "node_modules/is-any-array": { 548 | "version": "2.0.1", 549 | "resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz", 550 | "integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==", 551 | "license": "MIT" 552 | }, 553 | "node_modules/js-tiktoken": { 554 | "version": "1.0.19", 555 | "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.19.tgz", 556 | "integrity": "sha512-XC63YQeEcS47Y53gg950xiZ4IWmkfMe4p2V9OSaBt26q+p47WHn18izuXzSclCI73B7yGqtfRsT6jcZQI0y08g==", 557 | "license": "MIT", 558 | "dependencies": { 559 | "base64-js": 
"^1.5.1" 560 | } 561 | }, 562 | "node_modules/langsmith": { 563 | "version": "0.3.12", 564 | "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.3.12.tgz", 565 | "integrity": "sha512-e4qWM27hxEr8GfO6dgXrc3W8La+wxkX1zEtMhxhqS/Th2ujTt5OH7x0uXfXFDqCv9WaC3nquo1Y2s4vpYmLLtg==", 566 | "license": "MIT", 567 | "dependencies": { 568 | "@types/uuid": "^10.0.0", 569 | "chalk": "^4.1.2", 570 | "console-table-printer": "^2.12.1", 571 | "p-queue": "^6.6.2", 572 | "p-retry": "4", 573 | "semver": "^7.6.3", 574 | "uuid": "^10.0.0" 575 | }, 576 | "peerDependencies": { 577 | "openai": "*" 578 | }, 579 | "peerDependenciesMeta": { 580 | "openai": { 581 | "optional": true 582 | } 583 | } 584 | }, 585 | "node_modules/math-intrinsics": { 586 | "version": "1.1.0", 587 | "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", 588 | "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", 589 | "license": "MIT", 590 | "peer": true, 591 | "engines": { 592 | "node": ">= 0.4" 593 | } 594 | }, 595 | "node_modules/mime-db": { 596 | "version": "1.52.0", 597 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 598 | "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", 599 | "license": "MIT", 600 | "peer": true, 601 | "engines": { 602 | "node": ">= 0.6" 603 | } 604 | }, 605 | "node_modules/mime-types": { 606 | "version": "2.1.35", 607 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", 608 | "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", 609 | "license": "MIT", 610 | "peer": true, 611 | "dependencies": { 612 | "mime-db": "1.52.0" 613 | }, 614 | "engines": { 615 | "node": ">= 0.6" 616 | } 617 | }, 618 | "node_modules/ml-array-mean": { 619 | "version": "1.1.6", 620 | "resolved": 
"https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz", 621 | "integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==", 622 | "license": "MIT", 623 | "dependencies": { 624 | "ml-array-sum": "^1.1.6" 625 | } 626 | }, 627 | "node_modules/ml-array-sum": { 628 | "version": "1.1.6", 629 | "resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz", 630 | "integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==", 631 | "license": "MIT", 632 | "dependencies": { 633 | "is-any-array": "^2.0.0" 634 | } 635 | }, 636 | "node_modules/ml-distance": { 637 | "version": "4.0.1", 638 | "resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.1.tgz", 639 | "integrity": "sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw==", 640 | "license": "MIT", 641 | "dependencies": { 642 | "ml-array-mean": "^1.1.6", 643 | "ml-distance-euclidean": "^2.0.0", 644 | "ml-tree-similarity": "^1.0.0" 645 | } 646 | }, 647 | "node_modules/ml-distance-euclidean": { 648 | "version": "2.0.0", 649 | "resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz", 650 | "integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q==", 651 | "license": "MIT" 652 | }, 653 | "node_modules/ml-tree-similarity": { 654 | "version": "1.0.0", 655 | "resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz", 656 | "integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==", 657 | "license": "MIT", 658 | "dependencies": { 659 | "binary-search": "^1.3.5", 660 | "num-sort": "^2.0.0" 661 | } 662 | }, 663 | "node_modules/ms": { 664 | "version": "2.1.3", 665 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 666 | "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", 667 | "license": "MIT", 668 | "peer": true 669 | }, 670 | "node_modules/mustache": { 671 | "version": "4.2.0", 672 | "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", 673 | "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==", 674 | "license": "MIT", 675 | "bin": { 676 | "mustache": "bin/mustache" 677 | } 678 | }, 679 | "node_modules/node-domexception": { 680 | "version": "1.0.0", 681 | "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", 682 | "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", 683 | "funding": [ 684 | { 685 | "type": "github", 686 | "url": "https://github.com/sponsors/jimmywarting" 687 | }, 688 | { 689 | "type": "github", 690 | "url": "https://paypal.me/jimmywarting" 691 | } 692 | ], 693 | "license": "MIT", 694 | "peer": true, 695 | "engines": { 696 | "node": ">=10.5.0" 697 | } 698 | }, 699 | "node_modules/node-fetch": { 700 | "version": "2.7.0", 701 | "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", 702 | "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", 703 | "license": "MIT", 704 | "dependencies": { 705 | "whatwg-url": "^5.0.0" 706 | }, 707 | "engines": { 708 | "node": "4.x || >=6.0.0" 709 | }, 710 | "peerDependencies": { 711 | "encoding": "^0.1.0" 712 | }, 713 | "peerDependenciesMeta": { 714 | "encoding": { 715 | "optional": true 716 | } 717 | } 718 | }, 719 | "node_modules/num-sort": { 720 | "version": "2.1.0", 721 | "resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz", 722 | "integrity": "sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==", 723 | "license": "MIT", 724 | "engines": { 725 | "node": ">=8" 726 | }, 727 | 
"funding": { 728 | "url": "https://github.com/sponsors/sindresorhus" 729 | } 730 | }, 731 | "node_modules/openai": { 732 | "version": "4.86.2", 733 | "resolved": "https://registry.npmjs.org/openai/-/openai-4.86.2.tgz", 734 | "integrity": "sha512-nvYeFjmjdSu6/msld+22JoUlCICNk/lUFpSMmc6KNhpeNLpqL70TqbD/8Vura/tFmYqHKW0trcjgPwUpKSPwaA==", 735 | "license": "Apache-2.0", 736 | "peer": true, 737 | "dependencies": { 738 | "@types/node": "^18.11.18", 739 | "@types/node-fetch": "^2.6.4", 740 | "abort-controller": "^3.0.0", 741 | "agentkeepalive": "^4.2.1", 742 | "form-data-encoder": "1.7.2", 743 | "formdata-node": "^4.3.2", 744 | "node-fetch": "^2.6.7" 745 | }, 746 | "bin": { 747 | "openai": "bin/cli" 748 | }, 749 | "peerDependencies": { 750 | "ws": "^8.18.0", 751 | "zod": "^3.23.8" 752 | }, 753 | "peerDependenciesMeta": { 754 | "ws": { 755 | "optional": true 756 | }, 757 | "zod": { 758 | "optional": true 759 | } 760 | } 761 | }, 762 | "node_modules/p-finally": { 763 | "version": "1.0.0", 764 | "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", 765 | "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", 766 | "license": "MIT", 767 | "engines": { 768 | "node": ">=4" 769 | } 770 | }, 771 | "node_modules/p-queue": { 772 | "version": "6.6.2", 773 | "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", 774 | "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", 775 | "license": "MIT", 776 | "dependencies": { 777 | "eventemitter3": "^4.0.4", 778 | "p-timeout": "^3.2.0" 779 | }, 780 | "engines": { 781 | "node": ">=8" 782 | }, 783 | "funding": { 784 | "url": "https://github.com/sponsors/sindresorhus" 785 | } 786 | }, 787 | "node_modules/p-retry": { 788 | "version": "4.6.2", 789 | "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", 790 | "integrity": 
"sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", 791 | "license": "MIT", 792 | "dependencies": { 793 | "@types/retry": "0.12.0", 794 | "retry": "^0.13.1" 795 | }, 796 | "engines": { 797 | "node": ">=8" 798 | } 799 | }, 800 | "node_modules/p-timeout": { 801 | "version": "3.2.0", 802 | "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", 803 | "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", 804 | "license": "MIT", 805 | "dependencies": { 806 | "p-finally": "^1.0.0" 807 | }, 808 | "engines": { 809 | "node": ">=8" 810 | } 811 | }, 812 | "node_modules/retry": { 813 | "version": "0.13.1", 814 | "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", 815 | "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", 816 | "license": "MIT", 817 | "engines": { 818 | "node": ">= 4" 819 | } 820 | }, 821 | "node_modules/semver": { 822 | "version": "7.7.1", 823 | "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", 824 | "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", 825 | "license": "ISC", 826 | "bin": { 827 | "semver": "bin/semver.js" 828 | }, 829 | "engines": { 830 | "node": ">=10" 831 | } 832 | }, 833 | "node_modules/simple-wcswidth": { 834 | "version": "1.0.1", 835 | "resolved": "https://registry.npmjs.org/simple-wcswidth/-/simple-wcswidth-1.0.1.tgz", 836 | "integrity": "sha512-xMO/8eNREtaROt7tJvWJqHBDTMFN4eiQ5I4JRMuilwfnFcV5W9u7RUkueNkdw0jPqGMX36iCywelS5yilTuOxg==", 837 | "license": "MIT" 838 | }, 839 | "node_modules/supports-color": { 840 | "version": "7.2.0", 841 | "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", 842 | "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", 843 | "license": "MIT", 844 | 
"dependencies": { 845 | "has-flag": "^4.0.0" 846 | }, 847 | "engines": { 848 | "node": ">=8" 849 | } 850 | }, 851 | "node_modules/taot-ts": { 852 | "version": "0.1.6", 853 | "resolved": "https://registry.npmjs.org/taot-ts/-/taot-ts-0.1.6.tgz", 854 | "integrity": "sha512-21hRcxbjWqz3Qq4oHtyaANri9zXBd5GehpHmUVFvnwBQjT87bfScG7xauD2GReGVP5d7pvCvt73kL9uLfaYAKQ==", 855 | "license": "MIT", 856 | "dependencies": { 857 | "@langchain/core": "^0.1.7", 858 | "taot-ts": "^0.1.3" 859 | }, 860 | "engines": { 861 | "node": ">=16.0.0" 862 | }, 863 | "peerDependencies": { 864 | "@langchain/openai": ">=0.0.10", 865 | "dotenv": ">=8.0.0" 866 | } 867 | }, 868 | "node_modules/taot-ts/node_modules/@langchain/core": { 869 | "version": "0.1.63", 870 | "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.1.63.tgz", 871 | "integrity": "sha512-+fjyYi8wy6x1P+Ee1RWfIIEyxd9Ee9jksEwvrggPwwI/p45kIDTdYTblXsM13y4mNWTiACyLSdbwnPaxxdoz+w==", 872 | "license": "MIT", 873 | "dependencies": { 874 | "ansi-styles": "^5.0.0", 875 | "camelcase": "6", 876 | "decamelize": "1.2.0", 877 | "js-tiktoken": "^1.0.12", 878 | "langsmith": "~0.1.7", 879 | "ml-distance": "^4.0.0", 880 | "mustache": "^4.2.0", 881 | "p-queue": "^6.6.2", 882 | "p-retry": "4", 883 | "uuid": "^9.0.0", 884 | "zod": "^3.22.4", 885 | "zod-to-json-schema": "^3.22.3" 886 | }, 887 | "engines": { 888 | "node": ">=18" 889 | } 890 | }, 891 | "node_modules/taot-ts/node_modules/@langchain/core/node_modules/uuid": { 892 | "version": "9.0.1", 893 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", 894 | "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", 895 | "funding": [ 896 | "https://github.com/sponsors/broofa", 897 | "https://github.com/sponsors/ctavan" 898 | ], 899 | "license": "MIT", 900 | "bin": { 901 | "uuid": "dist/bin/uuid" 902 | } 903 | }, 904 | "node_modules/taot-ts/node_modules/langsmith": { 905 | "version": "0.1.68", 906 | "resolved": 
"https://registry.npmjs.org/langsmith/-/langsmith-0.1.68.tgz", 907 | "integrity": "sha512-otmiysWtVAqzMx3CJ4PrtUBhWRG5Co8Z4o7hSZENPjlit9/j3/vm3TSvbaxpDYakZxtMjhkcJTqrdYFipISEiQ==", 908 | "license": "MIT", 909 | "dependencies": { 910 | "@types/uuid": "^10.0.0", 911 | "commander": "^10.0.1", 912 | "p-queue": "^6.6.2", 913 | "p-retry": "4", 914 | "semver": "^7.6.3", 915 | "uuid": "^10.0.0" 916 | }, 917 | "peerDependencies": { 918 | "openai": "*" 919 | }, 920 | "peerDependenciesMeta": { 921 | "openai": { 922 | "optional": true 923 | } 924 | } 925 | }, 926 | "node_modules/tr46": { 927 | "version": "0.0.3", 928 | "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", 929 | "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", 930 | "license": "MIT" 931 | }, 932 | "node_modules/undici-types": { 933 | "version": "5.26.5", 934 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", 935 | "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", 936 | "license": "MIT", 937 | "peer": true 938 | }, 939 | "node_modules/uuid": { 940 | "version": "10.0.0", 941 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-10.0.0.tgz", 942 | "integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==", 943 | "funding": [ 944 | "https://github.com/sponsors/broofa", 945 | "https://github.com/sponsors/ctavan" 946 | ], 947 | "license": "MIT", 948 | "bin": { 949 | "uuid": "dist/bin/uuid" 950 | } 951 | }, 952 | "node_modules/web-streams-polyfill": { 953 | "version": "4.0.0-beta.3", 954 | "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", 955 | "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", 956 | "license": "MIT", 957 | "peer": true, 958 | "engines": { 959 | "node": ">= 14" 960 | } 961 | 
}, 962 | "node_modules/webidl-conversions": { 963 | "version": "3.0.1", 964 | "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", 965 | "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", 966 | "license": "BSD-2-Clause" 967 | }, 968 | "node_modules/whatwg-url": { 969 | "version": "5.0.0", 970 | "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", 971 | "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", 972 | "license": "MIT", 973 | "dependencies": { 974 | "tr46": "~0.0.3", 975 | "webidl-conversions": "^3.0.0" 976 | } 977 | }, 978 | "node_modules/zod": { 979 | "version": "3.24.2", 980 | "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz", 981 | "integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==", 982 | "license": "MIT", 983 | "funding": { 984 | "url": "https://github.com/sponsors/colinhacks" 985 | } 986 | }, 987 | "node_modules/zod-to-json-schema": { 988 | "version": "3.24.3", 989 | "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.3.tgz", 990 | "integrity": "sha512-HIAfWdYIt1sssHfYZFCXp4rU1w2r8hVVXYIlmoa0r0gABLs5di3RCqPU5DDROogVz1pAdYBaz7HK5n9pSUNs3A==", 991 | "license": "ISC", 992 | "peerDependencies": { 993 | "zod": "^3.24.1" 994 | } 995 | } 996 | } 997 | } 998 | --------------------------------------------------------------------------------