├── .gitignore ├── 1-agent ├── 1-agent.js └── notes.md ├── 2-langgraph ├── 1-agent.ts └── notes.md ├── 3-search ├── 1-regular-search.js ├── 2-agent-search.js └── notes.md ├── 4-persistance-streaming ├── 1-events.ts ├── 2-tokens.ts └── notes.md ├── 5-human ├── 1-interrupt.ts ├── 2-modify.ts ├── 3-timetravel.ts ├── 4-practice.ts └── notes.md ├── 6-writer ├── 1-writer.ts └── notes.md ├── 7-resources └── notes.md ├── README.md ├── package-lock.json └── package.json /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional stylelint cache 58 | .stylelintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm 
pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variable files 76 | .env 77 | .env.development.local 78 | .env.test.local 79 | .env.production.local 80 | .env.local 81 | 82 | # parcel-bundler cache (https://parceljs.org/) 83 | .cache 84 | .parcel-cache 85 | 86 | # Next.js build output 87 | .next 88 | out 89 | 90 | # Nuxt.js build / generate output 91 | .nuxt 92 | dist 93 | 94 | # Gatsby files 95 | .cache/ 96 | # Comment in the public line in if your project uses Gatsby and not Next.js 97 | # https://nextjs.org/blog/next-9-1#public-directory-support 98 | # public 99 | 100 | # vuepress build output 101 | .vuepress/dist 102 | 103 | # vuepress v2.x temp and cache directory 104 | .temp 105 | .cache 106 | 107 | # Docusaurus cache and generated files 108 | .docusaurus 109 | 110 | # Serverless directories 111 | .serverless/ 112 | 113 | # FuseBox cache 114 | .fusebox/ 115 | 116 | # DynamoDB Local files 117 | .dynamodb/ 118 | 119 | # TernJS port file 120 | .tern-port 121 | 122 | # Stores VSCode versions used for testing VSCode extensions 123 | .vscode-test 124 | 125 | # yarn v2 126 | .yarn/cache 127 | .yarn/unplugged 128 | .yarn/build-state.yml 129 | .yarn/install-state.gz 130 | .pnp.* 131 | -------------------------------------------------------------------------------- /1-agent/1-agent.js: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | 3 | // Based on https://til.simonwillison.net/llms/python-react-pattern 4 | 5 | // Set up an OpenAI model (Note the key is in our ENV) 6 | const client = new OpenAI(); 7 | 8 | const chatCompletion = await client.chat.completions.create({ 9 | model: 'gpt-3.5-turbo', 10 | messages: [{role: 'user', content: 'Hello world'}] 11 | }); 12 | 13 | 14 | // Check we are working ok 15 | console.log(chatCompletion.choices[0].message.content); 16 | 17 | 18 | // We will be reusing this block so leave it uncommented! 
19 | class Agent { 20 | #system = null; 21 | #messages = []; 22 | 23 | constructor(system) { 24 | this.#system = system; 25 | 26 | if (this.#system) { 27 | this.#messages.push({role: 'system', content: system}); 28 | } 29 | } 30 | 31 | async call(message) { 32 | this.#messages.push({role: 'user', content: message}); 33 | const result = await this.#execute(); 34 | this.#messages.push({role: 'assistant', content: result}); 35 | return result; 36 | } 37 | 38 | async getMessages() { 39 | return this.#messages; 40 | } 41 | 42 | async #execute() { 43 | const completion = await client.chat.completions.create({ 44 | model: 'gpt-4o', 45 | temperature: 0, 46 | messages: this.#messages 47 | }); 48 | 49 | return completion.choices[0].message.content; 50 | } 51 | } 52 | 53 | const prompt = ` 54 | You run in a loop of Thought, Action, PAUSE, Observation. 55 | At the end of the loop you output an Answer 56 | Use Thought to describe your thoughts about the question you have been asked. 57 | Use Action to run one of the actions available to you - then return PAUSE. 58 | Observation will be the result of running those actions. 59 | 60 | Your available actions are: 61 | 62 | calculate: 63 | e.g. calculate: 4 * 7 / 3 64 | Runs a calculation and returns the number 65 | 66 | averageDogWeight: 67 | e.g. averageDogWeight: Collie 68 | returns average weight of a dog when given the breed 69 | 70 | Example session: 71 | 72 | Question: How much does a Bulldog weigh? 
73 | Thought: I should look the dogs weight using average_dog_weight 74 | Action: average_dog_weight: Bulldog 75 | PAUSE 76 | 77 | You will be called again with this: 78 | 79 | Observation: A Bulldog weights 51 lbs 80 | 81 | You then output: 82 | 83 | Answer: A bulldog weights 51 lbs 84 | `.trim(); 85 | 86 | function calculate(what) { 87 | return eval(what); 88 | } 89 | 90 | function averageDogWeight(name) { 91 | let result = 'An average dog weights 50 lbs'; 92 | 93 | switch (name) { 94 | case 'Scottish Terrier': 95 | result = 'A Scottish Terriers average weight is 20 lbs'; 96 | break; 97 | case 'Border Collie': 98 | result = 'A Border Collies average weight is 37 lbs'; 99 | break; 100 | case 'Toy Poodle': 101 | result = 'A Toy Poodles average weight is 7 lbs'; 102 | break; 103 | default: 104 | break; 105 | } 106 | 107 | return result; 108 | } 109 | 110 | const knownActions = { 111 | calculate: calculate, 112 | averageDogWeight: averageDogWeight 113 | } 114 | 115 | 116 | /* 117 | const aBot1 = new Agent(prompt); 118 | 119 | // Let's try our agent out 120 | const result1 = await aBot1.call('How much does a Toy Poodle weigh?'); 121 | console.log(result1); 122 | 123 | // We can test if it's action would work out 124 | const result2 = averageDogWeight('Toy Poodle'); 125 | console.log(result2); 126 | 127 | // Now let's call it again with the reusult 128 | const nextPrompt1 = 'Observation: ' + result2; 129 | const result3 = await aBot1.call(nextPrompt1); 130 | console.log(result3); 131 | 132 | // We can check out the messages 133 | console.log(aBot1.getMessages()); 134 | */ 135 | 136 | /* 137 | // Ok Now let's try a more complex series of prompts 138 | const aBot2 = new Agent(prompt); // Let's reinitialize for a clean set of messages 139 | 140 | const question1 = 'I have 2 dogs, a border collie and a scottish terrier.\nWhat is their combined weight?' 
141 | 142 | const result4 = await aBot2.call(question1); 143 | console.log(result4); 144 | 145 | const nextPrompt2 = 'Observation: ' + averageDogWeight('Border Collie'); 146 | const result5 = await aBot2.call(nextPrompt2); 147 | console.log(result5); 148 | 149 | const nextPrompt3 = 'Observation: ' + averageDogWeight('Scottish Terrier'); 150 | const result6 = await aBot2.call(nextPrompt3); 151 | console.log(result6); 152 | 153 | const nextPrompt4 = 'Observation: ' + calculate('37 + 20'); 154 | const result7 = await aBot.call(nextPrompt4); 155 | console.log(result7); 156 | */ 157 | 158 | /* 159 | // Let's try a fully automated version of this 160 | async function query(question, maxTurns = 5) { 161 | const actionRegEx = /^Action: (\w+): (.*)$/ // This will find the action string 162 | const bot = new Agent(prompt); 163 | 164 | let i = 0; 165 | let nextPrompt = question; 166 | 167 | while (i < maxTurns) { 168 | i += 1; 169 | 170 | const result = await bot.call(nextPrompt); 171 | 172 | const actions = result 173 | .split('\n') 174 | .map(a => a.match(actionRegEx)) 175 | .filter(a => a !== null); 176 | 177 | if (actions.length > 0) { 178 | let [_, action, actionInput] = actions[0]; 179 | 180 | if (!(action in knownActions)) { 181 | throw new Error(`Unknown action: ${action}: ${actionInput}`); 182 | } 183 | 184 | console.log(` -- running ${action} ${actionInput}`); 185 | let observation = knownActions[action](actionInput); 186 | console.log('Observation:', observation); 187 | 188 | nextPrompt = `Observation: ${observation}`; 189 | } else { 190 | return; 191 | } 192 | } 193 | } 194 | 195 | const question2 = 'I have 2 dogs, a border collie and a scottish terrier.\nWhat is their combined weight?' 
196 | await query(question2); 197 | */ 198 | -------------------------------------------------------------------------------- /1-agent/notes.md: -------------------------------------------------------------------------------- 1 | # Build an agent from scratch 2 | In this lesson we will see this is not too hard, while doing this note what the LLM is being asked to do and what the runtime is doing. 3 | 4 | This agent will be based on the ReAct pattern. 5 | ``` 6 | ReAct (Reason + Act) 7 | ==================== 8 | thought Actions 9 | ------------- ------------- 10 | | | | | 11 | | v | v 12 | Reasoning LLM Env 13 | Traces 14 | ^ | ^ | 15 | | | | | 16 | ------------- ------------- 17 | Observations 18 | ``` 19 | - The LLM thinks about what to do 20 | - It decides an action to take 21 | - It then executes the action in an environment 22 | - This results in an observation 23 | - The observation then informs the next thought 24 | - This iteration continues until the LLM thinks it is done 25 | -------------------------------------------------------------------------------- /2-langgraph/1-agent.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from "@langchain/langgraph"; 2 | import { ChatOpenAI } from '@langchain/openai'; 3 | import { BaseMessage, SystemMessage, HumanMessage } from '@langchain/core/messages'; 4 | import { ToolMessage } from '@langchain/core/messages/tool'; 5 | import { TavilySearchResults } from '@langchain/community/tools/tavily_search'; 6 | import terminalImage from 'terminal-image'; 7 | 8 | const tools = [new TavilySearchResults({maxResults: 4})]; 9 | 10 | // Let's check the tool is set up 11 | console.log(tools[0].constructor.name); 12 | console.log(tools[0].name); 13 | 14 | // First we define the state 15 | const AgentState = Annotation.Root({ 16 | messages: Annotation({ 17 | reducer: (x, y) => x.concat(y), 18 | default: () => [] 19 | }) 20 | }); 21 | 22 | // Now the system 
prompt 23 | const system = `You are a smart research assistant. Use the search engine to look up information. 24 | You are allowed to make multiple calls (either together or in sequence). 25 | Only look up information when you are sure of what you want. 26 | If you need to look up some information before asking a follow up question, you are allowed to do that!`; 27 | 28 | // Now the model we will use 29 | const model = new ChatOpenAI({model: 'gpt-3.5-turbo'}).bindTools(tools); 30 | 31 | // Here is where we construct the graph 32 | const graph = new StateGraph(AgentState) 33 | .addNode('llm', callOpenAi) 34 | .addNode('action', takeAction) 35 | .addConditionalEdges( 36 | 'llm', 37 | existsAction, 38 | {true: 'action', false: END} 39 | ) 40 | .addEdge('action','llm') 41 | .setEntryPoint('llm') 42 | .compile(); 43 | 44 | // We can visualise the graph 45 | 46 | const graphImg = await graph.getGraph().drawMermaidPng(); 47 | const graphImgBuffer = await graphImg.arrayBuffer(); 48 | console.log(await terminalImage.buffer(new Uint8Array(graphImgBuffer))); 49 | 50 | 51 | /* 52 | // Now lets call the agent with a question 53 | const messages1 = [new HumanMessage('What is the weather in sf?')]; 54 | 55 | const result1 = await graph.invoke({messages: messages1}); 56 | console.log('Final result: ' + JSON.stringify(result1)); 57 | console.log('\nResult: ' + result1.messages[result1.messages.length-1].content); 58 | */ 59 | 60 | /* 61 | // Let's try a more complex question 62 | const messages2 = [new HumanMessage('What is the weather in SF and LA?')]; 63 | 64 | const result2 = await graph.invoke({messages: messages2}); 65 | console.log('Result: ' + result2.messages[result2.messages.length-1].content); 66 | */ 67 | 68 | /* 69 | // Let's try a complex question where there is a demendency between the question results 70 | const messages3 = [new HumanMessage('Who won the super bowl in 2024? ' + 71 | 'In what state is the winning team headquarters located? 
' + 72 | 'What is the GDP of that state? Answer each question.')]; 73 | 74 | const result3 = await graph.invoke({messages: messages3}); 75 | console.log('Result: ' + result3.messages[result3.messages.length-1].content); 76 | */ 77 | 78 | 79 | 80 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 81 | // This is the function for the conditional edge 82 | function existsAction(state) { 83 | const result = state.messages[state.messages.length-1]; 84 | 85 | return result.tool_calls.length > 0; 86 | } 87 | 88 | // This is the function for the llm node 89 | async function callOpenAi(state) { 90 | let messages = state.messages; 91 | 92 | if (system) { 93 | messages = [new SystemMessage(system), ...messages]; 94 | } 95 | 96 | const message = await model.invoke(messages); 97 | return {messages: [message]}; 98 | } 99 | 100 | // This is the function for the action node 101 | async function takeAction(state) { 102 | const results = []; 103 | const toolCalls = state.messages[state.messages.length-1].tool_calls; 104 | let result = null; 105 | 106 | for(const t of toolCalls) { 107 | console.log('Calling: ' + JSON.stringify(t)); 108 | if (tools.some(tool => tool.name === t.name)) { 109 | const tool = tools.find(tool => tool.name === t.name); 110 | result = await tool.invoke(t.args); 111 | } else { 112 | console.log('\n...bad tool name...'); 113 | result = 'bad tool name, retry'; 114 | } 115 | results.push(new ToolMessage({tool_call_id: t.id, name: t.name, content: result.toString()})); 116 | } 117 | 118 | console.log('Back to the model!'); 119 | return {messages: results}; 120 | } 121 | -------------------------------------------------------------------------------- /2-langgraph/notes.md: -------------------------------------------------------------------------------- 1 | # Langraph Components 2 | In this lesson we are going to look at how what we did in the last session can be replicated in `LangGraph` components. Let's remind ourselves of what we did previously in code. 
3 | 4 | ## Break Down 5 | ``` 6 | SYSTEM: You run in a loop of Thought, 7 | ------------------------- Action, PAUSE, Observation 8 | User | What is the ... Poodle? | ... 9 | ------------------------- Your available actions are... 10 | | 11 | v calculate: e.g. 4*7/3 12 | ------------------------- ... 13 | Prompt | System: You are a ... | Example session: 14 | ------------------------- User: ...weight of collie... 15 | | What is the ... Poodle? | 16 | ------------------------- 17 | | Obs: 37 |<---- 18 | ------------------------- | 19 | | | 20 | | | 21 | v | 22 | ------------------------ | 23 | LLM | LLM | | Thought: To find the combined weights 24 | ------------------------ | of a collie and a poodle I 25 | | | first need the average weight 26 | v | of a collie. 27 | ------------------------- | 28 | | Action: call a tool | | Action: averageDogWeight Toy Poodle\n 29 | ------------------------- | PAUSE 30 | | | 31 | v | 32 | / \ | 33 | < > | 34 | \ / | 35 | / V \ | 36 | / \ | 37 | / \ | 38 | return -------- | 39 | Tool | Action | | 40 | -------- | 41 | | | 42 | v | 43 | -------- | 44 | Observation | 37 |________| 45 | -------- 46 | ``` 47 | This was encoded in our `query` function: 48 | ```javascript 49 | async function query(question, maxTurns = 5) { 50 | const actionRegEx = /^Action: (\w+): (.*)$/ // This will find the action string 51 | const bot = new Agent(prompt); 52 | 53 | let i = 0; 54 | let nextPrompt = question; 55 | 56 | while (i < maxTurns) { 57 | i += 1; 58 | 59 | const result = await bot.call(nextPrompt); 60 | 61 | const actions = result 62 | .split('\n') 63 | .map(a => a.match(actionRegEx)) 64 | .filter(a => a !== null); 65 | 66 | if (actions.length > 0) { 67 | let [_, action, actionInput] = actions[0]; 68 | 69 | if (!(action in knownActions)) { 70 | throw new Error(`Unknown action: ${action}: ${actionInput}`); 71 | } 72 | 73 | console.log(` -- running ${action} ${actionInput}`); 74 | let observation = knownActions[action](actionInput); 75 | 
console.log('Observation:', observation); 76 | 77 | nextPrompt = `Observation: ${observation}`; 78 | } else { 79 | return; 80 | } 81 | } 82 | } 83 | ``` 84 | And in the tools we provided: 85 | ```javascript 86 | function calculate(what) { 87 | return eval(what); 88 | } 89 | 90 | function averageDogWeight(name) { 91 | let result = 'An average dog weights 50 lbs'; 92 | 93 | switch (name) { 94 | case 'Scottish Terrier': 95 | result = 'A Scottish Terriers average weight is 20 lbs'; 96 | break; 97 | case 'Border Collie': 98 | result = 'A Border Collies average weight is 37 lbs'; 99 | break; 100 | case 'Toy Poodle': 101 | result = 'A Toy Poodles average weight is 7 lbs'; 102 | break; 103 | default: 104 | break; 105 | } 106 | 107 | return result; 108 | } 109 | ``` 110 | But what does all of this look like in terms of `LangChain` components? 111 | ## LangChain components 112 | ### LangChain: Prompts 113 | Prompt templates allow for reusable prompts: 114 | ```javascript 115 | import { ChatPromptTemplate } from '@langchain/core/prompts'; 116 | 117 | const promptTemplate = ChatPromptTemplate.fromTemplate( 118 | 'Tell me a {adjective} joke about {content}.' 
119 | ); 120 | ``` 121 | There are also prompts for agents in the hub: 122 | ```javascript 123 | import { pull } from 'langchain/hub'; 124 | 125 | const prompt = await pull('hwchase17/react'); 126 | // https://smith.langchain.com/hub/hwchase17/react 127 | ``` 128 | ### LangChain: Tools 129 | There are various tools we can use directly from LangChain, for example the Tavily tool which we will use later: 130 | ```javascript 131 | // get a tool from the library 132 | import { TavilySearchResults } from '@langchain/community/tools/tavily_search'; 133 | 134 | const tool = new TavilySearchResults({ 135 | maxResults: 2 136 | }); 137 | 138 | tools = [..., tool]; 139 | const functions = tools.map(convertToOpenAIFunction); 140 | model = model.bind({functions: functions}); 141 | ``` 142 | ## New in LangGraph 143 | In the example we did in the previous lesson the majority of the code written was in the loop itself. LangGraph lets us describe and control that flow, especially loops, it brings: 144 | - Cyclic graphs 145 | - Persistance 146 | - Human-in-the-loop 147 | ### Graphs 148 | If you look at all of the approaches described in the [introduction](../README.md) they are all graphs so LangGraph allows for the creation of these. 149 | - LangGraph extends LangChain to support graphs. 150 | - Single and Multi-agent flows can be described and represented as graphs. 151 | - Allows for extremely controlled flows. 152 | - Built in-persistence allows for human-in-the-loop workflows. 
153 | Some of the key features that LangGraph supports are familiar from any directed graph modelling, it allows for: 154 | ``` 155 | --- --- 156 | | | | | Nodes: Agents or functions 157 | --- --- 158 | 159 | ---------> Edges: connecting Nodes 160 | 161 | ^ 162 | / \---> 163 | ---->< > Conditional edges: decisions 164 | \ /---> 165 | V 166 | ``` 167 | So to create our previous code in LangGraph we simply need the following set up: 168 | 169 | ``` 170 | | Entrypoint: starting node 171 | v 172 | --- 173 | Agent node | |<------- 174 | --- | 175 | | | 176 | v | Edge 177 | / \ | 178 | Conditional edge < > | 179 | \ / | 180 | / V \ action | 181 | / \ | 182 | --- --- | 183 | End node | X | | |----- Function node 184 | --- --- 185 | ``` 186 | ### Data / State 187 | ``` 188 | ----------------------------------------- 189 | | Agent State | 190 | | ------------------------- | 191 | | | System: You are a ... | Previous | 192 | | ------------------------- graph | 193 | | | What is the ... Poodle? | goes | 194 | | ------------------------- here | 195 | | | Obs: 37 | | 196 | | ------------------------- | 197 | ----------------------------------------- 198 | ``` 199 | - Agent state is accessible to all parts of the graph 200 | - It is local to the graph 201 | - Can be stored in a persistence layer 202 | A simple of example of this is: 203 | ```typescript 204 | const AgentState = Annotation.Root({ 205 | messages: Annotation({ 206 | reducer: (x, y) => x.concat(y), 207 | default: () => [] 208 | }) 209 | }) 210 | ``` 211 | This means our `AgentState` contains a variable `messages` which is an array of `BaseMessages`. The `reducer` means when we get a new message we add the message to the array. 

A more complex example is:
```typescript
const AgentState = Annotation.Root({
  input: Annotation<string>,
  chatHistory: Annotation<BaseMessage[]>,
  agentOutcome: Annotation<AgentAction | AgentFinish>,
  intermediateSteps: Annotation<AgentStep[]>({
    reducer: (x, y) => x.concat(y),
    default: () => []
  }),
})
```
In the complex example the first three variables will be overwritten by any updates, whereas the last variable adds steps as we go.
## TypeScript Code
**Note:** as this now uses typed variables we need to use `TypeScript` with LangGraph, not just `JavaScript`. In order to run our code we can no longer use `node` directly on the command line; instead we need to use the `tsx` module. We also use the `*.ts` extension on our files to indicate they contain `TypeScript`, not `JavaScript`. For example, to run a simple file called test we would need to use the command:
```
npx tsx test.ts
```

**Dependencies:** As this uses `tsx` we also need to install that, as well as the dependencies for `@langchain/community` and `@langchain/langgraph`. These are already in the `package.json` file. Also, to use the Tavily API you need to register an account with Tavily, [here](https://tavily.com/), to obtain an API key. Once you have this key export it to your local environment as: `TAVILY_API_KEY`.
233 | -------------------------------------------------------------------------------- /3-search/1-regular-search.js: -------------------------------------------------------------------------------- 1 | import { tavily } from '@tavily/core'; 2 | import ddg from 'duckduckgo-search'; 3 | import { convert } from 'html-to-text'; 4 | 5 | const client = tavily({apiKey: process.env.TAVILY_API_KEY}); 6 | 7 | 8 | // This is just done to check were working with Tavily, the rest of the code is a regular search 9 | const result1 = await client.search( 10 | 'What is in Nvidia\'s new Blackwell GPU?', 11 | {includeAnswer: true} 12 | ); 13 | 14 | console.log(result1.answer); 15 | 16 | /* 17 | // Now let's try a regular search 18 | const city = 'San Jose, CA'; 19 | 20 | const query = `What is the current weather in ${city}? 21 | Should I travel there today? 22 | "weather.com"` 23 | 24 | const urlList = await search(query); 25 | const url = urlList[0]; 26 | const html = await scrapeWeatherInfo(url); 27 | 28 | // Here's the raw result 29 | console.log('Website: ' + url + '\n\n'); 30 | console.log(result2.slice(0,50000)); 31 | */ 32 | 33 | /* 34 | // Now let's atte mpt to clean up the result 35 | const weatherData = [] 36 | const text = convert(html, { 37 | baseElements: {selectors: ['h1','h2','h3','p']}, 38 | preserveNewLines: false, 39 | selectors: [ 40 | { selector: 'h1', options: { leadingLineBreaks: 0, trailingLineBreaks: 0, trimEmptyLines: true } }, 41 | { selector: 'h2', options: { leadingLineBreaks: 0, trailingLineBreaks: 0, trimEmptyLines: true } }, 42 | { selector: 'h3', options: { leadingLineBreaks: 0, trailingLineBreaks: 0, trimEmptyLines: true } }, 43 | { selector: 'p', options: { leadingLineBreaks: 0, trailingLineBreaks: 0, trimEmptyLines: true } }, 44 | ] 45 | }); 46 | const result3 = text.replace(/\r?\n|\r/g, ' '); 47 | 48 | console.log('Website: ' + url + '\n\n'); 49 | console.log(result3); 50 | */ 51 | 52 | ///////// FUNCTIONS USED TO MANUALLY SEARCH ////////// 53 | 
async function search(query, maxResults=6) { 54 | try { 55 | const ddgResults = []; 56 | 57 | for await (const result of ddg.text(query)) { 58 | ddgResults.push(result); 59 | } 60 | 61 | const results = ddgResults.map(item => item.href); 62 | return results.length > maxResults ? results.slice(0, maxResults): results; 63 | } catch (e) { 64 | console.log('Returning SF results due to exception: ' + e.message); 65 | const results = [ 66 | 'https://weather.com/weather/today/l/USCA0987:1:US', 67 | 'https://weather.com/weather/hourbyhour/l/54f9d8baac32496f6b5497b4bf7a277c3e2e6cc5625de69680e6169e7e38e9a8' 68 | ]; 69 | 70 | return results; 71 | } 72 | } 73 | 74 | async function scrapeWeatherInfo(url) { 75 | if (url) { 76 | const headers = {'User-Agent': 'Mozilla/5.0'}; 77 | const response = await fetch(url, headers); 78 | const html = await response.text(); 79 | 80 | if (response.status !== 200) { 81 | return 'Failed to retrieve the webpage.'; 82 | } else { 83 | return html; 84 | } 85 | } else { 86 | return 'Weather information could not be found.'; 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /3-search/2-agent-search.js: -------------------------------------------------------------------------------- 1 | import { tavily } from '@tavily/core'; 2 | import ddg from 'duckduckgo-search'; 3 | import { convert } from 'html-to-text'; 4 | 5 | const client = tavily({apiKey: process.env.TAVILY_API_KEY}); 6 | 7 | // Here is the same quesry from our regular search 8 | const city = 'San Jose, CA'; 9 | 10 | const query = `What is the current weather in ${city}? 11 | Should I travel there today? 
12 | "weather.com"` 13 | 14 | 15 | const result = await client.search(query, {maxResults: 1}); 16 | 17 | console.log(result.results[0].content); 18 | 19 | const json = result.results[0].content; 20 | const parsedJson = JSON.parse(json.replaceAll('\'', '\"')); 21 | 22 | console.log(parsedJson); 23 | -------------------------------------------------------------------------------- /3-search/notes.md: -------------------------------------------------------------------------------- 1 | # Agentic Search 2 | 3 | To try this section we needed to install the Tavily JS API and ensure the API key is set in the environment. 4 | ``` 5 | npm i @tavily/core 6 | ``` 7 | ## Why Search Tool 8 | ``` 9 | -------- ------- ------- 10 | | Prompt |--->| Agent |--->| Agent | 11 | -------- ------- ------- 12 | | ^ 13 | Search query | | RAG content 14 | v | 15 | -------- | 16 | | Search |------- 17 | | Tool | 18 | -------- 19 | ``` 20 | ## Inside the search tool 21 | ``` 22 | -------- -------- 23 | | Source | | Source | 24 | -------- -------- 25 | ^ ^ 26 | | | 27 | | | 28 | ------- ------------- ---------- ----------- -------------- 29 | | Query |--->| Sub-queries |----->| Retrieve |----->| Scoring & |----->| Return top-k | 30 | ------- ------------- | ---------- | filtering | | documents | 31 | ------------- | | | ----------- -------------- 32 | ------------- | | 33 | v v 34 | -------- -------- 35 | | Source | | Source | 36 | -------- -------- 37 | /\ 38 | / \ 39 | ----------------------------------------------------------- 40 | | Source | 41 | | Chunked source | 42 | | -------------- | 43 | | | xxxxxxxxxxxx | Top-k chunks | 44 | | -------------- -------------- | 45 | | | xxxxxxxxxxxx | | xxxxxxxxxxxx | | 46 | | ----------- -------------- -------------- | 47 | | | Sub-query |----->| |----->| xxxxxxxxxxxx | | 48 | | ----------- -------------- -------------- | 49 | | | xxxxxxxxxxxx | | xxxxxxxxxxxx | | 50 | | -------------- -------------- | 51 | | | | | 52 | | -------------- | 53 | | | 54 | 
----------------------------------------------------------- 55 | ``` 56 | -------------------------------------------------------------------------------- /4-persistance-streaming/1-events.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from '@langchain/langgraph'; 2 | import { ChatOpenAI } from '@langchain/openai'; 3 | import { BaseMessage, SystemMessage, HumanMessage } from '@langchain/core/messages'; 4 | import { ToolMessage } from '@langchain/core/messages/tool' 5 | import { SqliteSaver } from '@langchain/langgraph-checkpoint-sqlite'; 6 | import { TavilySearchResults } from '@langchain/community/tools/tavily_search'; 7 | 8 | const tools = [new TavilySearchResults({maxResults: 2})]; 9 | 10 | // First we define the state 11 | const AgentState = Annotation.Root({ 12 | messages: Annotation({ 13 | reducer: (x, y) => x.concat(y), 14 | default: () => [] 15 | }) 16 | }); 17 | 18 | // Now define the memory for persistance 19 | const memory = SqliteSaver.fromConnString(':memory:'); 20 | 21 | // Now the system prompt 22 | const system = `You are a smart research assistant. Use the search engine to look up information. 23 | You are allowed to make multiple calls (either together or in sequence). 24 | Only look up information when you are sure of what you want. 
25 | If you need to look up some information before asking a follow up question, you are allowed to do that!`; 26 | 27 | // Now the model we will use 28 | const model = new ChatOpenAI({model: 'gpt-3.5-turbo'}).bindTools(tools); 29 | 30 | // Here is where we construct the graph 31 | const graph = new StateGraph(AgentState) 32 | .addNode('llm', callOpenAi) 33 | .addNode('action', takeAction) 34 | .addConditionalEdges( 35 | 'llm', 36 | existsAction, 37 | {true: 'action', false: END} 38 | ) 39 | .addEdge('action','llm') 40 | .setEntryPoint('llm') 41 | .compile({checkpointer: memory}); 42 | 43 | 44 | // Now lets call the agent with a question 45 | const messages1 = [new HumanMessage('What is the weather in sf?')]; 46 | 47 | const config1 = { 48 | streamMode: 'updates', // Specifies we want to see the internal events 49 | configurable: {thread_id: '1'} // Used by memory to keep the converstion going 50 | }; 51 | 52 | for await (const event of await graph.stream({messages: messages1}, config1)) { 53 | for (const [node, values] of Object.entries(event)) { 54 | console.log(values.messages); 55 | } 56 | } 57 | 58 | 59 | // Let's check this is a converstion, this should know we're asking about the weather 60 | const messages2 = [new HumanMessage('What about in LA?')]; 61 | 62 | for await (const event of await graph.stream({messages: messages2}, config1)) { 63 | for (const [node, values] of Object.entries(event)) { 64 | console.log(values.messages); 65 | } 66 | } 67 | 68 | 69 | 70 | // This should show we can access the context 71 | const messages3 = [new HumanMessage('Which one is warmer?')]; 72 | 73 | for await (const event of await graph.stream({messages: messages3}, config1)) { 74 | for (const [node, values] of Object.entries(event)) { 75 | console.log(values.messages); 76 | } 77 | } 78 | 79 | 80 | 81 | // Here we can see the importance of the thread_id in maintining that context 82 | const config2 = { 83 | streamMode: 'updates', 84 | configurable: {thread_id: '2'} 85 | }; 
86 | for await (const event of await graph.stream({messages: messages3}, config2)) { 87 | for (const [node, values] of Object.entries(event)) { 88 | console.log(values.messages); 89 | } 90 | } 91 | 92 | 93 | 94 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 95 | // This is the function for the conditional edge 96 | function existsAction(state) { 97 | const result = state.messages[state.messages.length-1]; 98 | 99 | return result.tool_calls.length > 0; 100 | } 101 | 102 | // This is the function for the llm node 103 | async function callOpenAi(state) { 104 | let messages = state.messages; 105 | 106 | if (system) { 107 | messages = [new SystemMessage(system), ...messages]; 108 | } 109 | 110 | const message = await model.invoke(messages); 111 | return {messages: [message]}; 112 | } 113 | 114 | // This is the function for the action node 115 | async function takeAction(state) { 116 | const results = []; 117 | const toolCalls = state.messages[state.messages.length-1].tool_calls; 118 | let result = null; 119 | 120 | for(const t of toolCalls) { 121 | console.log('Calling: ' + JSON.stringify(t)); 122 | if (tools.some(tool => tool.name === t.name)) { 123 | const tool = tools.find(tool => tool.name === t.name); 124 | result = await tool.invoke(t.args); 125 | } else { 126 | console.log('\n...bad tool name...'); 127 | result = 'bad tool name, retry'; 128 | } 129 | results.push(new ToolMessage({tool_call_id: t.id, name: t.name, content: result.toString()})); 130 | } 131 | console.log('Back to the model!'); 132 | return {messages: results}; 133 | } 134 | -------------------------------------------------------------------------------- /4-persistance-streaming/2-tokens.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from '@langchain/langgraph'; 2 | import { ChatOpenAI } from '@langchain/openai'; 3 | import { BaseMessage, SystemMessage, HumanMessage, isAIMessageChunk } from 
'@langchain/core/messages'; 4 | import { ToolMessage } from '@langchain/core/messages/tool' 5 | import { SqliteSaver } from '@langchain/langgraph-checkpoint-sqlite'; 6 | import { TavilySearchResults } from '@langchain/community/tools/tavily_search'; 7 | 8 | const tools = [new TavilySearchResults({maxResults: 2})]; 9 | 10 | // First we define the state 11 | const AgentState = Annotation.Root({ 12 | messages: Annotation({ 13 | reducer: (x, y) => x.concat(y), 14 | default: () => [] 15 | }) 16 | }); 17 | 18 | // Now define the memory for persistance 19 | const memory = SqliteSaver.fromConnString(':memory:'); 20 | 21 | // Now the system prompt 22 | const system = `You are a smart research assistant. Use the search engine to look up information. 23 | You are allowed to make multiple calls (either together or in sequence). 24 | Only look up information when you are sure of what you want. 25 | If you need to look up some information before asking a follow up question, you are allowed to do that!`; 26 | 27 | // Now the model we will use 28 | const model = new ChatOpenAI({ 29 | model: 'gpt-3.5-turbo', 30 | temperature: 0, 31 | streaming: true 32 | }).bindTools(tools); 33 | 34 | // Here is where we construct the graph 35 | const graph = new StateGraph(AgentState) 36 | .addNode('llm', callOpenAi) 37 | .addNode('action', takeAction) 38 | .addConditionalEdges( 39 | 'llm', 40 | existsAction, 41 | {true: 'action', false: END} 42 | ) 43 | .addEdge('action','llm') 44 | .setEntryPoint('llm') 45 | .compile({checkpointer: memory}); 46 | 47 | 48 | // Now lets call the agent with a question 49 | const messages1 = [new HumanMessage('What is the weather in sf?')]; 50 | 51 | const config1 = { 52 | streamMode: 'messages', // Specifies we want to see the LLM tokens 53 | configurable: {thread_id: '4'}, // Used by memory to keep the converstion going 54 | version: 'v2' 55 | }; 56 | 57 | const stream = await graph.stream({messages: messages1}, config1); 58 | 59 | for await (const [message, 
_metadata] of stream) { 60 | if (isAIMessageChunk(message)) { 61 | if (message.content) { process.stdout.write(message.content + '|'); } 62 | } 63 | } 64 | 65 | 66 | 67 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 68 | // This is the function for the conditional edge 69 | function existsAction(state) { 70 | const result = state.messages[state.messages.length-1]; 71 | 72 | return result.tool_calls.length > 0; 73 | } 74 | 75 | // This is the function for the llm node 76 | async function callOpenAi(state) { 77 | let messages = state.messages; 78 | 79 | if (system) { 80 | messages = [new SystemMessage(system), ...messages]; 81 | } 82 | 83 | const message = await model.invoke(messages); 84 | return {messages: [message]}; 85 | } 86 | 87 | // This is the function for the action node 88 | async function takeAction(state) { 89 | const results = []; 90 | const toolCalls = state.messages[state.messages.length-1].tool_calls; 91 | let result = null; 92 | 93 | for(const t of toolCalls) { 94 | console.log('Calling: ' + JSON.stringify(t)); 95 | if (tools.some(tool => tool.name === t.name)) { 96 | const tool = tools.find(tool => tool.name === t.name); 97 | result = await tool.invoke(t.args); 98 | } else { 99 | console.log('\n...bad tool name...'); 100 | result = 'bad tool name, retry'; 101 | } 102 | results.push(new ToolMessage({tool_call_id: t.id, name: t.name, content: result.toString()})); 103 | } 104 | console.log('Back to the model!'); 105 | return {messages: results}; 106 | } 107 | -------------------------------------------------------------------------------- /4-persistance-streaming/notes.md: -------------------------------------------------------------------------------- 1 | # Persistence and Streaming 2 | These are especially useful for long running processes where we may need to stop, and pause, then resume at a later date. Streaming also allows us to receive a series of signals so were are aware of what the agent is up to at all times. 
3 | -------------------------------------------------------------------------------- /5-human/1-interrupt.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from '@langchain/langgraph'; 2 | import { ChatOpenAI } from '@langchain/openai'; 3 | import { MessageUnion, SystemMessage, HumanMessage, AIMessage } from '@langchain/core/messages'; 4 | import { ToolMessage } from '@langchain/core/messages/tool' 5 | import { SqliteSaver } from '@langchain/langgraph-checkpoint-sqlite'; 6 | import { TavilySearchResults } from '@langchain/community/tools/tavily_search'; 7 | import { v4 as uuid4 } from 'uuid'; 8 | import confirm from '@inquirer/confirm'; 9 | 10 | // Here we set up the parts we need for the agent 11 | 12 | // First define the memory for persistance 13 | const memory = SqliteSaver.fromConnString(':memory:'); 14 | 15 | 16 | // Set up the tools 17 | const tools = [new TavilySearchResults({maxResults: 2})]; 18 | 19 | // First we define the state 20 | const AgentState = Annotation.Root({ 21 | messages: Annotation({ 22 | reducer: (left, right) => reduceMessages(left, right), // Now we use the function 23 | default: () => [] 24 | }) 25 | }); 26 | 27 | // Now the system prompt 28 | const system = `You are a smart research assistant. Use the search engine to look up information. 29 | You are allowed to make multiple calls (either together or in sequence). 30 | Only look up information when you are sure of what you want. 
31 | If you need to look up some information before asking a follow up question, you are allowed to do that!`; 32 | 33 | // Now the model we will use 34 | const model = new ChatOpenAI({ 35 | model: 'gpt-3.5-turbo', 36 | }).bindTools(tools); 37 | 38 | 39 | // Now we can construct our agent graph 40 | const graph = new StateGraph(AgentState) 41 | .addNode('llm', callOpenAi) 42 | .addNode('action', takeAction) 43 | .addConditionalEdges( 44 | 'llm', 45 | existsAction, 46 | {true: 'action', false: END} 47 | ) 48 | .addEdge('action','llm') 49 | .setEntryPoint('llm') 50 | .compile({ 51 | checkpointer: memory, 52 | interruptBefore: ['action'] 53 | }); 54 | 55 | ///// Human approval 56 | 57 | // Now lets call the agent with a question 58 | const messages1 = [new HumanMessage('What is the weather in sf?')]; 59 | 60 | const config1 = { 61 | configurable: {thread_id: '1'} // Used by memory to keep the converstion going 62 | }; 63 | 64 | // Start the agent (we will stop before calling the tool) 65 | for await (const event of await graph.stream({messages: messages1}, config1)) { 66 | for (const [node, values] of Object.entries(event)) { 67 | console.log(values.messages); 68 | } 69 | } 70 | 71 | // Let's examine the state of the graph at the interrunpted point 72 | const state = await graph.getState(config1); 73 | console.log(state); 74 | console.log(state.next); 75 | 76 | 77 | ///// Continue after interrupt 78 | /* 79 | // Now we can let it complete by passing in null as a message, the threat_id in the config 80 | // associates this with the state of the graph and lets it continue 81 | for await (const event of await graph.stream(null, config1)) { 82 | for (const [node, values] of Object.entries(event)) { 83 | console.log(values); 84 | } 85 | } 86 | */ 87 | 88 | /* 89 | // Now let's rerun this and allow for decisions to be made 90 | const messages2 = [new HumanMessage('What is the weather in LA?')]; 91 | 92 | const config2 = { 93 | configurable: {thread_id: '2'} // We use a new 
thread 94 | }; 95 | 96 | for await (const event of await graph.stream({messages: messages2}, config2)) { 97 | for (const [node, values] of Object.entries(event)) { 98 | console.log(values.messages); 99 | } 100 | } 101 | 102 | 103 | let state = await graph.getState(config2); 104 | 105 | while (state.next != '') { 106 | console.log('\n' + state.next + '\n'); 107 | 108 | if (await confirm({ message: 'proceed?' })) { 109 | for await (const event of await graph.stream(null, config2)) { 110 | for (const [node, values] of Object.entries(event)) { 111 | console.log(values); 112 | } 113 | } 114 | } else { 115 | console.log('aborting.'); 116 | break; 117 | } 118 | state = await graph.getState(config2); 119 | } 120 | */ 121 | 122 | 123 | 124 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 125 | 126 | // This is the function for the conditional edge 127 | function existsAction(state) { 128 | const result = state.messages[state.messages.length-1]; 129 | 130 | return result.tool_calls ? result.tool_calls.length > 0 : false; 131 | } 132 | 133 | // This is the function for the llm node 134 | async function callOpenAi(state) { 135 | let messages = state.messages; 136 | 137 | if (system) { 138 | messages = [new SystemMessage(system), ...messages]; 139 | } 140 | 141 | const message = await model.invoke(messages); 142 | return {messages: [message]}; 143 | } 144 | 145 | // This is the function for the action node 146 | async function takeAction(state) { 147 | const results = []; 148 | const toolCalls = state.messages[state.messages.length-1].tool_calls; 149 | let result = null; 150 | 151 | for(const t of toolCalls) { 152 | console.log('Calling: ' + JSON.stringify(t)); 153 | if (tools.some(tool => tool.name === t.name)) { 154 | const tool = tools.find(tool => tool.name === t.name); 155 | result = await tool.invoke(t.args); 156 | } else { 157 | console.log('\n...bad tool name...'); 158 | result = 'bad tool name, retry'; 159 | } 160 | results.push(new ToolMessage({tool_call_id: t.id, 
name: t.name, content: result.toString()})); 161 | } 162 | console.log('Back to the model!'); 163 | return {messages: results}; 164 | } 165 | 166 | // This function is used by the graph state to update the messages list 167 | // This lets us update previous messages based on id (or we add them) 168 | function reduceMessages(left: MessageUnion[], right: MessageUnion[]):MessaggeUnion[] { 169 | // Ensure any new messages have an id 170 | for (const message of right) { 171 | if (message.id === null || message.id === undefined) { 172 | message.id = uuid4(); 173 | message.lc_kwargs.id = message.id; 174 | } 175 | } 176 | 177 | // Copy the current set of messages 178 | const merged = [...left]; 179 | 180 | // Now check to see if the new message exists in the curren set of messages 181 | // if it does we update it if it does not we append it to the end ofg the array 182 | for (const message of right) { 183 | const i = merged.findIndex(existing => existing.id === message.id); 184 | 185 | i !== -1 ? 
merged[i] = message : merged.push(message); 186 | } 187 | 188 | return merged; 189 | } 190 | -------------------------------------------------------------------------------- /5-human/2-modify.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from '@langchain/langgraph'; 2 | import { ChatOpenAI } from '@langchain/openai'; 3 | import { MessageUnion, SystemMessage, HumanMessage, AIMessage } from '@langchain/core/messages'; 4 | import { ToolMessage } from '@langchain/core/messages/tool' 5 | import { SqliteSaver } from '@langchain/langgraph-checkpoint-sqlite'; 6 | import { TavilySearchResults } from '@langchain/community/tools/tavily_search'; 7 | import { v4 as uuid4 } from 'uuid'; 8 | import confirm from '@inquirer/confirm'; 9 | 10 | // Here we set up the parts we need for the agent 11 | 12 | // First define the memory for persistance 13 | const memory = SqliteSaver.fromConnString(':memory:'); 14 | 15 | // Set up the tools 16 | const tools = [new TavilySearchResults({maxResults: 2})]; 17 | 18 | // First we define the state (with a custom reducer) 19 | const AgentState = Annotation.Root({ 20 | messages: Annotation({ 21 | reducer: (left, right) => reduceMessages(left, right), // See below for function 22 | default: () => [] 23 | }) 24 | }); 25 | 26 | // Now the system prompt 27 | const system = `You are a smart research assistant. Use the search engine to look up information. 28 | You are allowed to make multiple calls (either together or in sequence). 29 | Only look up information when you are sure of what you want. 
30 | If you need to look up some information before asking a follow up question, you are allowed to do that!`; 31 | 32 | // Now the model we will use 33 | const model = new ChatOpenAI({ 34 | model: 'gpt-3.5-turbo', 35 | }).bindTools(tools); 36 | 37 | 38 | // Now we can construct our agent graph 39 | const graph = new StateGraph(AgentState) 40 | .addNode('llm', callOpenAi) // See below for function 41 | .addNode('action', takeAction) // See below for function 42 | .addConditionalEdges( 43 | 'llm', 44 | existsAction, // See below for function 45 | {true: 'action', false: END} 46 | ) 47 | .addEdge('action','llm') 48 | .setEntryPoint('llm') 49 | .compile({ 50 | checkpointer: memory, 51 | interruptBefore: ['action'] 52 | }); 53 | 54 | const messages = [new HumanMessage('What is the weather in LA?')]; 55 | 56 | const config = { 57 | configurable: {thread_id: '2'} 58 | }; 59 | 60 | for await (const event of await graph.stream({messages: messages}, config)) { 61 | for (const [node, values] of Object.entries(event)) { 62 | console.log('\nStreaming:') 63 | console.log(values.messages); 64 | } 65 | } 66 | 67 | let currentValues = await graph.getState(config); 68 | 69 | // This is the current state 70 | console.log(currentValues.values); 71 | // Here is the message we want to modify 72 | console.log(currentValues.values.messages[currentValues.values.messages.length-1]); 73 | // In fact this is the part of the message we want change 74 | console.log(currentValues.values.messages[currentValues.values.messages.length-1].tool_calls); 75 | // The state returned at this point now hase a human message without an id! 
76 | 77 | // Store the tool_call ID 78 | const _id = currentValues.values.messages[currentValues.values.messages.length-1].tool_calls[0].id; 79 | // Now let's replace the tool call with our own, updating the query but preserving the ID 80 | currentValues.values.messages[currentValues.values.messages.length-1].tool_calls = [{ 81 | name: 'tavily_search_results_json', 82 | args: { 83 | input: 'current weather in Louisiana' 84 | }, 85 | type: 'tool_call', 86 | id: _id 87 | }]; 88 | 89 | await graph.updateState(config, currentValues.values); 90 | 91 | let newValues = await graph.getState(config); 92 | console.log(newValues.values); 93 | 94 | for await (const event of await graph.stream(null, config)) { 95 | for (const [node, values] of Object.entries(event)) { 96 | console.log(values.messages); 97 | } 98 | } 99 | 100 | 101 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 102 | 103 | // This is the function for the conditional edge 104 | function existsAction(state) { 105 | const result = state.messages[state.messages.length-1]; 106 | 107 | return result.tool_calls ? 
result.tool_calls.length > 0 : false; 108 | } 109 | 110 | // This is the function for the llm node 111 | async function callOpenAi(state) { 112 | let messages = state.messages; 113 | 114 | if (system) { 115 | messages = [new SystemMessage(system), ...messages]; 116 | } 117 | 118 | const message = await model.invoke(messages); 119 | return {messages: [message]}; 120 | } 121 | 122 | // This is the function for the action node 123 | async function takeAction(state) { 124 | const results = []; 125 | const toolCalls = state.messages[state.messages.length-1].tool_calls; 126 | let result = null; 127 | 128 | for(const t of toolCalls) { 129 | console.log('Calling: ' + JSON.stringify(t)); 130 | if (tools.some(tool => tool.name === t.name)) { 131 | const tool = tools.find(tool => tool.name === t.name); 132 | result = await tool.invoke(t.args); 133 | } else { 134 | //console.log('\n...bad tool name...'); 135 | result = 'bad tool name, retry'; 136 | } 137 | results.push(new ToolMessage({tool_call_id: t.id, name: t.name, content: result.toString()})); 138 | } 139 | //console.log('Back to the model!'); 140 | return {messages: results}; 141 | } 142 | 143 | // This function is used by the graph state to update the messages list 144 | // This lets us update previous messages based on id (or we add them) 145 | function reduceMessages(left: MessageUnion[], right: MessageUnion[]):MessaggeUnion[] { 146 | // Ensure any new messages have an id 147 | for (const message of right) { 148 | if (message.id === null || message.id === undefined) { 149 | message.id = uuid4(); 150 | message.lc_kwargs.id = message.id; 151 | } 152 | } 153 | 154 | // Copy the current set of messages 155 | const merged = [...left]; 156 | 157 | // Now check to see if the new message exists in the curren set of messages 158 | // if it does we update it if it does not we append it to the end ofg the array 159 | for (const message of right) { 160 | const i = merged.findIndex(existing => existing.id === message.id); 161 | 
162 | i !== -1 ? merged[i] = message : merged.push(message); 163 | } 164 | 165 | return merged; 166 | } 167 | -------------------------------------------------------------------------------- /5-human/3-timetravel.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from '@langchain/langgraph'; 2 | import { ChatOpenAI } from '@langchain/openai'; 3 | import { MessageUnion, SystemMessage, HumanMessage, AIMessage } from '@langchain/core/messages'; 4 | import { ToolMessage } from '@langchain/core/messages/tool' 5 | import { SqliteSaver } from '@langchain/langgraph-checkpoint-sqlite'; 6 | import { TavilySearchResults } from '@langchain/community/tools/tavily_search'; 7 | import { v4 as uuid4 } from 'uuid'; 8 | import confirm from '@inquirer/confirm'; 9 | 10 | // Here we set up the parts we need for the agent 11 | 12 | // First define the memory for persistance 13 | const memory = SqliteSaver.fromConnString(':memory:'); 14 | 15 | // Set up the tools 16 | const tools = [new TavilySearchResults({maxResults: 2})]; 17 | 18 | // First we define the state (with a custom reducer) 19 | const AgentState = Annotation.Root({ 20 | messages: Annotation({ 21 | reducer: (left, right) => reduceMessages(left, right), // See below for function 22 | default: () => [] 23 | }) 24 | }); 25 | 26 | // Now the system prompt 27 | const system = `You are a smart research assistant. Use the search engine to look up information. 28 | You are allowed to make multiple calls (either together or in sequence). 29 | Only look up information when you are sure of what you want. 
30 | If you need to look up some information before asking a follow up question, you are allowed to do that!`; 31 | 32 | // Now the model we will use 33 | const model = new ChatOpenAI({ 34 | model: 'gpt-3.5-turbo', 35 | }).bindTools(tools); 36 | 37 | 38 | // Now we can construct our agent graph 39 | const graph = new StateGraph(AgentState) 40 | .addNode('llm', callOpenAi) // See below for function 41 | .addNode('action', takeAction) // See below for function 42 | .addConditionalEdges( 43 | 'llm', 44 | existsAction, // See below for function 45 | {true: 'action', false: END} 46 | ) 47 | .addEdge('action','llm') 48 | .setEntryPoint('llm') 49 | .compile({ 50 | checkpointer: memory, 51 | interruptBefore: ['action'] 52 | }); 53 | 54 | 55 | // Let's do what we did in the modify exercise 56 | const messages = [new HumanMessage('What is the weather in LA?')]; 57 | 58 | const config = { 59 | configurable: {thread_id: '3'} 60 | }; 61 | 62 | for await (const event of await graph.stream({messages: messages}, config)) { 63 | for (const [node, values] of Object.entries(event)) { 64 | console.log('\nStreaming:') 65 | console.log(values.messages); 66 | } 67 | } 68 | 69 | let currentValues = await graph.getState(config); 70 | 71 | // Now let's replace the tool call with our own input 72 | currentValues.values.messages[currentValues.values.messages.length-1].tool_calls[0].args.input = 'current weather in Louisiana'; 73 | 74 | await graph.updateState(config, currentValues.values); 75 | 76 | for await (const event of await graph.stream(null, config)) { 77 | for (const [node, values] of Object.entries(event)) { 78 | console.log(values.messages); 79 | } 80 | } 81 | 82 | // At this point we should have a result with the weather in Louisiana 83 | const states = []; 84 | 85 | const stateHistory = await graph.getStateHistory(config); 86 | 87 | for await (const state of stateHistory) { 88 | console.log(state); 89 | console.log('--'); 90 | states.push(state); 91 | } 92 | 93 | // Let's get the 
state from our history in which we wanted to 94 | // call the search engine and ask about the weather in LA 95 | const toReplay = states[states.length-3]; 96 | console.log(toReplay); 97 | 98 | // Now we can run things forward from that state using toReplay.config 99 | for await (const event of await graph.stream(null, toReplay.config)) { 100 | for (const [node, values] of Object.entries(event)) { 101 | console.log(values.messages); 102 | } 103 | } 104 | 105 | // We could also edit that historical message (for example suggest we get the weather from accuweather) 106 | toReplay.values.messages[toReplay.values.messages.length-1].tool_calls[0].args.input = 'current weather in LA, accuweather'; 107 | 108 | const branchState = await graph.updateState(toReplay.config, toReplay.values); 109 | 110 | for await (const event of await graph.stream(null, branchState)) { 111 | for (const [node, values] of Object.entries(event)) { 112 | if (node !== '__end__') { 113 | console.log(values); 114 | } 115 | } 116 | } 117 | 118 | // Another option is to add a message to a state at a given timepoint 119 | // We have this message we just modified 120 | console.log(toReplay); 121 | 122 | // We could fake a response to this as though a tool was called 123 | // Let's get the tool call id, this needs to be in the 'reply' 124 | const _id = toReplay.values.messages[toReplay.values.messages.length-1].tool_calls[0].id; 125 | 126 | const toolMessage = new ToolMessage({ 127 | toolCallId: _id, 128 | name: 'tavily_search_results_json', 129 | content: '54 degree celcius' 130 | }); 131 | 132 | const stateUpdate = {messages: [toolMessage]}; 133 | 134 | // Note that when we do the update we need to specify we are adding this state 135 | // 'as if' we were the action node so we don't need to visit that node 136 | const branchAndAdd = await graph.updateState(toReplay.config, stateUpdate, 'action'); 137 | 138 | for await (const event of await graph.stream(null, branchAndAdd)) { 139 | for (const [node, 
values] of Object.entries(event)) { 140 | console.log(values); 141 | } 142 | } 143 | 144 | 145 | 146 | 147 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 148 | 149 | // This is the function for the conditional edge 150 | function existsAction(state) { 151 | const result = state.messages[state.messages.length-1]; 152 | 153 | return result.tool_calls ? result.tool_calls.length > 0 : false; 154 | } 155 | 156 | // This is the function for the llm node 157 | async function callOpenAi(state) { 158 | let messages = state.messages; 159 | 160 | if (system) { 161 | messages = [new SystemMessage(system), ...messages]; 162 | } 163 | 164 | const message = await model.invoke(messages); 165 | return {messages: [message]}; 166 | } 167 | 168 | // This is the function for the action node 169 | async function takeAction(state) { 170 | const results = []; 171 | const toolCalls = state.messages[state.messages.length-1].tool_calls; 172 | let result = null; 173 | 174 | for(const t of toolCalls) { 175 | console.log('Calling: ' + JSON.stringify(t)); 176 | if (tools.some(tool => tool.name === t.name)) { 177 | const tool = tools.find(tool => tool.name === t.name); 178 | result = await tool.invoke(t.args); 179 | } else { 180 | //console.log('\n...bad tool name...'); 181 | result = 'bad tool name, retry'; 182 | } 183 | results.push(new ToolMessage({tool_call_id: t.id, name: t.name, content: result.toString()})); 184 | } 185 | //console.log('Back to the model!'); 186 | return {messages: results}; 187 | } 188 | 189 | // This function is used by the graph state to update the messages list 190 | // This lets us update previous messages based on id (or we add them) 191 | function reduceMessages(left: MessageUnion[], right: MessageUnion[]):MessaggeUnion[] { 192 | // Ensure any new messages have an id 193 | for (const message of right) { 194 | if (message.id === null || message.id === undefined) { 195 | message.id = uuid4(); 196 | message.lc_kwargs.id = message.id; 197 | } 198 | } 199 | 200 | // 
Copy the current set of messages 201 | const merged = [...left]; 202 | 203 | // Now check to see if the new message exists in the curren set of messages 204 | // if it does we update it if it does not we append it to the end ofg the array 205 | for (const message of right) { 206 | const i = merged.findIndex(existing => existing.id === message.id); 207 | 208 | i !== -1 ? merged[i] = message : merged.push(message); 209 | } 210 | 211 | return merged; 212 | } 213 | -------------------------------------------------------------------------------- /5-human/4-practice.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from '@langchain/langgraph'; 2 | import { SqliteSaver } from '@langchain/langgraph-checkpoint-sqlite'; 3 | 4 | // First we define the state 5 | const AgentState = Annotation.Root({ 6 | lnode: Annotation, 7 | scratch: Annotation, 8 | count: Annotation({ 9 | reducer: (current, update) => current += update, 10 | default: () => 0 11 | }) 12 | }); 13 | 14 | // Then some memory 15 | const memory = SqliteSaver.fromConnString(':memory:'); 16 | 17 | // Now we can assemble the graph 18 | const graph = new StateGraph(AgentState) 19 | .addNode('Node1', node1) // See below for function 20 | .addNode('Node2', node2) // See below for function 21 | .addEdge('Node1', 'Node2') 22 | .addConditionalEdges( 23 | 'Node2', 24 | shouldContinue, // See below for function 25 | {true: 'Node1', false: END} 26 | ) 27 | .setEntryPoint('Node1') 28 | .compile({ checkpointer: memory }); 29 | 30 | // Let's set up a thread 31 | const thread1 = { 32 | configurable: {thread_id: '1'} 33 | }; 34 | 35 | // Run It! 
36 | await graph.invoke({count: 0, scratch: 'hi'}, thread1); 37 | 38 | // look at the current state 39 | const currentState1 = await graph.getState(thread1); 40 | console.log('The current state:'); 41 | console.log(currentState1); 42 | 43 | // Look at the state history 44 | console.log('\nThe state history:'); 45 | const stateHistory1 = await graph.getStateHistory(thread1); 46 | for await (const state of stateHistory1) { 47 | console.log(state); 48 | } 49 | 50 | // Now let's store the state configs in a list 51 | console.log('\nStoring state configs:'); 52 | const stateHistory2 = await graph.getStateHistory(thread1); 53 | const states1 = []; 54 | for await (const state of stateHistory2) { 55 | states1.push(state.config); 56 | console.log(state.config); 57 | console.log(state.values.count); 58 | } 59 | 60 | // Let's grab an earlier state 61 | console.log('\nGet an earlier state config:'); 62 | const earlyState = states1[states1.length-3]; 63 | console.log(earlyState); 64 | console.log('\nGet the full earlier state:'); 65 | const fullEarlyState = await graph.getState(earlyState); 66 | console.log(fullEarlyState); 67 | 68 | // Go back in time 69 | console.log('\nRerun the graph from the earlier state:'); 70 | graph.invoke(null, earlyState); 71 | 72 | const stateHistory3 = await graph.getStateHistory(thread1); 73 | for await (const state of stateHistory3) { 74 | console.log(state.config); 75 | console.log(state.values.count); 76 | } 77 | 78 | // Modify state 79 | // This time we'll start a new thread 80 | const thread2 = { 81 | configurable: {thread_id: '2'} 82 | }; 83 | 84 | // Run It! 
85 | console.log('Run it, then check the state:'); 86 | await graph.invoke({count: 0, scratch: 'hi'}, thread2); 87 | const currentState2 = await graph.getState(thread1); 88 | console.log(currentState2); 89 | 90 | // Now let's look at the history 91 | console.log('\nStoring new state configs:'); 92 | const stateHistory4 = await graph.getStateHistory(thread2); 93 | const states2 = []; 94 | for await (const state of stateHistory4) { 95 | states2.push(state.config); 96 | console.log(state.config); 97 | console.log(state.values.count); 98 | } 99 | 100 | // Again let's get a previous state: 101 | console.log('\nSave previous state config:'); 102 | const saveState = await graph.getState(states2.length-3); 103 | console.log(saveState); 104 | 105 | // Now we modify that state 106 | console.log('\nModify the previous state values:'); 107 | saveState.values.count = -3; 108 | saveState.values.scratch = 'hello'; 109 | console.log(saveState); 110 | 111 | console.log('\nThe updated state history:'); 112 | await graph.updateState(thread2, saveState.values); 113 | 114 | const stateHistory5 = await graph.getStateHistory(thread2); 115 | let i = 0; 116 | for await (const state of stateHistory5) { 117 | if (i >= 3) console.log(state); 118 | i++; 119 | } 120 | 121 | // Now udate the state again but se the node to node 1 122 | console.log('\nThe updated state history and set the node:'); 123 | await graph.updateState(thread2, saveState.values, 'Node1'); 124 | const stateHistory6 = await graph.getStateHistory(thread2); 125 | i = 0; 126 | for await (const state of stateHistory6) { 127 | if (i >= 3) console.log(state); 128 | i++; 129 | } 130 | 131 | console.log('\nFinally invoke the graph with this new state:'); 132 | await graph.invoke(null, thread2); 133 | const stateHistory7 = await graph.getStateHistory(thread2); 134 | for await (const state of stateHistory7) { 135 | console.log(state); 136 | } 137 | 138 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 139 | function node1(state: 
AgentState) { 140 | console.log('node1, count: ' + state.count); 141 | return {lnode: 'node_1', count: 1}; 142 | } 143 | 144 | function node2(state: AgentState) { 145 | console.log('node2, count: ' + state.count); 146 | return {lnode: 'node_2', count: 1}; 147 | } 148 | 149 | function shouldContinue(state) { 150 | return state.count < 3; 151 | } 152 | -------------------------------------------------------------------------------- /5-human/notes.md: -------------------------------------------------------------------------------- 1 | # Human in the Loop 2 | There are many instances were you want to provide human oversight of the agents work. This lesson builds on what we looked at in Lesson 4. 3 | 4 | ## State memory 5 | As a graph is executed a snapshot of the state is stored in the memory: 6 | ``` 7 | | 8 | v 9 | --- 10 | |0,3|<------- 11 | --- | Memory 12 | | | --------- 13 | v | | State 3 | 14 | / \ | --------- 15 | < 1 > | | State 2 | 16 | \ / | --------- 17 | / V \ | | State 1 | 18 | / \ | --------- 19 | --- --- | | State 0 | 20 | | | | 2 |----- --------- 21 | --- --- | 22 | V 23 | StateSnapshot: { AgentState, useful_things} 24 | | 25 | V 26 | {..., thread, thread_ts, ...} 27 | | | 28 | V | 29 | config = {configurable: {thread_id: '1', V 30 | thread_ts: '1ef17b36-ed06-6185-8001-15cf75dea535'}}; 31 | ``` 32 | The thread and the thread_ts (the threads UID) can be used to access the states: 33 | ``` 34 | config = {configurable: {thread_id: '1'}}; --------- 35 | | | State 3 | 36 | V --------- 37 | graph.getState({..., thread, ...}); Returns 'current state' 38 | ``` 39 | We can also get an iterator over all of the previous states: 40 | ``` 41 | --------- 42 | graph.getStateHistory({..., thread, ...}); | State 3 | 43 | --------- | 44 | --------- | 45 | Returns an iterator over --------- | 46 | all StateSnapshots --------- 47 | ``` 48 | Using the iterator allows us to get individual snapshots with their UIDs `thread_ts` we can then use this to replay from a previous 
point: 49 | ``` 50 | graph.invoke(null, {..., thread, thread_ts, ...}); --------- 51 | graph.stream(null, {..., thread, thread_ts, ...}); | State n | 52 | --------- 53 | Run with state n as the 54 | starting point, Time Travel 55 | ``` 56 | If we do the above with out the `thread_ts` identifier we will use the current state as the starting point: 57 | ``` 58 | graph.invoke(null, {..., thread, ...}); --------- 59 | graph.stream(null, {..., thread, ...}); | State 3 | 60 | --------- 61 | Uses the current state 62 | as the starting point 63 | ``` 64 | We can also use the `thread_ts` to access a previous state, we can then modify it and add the modified state back on the memory stack making it the current state: 65 | ``` 66 | --------- 67 | graph.getState({..., thread, thread_ts, ...}); | State 1 | 68 | --------- 69 | | 70 | V 71 | --------- 72 | modify it | State m | 73 | --------- 74 | | 75 | V 76 | --------- 77 | graph.updateState(thread, state_m.values); | State 4 | 78 | --------- 79 | graph.invoke(null, {..., thread, ...}); Runs state 4 as the current state. 
80 | ``` 81 | -------------------------------------------------------------------------------- /6-writer/1-writer.ts: -------------------------------------------------------------------------------- 1 | import { StateGraph, Annotation, END } from '@langchain/langgraph'; 2 | import { SqliteSaver } from '@langchain/langgraph-checkpoint-sqlite'; 3 | import { MessageUnion, SystemMessage, HumanMessage, AIMessage, ChatMessage } from '@langchain/core/messages'; 4 | import { ChatOpenAI } from '@langchain/openai'; 5 | import { z } from 'zod'; 6 | import { tavily } from '@tavily/core'; 7 | // Here we set up the parts we need for the agent 8 | 9 | // First define the memory for persistance 10 | const memory = SqliteSaver.fromConnString(':memory:'); 11 | 12 | // Define the state 13 | const AgentState = Annotation.Root({ 14 | task: Annotation, 15 | plan: Annotation, 16 | draft: Annotation, 17 | critique: Annotation, 18 | content: Annotation, 19 | revision: Annotation, 20 | maxRevisions: Annotation, 21 | }); 22 | 23 | // Now the model we will use 24 | const model = new ChatOpenAI({ 25 | model: 'gpt-3.5-turbo', 26 | temperature: 0 27 | }); 28 | 29 | // Lets set all of the prompts up 30 | const PLAN_PROMPT = `You are an expert writer tasked with writing a high level outline of an essay. \ 31 | Write such an outline for the user provided topic. \ 32 | Give an outline of the essay along with any relevant notes or instructions for the sections.`; 33 | 34 | const WRITER_PROMPT = `You are an essay assistant tasked with writing excellent 5-paragraph essays. \ 35 | Generate the best essay possible for the user's request and the initial outline. \ 36 | If the user provides critique, respond with a revised version of your previous attempts. \ 37 | Utilize all the information below as needed: 38 | 39 | ------ 40 | 41 | {content}`; 42 | 43 | const REFLECTION_PROMPT = `You are a teacher grading an essay submission. \ 44 | Generate critique and recommendations for the user's submission. 
\ 45 | Provide detailed recommendations, including requests for length, depth, style, etc.`; 46 | 47 | const RESEARCH_PLAN_PROMPT = `You are a researcher charged with providing information that can \ 48 | be used when writing the following essay. Generate a list of search queries that will gather \ 49 | any relevant information. Only generate 3 queries max.`; 50 | 51 | const RESEARCH_CRITIQUE_PROMPT = `You are a researcher charged with providing information that can \ 52 | be used when making any requested revisions (as outlined below). \ 53 | Generate a list of search queries that will gather any relevant information. Only generate 3 queries max.`; 54 | 55 | // Define a structure for the queries 56 | const Queries = z.object({ 57 | queries: z.array(z.string()) 58 | }); 59 | 60 | // get the search set up 61 | const client = tavily({apiKey: process.env.TAVILY_API_KEY}); 62 | 63 | // Now we can construct the graph 64 | const graph = new StateGraph(AgentState) 65 | .addNode('planner', planNode) 66 | .addNode('generate', generationNode) 67 | .addNode('reflect', reflectionNode) 68 | .addNode('researchPlan', researchPlanNode) 69 | .addNode('researchCritique', researchCritiqueNode) 70 | .addEdge('planner','researchPlan') 71 | .addEdge('researchPlan','generate') 72 | .addEdge('reflect','researchCritique') 73 | .addEdge('researchCritique','generate') 74 | .addConditionalEdges( 75 | 'generate', 76 | shouldContinue, 77 | ['reflect', END] 78 | ) 79 | .setEntryPoint('planner') 80 | .compile({ checkpointer: memory }); 81 | 82 | 83 | // Ok let's use the graph 84 | const thread = { 85 | configurable: {thread_id: '1'} 86 | }; 87 | 88 | for await (const event of await graph.stream({ 89 | task: 'What is the difference between LangChain and LangSmith?', 90 | maxRevisions: 2, 91 | revision: 1 92 | }, thread)) { 93 | for (const [node, values] of Object.entries(event)) { 94 | console.log(values); 95 | } 96 | } 97 | 98 | 99 | 100 | ///////// FUNCTIONS USED BY THE GRAPH ////////// 101 | 
// Plan node: asks the model for a high-level essay outline for the task.
async function planNode(state: AgentState) {
  const messages = [
    new SystemMessage({content: PLAN_PROMPT}),
    new HumanMessage({content: state.task})
  ];

  const response = await model.invoke(messages);

  // Only the keys returned here are merged back into the graph state.
  return {plan: response.content};
}


// Research node: turns the task into (max 3) search queries, runs them
// through Tavily, and appends the result snippets to the gathered content.
async function researchPlanNode(state: AgentState) {
  const messages = [
    new SystemMessage({content: RESEARCH_PLAN_PROMPT}),
    new HumanMessage({content: state.task})
  ];

  // Structured output guarantees we get back {queries: string[]}.
  const queries = await model.withStructuredOutput(Queries).invoke(messages);

  // Copy rather than push into state.content: the previous code mutated the
  // checkpointed array in place, and LangGraph state snapshots should be
  // treated as immutable. `??` (not `||`) so only null/undefined fall back.
  const content = [...(state.content ?? [])];

  // The queries are independent, so run the searches in parallel;
  // Promise.all keeps the results in query order.
  const responses = await Promise.all(
    queries.queries.map((q) => client.search(q, {maxResults: 2}))
  );

  for (const response of responses) {
    for (const r of response.results) {
      content.push(r.content);
    }
  }

  return {content: content};
}


// Generation node: writes (or revises) the 5-paragraph draft using the plan
// and any research content gathered so far.
async function generationNode(state: AgentState) {
  const content = (state.content ?? []).join('\n\n');

  const userMessage = new HumanMessage({content: state.task + '\n\nHere is my plan:\n\n' + state.plan});

  const messages = [
    new SystemMessage({content: WRITER_PROMPT.replace('{content}', content)}),
    userMessage
  ];

  const response = await model.invoke(messages);

  // Bump the revision counter so shouldContinue can end the reflect loop.
  return {draft: response.content, revision: state.revision + 1};
}


// Reflection node: grades the current draft and produces a critique.
async function reflectionNode(state: AgentState) {
  const messages = [
    new SystemMessage({content: REFLECTION_PROMPT}),
    new HumanMessage({content: state.draft})
  ];

  const response = await model.invoke(messages);

  return {critique: response.content};
}


// Research node for the critique: same query/search fan-out as
// researchPlanNode, but driven by the critique text.
// NOTE(review): the remainder of this function also push()es into
// state.content in place — apply the same copy-on-write fix as above.
async function researchCritiqueNode(state: AgentState) {
  const messages = [
    new SystemMessage({content: RESEARCH_CRITIQUE_PROMPT}),
    new HumanMessage({content: state.critique})
  ];

  const queries = await
model.withStructuredOutput(Queries).invoke(messages); 170 | 171 | const content = (state.content || []); 172 | 173 | for (const q of queries.queries) { 174 | const response = await client.search(q, {maxResults: 2}); 175 | 176 | for (const r of response.results) { 177 | content.push(r.content); 178 | } 179 | } 180 | 181 | return {content: content}; 182 | } 183 | 184 | 185 | function shouldContinue(state) { 186 | return state.revision > state.maxRevisions ? END : 'reflect'; 187 | } 188 | -------------------------------------------------------------------------------- /6-writer/notes.md: -------------------------------------------------------------------------------- 1 | # Essay Writer 2 | 3 | ``` 4 | | 5 | v 6 | --- 7 | plan | 1 | 8 | --- 9 | | 10 | v 11 | --- 12 | research_plan | 2 | 13 | --- 14 | | 15 | v 16 | --- 17 | generate | 3 |<------- 18 | --- | 19 | | | 20 | v | 21 | / \ | 22 | < > | 23 | \ / | 24 | / V \continue| 25 | / \ | 26 | --- | | 27 | | X | | | 28 | --- --- | 29 | reflect | | | 30 | --- | 31 | | | 32 | v | 33 | --- | 34 | research_critique | |----- 35 | --- 36 | ``` 37 | 1. You are an expert writer tasked with writing a **high level outline** of an essay. Write such an outline for the user provided topic. Give an outline of the essay along with any relevant notes or instructions for the sections. 38 | 2. You are a researcher charged with providing information that can be used when writing the following essay. Generate a list of **search queries that will gather any relevant information**. Only generate 3 queries max. 39 | 3. You are an essay assistant charged with **writing excellent 5-paragraph essays**. Generate ... if the **user provides critique, respond with a revised version** of your previous attempts. 
Utilize information: 40 | -------------------------------------------------------------------------------- /7-resources/notes.md: -------------------------------------------------------------------------------- 1 | # Resources 2 | 3 | [LangChain.JS Introduction](https://js.langchain.com/docs/introduction/) 4 | This covers: 5 | - **LangChain**, the basic libraries. 6 | - **LangChain Community**, partner libraries and tools such as Tavily. 7 | - **Templates**, to accelerate development. 8 | - **LangServe**, help to build out and serve apps. 9 | - **LangSmith**, debug and monitor services. 10 | 11 | [LangChain.JS Repo](https://github.com/langchain-ai/langchainjs) 12 | Useful resources such as cook books, templates. 13 | 14 | [LangGraph.JS](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/) 15 | Covers what this course looked at in more depth with: 16 | - Concept guides 17 | - Tutorials 18 | - How-to-guides 19 | - References 20 | 21 | [LangChain on DeepLearning.AI](https://www.deeplearning.ai/courses/?courses_date_desc%5BrefinementList%5D%5Bpartnership%5D%5B0%5D=LangChain) 22 | Other LangChain courses available. 23 | 24 | [LangSmith Prompt Hub](https://smith.langchain.com/hub) 25 | A look at other prompts 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI Agents in LangGraph 2 | This is based on the DeepLearning.AI, [AI Agents in LangGraph](https://learn.deeplearning.ai/courses/ai-agents-in-langgraph/) course. In this repository I have converted all of the examples from Python to JavaScript. 3 | ## Introduction 4 | Agentic use and agentic search have moved on a lot recently. The sort of workflow discussed is one in which a request is made, actions to find information are taken and the results are written up. This is an iterative process until the goal is reached. 
This involves: 5 | - Planning 6 | - Tool use 7 | - Reflection 8 | - Multi-agent communication 9 | - Memory 10 | Langchain offers many of these elements, but has recently improved agent support with Cyclic Graphs. 11 | ### Cyclic Graphs 12 | ``` 13 | ReAct (Reason + Act) 14 | ==================== 15 | Actions 16 | ------------- ------------- 17 | | | | | 18 | | v | v 19 | Reasoning LLM Env 20 | Traces 21 | ^ | ^ | 22 | | | | | 23 | ------------- ------------- 24 | Observations 25 | ``` 26 | [ReAct: Synergizing reasoning and acting in language models](https://arxiv.org/abs/2210.03629) 27 | ``` 28 | Self-Refine 29 | =========== 30 | 31 | Input 32 | ------------- | ------------- 33 | | | | | | 34 | v | v | v 35 | Feedback Model M Refine 36 | | ^ ^ | 37 | | | | | 38 | ------------- ------------- 39 | Use M to get Use M to refine 40 | feedback on its its previous output 41 | own output given the feedback 42 | ``` 43 | [Self-Refine: Iterative refinement with self-feedback](https://arxiv.org/abs/2303.17651) 44 | ``` 45 | AlphaCodium 46 | =========== 47 | PRE-PROCESSING CODE ITERATIONS 48 | ------------------------------------------------------- -------------------------------------- 49 | _____________ 50 | | | 51 | v | 52 | Input - Problem Generate Rank Iterate on | Iterate on 53 | Description + Possible ---------> Solutions Public Tests ---------> AI Tests <- 54 | Public Tests Solutions | ^ | | 55 | | ^ | | | | 56 | | | | |_____ |------- 57 | v | v | | v 58 | Problem Public Tests Generate Initial | Final 59 | Reflection ---------> Reasoning Additional ---------> Code <- Solution 60 | AI Tests Solution 61 | ``` 62 | [Code engineering with AlphaCodium: From prompt engineering to flow engineering](https://arxiv.org/pdf/2401.08500) 63 | Langchain has been extended to include `LangGraph` to support these. 
64 | ### Lessons 65 | - [Build an Agent from Scratch](./1-agent/notes.md) 66 | - [LangGraph Components](./2-langgraph/notes.md) 67 | - [Agentic Search Tools](./3-search/notes.md) 68 | - [Persistence and Streaming](./4-persistance-streaming/notes.md) 69 | - [Human in the loop](./5-human/notes.md) 70 | - [Essay Writer](./6-writer/notes.md) 71 | - [LangChain Resources](./7-resources/notes.md) 72 | ## Set-Up 73 | ### API Key 74 | If you want to try these out you will first need to setup your own ChatGPT secret key in your local environment. [Here](https://chatgpt.en.obiscr.com/blog/posts/2023/How-to-get-api-key/) is how you get a key. Once you have this put it in a local (server side) environment variable. For example in Mac OS, assuming you are using `zsh`, append the following to the file `.zshenv` in you own home directory: 75 | ``` 76 | export OPENAI_API_KEY='your_secret_key_value' 77 | ``` 78 | When you restart the shell or your machine the environment variable `OPENAI_API_KEY` will be in place. 79 | ### Tavily API Key 80 | This tutorial also uses the [Tavily](https://docs.tavily.com) search tool. To use this you need to sign up for an API key and also export that API key in the same way we exported the OpenAPI key for ChatGPT. In this case you need to add the line: 81 | ``` 82 | export TAVILY_API_KEY='your_tavily_api_key' 83 | ``` 84 | Add this into your environment settings for the shell you are using. 85 | ### Node and JS 86 | Before trying any of the exercises don't forget to run `npm install` in the `./agents-langraph` directory to install the Node modules needed. 87 | 88 | In each subdirectory you will find a `*.js` or a `*.ts` file and, sometimes, some supporting files. Each file contains multiple prompts. 89 | 90 | In most cases the initial exercise is ready to run and the other exercises are commented out using the `\* ... *\` comment markers. In these cases the commented code blocks will have their own calls to the LLM. 
If you uncomment these blocks then be sure to comment out the last to calls above while you run that exercise, it will reduce run time and costs. 91 | 92 | ## Conclusion 93 | There are some architectures it is worth knpwing about but which are not covered by this course. These include: 94 | ### Multi-Agent 95 | Multiple agents working on the same state. All work on the same shared state. 96 | ``` 97 | ----------------------------------- 98 | | User | 99 | | Input: Generate a chart of ave. | 100 | | temp. in Alaska over last decade. | 101 | ----------------------------------- 102 | | 103 | First go to researcher 104 | | 105 | v 106 | -------------------------- ----------------------- ----------------- 107 | | Researcher |--------- message ------->| Router |<-------- message --------| Chart Generator | 108 | | Call a 'search' function | | (If statements based) | | | 109 | | or FINISH |<--- If 'continue' and ---| on agent output) |--- If 'continue' and --->| Code execution | 110 | -------------------------- state.sender == ----------------------- state.sender == ----------------- 111 | ^ 'chart_generator' | 'researcher' ^ 112 | | | | 113 | If state.sender == If function is called If state.sender == 114 | 'researcher' | 'chart_generator' 115 | | v | 116 | | ----------- | 117 | --------------------------------------------| call_tool |----------------------------------------- 118 | ----------- 119 | ``` 120 | ### Supervisor 121 | In this case the state may not be shared, each Agent may be a separate graph. The supervisor can use a more powerful LLM. 
122 | ``` 123 | ------- 124 | | User | 125 | ------- 126 | | ^ 127 | | | 128 | v | 129 | ----------------------------------- 130 | | Supervisor | 131 | ----------------------------------- 132 | ^ | | ^ ^ | 133 | | route route | | route 134 | | | | | | | 135 | | v v | | v 136 | --------- --------- --------- 137 | | Agent 1 | | Agent 2 | | Agent 3 | 138 | --------- --------- --------- 139 | ``` 140 | ### Flow Engineering (Plan and Execute) 141 | See the [Codium model above](###Cyclic-Graphs), this is essentially a pipeline with some noted that loop. The plan and execute style flow is a simpler example of this: 142 | 143 | ``` 144 | ------ 145 | ---------->| Plan | 146 | | ------ 147 | | | 148 | | 2 generate tasks 149 | 1 user v 150 | request ----------- ----------------- 151 | | | Task list | | | 152 | ------ | * ~~~~~~~ | ------------------- v 153 | | User | | * ~~~~~~~ |-- 3 exec tasks -->| Single-task-agent | Loop to solve task <--> TOOL 154 | ------ | * ~~~~~~~ | ------------------- | 155 | ^ | * ~~~~~~~ | | ^ | 156 | | ----------- | ----------------- 157 | | ^ 4 update state 158 | | 5b replan more tasks with task results 159 | 5a respond | | 160 | to user -------- | 161 | ----------| Replan |<------------------------------- 162 | -------- 163 | ``` 164 | ### Language Agent Tree Search 165 | This approach repeats until solved: 166 | 1. Select Node 167 | 2. Generate new candidates 168 | 2. Act, reflect and score 169 | 3. Back propagate (update parents) 170 | This makes great use of persistence to jump back to previous times when needed. 
171 | ``` 172 | ---------- --------- ---------- --------- ---------- --------- 173 | | Generate | | Reflect | | Generate | | Reflect | | Generate | | Reflect | 174 | ---------- ^--------- ^---------- ^--------- ^---------- ^--------- 175 | | / | / | / | | / / | | | 176 | v / v / v / | | / / | v | 177 | ---------- / --------- / ---------- / -|-----|- / ---------- / |------|- 178 | | %Act | | %Act .3 | | %Act .3 | | %Act .6 | | %Act .6 | ||%Act .7 | 179 | ---------- --------- ---------- -|-----|- ---------- |------|- 180 | | | | ^ ^ | | | ^ | 181 | v v v/ \v v | / | 182 | --- --- ----- ----- ----- ----- --|-- --|-- 183 | | % | | % | | %.8 || %.4 || %.8 || %.4 || %.9 || %.4 | 184 | --- --- ----- ----- ----- ----- --|-- --|-- 185 | | | | ^^ | 186 | v v v | \ v 187 | --- --- ----- ----- 188 | | % || % | | %.2 || % 1 | 189 | --- --- ----- ----- 190 | ``` 191 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "agents-langgraph", 3 | "version": "1.0.0", 4 | "main": "index.js", 5 | "type": "module", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "description": "", 12 | "dependencies": { 13 | "@inquirer/confirm": "^5.0.2", 14 | "@langchain/community": "^0.3.8", 15 | "@langchain/langgraph": "^0.2.20", 16 | "@langchain/langgraph-checkpoint-sqlite": "^0.1.3", 17 | "@tavily/core": "^0.0.2", 18 | "duckduckgo-search": "^1.0.7", 19 | "html-to-text": "^9.0.5", 20 | "langchain": "^0.3.5", 21 | "openai": "^4.68.2", 22 | "terminal-image": "^3.0.0", 23 | "tsx": "^4.19.1", 24 | "uuid": "^11.0.3", 25 | "zod": "^3.23.8" 26 | } 27 | } 28 | --------------------------------------------------------------------------------