├── .codespellignore
├── .env.example
├── .eslintrc.cjs
├── .github
└── workflows
│ ├── integration-tests.yml
│ └── unit-tests.yml
├── .gitignore
├── LICENSE
├── README.md
├── Untitled.ipynb
├── jest.config.js
├── langgraph.json
├── package.json
├── scripts
└── checkLanggraphPaths.js
├── src
└── memory_agent
│ ├── configuration.ts
│ ├── graph.ts
│ ├── prompts.ts
│ ├── state.ts
│ ├── tools.ts
│ └── utils.ts
├── static
├── memories.png
└── memory_graph.png
├── tests
├── agent.int.test.ts
├── agent.test.ts
└── configuration.test.ts
├── tsconfig.json
└── yarn.lock
/.codespellignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/langchain-ai/memory-agent-js/0962e2d2bce4e7a7a38ac543c6d5056cd2a740e1/.codespellignore
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | TAVILY_API_KEY=...
2 |
# To separate your traces from other applications
LANGCHAIN_PROJECT=memory-agent
5 | # LANGCHAIN_API_KEY=...
6 | # LANGCHAIN_TRACING_V2=true
7 |
8 | # The following depend on your selected configuration
9 |
10 | ## LLM choice:
11 | ANTHROPIC_API_KEY=....
12 | FIREWORKS_API_KEY=...
13 | OPENAI_API_KEY=...
14 |
--------------------------------------------------------------------------------
/.eslintrc.cjs:
--------------------------------------------------------------------------------
// ESLint configuration: lints the TypeScript sources only; generated
// output, plain JS, and declaration files are ignored below.
module.exports = {
  extends: [
    "eslint:recommended",
    "prettier",
    "plugin:@typescript-eslint/recommended",
  ],
  parserOptions: {
    ecmaVersion: 12,
    parser: "@typescript-eslint/parser",
    // Type-aware rules (e.g. no-floating-promises) need the tsconfig.
    project: "./tsconfig.json",
    sourceType: "module",
  },
  plugins: ["import", "@typescript-eslint", "no-instanceof"],
  ignorePatterns: [
    ".eslintrc.cjs",
    "scripts",
    "node_modules",
    "dist",
    "dist-cjs",
    "*.js",
    "*.cjs",
    "*.d.ts",
  ],
  rules: {
    "no-process-env": 0,
    "no-instanceof/no-instanceof": 2,
    "@typescript-eslint/explicit-module-boundary-types": 0,
    "@typescript-eslint/no-empty-function": 0,
    "@typescript-eslint/no-non-null-assertion": 0,
    "@typescript-eslint/no-shadow": 0,
    "@typescript-eslint/no-empty-interface": 0,
    "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
    "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
    // Promise hygiene: every promise must be awaited/returned/handled.
    "@typescript-eslint/no-floating-promises": "error",
    "@typescript-eslint/no-misused-promises": "error",
    camelcase: 0,
    "class-methods-use-this": 0,
    // Dev-only packages may only be imported from test files.
    "import/extensions": [2, "ignorePackages"],
    "import/no-extraneous-dependencies": [
      "error",
      { devDependencies: ["**/*.test.ts"] },
    ],
    "import/no-unresolved": 0,
    "import/prefer-default-export": 0,
    "keyword-spacing": "error",
    "max-classes-per-file": 0,
    "max-len": 0,
    "no-await-in-loop": 0,
    "no-bitwise": 0,
    "no-console": 0,
    "no-restricted-syntax": 0,
    "no-shadow": 0,
    "no-continue": 0,
    "no-underscore-dangle": 0,
    "no-use-before-define": 0,
    "no-useless-constructor": 0,
    "no-return-await": 0,
    "consistent-return": 0,
    "no-else-return": 0,
    "new-cap": ["error", { properties: false, capIsNew: false }],
  },
};
63 |
--------------------------------------------------------------------------------
/.github/workflows/integration-tests.yml:
--------------------------------------------------------------------------------
1 | # This workflow will run integration tests for the current project once per day
2 |
3 | name: Integration Tests
4 |
5 | on:
6 | schedule:
7 | - cron: "37 14 * * *" # Run at 7:37 AM Pacific Time (14:37 UTC) every day
8 | workflow_dispatch: # Allows triggering the workflow manually in GitHub UI
9 |
10 | # If another scheduled run starts while this workflow is still running,
11 | # cancel the earlier run in favor of the next run.
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.ref }}
14 | cancel-in-progress: true
15 |
16 | jobs:
17 | integration-tests:
18 | name: Integration Tests
19 | strategy:
20 | matrix:
21 | os: [ubuntu-latest]
22 | node-version: [20.x]
23 | runs-on: ${{ matrix.os }}
24 | steps:
25 | - uses: actions/checkout@v4
26 | - name: Use Node.js ${{ matrix.node-version }}
27 | uses: actions/setup-node@v3
28 | with:
29 | node-version: ${{ matrix.node-version }}
30 | cache: "yarn"
31 | - name: Install dependencies
32 | run: yarn install --immutable
33 | - name: Build project
34 | run: yarn build
35 | - name: Run integration tests
36 | env:
37 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
38 | TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
39 | run: yarn test:int
40 |
--------------------------------------------------------------------------------
/.github/workflows/unit-tests.yml:
--------------------------------------------------------------------------------
1 | # This workflow will run unit tests for the current project
2 |
3 | name: CI
4 |
5 | on:
6 | push:
7 | branches: ["main"]
8 | pull_request:
9 | workflow_dispatch: # Allows triggering the workflow manually in GitHub UI
10 |
11 | # If another push to the same PR or branch happens while this workflow is still running,
12 | # cancel the earlier run in favor of the next run.
13 | concurrency:
14 | group: ${{ github.workflow }}-${{ github.ref }}
15 | cancel-in-progress: true
16 |
17 | jobs:
18 | unit-tests:
19 | name: Unit Tests
20 | strategy:
21 | matrix:
22 | os: [ubuntu-latest]
23 | node-version: [18.x, 20.x]
24 | runs-on: ${{ matrix.os }}
25 | steps:
26 | - uses: actions/checkout@v4
27 | - name: Use Node.js ${{ matrix.node-version }}
28 | uses: actions/setup-node@v3
29 | with:
30 | node-version: ${{ matrix.node-version }}
31 | cache: "yarn"
32 | - name: Install dependencies
33 | run: yarn install --immutable
34 | - name: Build project
35 | run: yarn build
36 |
37 | - name: Lint project
38 | run: yarn lint:all
39 |
40 | - name: Check README spelling
41 | uses: codespell-project/actions-codespell@v2
42 | with:
43 | ignore_words_file: .codespellignore
44 | path: README.md
45 |
46 | - name: Check code spelling
47 | uses: codespell-project/actions-codespell@v2
48 | with:
49 | ignore_words_file: .codespellignore
50 | path: src/
51 |
52 | - name: Run tests
53 | env:
54 | ANTHROPIC_API_KEY: afakekey
55 | TAVILY_API_KEY: anotherfakekey
56 | run: yarn test
57 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | index.cjs
2 | index.js
3 | index.d.ts
4 | node_modules
5 | dist
6 | .yarn/*
7 | !.yarn/patches
8 | !.yarn/plugins
9 | !.yarn/releases
10 | !.yarn/sdks
11 | !.yarn/versions
12 |
13 | .turbo
14 | **/.turbo
15 | **/.eslintcache
16 |
17 | .env
18 | .ipynb_checkpoints
19 |
20 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 LangChain
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph.js ReAct Memory Agent
2 |
[![Unit Tests](https://github.com/langchain-ai/memory-agent-js/actions/workflows/unit-tests.yml/badge.svg)](https://github.com/langchain-ai/memory-agent-js/actions/workflows/unit-tests.yml)
[![Integration Tests](https://github.com/langchain-ai/memory-agent-js/actions/workflows/integration-tests.yml/badge.svg)](https://github.com/langchain-ai/memory-agent-js/actions/workflows/integration-tests.yml)
[![Open in LangGraph Studio](https://img.shields.io/badge/Open_in-LangGraph_Studio-blue.svg)](https://langgraph-studio.vercel.app/templates/open?githubUrl=https://github.com/langchain-ai/memory-agent-js)
6 |
7 | This repo provides a simple example of a ReAct-style agent with a tool to save memories, implemented in JavaScript. This is a straightforward way to allow an agent to persist important information for later use. In this implementation, we save all memories scoped to a configurable `userId`, enabling the bot to learn and remember a user's preferences across different conversational threads.
8 |
![Memory graph](./static/memory_graph.png)
10 |
11 | ## Getting Started
12 |
13 | This quickstart will get your memory service deployed on [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/). Once created, you can interact with it from any API.
14 |
15 | Assuming you have already [installed LangGraph Studio](https://github.com/langchain-ai/langgraph-studio?tab=readme-ov-file#download), to set up:
16 |
17 | 1. Create a `.env` file.
18 |
19 | ```bash
20 | cp .env.example .env
21 | ```
22 |
23 | 2. Define required API keys in your `.env` file.
24 |
25 |
28 |
29 | ### Setup Model
30 |
The default values for `model` are shown below:
32 |
33 | ```yaml
34 | model: anthropic/claude-3-5-sonnet-20240620
35 | ```
36 |
37 | Follow the instructions below to get set up, or pick one of the additional options.
38 |
39 | #### Anthropic
40 |
41 | To use Anthropic's chat models:
42 |
43 | 1. Sign up for an [Anthropic API key](https://console.anthropic.com/) if you haven't already.
44 | 2. Once you have your API key, add it to your `.env` file:
45 |
46 | ```
47 | ANTHROPIC_API_KEY=your-api-key
48 | ```
49 |
50 | #### OpenAI
51 |
52 | To use OpenAI's chat models:
53 |
54 | 1. Sign up for an [OpenAI API key](https://platform.openai.com/signup).
55 | 2. Once you have your API key, add it to your `.env` file:
56 |
57 | ```
58 | OPENAI_API_KEY=your-api-key
59 | ```
60 |
61 |
64 |
65 | 3. Open in LangGraph studio. Navigate to the `memory_agent` graph and have a conversation with it! Try sending some messages saying your name and other things the bot should remember.
66 |
67 | Assuming the bot saved some memories, create a _new_ thread using the `+` icon. Then chat with the bot again - if you've completed your setup correctly, the bot should now have access to the memories you've saved!
68 |
69 | You can review the saved memories by clicking the "memory" button.
70 |
![Saved memories](./static/memories.png)
72 |
73 | ## How it works
74 |
75 | This chat bot reads from your memory graph's `Store` to easily list extracted memories. If it calls a tool, LangGraph will route to the `store_memory` node to save the information to the store.
76 |
77 | ## How to evaluate
78 |
79 | Memory management can be challenging to get right, especially if you add additional tools for the bot to choose between.
80 | To tune the frequency and quality of memories your bot is saving, we recommend starting from an evaluation set, adding to it over time as you find and address common errors in your service.
81 |
82 | We have provided a few example evaluation cases in [the test file here](./tests/agent.int.test.ts). As you can see, the metrics themselves don't have to be terribly complicated, especially not at the outset.
83 |
84 | ## How to customize
85 |
86 | 1. Customize memory content: we've defined a simple memory structure `content: string, context: string` for each memory, but you could structure them in other ways.
87 | 2. Provide additional tools: the bot will be more useful if you connect it to other functions.
88 | 3. Select a different model: We default to anthropic/claude-3-5-sonnet-20240620. You can select a compatible chat model using provider/model-name via configuration. Example: openai/gpt-4.
89 | 4. Customize the prompts: We provide a default prompt in the [prompts.ts](src/memory_agent/prompts.ts) file. You can easily update this via configuration.
90 |
91 |
222 |
--------------------------------------------------------------------------------
/Untitled.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 4,
6 | "id": "09472bfb-2223-4f83-8f98-5a48d29dd89a",
7 | "metadata": {},
8 | "outputs": [
9 | {
10 | "name": "stdout",
11 | "output_type": "stream",
12 | "text": [
13 | "table_name=
columns=[, , ] conditions=[Condition(column='status', operator=, value='fulfilled'), Condition(column='ordered_at', operator=='>, value='2023-05-01'), Condition(column='ordered_at', operator=, value='2023-05-31'), Condition(column='delivered_at', operator='>, value=DynamicValue(column_name='expected_delivery_date'))] order_by=\n"
14 | ]
15 | }
16 | ],
17 | "source": [
18 | "from enum import Enum\n",
19 | "from typing import Union\n",
20 | "\n",
21 | "from langsmith.wrappers import wrap_openai\n",
22 | "import openai\n",
23 | "from openai import OpenAI\n",
24 | "from pydantic import BaseModel\n",
25 | "\n",
26 | "client = OpenAI()\n",
27 | "client = wrap_openai(client)\n",
28 | "\n",
29 | "\n",
30 | "class Table(str, Enum):\n",
31 | " orders = \"orders\"\n",
32 | " customers = \"customers\"\n",
33 | " products = \"products\"\n",
34 | "\n",
35 | "\n",
36 | "class Column(str, Enum):\n",
37 | " id = \"id\"\n",
38 | " status = \"status\"\n",
39 | " expected_delivery_date = \"expected_delivery_date\"\n",
40 | " delivered_at = \"delivered_at\"\n",
41 | " shipped_at = \"shipped_at\"\n",
42 | " ordered_at = \"ordered_at\"\n",
43 | " canceled_at = \"canceled_at\"\n",
44 | "\n",
45 | "\n",
46 | "class Operator(str, Enum):\n",
47 | " eq = \"=\"\n",
48 | " gt = \">\"\n",
49 | " lt = \"<\"\n",
50 | " le = \"<=\"\n",
51 | " ge = \">=\"\n",
52 | " ne = \"!=\"\n",
53 | "\n",
54 | "\n",
55 | "class OrderBy(str, Enum):\n",
56 | " asc = \"asc\"\n",
57 | " desc = \"desc\"\n",
58 | "\n",
59 | "\n",
60 | "class DynamicValue(BaseModel):\n",
61 | " column_name: str\n",
62 | "\n",
63 | "\n",
64 | "class Condition(BaseModel):\n",
65 | " column: str\n",
66 | " operator: Operator\n",
67 | " value: Union[str, int, DynamicValue]\n",
68 | "\n",
69 | "\n",
70 | "class Query(BaseModel):\n",
71 | " table_name: Table\n",
72 | " columns: list[Column]\n",
73 | " conditions: list[Condition]\n",
74 | " order_by: OrderBy\n",
75 | "\n",
76 | "\n",
77 | "completion = client.beta.chat.completions.parse(\n",
78 | " model=\"gpt-4o-2024-08-06\",\n",
79 | " messages=[\n",
80 | " {\n",
81 | " \"role\": \"system\",\n",
82 | " \"content\": \"You are a helpful assistant. The current date is August 6, 2024. You help users query for the data they are looking for by calling the query function.\",\n",
83 | " },\n",
84 | " {\n",
85 | " \"role\": \"user\",\n",
86 | " \"content\": \"look up all my orders in may of last year that were fulfilled but not delivered on time\",\n",
87 | " },\n",
88 | " ],\n",
89 | " tools=[\n",
90 | " openai.pydantic_function_tool(Query),\n",
91 | " ],\n",
92 | ")\n",
93 | "\n",
94 | "print(completion.choices[0].message.tool_calls[0].function.parsed_arguments)"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "id": "0d6903df-d580-4988-a412-29acbe5a997d",
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "from typing import List\n",
105 | "\n",
106 | "import ell\n",
107 | "\n",
108 | "\n",
109 | "@ell.simple(model=\"gpt-4o-mini\", temperature=1.0, n=10)\n",
110 | "def write_ten_drafts(idea: str):\n",
111 | " \"\"\"You are an adept story writer. The story should only be 3 paragraphs\"\"\"\n",
112 | " return f\"Write a story about {idea}.\"\n",
113 | "\n",
114 | "\n",
115 | "@ell.simple(model=\"gpt-4o\", temperature=0.1)\n",
116 | "def choose_the_best_draft(drafts: List[str]):\n",
117 | " \"\"\"You are an expert fiction editor.\"\"\"\n",
118 | " return f\"Choose the best draft from the following list: {'\\n'.join(drafts)}.\"\n",
119 | "\n",
120 | "\n",
121 | "drafts = write_ten_drafts(idea)\n",
122 | "\n",
123 | "best_draft = choose_the_best_draft(drafts) # Best of 10 sampling."
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": 4,
129 | "id": "edc5e407-5498-417d-8643-b049eef0af94",
130 | "metadata": {},
131 | "outputs": [],
132 | "source": [
133 | "# %pip install -U langchain langchain-openai"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": 34,
139 | "id": "c6791557-dfdd-489c-a8c7-7f1884cd9d2e",
140 | "metadata": {},
141 | "outputs": [],
142 | "source": [
143 | "def simple(**dec_kwargs):\n",
144 | " def decorator(fn):\n",
145 | " model, msgs = (\n",
146 | " init_chat_model(**dec_kwargs),\n",
147 | " [(\"system\", getattr(fn, \"__doc__\"))] if getattr(fn, \"__doc__\") else [],\n",
148 | " )\n",
149 | "\n",
150 | " def call(*args, **kwargs):\n",
151 | " return model.invoke([*msgs, (\"user\", str(fn(*args, **kwargs)))]).content\n",
152 | "\n",
153 | " return call\n",
154 | "\n",
155 | " return decorator"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "id": "eb20961e-1715-4d80-a231-abcd61dccece",
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "@simple(model=\"gpt-4o-mini\", temperature=1.0, n=10)\n",
166 | "def write_ten_drafts(idea: str):\n",
167 | " \"\"\"You are an adept story writer. The story should only be 3 paragraphs\"\"\"\n",
168 | " return f\"Write a story about {idea}.\"\n",
169 | "\n",
170 | "\n",
171 | "@simple(model=\"gpt-4o-mini\")\n",
172 | "def choose_the_best_draft(drafts: list[str]):\n",
173 | " \"\"\"You are an expert fiction editor.\"\"\"\n",
174 | " return f\"Choose the best draft from the following list: {drafts}.\""
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": 30,
180 | "id": "3cb3f496-4054-4081-b7ca-039f80b34557",
181 | "metadata": {},
182 | "outputs": [],
183 | "source": [
184 | "import inspect\n",
185 | "\n",
186 | "from langchain.chat_models import init_chat_model\n",
187 | "from langgraph.prebuilt import create_react_agent\n",
188 | "\n",
189 | "# def _get_inputs(signature, *args, **kwargs):\n",
190 | "# \"\"\"Return a dictionary of inputs from the function signature.\"\"\"\n",
191 | "# bound = signature.bind_partial(*args, **kwargs)\n",
192 | "# bound.apply_defaults()\n",
193 | "# arguments = dict(bound.arguments)\n",
194 | "# arguments.pop(\"self\", None)\n",
195 | "# arguments.pop(\"cls\", None)\n",
196 | "# for param_name, param in signature.parameters.items():\n",
197 | "# if param.kind == inspect.Parameter.VAR_KEYWORD:\n",
198 | "# # Update with the **kwargs, and remove the original entry\n",
199 | "# # This is to help flatten out keyword arguments\n",
200 | "# if param_name in arguments:\n",
201 | "# arguments.update(arguments[param_name])\n",
202 | "# arguments.pop(param_name)\n",
203 | "\n",
204 | "# return arguments\n",
205 | "\n",
206 | "\n",
207 | "# def _get_inputs_safe(signature, *args, **kwargs):\n",
208 | "# try:\n",
209 | "# return _get_inputs(signature, *args, **kwargs)\n",
210 | "# except BaseException as e:\n",
211 | "# print(e)\n",
212 | "# return {\"args\": args, \"kwargs\": kwargs}\n",
213 | "\n",
214 | "\n",
215 | "def simple(**dec_kwargs):\n",
216 | " def decorator(fn):\n",
217 | " sysprompt = getattr(fn, \"__doc__\", \"\")\n",
218 | " # sig = inspect.signature(fn)\n",
219 | " model = init_chat_model(**dec_kwargs)\n",
220 | " # agent = create_react_agent(model, [fn])\n",
221 | "\n",
222 | " def call(*args, **kwargs):\n",
223 | " # agent_args = _get_inputs_safe(sig, *args, **kwargs)\n",
224 | " resp = fn(*args, **kwargs)\n",
225 | " return model.invoke([(\"system\", sysprompt), (\"user\", str(resp))]).content\n",
226 | "\n",
227 | " return call\n",
228 | "\n",
229 | " return decorator"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": 31,
235 | "id": "24ef9043-ff1a-4454-9e6d-c86a2655e2be",
236 | "metadata": {},
237 | "outputs": [],
238 | "source": []
239 | },
240 | {
241 | "cell_type": "code",
242 | "execution_count": 32,
243 | "id": "32a36963-4cd0-476f-903a-064cea9330eb",
244 | "metadata": {},
245 | "outputs": [],
246 | "source": []
247 | },
248 | {
249 | "cell_type": "code",
250 | "execution_count": 33,
251 | "id": "bed51da2-a920-4971-bbae-d84ce99cf884",
252 | "metadata": {},
253 | "outputs": [
254 | {
255 | "data": {
256 | "text/plain": [
257 | "'The phrase \"once upon a time\" is the better draft. It evokes a sense of storytelling and invites the reader into a narrative, while \"I like pie\" is more of a simple statement without much context or depth. \"Once upon a time\" has the potential to lead into a rich and engaging story.'"
258 | ]
259 | },
260 | "execution_count": 33,
261 | "metadata": {},
262 | "output_type": "execute_result"
263 | }
264 | ],
265 | "source": []
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": null,
270 | "id": "56237a71-3446-43d5-bb2e-6cbc0ddf7364",
271 | "metadata": {},
272 | "outputs": [],
273 | "source": []
274 | }
275 | ],
276 | "metadata": {
277 | "kernelspec": {
278 | "display_name": "Python 3 (ipykernel)",
279 | "language": "python",
280 | "name": "python3"
281 | },
282 | "language_info": {
283 | "codemirror_mode": {
284 | "name": "ipython",
285 | "version": 3
286 | },
287 | "file_extension": ".py",
288 | "mimetype": "text/x-python",
289 | "name": "python",
290 | "nbconvert_exporter": "python",
291 | "pygments_lexer": "ipython3",
292 | "version": "3.11.2"
293 | }
294 | },
295 | "nbformat": 4,
296 | "nbformat_minor": 5
297 | }
298 |
--------------------------------------------------------------------------------
/jest.config.js:
--------------------------------------------------------------------------------
// Jest configuration for running TypeScript tests as native ES modules.
export default {
  preset: "ts-jest/presets/default-esm",
  moduleNameMapper: {
    // Strip the `.js` extension from relative imports so that the
    // ESM-style `./foo.js` specifiers in the TS sources resolve to `./foo.ts`.
    "^(\\.{1,2}/.*)\\.js$": "$1",
  },
  transform: {
    "^.+\\.tsx?$": [
      "ts-jest",
      {
        useESM: true,
      },
    ],
  },
  extensionsToTreatAsEsm: [".ts"],
  // Load variables from .env (via dotenv) before each test file runs.
  setupFiles: ["dotenv/config"],
  passWithNoTests: true,
  // Generous timeout: the integration tests make real model/API calls.
  testTimeout: 20_000,
};
19 |
--------------------------------------------------------------------------------
/langgraph.json:
--------------------------------------------------------------------------------
1 | {
2 | "node_version": "20",
3 | "dockerfile_lines": [],
4 | "dependencies": ["."],
5 | "graphs": {
6 | "agent": "./src/memory_agent/graph.ts:graph"
7 | },
8 | "env": ".env"
9 | }
10 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "memory-agent",
3 | "version": "0.0.1",
4 | "description": "A ReAct-style agent with a tool to store memories.",
  "main": "src/memory_agent/graph.ts",
6 | "author": "Brace Sproul",
7 | "license": "MIT",
8 | "private": true,
9 | "type": "module",
10 | "packageManager": "yarn@1.22.22",
11 | "scripts": {
12 | "build": "tsc",
13 | "clean": "rm -rf dist",
14 | "test": "node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=\\.test\\.ts$ --testPathIgnorePatterns=\\.int\\.test\\.ts$",
15 | "test:int": "node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=\\.int\\.test\\.ts$",
16 | "format": "prettier --write .",
17 | "lint": "eslint src",
18 | "format:check": "prettier --check .",
19 | "lint:langgraph-json": "node scripts/checkLanggraphPaths.js",
    "lint:all": "yarn lint && yarn lint:langgraph-json && yarn format:check",
    "test:all": "yarn test && yarn test:int && yarn lint:langgraph-json"
22 | },
23 | "dependencies": {
24 | "@langchain/anthropic": "^0.3.12",
25 | "@langchain/aws": "^0.1.3",
26 | "@langchain/cohere": "^0.3.2",
27 | "@langchain/community": "^0.3.27",
28 | "@langchain/core": "^0.3.37",
29 | "@langchain/google-genai": "^0.1.7",
30 | "@langchain/google-vertexai": "^0.1.8",
31 | "@langchain/groq": "^0.1.3",
32 | "@langchain/langgraph": "^0.2.43",
33 | "@langchain/langgraph-sdk": "^0.0.36",
34 | "@langchain/mistralai": "^0.2.0",
35 | "@langchain/ollama": "^0.1.5",
36 | "@langchain/openai": "^0.4.2",
37 | "langchain": "^0.3.14",
38 | "langsmith": "^0.3.3",
39 | "ts-node": "^10.9.2",
40 | "uuid": "^10.0.0",
41 | "zod": "^3.23.8"
42 | },
43 | "devDependencies": {
44 | "@eslint/eslintrc": "^3.1.0",
45 | "@eslint/js": "^9.9.1",
46 | "@jest/globals": "^29.7.0",
47 | "@tsconfig/recommended": "^1.0.7",
48 | "@types/jest": "^29.5.0",
49 | "@types/node": "^20.14.8",
50 | "@typescript-eslint/eslint-plugin": "^5.59.8",
51 | "@typescript-eslint/parser": "^5.59.8",
52 | "dotenv": "^16.4.5",
53 | "eslint": "^8.41.0",
54 | "eslint-config-prettier": "^8.8.0",
55 | "eslint-plugin-import": "^2.27.5",
56 | "eslint-plugin-no-instanceof": "^1.0.1",
57 | "eslint-plugin-prettier": "^4.2.1",
58 | "jest": "^29.7.0",
59 | "prettier": "^3.3.3",
60 | "ts-jest": "^29.1.0",
61 | "typescript": "^5.3.3"
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/scripts/checkLanggraphPaths.js:
--------------------------------------------------------------------------------
1 | import fs from "fs";
2 | import path from "path";
3 | import { fileURLToPath } from "url";
4 |
// True when a file or directory exists at the given path.
const fileExists = (filePath) => fs.existsSync(filePath);
9 |
// Check whether `objectName` is exported from the file at `filePath`.
// Recognizes `export const|let|var|function|class <name>` (including
// `export async function`) and re-export lists like `export { name }`.
// Returns false (after logging) if the file cannot be read.
function isObjectExported(filePath, objectName) {
  try {
    const fileContent = fs.readFileSync(filePath, "utf8");
    // Escape regex metacharacters so arbitrary export names are matched
    // literally rather than being interpreted as pattern syntax.
    const escaped = objectName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
    const exportRegex = new RegExp(
      `export\\s+(?:const|let|var|async\\s+function|function|class)\\s+${escaped}\\b` +
        `|export\\s+\\{[^}]*\\b${escaped}\\b[^}]*\\}`,
    );
    return exportRegex.test(fileContent);
  } catch (error) {
    console.error(`Error reading file ${filePath}: ${error.message}`);
    return false;
  }
}
23 |
// Validate langgraph.json: every entry under "graphs" must be of the form
// "<relative/file/path>:<exportedName>", point at an existing file, and
// that file must actually export the named object. Exits non-zero on any
// failure so this can gate CI.
function checkLanggraphPaths() {
  const __filename = fileURLToPath(import.meta.url);
  const __dirname = path.dirname(__filename);
  // langgraph.json lives in the repo root, one level above scripts/.
  const langgraphPath = path.join(__dirname, "..", "langgraph.json");

  if (!fileExists(langgraphPath)) {
    console.error("langgraph.json not found in the root directory");
    process.exit(1);
  }

  try {
    const langgraphContent = JSON.parse(fs.readFileSync(langgraphPath, "utf8"));
    const graphs = langgraphContent.graphs;

    if (!graphs || typeof graphs !== "object") {
      console.error('Invalid or missing "graphs" object in langgraph.json');
      process.exit(1);
    }

    let hasError = false;

    // Check every declared graph; report all problems before exiting.
    for (const value of Object.values(graphs)) {
      const [filePath, objectName] = value.split(":");
      const fullPath = path.join(__dirname, "..", filePath);

      if (!fileExists(fullPath)) {
        console.error(`File not found: ${fullPath}`);
        hasError = true;
        continue;
      }

      if (!isObjectExported(fullPath, objectName)) {
        console.error(
          `Object "${objectName}" is not exported from ${fullPath}`,
        );
        hasError = true;
      }
    }

    if (hasError) {
      process.exit(1);
    } else {
      console.log(
        "All paths in langgraph.json are valid and objects are exported correctly.",
      );
    }
  } catch (error) {
    console.error(`Error parsing langgraph.json: ${error.message}`);
    process.exit(1);
  }
}
76 |
77 | checkLanggraphPaths();
78 |
--------------------------------------------------------------------------------
/src/memory_agent/configuration.ts:
--------------------------------------------------------------------------------
1 | // Define the configurable parameters for the agent
2 |
3 | import { Annotation, LangGraphRunnableConfig } from "@langchain/langgraph";
4 | import { SYSTEM_PROMPT } from "./prompts.js";
5 |
6 | export const ConfigurationAnnotation = Annotation.Root({
7 | userId: Annotation(),
8 | model: Annotation(),
9 | systemPrompt: Annotation(),
10 | });
11 |
12 | export type Configuration = typeof ConfigurationAnnotation.State;
13 |
14 | export function ensureConfiguration(config?: LangGraphRunnableConfig) {
15 | const configurable = config?.configurable || {};
16 | return {
17 | userId: configurable?.userId || "default",
18 | model: configurable?.model || "anthropic/claude-3-5-sonnet-20240620",
19 | systemPrompt: configurable?.systemPrompt || SYSTEM_PROMPT,
20 | };
21 | }
22 |
--------------------------------------------------------------------------------
/src/memory_agent/graph.ts:
--------------------------------------------------------------------------------
1 | // Main graph
2 | import {
3 | LangGraphRunnableConfig,
4 | START,
5 | StateGraph,
6 | END,
7 | } from "@langchain/langgraph";
8 | import { BaseMessage, AIMessage } from "@langchain/core/messages";
9 | import { initChatModel } from "langchain/chat_models/universal";
10 | import { initializeTools } from "./tools.js";
11 | import {
12 | ConfigurationAnnotation,
13 | ensureConfiguration,
14 | } from "./configuration.js";
15 | import { GraphAnnotation } from "./state.js";
16 | import { getStoreFromConfigOrThrow, splitModelAndProvider } from "./utils.js";
17 |
18 | const llm = await initChatModel();
19 |
20 | async function callModel(
21 | state: typeof GraphAnnotation.State,
22 | config: LangGraphRunnableConfig,
23 | ): Promise<{ messages: BaseMessage[] }> {
24 | const store = getStoreFromConfigOrThrow(config);
25 | const configurable = ensureConfiguration(config);
26 | const memories = await store.search(["memories", configurable.userId], {
27 | limit: 10,
28 | });
29 |
30 | let formatted =
31 | memories
32 | ?.map((mem) => `[${mem.key}]: ${JSON.stringify(mem.value)}`)
33 | ?.join("\n") || "";
34 | if (formatted) {
35 | formatted = `\n\n${formatted}\n`;
36 | }
37 |
38 | const sys = configurable.systemPrompt
39 | .replace("{user_info}", formatted)
40 | .replace("{time}", new Date().toISOString());
41 |
42 | const tools = initializeTools(config);
43 | const boundLLM = llm.bind({
44 | tools: tools,
45 | tool_choice: "auto",
46 | });
47 |
48 | const result = await boundLLM.invoke(
49 | [{ role: "system", content: sys }, ...state.messages],
50 | {
51 | configurable: splitModelAndProvider(configurable.model),
52 | },
53 | );
54 |
55 | return { messages: [result] };
56 | }
57 |
58 | async function storeMemory(
59 | state: typeof GraphAnnotation.State,
60 | config: LangGraphRunnableConfig,
61 | ): Promise<{ messages: BaseMessage[] }> {
62 | const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
63 | const toolCalls = lastMessage.tool_calls || [];
64 |
65 | const tools = initializeTools(config);
66 | const upsertMemoryTool = tools[0];
67 |
68 | const savedMemories = await Promise.all(
69 | toolCalls.map(async (tc) => {
70 | return await upsertMemoryTool.invoke(tc);
71 | }),
72 | );
73 |
74 | return { messages: savedMemories };
75 | }
76 |
77 | function routeMessage(
78 | state: typeof GraphAnnotation.State,
79 | ): "store_memory" | typeof END {
80 | const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
81 | if (lastMessage.tool_calls?.length) {
82 | return "store_memory";
83 | }
84 | return END;
85 | }
86 |
// Create the graph + all nodes
export const builder = new StateGraph(
  {
    stateSchema: GraphAnnotation,
  },
  ConfigurationAnnotation,
)
  .addNode("call_model", callModel)
  .addNode("store_memory", storeMemory)
  .addEdge(START, "call_model")
  // After each model turn: persist requested memories, or finish.
  .addConditionalEdges("call_model", routeMessage, {
    store_memory: "store_memory",
    [END]: END,
  })
  // Loop back so the model can respond after memories are saved.
  .addEdge("store_memory", "call_model");

export const graph = builder.compile();
graph.name = "MemoryAgent";
105 |
--------------------------------------------------------------------------------
/src/memory_agent/prompts.ts:
--------------------------------------------------------------------------------
// Define default prompts

/**
 * Default system prompt for the memory agent.
 *
 * Placeholders (substituted in graph.ts before each model call):
 * - `{user_info}`: formatted memories retrieved from the store, or "" if none.
 * - `{time}`: the current time as an ISO-8601 string.
 */
export const SYSTEM_PROMPT = `You are a helpful and friendly chatbot. Get to know the user! \
Ask questions! Be spontaneous!
{user_info}

System Time: {time}`;
8 |
--------------------------------------------------------------------------------
/src/memory_agent/state.ts:
--------------------------------------------------------------------------------
1 | import { BaseMessage } from "@langchain/core/messages";
2 | import {
3 | Annotation,
4 | Messages,
5 | messagesStateReducer,
6 | } from "@langchain/langgraph";
7 |
8 | /**
9 | * Main graph state.
10 | */
11 | export const GraphAnnotation = Annotation.Root({
12 | /**
13 | * The messages in the conversation.
14 | */
15 | messages: Annotation({
16 | reducer: messagesStateReducer,
17 | default: () => [],
18 | }),
19 | });
20 |
--------------------------------------------------------------------------------
/src/memory_agent/tools.ts:
--------------------------------------------------------------------------------
1 | import { LangGraphRunnableConfig } from "@langchain/langgraph";
2 | import { ensureConfiguration } from "./configuration.js";
3 | import { v4 as uuidv4 } from "uuid";
4 | import { tool } from "@langchain/core/tools";
5 | import { z } from "zod";
6 | import { getStoreFromConfigOrThrow } from "./utils.js";
7 |
8 | /**
9 | * Initialize tools within a function so that they have access to the current
10 | * state and config at runtime.
11 | */
12 | export function initializeTools(config?: LangGraphRunnableConfig) {
13 | /**
14 | * Upsert a memory in the database.
15 | * @param content The main content of the memory.
16 | * @param context Additional context for the memory.
17 | * @param memoryId Optional ID to overwrite an existing memory.
18 | * @returns A string confirming the memory storage.
19 | */
20 | async function upsertMemory(opts: {
21 | content: string;
22 | context: string;
23 | memoryId?: string;
24 | }): Promise {
25 | const { content, context, memoryId } = opts;
26 | if (!config || !config.store) {
27 | throw new Error("Config or store not provided");
28 | }
29 |
30 | const configurable = ensureConfiguration(config);
31 | const memId = memoryId || uuidv4();
32 | const store = getStoreFromConfigOrThrow(config);
33 |
34 | await store.put(["memories", configurable.userId], memId, {
35 | content,
36 | context,
37 | });
38 |
39 | return `Stored memory ${memId}`;
40 | }
41 |
42 | const upsertMemoryTool = tool(upsertMemory, {
43 | name: "upsertMemory",
44 | description:
45 | "Upsert a memory in the database. If a memory conflicts with an existing one, \
46 | update the existing one by passing in the memory_id instead of creating a duplicate. \
47 | If the user corrects a memory, update it. Can call multiple times in parallel \
48 | if you need to store or update multiple memories.",
49 | schema: z.object({
50 | content: z.string().describe(
51 | "The main content of the memory. For example: \
52 | 'User expressed interest in learning about French.'",
53 | ),
54 | context: z.string().describe(
55 | "Additional context for the memory. For example: \
56 | 'This was mentioned while discussing career options in Europe.'",
57 | ),
58 | memoryId: z
59 | .string()
60 | .optional()
61 | .describe(
62 | "The memory ID to overwrite. Only provide if updating an existing memory.",
63 | ),
64 | }),
65 | });
66 |
67 | return [upsertMemoryTool];
68 | }
69 |
--------------------------------------------------------------------------------
/src/memory_agent/utils.ts:
--------------------------------------------------------------------------------
1 | import { BaseStore, LangGraphRunnableConfig } from "@langchain/langgraph";
2 | /**
3 | * Get the store from the configuration or throw an error.
4 | */
5 | export function getStoreFromConfigOrThrow(
6 | config: LangGraphRunnableConfig,
7 | ): BaseStore {
8 | if (!config.store) {
9 | throw new Error("Store not found in configuration");
10 | }
11 |
12 | return config.store;
13 | }
14 |
15 | /**
16 | * Split the fully specified model name into model and provider.
17 | */
18 | export function splitModelAndProvider(fullySpecifiedName: string): {
19 | model: string;
20 | provider?: string;
21 | } {
22 | let provider: string | undefined;
23 | let model: string;
24 |
25 | if (fullySpecifiedName.includes("/")) {
26 | [provider, model] = fullySpecifiedName.split("/", 2);
27 | } else {
28 | model = fullySpecifiedName;
29 | }
30 |
31 | return { model, provider };
32 | }
33 |
--------------------------------------------------------------------------------
/static/memories.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/langchain-ai/memory-agent-js/0962e2d2bce4e7a7a38ac543c6d5056cd2a740e1/static/memories.png
--------------------------------------------------------------------------------
/static/memory_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/langchain-ai/memory-agent-js/0962e2d2bce4e7a7a38ac543c6d5056cd2a740e1/static/memory_graph.png
--------------------------------------------------------------------------------
/tests/agent.int.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from "@jest/globals";
2 | import { MemorySaver, InMemoryStore } from "@langchain/langgraph";
3 | import { builder } from "../src/memory_agent/graph.js";
4 |
describe("Memory Graph", () => {
  // Conversations of increasing length; each message explicitly asks the
  // agent to remember facts so tool calls should be produced.
  const conversations = [
    ["My name is Alice and I love pizza. Remember this."],
    [
      "Hi, I'm Bob and I enjoy playing tennis. Remember this.",
      "Yes, I also have a pet dog named Max.",
      "Max is a golden retriever and he's 5 years old. Please remember this too.",
    ],
    [
      "Hello, I'm Charlie. I work as a software engineer and I'm passionate about AI. Remember this.",
      "I specialize in machine learning algorithms and I'm currently working on a project involving natural language processing.",
      "My main goal is to improve sentiment analysis accuracy in multi-lingual texts. It's challenging but exciting.",
      "We've made some progress using transformer models, but we're still working on handling context and idioms across languages.",
      "Chinese and English have been the most challenging pair so far due to their vast differences in structure and cultural contexts.",
    ],
  ];

  it.each(
    // Label each conversation by its position so test names read well.
    conversations.map((conversation, index) => [
      ["short", "medium", "long"][index],
      conversation,
    ]),
  )(
    "should store memories for %s conversation",
    async (_, conversation) => {
      // A fresh in-memory store and checkpointer per case keeps tests isolated.
      const memStore = new InMemoryStore();
      const graph = builder.compile({
        store: memStore,
        checkpointer: new MemorySaver(),
      });
      const userId = "test-user";
      for (const content of conversation) {
        await graph.invoke(
          {
            messages: [
              { role: "user", content: [{ type: "text", text: content }] },
            ],
          },
          {
            configurable: {
              userId,
              thread_id: "thread",
              model: "gpt-4o-mini",
              systemPrompt: "You are a helpful assistant.",
            },
          },
        );
      }

      // Memories must land in this user's namespace...
      const namespace = ["memories", userId];
      const memories = await memStore.search(namespace);
      expect(memories.length).toBeGreaterThan(0);

      // ...and must not leak into another user's namespace.
      const badNamespace = ["memories", "wrong-user"];
      const badMemories = await memStore.search(badNamespace);
      expect(badMemories.length).toBe(0);
    },
    // Per-case timeout in ms — these cases make real LLM calls.
    30000,
  );
});
65 |
--------------------------------------------------------------------------------
/tests/agent.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from "@jest/globals";
2 | import { graph } from "../src/memory_agent/graph.js";
3 |
// Smoke test: importing the compiled graph must not throw, and the graph
// must carry the name assigned in graph.ts after compile().
describe("Memory Graph", () => {
  it("should initialize and compile the graph", () => {
    expect(graph).toBeDefined();
    expect(graph.name).toBe("MemoryAgent");
  });

  // TODO: Add more test cases for individual nodes, routing logic, tool integration, and output validation
});
12 |
--------------------------------------------------------------------------------
/tests/configuration.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from "@jest/globals";
2 | import { ensureConfiguration } from "../src/memory_agent/configuration.js";
3 |
describe("Configuration", () => {
  it("should initialize configuration from an empty object", () => {
    // With no values provided, ensureConfiguration should still return a
    // usable (defined, object-typed) configuration — presumably populated
    // with defaults; exact default values are asserted elsewhere if at all.
    const emptyConfig = {};
    const result = ensureConfiguration(emptyConfig);
    expect(result).toBeDefined();
    expect(typeof result).toBe("object");
  });
});
12 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@tsconfig/recommended",
3 | "compilerOptions": {
4 | "target": "ES2021",
5 | "lib": ["ES2021", "ES2022.Object", "DOM"],
6 | "module": "NodeNext",
7 | "moduleResolution": "nodenext",
8 | "esModuleInterop": true,
9 | "declaration": true,
10 | "noImplicitReturns": true,
11 | "noFallthroughCasesInSwitch": true,
12 | "noUnusedLocals": true,
13 | "noUnusedParameters": true,
14 | "useDefineForClassFields": true,
15 | "strictPropertyInitialization": false,
16 | "allowJs": true,
17 | "strict": true,
18 | "outDir": "dist",
19 | "types": ["jest", "node"],
20 | "resolveJsonModule": true
21 | },
22 | "include": ["**/*.ts", "**/*.js"],
23 | "exclude": ["node_modules", "dist"]
24 | }
25 |
--------------------------------------------------------------------------------