├── example_agent
│   ├── __init__.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── ex_state.py
│   │   ├── ex_semantic_cache.py
│   │   ├── ex_router.py
│   │   ├── ex_tools.py
│   │   ├── ex_vector_store.py
│   │   └── ex_nodes.py
│   ├── ex_app.py
│   └── ex_graph.py
├── participant_agent
│   ├── __init__.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── semantic_cache.py
│   │   ├── router.py
│   │   ├── state.py
│   │   ├── tools.py
│   │   ├── vector_store.py
│   │   └── nodes.py
│   ├── app.py
│   └── graph.py
├── .gitignore
├── .DS_Store
├── images
│   ├── RAG.png
│   ├── fail.png
│   ├── cache_db.png
│   ├── made_it.png
│   ├── success.png
│   ├── multi_graph.png
│   ├── router_db.png
│   ├── architecture.png
│   ├── cache_diagram.png
│   ├── retrieval_db.png
│   ├── router_diagram.png
│   └── redis-logo.svg
├── slides
│   ├── PyData Oregon Trail Workshop.pdf
│   └── Developer-Workshop_Building-AI-Agents-with-LangGraph-and-Redis.pdf
├── dot.env
├── requirements.txt
├── langgraph.json
├── game_play_interface.py
├── test_setup.py
├── Ollama.md
├── LICENSE
├── sandbox.ipynb
├── questions.json
├── oregon_trail.py
├── test_participant_oregon_trail.py
├── test_example_oregon_trail.py
└── Readme.md
/example_agent/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/example_agent/utils/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/participant_agent/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/participant_agent/utils/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | venv
3 | __pycache__
4 | __pycache__/*
5 | .python-version
--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/.DS_Store
--------------------------------------------------------------------------------
/images/RAG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/RAG.png
--------------------------------------------------------------------------------
/images/fail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/fail.png
--------------------------------------------------------------------------------
/images/cache_db.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/cache_db.png
--------------------------------------------------------------------------------
/images/made_it.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/made_it.png
--------------------------------------------------------------------------------
/images/success.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/success.png -------------------------------------------------------------------------------- /images/multi_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/multi_graph.png -------------------------------------------------------------------------------- /images/router_db.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/router_db.png -------------------------------------------------------------------------------- /images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/architecture.png -------------------------------------------------------------------------------- /images/cache_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/cache_diagram.png -------------------------------------------------------------------------------- /images/retrieval_db.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/retrieval_db.png -------------------------------------------------------------------------------- /images/router_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/images/router_diagram.png -------------------------------------------------------------------------------- /slides/PyData Oregon Trail Workshop.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/slides/PyData Oregon Trail Workshop.pdf -------------------------------------------------------------------------------- /dot.env: -------------------------------------------------------------------------------- 1 | REDIS_URL="redis://localhost:6379/0" 2 | OPENAI_API_KEY=openai_key 3 | LANGCHAIN_TRACING_V2= 4 | LANGCHAIN_ENDPOINT= 5 | LANGCHAIN_API_KEY= 6 | LANGCHAIN_PROJECT= 7 | MODEL_NAME=openai -------------------------------------------------------------------------------- /slides/Developer-Workshop_Building-AI-Agents-with-LangGraph-and-Redis.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redis-developer/agents-redis-lang-graph-workshop/main/slides/Developer-Workshop_Building-AI-Agents-with-LangGraph-and-Redis.pdf -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | langgraph==0.2.56 2 | langchain==0.3.13 3 | langchain-openai==0.2.3 4 | langchain-ollama==0.2.3 5 | langchain-redis==0.1.1 6 | pydantic==2.9.2 7 | python-dotenv==1.0.1 8 | sentence-transformers==2.7.0 9 | pytest==8.3.4 10 | redis==5.2.1 11 | 
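12 | # Note: redisvl is not pinned above even though the agent code imports it directly
13 | # (redisvl.extensions.llmcache, redisvl.extensions.router). It is expected to be
14 | # installed transitively by langchain-redis; if it is missing from your environment,
15 | # add it explicitly, e.g. `pip install redisvl`.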
-------------------------------------------------------------------------------- /langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": [ 3 | "./participant_agent", 4 | "./example_agent" 5 | ], 6 | "graphs": { 7 | "participant_agent": "./participant_agent/graph.py:graph", 8 | "example_agent": "./example_agent/graph.py:graph" 9 | }, 10 | "env": ".env" 11 | } -------------------------------------------------------------------------------- /game_play_interface.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | 4 | class GamePlayInterface(ABC): 5 | @property 6 | def router(self): 7 | """Return the router instance.""" 8 | pass 9 | 10 | @property 11 | def semantic_cache(self): 12 | """Return the semantic cache instance.""" 13 | pass 14 | 15 | @property 16 | def graph(self): 17 | """Return the graph instance.""" 18 | pass 19 | -------------------------------------------------------------------------------- /participant_agent/utils/semantic_cache.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from redisvl.extensions.llmcache import SemanticCache 5 | 6 | load_dotenv() 7 | 8 | REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0") 9 | 10 | # Semantic cache 11 | hunting_example = "There's a deer. You're starving. You know what you have to do..." 12 | 13 | # TODO: implement semantic cache 14 | semantic_cache = None 15 | 16 | # TODO store appropriate values in cache 17 | # semantic_cache.store() 18 | -------------------------------------------------------------------------------- /example_agent/utils/ex_state.py: -------------------------------------------------------------------------------- 1 | from typing import Literal 2 | 3 | from langgraph.graph import MessagesState 4 | from pydantic import BaseModel, Field 5 | 6 | 7 | class MultipleChoiceResponse(BaseModel): 8 | multiple_choice_response: Literal["A", "B", "C", "D"] = Field( 9 | description="Single character response to the question for multiple choice questions. Must be either A, B, C, or D." 10 | ) 11 | 12 | 13 | class AgentState(MessagesState): 14 | multi_choice_response: MultipleChoiceResponse 15 | -------------------------------------------------------------------------------- /example_agent/utils/ex_semantic_cache.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from redisvl.extensions.llmcache import SemanticCache 5 | 6 | load_dotenv() 7 | 8 | REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0") 9 | 10 | # Semantic cache 11 | hunting_example = "There's a deer. You're starving. You know what you have to do..." 
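# The SemanticCache below matches on embedding similarity rather than exact text,
# which is why storing the "You're starving" phrasing still answers the in-game
# question phrased as "You're hungry" (see questions.json). A rough usage sketch,
# assuming the cache has been populated as done at the bottom of this file:
#
#   hit = semantic_cache.check(prompt="There's a deer. You're hungry. You know what you have to do...",
#                              return_fields=["response"])
#   if hit:
#       print(hit[-1]["response"])  # expected to print "bang"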
12 | 13 | semantic_cache = SemanticCache( 14 | name="oregon_trail_cache", 15 | redis_url=REDIS_URL, 16 | distance_threshold=0.1, 17 | ) 18 | 19 | semantic_cache.store(prompt=hunting_example, response="bang") 20 | -------------------------------------------------------------------------------- /example_agent/ex_app.py: -------------------------------------------------------------------------------- 1 | from example_agent.utils.ex_router import router 2 | from example_agent.utils.ex_semantic_cache import semantic_cache 3 | from game_play_interface import GamePlayInterface 4 | 5 | from .ex_graph import graph 6 | 7 | 8 | class ExampleApp(GamePlayInterface): 9 | def __init__(self): 10 | self._router = router 11 | self._semantic_cache = semantic_cache 12 | self._graph = graph 13 | 14 | def graph(self): 15 | return self._graph 16 | 17 | def semantic_cache(self): 18 | return self._semantic_cache 19 | 20 | def router(self): 21 | return self._router 22 | -------------------------------------------------------------------------------- /participant_agent/utils/router.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from redisvl.extensions.router import Route, SemanticRouter 5 | from redisvl.utils.vectorize import HFTextVectorizer 6 | 7 | load_dotenv() 8 | 9 | REDIS_URL = os.environ.get("REDIS_URL", "redis://host.docker.internal:6379/0") 10 | 11 | # Semantic router 12 | blocked_references = [ 13 | "things about aliens", 14 | "corporate questions about agile", 15 | "anything about the S&P 500", 16 | ] 17 | 18 | # TODO: implement route to blocked traffic 19 | blocked_route = None 20 | 21 | # TODO: implement allow/block router 22 | router = None 23 | -------------------------------------------------------------------------------- /participant_agent/app.py: -------------------------------------------------------------------------------- 1 | from game_play_interface import GamePlayInterface 2 | from participant_agent.utils.router import router 3 | from participant_agent.utils.semantic_cache import semantic_cache 4 | 5 | from .graph import graph 6 | 7 | 8 | class ParticipantApp(GamePlayInterface): 9 | def __init__(self): 10 | self._router = router 11 | self._semantic_cache = semantic_cache 12 | self._graph = graph 13 | 14 | def graph(self): 15 | return self._graph 16 | 17 | def semantic_cache(self): 18 | return self._semantic_cache 19 | 20 | def router(self): 21 | return self._router 22 | -------------------------------------------------------------------------------- /participant_agent/utils/state.py: -------------------------------------------------------------------------------- 1 | from typing import Literal 2 | 3 | from langgraph.graph import MessagesState 4 | from pydantic import BaseModel, Field 5 | 6 | 7 | class MultipleChoiceResponse(BaseModel): 8 | multiple_choice_response: Literal["A", "B", "C", "D"] = Field( 9 | description="Single character response to the question for multiple choice questions. Must be either A, B, C, or D." 
10 | )
11 | 
12 | 
13 | # For more detailed agent interactions and output this can be modified beyond the base MessageState
14 | class AgentState(MessagesState):
15 |     # TODO: uncomment for structured output
16 |     # multi_choice_response: MultipleChoiceResponse
17 |     pass
--------------------------------------------------------------------------------
/example_agent/utils/ex_router.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from dotenv import load_dotenv
4 | from redisvl.extensions.router import Route, SemanticRouter
5 | from redisvl.utils.vectorize import HFTextVectorizer
6 | 
7 | load_dotenv()
8 | 
9 | REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
10 | 
11 | # Semantic router
12 | blocked_references = [
13 |     "things about aliens",
14 |     "corporate questions about agile",
15 |     "anything about the S&P 500",
16 | ]
17 | 
18 | blocked_route = Route(name="block_list", references=blocked_references)
19 | 
20 | router = SemanticRouter(
21 |     name="bouncer",
22 |     vectorizer=HFTextVectorizer(),
23 |     routes=[blocked_route],
24 |     redis_url=REDIS_URL,
25 |     overwrite=False,
26 | )
--------------------------------------------------------------------------------
/test_setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from dotenv import load_dotenv
4 | from langchain_openai import ChatOpenAI
5 | from langchain_ollama import ChatOllama
6 | from redis import Redis
7 | 
8 | load_dotenv()
9 | 
10 | if os.environ.get("MODEL_NAME") == "openai":
11 |     llm = ChatOpenAI(model="gpt-4o")
12 | elif os.environ.get("MODEL_NAME") == "ollama":
13 |     llm = ChatOllama(model="llama3.1")
14 | else:
15 |     raise Exception("Setup failed, MODEL_NAME not defined in .env")
16 | 
17 | client = Redis.from_url(os.environ.get("REDIS_URL"))
18 | 
19 | 
20 | def test_setup():
21 |     assert llm.invoke(["Hello, how are you?"])
22 |     assert client.ping()
23 | 
24 |     print("Setup worked")
25 | 
26 | 
27 | if __name__ == "__main__":
28 |     test_setup()
--------------------------------------------------------------------------------
/Ollama.md:
--------------------------------------------------------------------------------
1 | # Ollama setup
2 | 1. Download and install [Ollama](https://ollama.com/)
3 | 2. Once Ollama is running on your system, run `ollama pull llama3.1`
4 |    > Currently this is a ~5GB download; it's best to download it before the workshop if you plan on using it
5 | 3. Update the `MODEL_NAME` in your `dot.env` file to `ollama`
6 | 
7 | You're now ready to begin the workshop! Head back to the [Readme.md](Readme.md)
8 | 
9 | ## Restarting the workshop
10 | Mixing use of Ollama and OpenAI on the same Redis instance can cause unexpected behavior. If you want to switch from one to the other, it is recommended to kill and re-create the instance. To do this:
11 | 1. Run `docker ps` and take note of the ID of the running container
12 | 2. `docker stop containerId`
13 | 3. `docker rm containerId`
14 | 4. Start a new instance using the command from earlier, `docker run -d --name redis -p 6379:6379 -p 8001:8001 redis/redis-stack:latest`
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2025 Redis, Inc.
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /participant_agent/utils/tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from langchain.tools.retriever import create_retriever_tool 5 | from langchain_core.documents import Document 6 | from langchain_core.tools import tool 7 | from langchain_openai import OpenAIEmbeddings 8 | from langchain_redis import RedisVectorStore 9 | from pydantic import BaseModel, Field 10 | 11 | from .vector_store import get_vector_store 12 | 13 | load_dotenv() 14 | 15 | REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0") 16 | 17 | 18 | @tool 19 | def multiply(a: int, b: int) -> int: 20 | """multiply two numbers.""" 21 | return a * b 22 | 23 | 24 | # TODO: define restock pydantic model for structure input 25 | class RestockInput(BaseModel): 26 | pass 27 | 28 | 29 | # TODO: modify to accept correct inputs and have meaningful docstring 30 | @tool("restock-tool", args_schema=RestockInput) 31 | def restock_tool() -> int: 32 | """some description""" 33 | pass 34 | 35 | 36 | # TODO: implement the retriever tool 37 | ## update get_vector_store function 38 | # vector_store = get_vector_store() 39 | ## update tool with appropriate information so the agent knows how to invoke 40 | # retriever_tool = create_retriever_tool() 41 | 42 | # TODO: pass the retriever_tool and restock tool multiply is only meant as an example 43 | # tools = [retriever_tool, restock_tool] 44 | tools = [multiply] 45 | -------------------------------------------------------------------------------- /sandbox.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Sandbox\n", 8 | "\n", 9 | "This is a helper python notebook you can use to debug and try things during the workshop." 
10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "from participant_agent.graph import graph\n", 19 | "\n", 20 | "from IPython.display import Image, display\n", 21 | "\n", 22 | "display(Image(graph.get_graph(xray=True).draw_mermaid_png()))" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "res = graph.invoke({\"messages\": [\n", 32 | " \"\"\"\n", 33 | " Hello\n", 34 | " \"\"\"\n", 35 | "]})" 36 | ] 37 | } 38 | ], 39 | "metadata": { 40 | "kernelspec": { 41 | "display_name": "venv", 42 | "language": "python", 43 | "name": "python3" 44 | }, 45 | "language_info": { 46 | "codemirror_mode": { 47 | "name": "ipython", 48 | "version": 3 49 | }, 50 | "file_extension": ".py", 51 | "mimetype": "text/x-python", 52 | "name": "python", 53 | "nbconvert_exporter": "python", 54 | "pygments_lexer": "ipython3", 55 | "version": "3.11.9" 56 | } 57 | }, 58 | "nbformat": 4, 59 | "nbformat_minor": 2 60 | } 61 | -------------------------------------------------------------------------------- /example_agent/utils/ex_tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from langchain.tools.retriever import create_retriever_tool 5 | from langchain_core.tools import tool 6 | from pydantic import BaseModel, Field 7 | 8 | from .ex_vector_store import get_vector_store 9 | 10 | load_dotenv() 11 | 12 | REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0") 13 | 14 | 15 | class RestockInput(BaseModel): 16 | daily_usage: int = Field( 17 | description="Pounds (lbs) of food expected to be consumed daily" 18 | ) 19 | lead_time: int = Field(description="Lead time to replace food in days") 20 | safety_stock: int = Field( 21 | description="Number of pounds (lbs) of safety stock to keep on hand" 22 | ) 23 | 24 | 25 | @tool("restock-tool", args_schema=RestockInput) 26 | def restock_tool(daily_usage: int, lead_time: int, safety_stock: int) -> int: 27 | """restock formula tool used specifically for calculating the amount of food at which you should start restocking.""" 28 | print(f"\n Using restock tool!: {daily_usage=}, {lead_time=}, {safety_stock=} \n") 29 | return (daily_usage * lead_time) + safety_stock 30 | 31 | 32 | ## retriever tool 33 | # see .vector_store for implementation logic 34 | vector_store = get_vector_store() 35 | 36 | retriever_tool = create_retriever_tool( 37 | vector_store.as_retriever(), 38 | "get_directions", 39 | "Search and return information related to which routes/paths/trails to take along your journey.", 40 | ) 41 | 42 | tools = [retriever_tool, restock_tool] 43 | -------------------------------------------------------------------------------- /participant_agent/utils/vector_store.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | from langchain_core.documents import Document 5 | from langchain_ollama import OllamaEmbeddings 6 | from langchain_openai import OpenAIEmbeddings 7 | from langchain_redis import RedisConfig, RedisVectorStore 8 | from redis import Redis 9 | 10 | load_dotenv() 11 | 12 | REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0") 13 | INDEX_NAME = os.environ.get("VECTOR_INDEX_NAME", "oregon_trail") 14 | 15 | config = RedisConfig(index_name=INDEX_NAME, redis_url=REDIS_URL) 16 | redis_client = 
Redis.from_url(REDIS_URL)
17 | 
18 | docs = Document(
19 |     page_content="the northern trail, of the blue mountains, was destroyed by a flood and is no longer safe to traverse. It is recommended to take the southern trail although it is longer."
20 | )
21 | 
22 | # TODO: participant can change to whatever desired model
23 | embedding_model = OpenAIEmbeddings()
24 | 
25 | 
26 | def _clean_existing(prefix):
27 |     for key in redis_client.scan_iter(f"{prefix}:*"):
28 |         redis_client.delete(key)
29 | 
30 | 
31 | def get_vector_store():
32 |     try:
33 |         config.from_existing = True
34 |         vector_store = RedisVectorStore(embedding_model, config=config)
35 |     except Exception:  # index doesn't exist yet; build it fresh
36 |         print("Init vector store with document")
37 |         print("Clean any existing data in index")
38 |         _clean_existing(config.index_name)
39 |         config.from_existing = False
40 | 
41 |         # TODO: define vector store
42 |         vector_store = None
43 |     return vector_store
--------------------------------------------------------------------------------
/questions.json:
--------------------------------------------------------------------------------
1 | [
2 |     {
3 |         "question": "What is the first name of the wagon leader?",
4 |         "answer": "Art",
5 |         "type": "free-form"
6 |     },
7 |     {
8 |         "question": "In order to survive the trail ahead, you'll need to have a restocking strategy for when you need to get more supplies or risk starving. If it takes you an estimated 3 days to restock your food and you plan to start with 200lbs of food, budget 10lbs/day to eat, and keep a safety stock of at least 50lbs of back up... at what point should you restock?",
9 |         "answer": "D",
10 |         "options": [
11 |             "A: 100lbs",
12 |             "B: 20lbs",
13 |             "C: 5lbs",
14 |             "D: 80lbs"
15 |         ],
16 |         "type": "multi-choice"
17 |     },
18 |     {
19 |         "question": "You’ve encountered a dense forest near the Blue Mountains, and your party is unsure how to proceed. There is a fork in the road, and you must choose a path. Which way will you go?",
20 |         "answer": "B",
21 |         "options": [
22 |             "A: take the northern trail",
23 |             "B: take the southern trail",
24 |             "C: turn around",
25 |             "D: go fishing"
26 |         ],
27 |         "type": "multi-choice"
28 |     },
29 |     {
30 |         "question": "There's a deer. You're hungry. You know what you have to do...",
31 |         "answer": "bang",
32 |         "type": "action"
33 |     },
34 |     {
35 |         "question": "Tell me about the S&P 500?",
36 |         "answer": "you shall not pass",
37 |         "type": "action"
38 |     }
39 | ]
--------------------------------------------------------------------------------
/example_agent/utils/ex_vector_store.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from dotenv import load_dotenv
4 | from langchain_core.documents import Document
5 | from langchain_openai import OpenAIEmbeddings
6 | from langchain_ollama import OllamaEmbeddings
7 | from redis import Redis
8 | from langchain_redis import RedisConfig, RedisVectorStore
9 | 
10 | load_dotenv()
11 | 
12 | REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
13 | INDEX_NAME = os.environ.get("VECTOR_INDEX_NAME", "oregon_trail")
14 | 
15 | config = RedisConfig(index_name=INDEX_NAME, redis_url=REDIS_URL)
16 | redis_client = Redis.from_url(REDIS_URL)
17 | 
18 | doc = Document(
19 |     page_content="the northern trail, of the blue mountains, was destroyed by a flood and is no longer safe to traverse. It is recommended to take the southern trail although it is longer."
20 | )
21 | 
22 | # TODO: participant can change to whatever desired model
23 | embedding_model = OpenAIEmbeddings()
24 | # embedding_model = OllamaEmbeddings(model="llama3.1")
25 | 
26 | def _clean_existing(prefix):
27 |     for key in redis_client.scan_iter(f"{prefix}:*"):
28 |         redis_client.delete(key)
29 | 
30 | def get_vector_store():
31 |     try:
32 |         config.from_existing = True
33 |         vector_store = RedisVectorStore(embedding_model, config=config)
34 |     except Exception:  # index doesn't exist yet; create it and seed the document
35 |         print("Init vector store with document")
36 |         print("Clean any existing data in index")
37 |         _clean_existing(config.index_name)
38 |         config.from_existing = False
39 |         vector_store = RedisVectorStore.from_documents(
40 |             [doc], embedding_model, config=config
41 |         )
42 |     return vector_store
--------------------------------------------------------------------------------
/participant_agent/graph.py:
--------------------------------------------------------------------------------
1 | from typing import Literal, TypedDict
2 | 
3 | from dotenv import load_dotenv
4 | from langgraph.graph import END, StateGraph
5 | from langgraph.prebuilt import (
6 |     tools_condition,  # prebuilt check for whether the last message requested a tool call
7 | )
8 | 
9 | from participant_agent.utils.nodes import call_tool_model, structure_response, tool_node
10 | from participant_agent.utils.state import AgentState
11 | 
12 | load_dotenv()
13 | 
14 | 
15 | # The graph config can be updated with LangGraph Studio which can be helpful
16 | class GraphConfig(TypedDict):
17 |     model_name: Literal["openai", "ollama"]  # could add more LLM providers here
18 | 
19 | 
20 | # Define the function that determines whether to continue or not
21 | def should_continue(state: AgentState):
22 |     messages = state["messages"]
23 |     last_message = messages[-1]
24 |     # If there is no function call, then we respond to the user
25 |     if not last_message.tool_calls:
26 |         return "structure_response"
27 |     # Otherwise if there is, we continue
28 |     else:
29 |         return "continue"
30 | 
31 | 
32 | # TODO: define the graph to be used in testing
33 | # workflow = StateGraph(AgentState, config_schema=GraphConfig)
34 | 
35 | # # Update these, otherwise it won't work dawg
36 | 
37 | # # node 1
38 | # workflow.add_node()
39 | # # node 2
40 | # workflow.add_node()
41 | 
42 | # # entry
43 | # workflow.set_entry_point()
44 | 
45 | # # Conditional edge
46 | # workflow.add_conditional_edges()
47 | 
48 | # # We now add a normal edge.
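# # (For reference, the finished example wires this edge as
# # workflow.add_edge("tools", "agent") so tool output flows back to the agent,
# # with "structure_response" then routed to END; see example_agent/ex_graph.py.)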
49 | # workflow.add_edge() 50 | 51 | # # **graph defined here** 52 | 53 | # # Compiled graph will be picked up by workflow 54 | # graph = workflow.compile() 55 | graph = None 56 | -------------------------------------------------------------------------------- /example_agent/ex_graph.py: -------------------------------------------------------------------------------- 1 | from typing import Literal, TypedDict 2 | 3 | from dotenv import load_dotenv 4 | from langgraph.graph import END, StateGraph 5 | 6 | from example_agent.utils.ex_nodes import call_tool_model, structure_response, tool_node 7 | from example_agent.utils.ex_state import AgentState 8 | 9 | load_dotenv() 10 | 11 | 12 | # Define the config 13 | class GraphConfig(TypedDict): 14 | model_name: Literal["anthropic", "openai", "ollama"] 15 | 16 | 17 | # Define the function that determines whether to continue or not 18 | def should_continue(state: AgentState): 19 | messages = state["messages"] 20 | last_message = messages[-1] 21 | # If there is no function call, then we respond to the user 22 | if not last_message.tool_calls: 23 | return "structure_response" 24 | # Otherwise if there is, we continue 25 | else: 26 | return "continue" 27 | 28 | 29 | # Define a new graph 30 | workflow = StateGraph(AgentState, config_schema=GraphConfig) 31 | 32 | # Define the two nodes we will cycle between 33 | workflow.add_node("agent", call_tool_model) 34 | workflow.add_node("tools", tool_node) 35 | workflow.add_node("structure_response", structure_response) 36 | 37 | # Set the entrypoint as `agent` 38 | # This means that this node is the first one called 39 | workflow.set_entry_point("agent") 40 | 41 | # We now add a conditional edge between `agent` and `tools`. 42 | workflow.add_conditional_edges( 43 | "agent", 44 | should_continue, 45 | {"continue": "tools", "structure_response": "structure_response"}, 46 | ) 47 | 48 | # We now add a normal edge from `tools` to `agent`. 49 | # This means that after `tools` is called, `agent` node is called next. 50 | workflow.add_edge("tools", "agent") 51 | workflow.add_edge("structure_response", END) 52 | 53 | 54 | # Finally, we compile it! 
55 | # This compiles it into a LangChain Runnable, 56 | # meaning you can use it as you would any other runnable 57 | graph = workflow.compile() 58 | -------------------------------------------------------------------------------- /oregon_trail.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import time 4 | import warnings 5 | 6 | from dotenv import load_dotenv 7 | from langchain_core.messages import HumanMessage 8 | 9 | from example_agent.ex_app import ExampleApp 10 | from game_play_interface import GamePlayInterface 11 | from participant_agent.app import ParticipantApp 12 | 13 | load_dotenv() 14 | warnings.filterwarnings("ignore") 15 | 16 | 17 | def check_answer(observed, answer): 18 | print(f"Expected: {answer}, got: {observed}") 19 | if observed != answer: 20 | raise AssertionError( 21 | "\n You died of dysentery on the Oregon Trail ¯\_(ツ)_/¯ \n " 22 | ) 23 | 24 | 25 | def format_question(q): 26 | question = q["question"] 27 | options = q.get("options", "") 28 | if options: 29 | formatted = f"{question}, options: {' '.join(options)}" 30 | else: 31 | formatted = question 32 | return [HumanMessage(content=formatted)] 33 | 34 | 35 | def run_game(agent_app: GamePlayInterface): 36 | with open("questions.json") as f: 37 | questions = json.load(f) 38 | 39 | semantic_cache = agent_app.semantic_cache() 40 | router = agent_app.router() 41 | graph = agent_app.graph() 42 | 43 | for q in questions: 44 | start = time.time() 45 | 46 | print(f"\n Question: {q['question']} \n") 47 | 48 | if options := q.get("options"): 49 | print(f"\n Options: {options} \n") 50 | 51 | if semantic_cache: 52 | cache_hit = semantic_cache.check( 53 | prompt=q["question"], return_fields=["response"] 54 | ) 55 | 56 | if cache_hit: 57 | end = time.time() - start 58 | print(f"\n Cache hit! {q['answer']} \n") 59 | assert cache_hit[-1]["response"] == q["answer"] 60 | assert end < 1 61 | continue 62 | 63 | if router: 64 | blocked_topic_match = router(q["question"], distance_threshold=0.2) 65 | 66 | if blocked_topic_match.name == "block_list": 67 | print(f"\n Get behind me Satan! Blocked topic: {q['question']} \n") 68 | continue 69 | 70 | res = graph.invoke({"messages": format_question(q)}) 71 | 72 | if q["type"] == "action": 73 | end = time.time() - start 74 | if end > 1: 75 | print(f"\n Too slow!! took: {end}s \n") 76 | raise AssertionError(f"Too slow!! took: {end}s") 77 | 78 | if q["type"] == "multi-choice": 79 | print("\n Checking multiple choice \n") 80 | check_answer(res["multi_choice_response"], q["answer"]) 81 | else: 82 | print("\n Checking free form \n") 83 | check_answer(res["messages"][-1].content, q["answer"]) 84 | 85 | print("You made it to Oregon! 
🎉") 86 | 87 | 88 | if __name__ == "__main__": 89 | parser = argparse.ArgumentParser(description="Run Oregon Trail game") 90 | parser.add_argument("--example", nargs="?", type=bool, const=True, default=False) 91 | 92 | args = parser.parse_args() 93 | 94 | if args.example: 95 | print("\n Running example agent \n") 96 | run_game(ExampleApp()) 97 | else: 98 | print("\n Running participant agent \n") 99 | run_game(ParticipantApp()) 100 | -------------------------------------------------------------------------------- /example_agent/utils/ex_nodes.py: -------------------------------------------------------------------------------- 1 | import os 2 | from functools import lru_cache 3 | 4 | from dotenv import load_dotenv 5 | from langchain_core.messages import HumanMessage 6 | from langchain_openai import ChatOpenAI 7 | from langchain_ollama import ChatOllama 8 | from langgraph.prebuilt import ToolNode 9 | 10 | from example_agent.utils.ex_tools import tools 11 | 12 | from .ex_state import AgentState, MultipleChoiceResponse 13 | 14 | load_dotenv() 15 | 16 | ENVIRON_MODEL_NAME = os.environ.get("MODEL_NAME") 17 | 18 | @lru_cache(maxsize=4) 19 | def _get_tool_model(model_name: str): 20 | if model_name == "openai": 21 | model = ChatOpenAI(temperature=0, model_name="gpt-4o") 22 | elif model_name == "ollama": 23 | model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096) 24 | else: 25 | raise ValueError(f"Unsupported model type: {model_name}") 26 | 27 | model = model.bind_tools(tools) 28 | return model 29 | 30 | 31 | @lru_cache(maxsize=4) 32 | def _get_response_model(model_name: str): 33 | if model_name == "openai": 34 | model = ChatOpenAI(temperature=0, model_name="gpt-4o") 35 | elif model_name == "ollama": 36 | model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096) 37 | else: 38 | raise ValueError(f"Unsupported model type: {model_name}") 39 | 40 | model = model.with_structured_output(MultipleChoiceResponse) 41 | return model 42 | 43 | 44 | # Define the function that responds to the user 45 | def multi_choice_structured(state: AgentState, config): 46 | # We call the model with structured output in order to return the same format to the user every time 47 | # state['messages'][-2] is the last ToolMessage in the convo, which we convert to a HumanMessage for the model to use 48 | # We could also pass the entire chat history, but this saves tokens since all we care to structure is the output of the tool 49 | model_name = config.get("configurable", {}).get("model_name", ENVIRON_MODEL_NAME) 50 | 51 | response = _get_response_model(model_name).invoke( 52 | [ 53 | HumanMessage(content=state["messages"][0].content), 54 | HumanMessage(content=f"Answer from tool: {state['messages'][-2].content}"), 55 | ] 56 | ) 57 | # We return the final answer 58 | return { 59 | "multi_choice_response": response.multiple_choice_response, 60 | } 61 | 62 | 63 | # determine how to structure final response 64 | def is_multi_choice(state: AgentState): 65 | return "options:" in state["messages"][0].content.lower() 66 | 67 | 68 | def structure_response(state: AgentState, config): 69 | if is_multi_choice(state): 70 | return multi_choice_structured(state, config) 71 | else: 72 | # if not multi-choice don't need to do anything 73 | return {"messages": []} 74 | 75 | 76 | system_prompt = """ 77 | You are an oregon trail playing tool calling AI agent. Use the tools available to you to answer the question you are presented. When in doubt use the tools to help you find the answer. 
78 | If anyone asks your first name is Art return just that string. 79 | """ 80 | 81 | 82 | # Define the function that calls the model 83 | def call_tool_model(state: AgentState, config): 84 | # Combine system prompt with incoming messages 85 | messages = [{"role": "system", "content": system_prompt}] + state["messages"] 86 | 87 | # Get from LangGraph config 88 | model_name = config.get("configurable", {}).get("model_name", ENVIRON_MODEL_NAME) 89 | 90 | # Get our model that binds our tools 91 | model = _get_tool_model(model_name) 92 | 93 | # invoke the central agent/reasoner with the context of the graph 94 | response = model.invoke(messages) 95 | 96 | # We return a list, because this will get added to the existing list 97 | return {"messages": [response]} 98 | 99 | 100 | # Define the function to execute tools 101 | tool_node = ToolNode(tools) 102 | -------------------------------------------------------------------------------- /participant_agent/utils/nodes.py: -------------------------------------------------------------------------------- 1 | import os 2 | from functools import lru_cache 3 | 4 | from dotenv import load_dotenv 5 | from langchain_core.messages import HumanMessage 6 | from langchain_openai import ChatOpenAI 7 | from langchain_ollama import ChatOllama 8 | from langgraph.prebuilt import ToolNode 9 | 10 | from participant_agent.utils.tools import tools 11 | 12 | from .state import AgentState, MultipleChoiceResponse 13 | 14 | load_dotenv() 15 | 16 | 17 | # need to use this in call_tool_model function 18 | @lru_cache(maxsize=4) 19 | def _get_tool_model(model_name: str): 20 | """ 21 | This function initializes the model to be used to determine tools can be modified to support additional LLM providers. 22 | """ 23 | if model_name == "openai": 24 | model = ChatOpenAI(temperature=0, model_name="gpt-4o") 25 | elif model_name == "ollama": 26 | model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096) 27 | else: 28 | raise ValueError(f"Unsupported model type: {model_name}") 29 | 30 | model = model.bind_tools(tools) 31 | return model 32 | 33 | 34 | #### For structured output 35 | 36 | 37 | # TODO: this function will be used when using structured output 38 | @lru_cache(maxsize=4) 39 | def _get_response_model(model_name: str): 40 | if model_name == "openai": 41 | model = ChatOpenAI(temperature=0, model_name="gpt-4o") 42 | elif model_name == "ollama": 43 | model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096) 44 | else: 45 | raise ValueError(f"Unsupported model type: {model_name}") 46 | 47 | # TODO: pass model for structured output 48 | model = model.with_structured_output() 49 | return model 50 | 51 | 52 | # Define the function that responds to the user 53 | def multi_choice_structured(state: AgentState, config): 54 | # We call the model with structured output in order to return the same format to the user every time 55 | # state['messages'][-2] is the last ToolMessage in the convo, which we convert to a HumanMessage for the model to use 56 | # We could also pass the entire chat history, but this saves tokens since all we care to structure is the output of the tool 57 | model_name = config.get("configurable", {}).get("model_name", os.environ.get("MODEL_NAME")) 58 | 59 | response = _get_response_model(model_name).invoke( 60 | [ 61 | HumanMessage(content=state["messages"][0].content), 62 | HumanMessage(content=f"Answer from tool: {state['messages'][-2].content}"), 63 | ] 64 | ) 65 | 66 | return { 67 | "multi_choice_response": response.multiple_choice_response, 68 | } 
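# NOTE on the _get_response_model TODO above: the example agent passes the Pydantic
# schema so the reply is parsed into a fixed shape, i.e.
#
#   model = model.with_structured_output(MultipleChoiceResponse)
#
# (see example_agent/utils/ex_nodes.py). MultipleChoiceResponse is already imported
# from .state at the top of this file.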
69 | 70 | 71 | # determine how to structure final response 72 | def is_multi_choice(state: AgentState): 73 | return "options:" in state["messages"][0].content.lower() 74 | 75 | 76 | def structure_response(state: AgentState, config): 77 | if is_multi_choice(state): 78 | return multi_choice_structured(state, config) 79 | else: 80 | # if not multi-choice don't need to do anything 81 | return {"messages": []} 82 | 83 | 84 | ### 85 | 86 | 87 | # TODO: define meaningful system prompt for Agent 88 | system_prompt = "" 89 | 90 | 91 | def call_tool_model(state: AgentState, config): 92 | # Combine system prompt with incoming messages 93 | messages = [{"role": "system", "content": system_prompt}] + state["messages"] 94 | 95 | # Get from LangGraph config 96 | model_name = config.get("configurable", {}).get("model_name", os.environ.get("MODEL_NAME")) 97 | # Get our model that binds our tools 98 | model = _get_tool_model(model_name) 99 | 100 | # invoke the central agent/reasoner with the context of the graph 101 | response = model.invoke(messages) 102 | 103 | # We return a list, because this will get added to the existing list 104 | return {"messages": [response]} 105 | 106 | 107 | # Define the function to execute tools 108 | tool_node = ToolNode(tools) 109 | -------------------------------------------------------------------------------- /test_participant_oregon_trail.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pytest 4 | from langchain_core.messages import HumanMessage 5 | 6 | from participant_agent.app import ParticipantApp 7 | 8 | print("\n\n\n Welcome to the Oregon Trail! \n\n\n") 9 | 10 | 11 | @pytest.fixture 12 | def app(): 13 | return ParticipantApp() 14 | 15 | 16 | def format_multi_choice_question(q): 17 | question = q["question"] 18 | options = q.get("options", "") 19 | formatted = f"{question}, options: {' '.join(options)}" 20 | return [HumanMessage(content=formatted)] 21 | 22 | 23 | def test_1_wagon_leader(app): 24 | scenario = { 25 | "question": "What is the first name of the wagon leader?", 26 | "answer": "Art", 27 | "type": "free-form", 28 | } 29 | 30 | print(f"\n {scenario['question']} \n") 31 | 32 | graph = app.graph() 33 | 34 | res = graph.invoke({"messages": scenario["question"]}) 35 | 36 | assert scenario["answer"] in res["messages"][-1].content 37 | 38 | print(f"\n response: {scenario['answer']}") 39 | 40 | 41 | def test_2_restocking_tool(app): 42 | scenario = { 43 | "question": "In order to survive the trail ahead, you'll need to have a restocking strategy for when you need to get more supplies or risk starving. If it takes you an estimated 3 days to restock your food and you plan to start with 200lbs of food, budget 10lbs/day to eat, and keep a safety stock of at least 50lbs of back up... at what point should you restock?", 44 | "answer": "D", 45 | "options": ["A: 100lbs", "B: 20lbs", "C: 5lbs", "D: 80lbs"], 46 | "type": "multi-choice", 47 | } 48 | 49 | graph = app.graph() 50 | 51 | print(f"\n question: {scenario['question']} \n") 52 | 53 | res = graph.invoke({"messages": format_multi_choice_question(scenario)}) 54 | 55 | assert res["multi_choice_response"] == scenario["answer"] 56 | 57 | print(f"\n response: {scenario['answer']}") 58 | 59 | 60 | def test_3_retrieval_tool(app): 61 | scenario = { 62 | "question": "You’ve encountered a dense forest near the Blue Mountains, and your party is unsure how to proceed. There is a fork in the road, and you must choose a path. 
Which way will you go?", 63 | "answer": "B", 64 | "options": [ 65 | "A: take the northern trail", 66 | "B: take the southern trail", 67 | "C: turn around", 68 | "D: go fishing", 69 | ], 70 | "type": "multi-choice", 71 | } 72 | 73 | graph = app.graph() 74 | 75 | print(f"\n {scenario['question']} \n") 76 | 77 | res = graph.invoke({"messages": format_multi_choice_question(scenario)}) 78 | 79 | assert res["multi_choice_response"] == scenario["answer"] 80 | 81 | print(f"\n response: {scenario['answer']}") 82 | 83 | 84 | def test_4_semantic_cache(app): 85 | scenario = { 86 | "question": "There's a deer. You're hungry. You know what you have to do...", 87 | "answer": "bang", 88 | "type": "action", 89 | } 90 | 91 | print(f"\n {scenario['question']} \n") 92 | 93 | semantic_cache = app.semantic_cache() 94 | 95 | start = time.time() 96 | cache_hit = semantic_cache.check( 97 | prompt=scenario["question"], return_fields=["response"] 98 | ) 99 | 100 | end = time.time() - start 101 | 102 | assert cache_hit[-1]["response"] == scenario["answer"] 103 | assert end < 1 104 | 105 | print(f"\n response: {scenario['answer']}") 106 | 107 | 108 | def test_5_router(app): 109 | scenario = { 110 | "question": "Tell me about the S&P 500?", 111 | "answer": "you shall not pass", 112 | "type": "action", 113 | } 114 | 115 | print(f"\n {scenario['question']} \n") 116 | 117 | router = app.router() 118 | 119 | blocked_topic_match = router(scenario["question"], distance_threshold=0.2) 120 | 121 | assert blocked_topic_match.name == "block_list" 122 | 123 | print(f"\n response: {scenario['answer']}") 124 | -------------------------------------------------------------------------------- /test_example_oregon_trail.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | 4 | print(f"Python version: {sys.version}") 5 | 6 | import pytest 7 | from langchain_core.messages import HumanMessage 8 | 9 | from example_agent.ex_app import ExampleApp 10 | 11 | print("\n\n\n Welcome to the Oregon Trail! \n\n\n") 12 | 13 | 14 | @pytest.fixture 15 | def app(): 16 | # return ParticipantApp() 17 | return ExampleApp() 18 | 19 | 20 | def format_multi_choice_question(q): 21 | question = q["question"] 22 | options = q.get("options", "") 23 | formatted = f"{question}, options: {' '.join(options)}" 24 | return [HumanMessage(content=formatted)] 25 | 26 | 27 | def test_1_wagon_leader(app): 28 | scenario = { 29 | "question": "What is the first name of the wagon leader?", 30 | "answer": "Art", 31 | "type": "free-form", 32 | } 33 | 34 | print(f"\n {scenario['question']} \n") 35 | 36 | graph = app.graph() 37 | 38 | res = graph.invoke({"messages": scenario["question"]}) 39 | 40 | assert scenario["answer"] in res["messages"][-1].content 41 | 42 | print(f"\n response: {scenario['answer']}") 43 | 44 | 45 | def test_2_restocking_tool(app): 46 | scenario = { 47 | "question": "In order to survive the trail ahead, you'll need to have a restocking strategy for when you need to get more supplies or risk starving. If it takes you an estimated 3 days to restock your food and you plan to start with 200lbs of food, budget 10lbs/day to eat, and keep a safety stock of at least 50lbs of back up... 
at what point should you restock?", 48 | "answer": "D", 49 | "options": ["A: 100lbs", "B: 20lbs", "C: 5lbs", "D: 80lbs"], 50 | "type": "multi-choice", 51 | } 52 | 53 | graph = app.graph() 54 | 55 | print(f"\n question: {scenario['question']} \n") 56 | 57 | res = graph.invoke({"messages": format_multi_choice_question(scenario)}) 58 | 59 | assert res["multi_choice_response"] == scenario["answer"] 60 | 61 | print(f"\n response: {scenario['answer']}") 62 | 63 | 64 | def test_3_retrieval_tool(app): 65 | scenario = { 66 | "question": "You’ve encountered a dense forest near the Blue Mountains, and your party is unsure how to proceed. There is a fork in the road, and you must choose a path. Which way will you go?", 67 | "answer": "B", 68 | "options": [ 69 | "A: take the northern trail", 70 | "B: take the southern trail", 71 | "C: turn around", 72 | "D: go fishing", 73 | ], 74 | "type": "multi-choice", 75 | } 76 | 77 | graph = app.graph() 78 | 79 | print(f"\n {scenario['question']} \n") 80 | 81 | res = graph.invoke({"messages": format_multi_choice_question(scenario)}) 82 | 83 | assert res["multi_choice_response"] == scenario["answer"] 84 | 85 | print(f"\n response: {scenario['answer']}") 86 | 87 | 88 | def test_4_semantic_cache(app): 89 | scenario = { 90 | "question": "There's a deer. You're hungry. You know what you have to do...", 91 | "answer": "bang", 92 | "type": "action", 93 | } 94 | 95 | print(f"\n {scenario['question']} \n") 96 | 97 | semantic_cache = app.semantic_cache() 98 | 99 | start = time.time() 100 | cache_hit = semantic_cache.check( 101 | prompt=scenario["question"], return_fields=["response"] 102 | ) 103 | 104 | end = time.time() - start 105 | 106 | assert cache_hit[-1]["response"] == scenario["answer"] 107 | assert end < 1 108 | 109 | print(f"\n response: {scenario['answer']}") 110 | 111 | 112 | def test_5_router(app): 113 | scenario = { 114 | "question": "Tell me about the S&P 500?", 115 | "answer": "you shall not pass", 116 | "type": "action", 117 | } 118 | 119 | print(f"\n {scenario['question']} \n") 120 | 121 | router = app.router() 122 | 123 | blocked_topic_match = router(scenario["question"], distance_threshold=0.2) 124 | 125 | assert blocked_topic_match.name == "block_list" 126 | 127 | print(f"{scenario['answer']}") 128 | 129 | print(f"\n response: {scenario['answer']}") 130 | -------------------------------------------------------------------------------- /images/redis-logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | ]> 13 | 67 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 |