├── graph.py ├── .devcontainer ├── post_create.sh └── devcontainer.json ├── llm.py ├── .gitignore ├── tools ├── cypher.py └── vector.py ├── requirements.txt ├── .streamlit └── secrets.toml.example ├── agent.py ├── solutions ├── graph.py ├── tools │ ├── cypher-simple.py │ ├── cypher-finetuned.py │ ├── cypher-fewshot.py │ ├── vector.py │ ├── cypher.py │ └── cypher-degrees.py ├── llm.py ├── utils.py ├── bot.py ├── test_solution.py ├── agent-chat.py ├── agent-scoped.py ├── agent-vector.py ├── agent.py └── agent-cypher.py ├── examples └── helloworld │ └── app.py ├── SETUP.md ├── utils.py ├── bot.py └── README.adoc /graph.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | # Connect to Neo4j 4 | -------------------------------------------------------------------------------- /.devcontainer/post_create.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | pip3 install -r requirements.txt -------------------------------------------------------------------------------- /llm.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | # Create the LLM 4 | 5 | 6 | # Create the Embedding model -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .streamlit/secrets.toml 3 | .env 4 | .venv 5 | .DS_Store 6 | .vscode 7 | .pytest_cache -------------------------------------------------------------------------------- /tools/cypher.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from llm import llm 3 | from graph import graph 4 | 5 | # Create the Cypher QA chain -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | tenacity!=8.4.0 2 | langchain==0.3.9 3 | openai==1.56.0 4 | langchain-openai==0.2.10 5 | neo4j==5.27.0 6 | streamlit==1.51.0 7 | langchainhub==0.1.21 8 | langchain-neo4j==0.1.1 -------------------------------------------------------------------------------- /.streamlit/secrets.toml.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY = "sk-..." 2 | OPENAI_MODEL = "gpt-4" 3 | 4 | NEO4J_URI = "bolt://" 5 | NEO4J_USERNAME = "neo4j" 6 | NEO4J_PASSWORD = "" 7 | NEO4J_DATABASE = "neo4j" -------------------------------------------------------------------------------- /agent.py: -------------------------------------------------------------------------------- 1 | from llm import llm 2 | from graph import graph 3 | 4 | # Create a movie chat chain 5 | 6 | # Create a set of tools 7 | 8 | # Create chat history callback 9 | 10 | # Create the agent 11 | 12 | # Create a handler to call the agent -------------------------------------------------------------------------------- /tools/vector.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from llm import llm, embeddings 3 | from graph import graph 4 | 5 | # Create the Neo4jVector 6 | 7 | # Create the retriever 8 | 9 | # Create the prompt 10 | 11 | # Create the chain 12 | 13 | # Create a function to call the chain -------------------------------------------------------------------------------- /solutions/graph.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | # tag::graph[] 4 | from langchain_neo4j import Neo4jGraph 5 | 6 | graph = Neo4jGraph( 7 | url=st.secrets["NEO4J_URI"], 8 | username=st.secrets["NEO4J_USERNAME"], 9 | password=st.secrets["NEO4J_PASSWORD"], 10 | database=st.secrets["NEO4J_DATABASE"], 11 | ) 12 | #end::graph[] 
-------------------------------------------------------------------------------- /solutions/tools/cypher-simple.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from llm import llm 3 | from graph import graph 4 | 5 | # tag::import[] 6 | from langchain_neo4j import GraphCypherQAChain 7 | # end::import[] 8 | 9 | # tag::cypher-qa[] 10 | cypher_qa = GraphCypherQAChain.from_llm( 11 | llm, 12 | graph=graph, 13 | verbose=True, 14 | allow_dangerous_requests=True 15 | ) 16 | # end::cypher-qa[] 17 | -------------------------------------------------------------------------------- /examples/helloworld/app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | st.set_page_config( 4 | page_title="Hello", 5 | page_icon="👋", 6 | ) 7 | 8 | st.write("# Welcome to Streamlit! 👋") 9 | 10 | st.markdown(""" 11 | This component supports **markdown formatting**. 12 | 13 | [Check out their documentation](https://docs.streamlit.io) for more information on how to get started. 
14 | """) 15 | 16 | st.write('updated') -------------------------------------------------------------------------------- /solutions/llm.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | # tag::llm[] 4 | # Create the LLM 5 | from langchain_openai import ChatOpenAI 6 | 7 | llm = ChatOpenAI( 8 | openai_api_key=st.secrets["OPENAI_API_KEY"], 9 | model=st.secrets["OPENAI_MODEL"], 10 | ) 11 | # end::llm[] 12 | 13 | # tag::embedding[] 14 | # Create the Embedding model 15 | from langchain_openai import OpenAIEmbeddings 16 | 17 | embeddings = OpenAIEmbeddings( 18 | openai_api_key=st.secrets["OPENAI_API_KEY"] 19 | ) 20 | # end::embedding[] 21 | -------------------------------------------------------------------------------- /SETUP.md: -------------------------------------------------------------------------------- 1 | # Setup 2 | 3 | This repository accompanies the [Build a Neo4j-backed Chatbot using Python course](https://graphacademy.neo4j.com/courses/llm-chatbot-python/) on [GraphAcademy](https://graphacademy.neo4j.com). 4 | 5 | When the devcontainer is created, such as in a GitHub codespace, all the required software and packages will be installed. 6 | 7 | Follow the [Setup Instructions in GraphAcademy](https://graphacademy.neo4j.com/courses/llm-chatbot-python/1-project-setup/2-setup/) to get started. 
8 | 9 | To start the chatbot, run: 10 | 11 | ```bash 12 | streamlit run bot.py 13 | ``` 14 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from streamlit.runtime.scriptrunner.script_runner import get_script_run_ctx 3 | 4 | def write_message(role, content, save = True): 5 | """ 6 | This is a helper function that saves a message to the 7 | session state and then writes a message to the UI 8 | """ 9 | # Append to session state 10 | if save: 11 | st.session_state.messages.append({"role": role, "content": content}) 12 | 13 | # Write to UI 14 | with st.chat_message(role): 15 | st.markdown(content) 16 | 17 | def get_session_id(): 18 | return get_script_run_ctx().session_id 19 | -------------------------------------------------------------------------------- /solutions/utils.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from streamlit.runtime.scriptrunner.script_runner import get_script_run_ctx 3 | 4 | # tag::write_message[] 5 | def write_message(role, content, save = True): 6 | """ 7 | This is a helper function that saves a message to the 8 | session state and then writes a message to the UI 9 | """ 10 | # Append to session state 11 | if save: 12 | st.session_state.messages.append({"role": role, "content": content}) 13 | 14 | # Write to UI 15 | with st.chat_message(role): 16 | st.markdown(content) 17 | # end::write_message[] 18 | 19 | # tag::get_session_id[] 20 | def get_session_id(): 21 | return get_script_run_ctx().session_id 22 | # end::get_session_id[] -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python 3", 3 | "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", 
4 | "postCreateCommand": "bash .devcontainer/post_create.sh", 5 | "portsAttributes": { 6 | "8501": { 7 | "label": "Application", 8 | "onAutoForward": "openPreview" 9 | } 10 | }, 11 | "forwardPorts": [ 12 | 8501 13 | ], 14 | "customizations": { 15 | "codespaces": { 16 | "openFiles": [ 17 | "SETUP.md", 18 | "bot.py", 19 | ".streamlit/secrets.toml.example" 20 | ] 21 | }, 22 | "vscode": { 23 | "settings": { 24 | "python.createEnvironment.trigger": "off" 25 | }, 26 | "extensions": [ 27 | "ms-python.python", 28 | "ms-python.debugpy" 29 | ] 30 | } 31 | } 32 | } -------------------------------------------------------------------------------- /bot.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from utils import write_message 3 | 4 | # Page Config 5 | st.set_page_config("Ebert", page_icon=":movie_camera:") 6 | 7 | # Set up Session State 8 | if "messages" not in st.session_state: 9 | st.session_state.messages = [ 10 | {"role": "assistant", "content": "Hi, I'm the GraphAcademy Chatbot! How can I help you?"}, 11 | ] 12 | 13 | # Submit handler 14 | def handle_submit(message): 15 | """ 16 | Submit handler: 17 | 18 | You will modify this method to talk with an LLM and provide 19 | context using data from Neo4j. 
20 | """ 21 | 22 | # Handle the response 23 | with st.spinner('Thinking...'): 24 | # # TODO: Replace this with a call to your LLM 25 | from time import sleep 26 | sleep(1) 27 | write_message('assistant', message) 28 | 29 | 30 | # Display messages in Session State 31 | for message in st.session_state.messages: 32 | write_message(message['role'], message['content'], save=False) 33 | 34 | # Handle any user input 35 | if question := st.chat_input("What is up?"): 36 | # Display user message in chat message container 37 | write_message('user', question) 38 | 39 | # Generate a response 40 | handle_submit(question) 41 | -------------------------------------------------------------------------------- /solutions/tools/cypher-finetuned.py: -------------------------------------------------------------------------------- 1 | from langchain_neo4j import GraphCypherQAChain 2 | # tag::import-prompt-template[] 3 | from langchain.prompts.prompt import PromptTemplate 4 | # end::import-prompt-template[] 5 | 6 | from llm import llm 7 | from graph import graph 8 | 9 | # tag::prompt[] 10 | CYPHER_GENERATION_TEMPLATE = """ 11 | You are an expert Neo4j Developer translating user questions into Cypher to answer questions about movies and provide recommendations. 12 | Convert the user's question based on the schema. 13 | 14 | Use only the provided relationship types and properties in the schema. 15 | Do not use any other relationship types or properties that are not provided. 16 | 17 | Do not return entire nodes or embedding properties. 18 | 19 | Fine Tuning: 20 | 21 | For movie titles that begin with "The", move "the" to the end. For example "The 39 Steps" becomes "39 Steps, The" or "the matrix" becomes "Matrix, The". 
22 | 23 | 24 | Schema: 25 | {schema} 26 | 27 | Question: 28 | {question} 29 | 30 | Cypher Query: 31 | """ 32 | # end::prompt[] 33 | 34 | # tag::template[] 35 | cypher_prompt = PromptTemplate.from_template(CYPHER_GENERATION_TEMPLATE) 36 | # end::template[] 37 | 38 | 39 | # tag::cypher-qa[] 40 | cypher_qa = GraphCypherQAChain.from_llm( 41 | llm, 42 | graph=graph, 43 | verbose=True, 44 | cypher_prompt=cypher_prompt, 45 | allow_dangerous_requests=True 46 | ) 47 | # end::cypher-qa[] 48 | -------------------------------------------------------------------------------- /solutions/bot.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from utils import write_message 3 | # tag::import_agent[] 4 | from agent import generate_response 5 | # end::import_agent[] 6 | 7 | # tag::setup[] 8 | # Page Config 9 | st.set_page_config("Ebert", page_icon=":movie_camera:") 10 | # end::setup[] 11 | 12 | # tag::session[] 13 | # Set up Session State 14 | if "messages" not in st.session_state: 15 | st.session_state.messages = [ 16 | {"role": "assistant", "content": "Hi, I'm the GraphAcademy Chatbot! How can I help you?"}, 17 | ] 18 | # end::session[] 19 | 20 | # tag::submit[] 21 | # Submit handler 22 | def handle_submit(message): 23 | """ 24 | Submit handler: 25 | 26 | You will modify this method to talk with an LLM and provide 27 | context using data from Neo4j. 
28 | """ 29 | 30 | # Handle the response 31 | with st.spinner('Thinking...'): 32 | # Call the agent 33 | response = generate_response(message) 34 | write_message('assistant', response) 35 | 36 | # end::submit[] 37 | 38 | 39 | # tag::chat[] 40 | # Display messages in Session State 41 | for message in st.session_state.messages: 42 | write_message(message['role'], message['content'], save=False) 43 | 44 | # Handle any user input 45 | if prompt := st.chat_input("What is up?"): 46 | # Display user message in chat message container 47 | write_message('user', prompt) 48 | 49 | # Generate a response 50 | handle_submit(prompt) 51 | # end::chat[] 52 | -------------------------------------------------------------------------------- /solutions/tools/cypher-fewshot.py: -------------------------------------------------------------------------------- 1 | from langchain_neo4j import GraphCypherQAChain 2 | from langchain.prompts.prompt import PromptTemplate 3 | 4 | from llm import llm 5 | from graph import graph 6 | 7 | 8 | # tag::prompt[] 9 | CYPHER_GENERATION_TEMPLATE = """ 10 | You are an expert Neo4j Developer translating user questions into Cypher to answer questions about movies and provide recommendations. 11 | Convert the user's question based on the schema. 12 | 13 | Use only the provided relationship types and properties in the schema. 14 | Do not use any other relationship types or properties that are not provided. 15 | 16 | Do not return entire nodes or embedding properties. 17 | 18 | Fine Tuning: 19 | 20 | For movie titles that begin with "The", move "the" to the end. For example "The 39 Steps" becomes "39 Steps, The" or "the matrix" becomes "Matrix, The". 21 | 22 | Example Cypher Statements: 23 | 24 | 1. To find who acted in a movie: 25 | ``` 26 | MATCH (p:Person)-[r:ACTED_IN]->(m:Movie {{title: "Movie Title"}}) 27 | RETURN p.name, r.role 28 | ``` 29 | 30 | 2. 
To find who directed a movie: 31 | ``` 32 | MATCH (p:Person)-[r:DIRECTED]->(m:Movie {{title: "Movie Title"}}) 33 | RETURN p.name 34 | ``` 35 | 36 | Schema: 37 | {schema} 38 | 39 | Question: 40 | {question} 41 | """ 42 | # end::prompt[] 43 | 44 | cypher_prompt = PromptTemplate.from_template(CYPHER_GENERATION_TEMPLATE) 45 | 46 | cypher_qa = GraphCypherQAChain.from_llm( 47 | llm, 48 | graph=graph, 49 | verbose=True, 50 | cypher_prompt=cypher_prompt, 51 | allow_dangerous_requests=True 52 | ) -------------------------------------------------------------------------------- /README.adoc: -------------------------------------------------------------------------------- 1 | = Build an Neo4j-backed Chatbot using Python 2 | 3 | This repository accompanies the link:https://graphacademy.neo4j.com/courses/llm-chatbot-python[Build an Neo4j-backed Chatbot using Python^] course on link:https://graphacademy.neo4j.com/?ref=github[Neo4j GraphAcademy^]. 4 | 5 | For a complete walkthrough of this repository, link:https://graphacademy.neo4j.com/courses/llm-chatbot-python/?ref=github[enrol now^]. 6 | 7 | link:https://codespaces.new/neo4j-graphacademy/llm-chatbot-python[image:https://github.com/codespaces/badge.svg[Open in GitHub Codespaces]^] 8 | 9 | == Running the application 10 | 11 | To run the application, you must install the libraries listed in `requirements.txt`. 12 | 13 | [source,sh] 14 | pip install -r requirements.txt 15 | 16 | 17 | Then run the `streamlit run` command to start the app on link:http://localhost:8501/[http://localhost:8501/^]. 18 | 19 | [source,sh] 20 | streamlit run bot.py 21 | 22 | == Tests 23 | 24 | To run the solution tests: 25 | 26 | . Create Neo4j instance with the `recommendations` dataset 27 | . Run the link:https://raw.githubusercontent.com/neo4j-graphacademy/courses/refs/heads/main/asciidoc/courses/llm-chatbot-python/modules/3-tools/lessons/1-vector-tool/reset.cypher[Cypher to add embeddings and create the vector index^]. 28 | . 
Create a virtual environment and install the requirements. 29 | + 30 | [source,sh] 31 | pip install -r requirements.txt 32 | . Install `pytest` 33 | + 34 | [source,sh] 35 | pip install pytest 36 | . Create a `secrets.toml` file in the `.streamlit` directory. Use `secrets.toml.example` as a template. 37 | . Run the tests 38 | + 39 | [source,sh] 40 | pytest -------------------------------------------------------------------------------- /solutions/test_solution.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | def test_secrets(): 4 | import streamlit as st 5 | 6 | def check_secret(key): 7 | assert len(st.secrets[key]) > 0, f"{key} not found in secrets.toml" 8 | 9 | try: 10 | check_secret("OPENAI_API_KEY") 11 | check_secret("OPENAI_MODEL") 12 | check_secret("NEO4J_URI") 13 | check_secret("NEO4J_USERNAME") 14 | check_secret("NEO4J_PASSWORD") 15 | 16 | except FileNotFoundError: 17 | assert False, "secrets.toml file not found" 18 | 19 | def test_vector(): 20 | try: 21 | from tools.vector import get_movie_plot 22 | assert get_movie_plot("Aliens land on earth") is not None 23 | 24 | vector_exists = True 25 | 26 | except ValueError: 27 | assert False, "The moviePlots index does not exist. Run the Cypher script - https://raw.githubusercontent.com/neo4j-graphacademy/courses/refs/heads/main/asciidoc/courses/llm-chatbot-python/modules/3-tools/lessons/1-vector-tool/reset.cypher" 28 | 29 | assert True 30 | 31 | def test_bot_conversation(): 32 | from streamlit.testing.v1 import AppTest 33 | 34 | at = AppTest.from_file(script_path="solutions/bot.py",default_timeout=60).run() 35 | assert not at.exception, "Bot failed to start" 36 | 37 | question = "What is a good movie about aliens landing on earth?" 38 | 39 | at.chat_input[0].set_value(question).run() 40 | 41 | assert at.chat_message[0].markdown[0].value == "Hi, I'm the GraphAcademy Chatbot! How can I help you?" 
42 | assert at.chat_message[1].markdown[0].value == question 43 | assert len(at.chat_message[2].markdown[0].value) > 0, "No response from the bot" -------------------------------------------------------------------------------- /solutions/tools/vector.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from llm import llm, embeddings 3 | from graph import graph 4 | 5 | # tag::import_vector[] 6 | from langchain_neo4j import Neo4jVector 7 | # end::import_vector[] 8 | # tag::import_chain[] 9 | from langchain.chains.combine_documents import create_stuff_documents_chain 10 | from langchain.chains import create_retrieval_chain 11 | # end::import_chain[] 12 | 13 | # tag::import_chat_prompt[] 14 | from langchain_core.prompts import ChatPromptTemplate 15 | # end::import_chat_prompt[] 16 | 17 | 18 | # tag::vector[] 19 | neo4jvector = Neo4jVector.from_existing_index( 20 | embeddings, # <1> 21 | graph=graph, # <2> 22 | index_name="moviePlots", # <3> 23 | node_label="Movie", # <4> 24 | text_node_property="plot", # <5> 25 | embedding_node_property="plotEmbedding", # <6> 26 | retrieval_query=""" 27 | RETURN 28 | node.plot AS text, 29 | score, 30 | { 31 | title: node.title, 32 | directors: [ (person)-[:DIRECTED]->(node) | person.name ], 33 | actors: [ (person)-[r:ACTED_IN]->(node) | [person.name, r.role] ], 34 | tmdbId: node.tmdbId, 35 | source: 'https://www.themoviedb.org/movie/'+ node.tmdbId 36 | } AS metadata 37 | """ 38 | ) 39 | # end::vector[] 40 | 41 | # tag::retriever[] 42 | retriever = neo4jvector.as_retriever() 43 | # end::retriever[] 44 | 45 | # tag::prompt[] 46 | instructions = ( 47 | "Use the given context to answer the question." 48 | "If you don't know the answer, say you don't know." 
49 | "Context: {context}" 50 | ) 51 | 52 | prompt = ChatPromptTemplate.from_messages( 53 | [ 54 | ("system", instructions), 55 | ("human", "{input}"), 56 | ] 57 | ) 58 | # end::prompt[] 59 | 60 | # tag::chain[] 61 | question_answer_chain = create_stuff_documents_chain(llm, prompt) 62 | plot_retriever = create_retrieval_chain( 63 | retriever, 64 | question_answer_chain 65 | ) 66 | # end::chain[] 67 | 68 | # tag::get_movie_plot[] 69 | def get_movie_plot(input): 70 | return plot_retriever.invoke({"input": input}) 71 | # end::get_movie_plot[] 72 | -------------------------------------------------------------------------------- /solutions/agent-chat.py: -------------------------------------------------------------------------------- 1 | from llm import llm 2 | from graph import graph 3 | 4 | # tag::import_movie_chat[] 5 | from langchain_core.prompts import ChatPromptTemplate 6 | from langchain.schema import StrOutputParser 7 | # end::import_movie_chat[] 8 | 9 | # tag::import_tool[] 10 | from langchain.tools import Tool 11 | # end::import_tool[] 12 | 13 | # tag::import_memory[] 14 | from langchain_neo4j import Neo4jChatMessageHistory 15 | # end::import_memory[] 16 | 17 | # tag::import_agent[] 18 | from langchain.agents import AgentExecutor, create_react_agent 19 | from langchain_core.runnables.history import RunnableWithMessageHistory 20 | from langchain import hub 21 | # end::import_agent[] 22 | 23 | # tag::import_get_session_id[] 24 | from utils import get_session_id 25 | # end::import_get_session_id[] 26 | 27 | # tag::movie_chat[] 28 | chat_prompt = ChatPromptTemplate.from_messages( 29 | [ 30 | ("system", "You are a movie expert providing information about movies."), 31 | ("human", "{input}"), 32 | ] 33 | ) 34 | 35 | movie_chat = chat_prompt | llm | StrOutputParser() 36 | # end::movie_chat[] 37 | 38 | # tag::tools[] 39 | tools = [ 40 | Tool.from_function( 41 | name="General Chat", 42 | description="For general movie chat not covered by other tools", 43 | 
func=movie_chat.invoke, 44 | ) 45 | ] 46 | # end::tools[] 47 | 48 | # tag::get_memory[] 49 | def get_memory(session_id): 50 | return Neo4jChatMessageHistory(session_id=session_id, graph=graph) 51 | # end::get_memory[] 52 | 53 | # tag::agent[] 54 | # tag::agent_prompt[] 55 | agent_prompt = hub.pull("hwchase17/react-chat") 56 | # end::agent_prompt[] 57 | agent = create_react_agent(llm, tools, agent_prompt) 58 | agent_executor = AgentExecutor( 59 | agent=agent, 60 | tools=tools, 61 | verbose=True 62 | ) 63 | 64 | chat_agent = RunnableWithMessageHistory( 65 | agent_executor, 66 | get_memory, 67 | input_messages_key="input", 68 | history_messages_key="chat_history", 69 | ) 70 | # end::agent[] 71 | 72 | # tag::generate_response[] 73 | def generate_response(user_input): 74 | """ 75 | Create a handler that calls the Conversational agent 76 | and returns a response to be rendered in the UI 77 | """ 78 | 79 | response = chat_agent.invoke( 80 | {"input": user_input}, 81 | {"configurable": {"session_id": get_session_id()}},) 82 | 83 | return response['output'] 84 | # end::generate_response[] 85 | -------------------------------------------------------------------------------- /solutions/tools/cypher.py: -------------------------------------------------------------------------------- 1 | from langchain_neo4j import GraphCypherQAChain 2 | from langchain.prompts.prompt import PromptTemplate 3 | 4 | from llm import llm 5 | from graph import graph 6 | 7 | CYPHER_GENERATION_TEMPLATE = """ 8 | You are an expert Neo4j Developer translating user questions into Cypher to answer questions about movies and provide recommendations. 9 | Convert the user's question based on the schema. 10 | 11 | Use only the provided relationship types and properties in the schema. 12 | Do not use any other relationship types or properties that are not provided. 13 | 14 | Do not return entire nodes or embedding properties. 
15 | 16 | Fine Tuning: 17 | 18 | For movie titles that begin with "The", move "the" to the end. For example "The 39 Steps" becomes "39 Steps, The" or "the matrix" becomes "Matrix, The". 19 | 20 | Example Cypher Statements: 21 | 22 | 1. To find who acted in a movie: 23 | ``` 24 | MATCH (p:Person)-[r:ACTED_IN]->(m:Movie {{title: "Movie Title"}}) 25 | RETURN p.name, r.role 26 | ``` 27 | 28 | 2. To find who directed a movie: 29 | ``` 30 | MATCH (p:Person)-[r:DIRECTED]->(m:Movie {{title: "Movie Title"}}) 31 | RETURN p.name 32 | ``` 33 | 34 | 3. How to find how many degrees of separation there are between two people: 35 | ``` 36 | MATCH path = shortestPath( 37 | (p1:Person {{name: "Actor 1"}})-[:ACTED_IN|DIRECTED*]-(p2:Person {{name: "Actor 2"}}) 38 | ) 39 | WITH path, p1, p2, relationships(path) AS rels 40 | RETURN 41 | p1 {{ .name, .born, link:'https://www.themoviedb.org/person/'+ p1.tmdbId }} AS start, 42 | p2 {{ .name, .born, link:'https://www.themoviedb.org/person/'+ p2.tmdbId }} AS end, 43 | reduce(output = '', i in range(0, length(path)-1) | 44 | output + CASE 45 | WHEN i = 0 THEN 46 | startNode(rels[i]).name + CASE WHEN type(rels[i]) = 'ACTED_IN' THEN ' played '+ rels[i].role +' in 'ELSE ' directed ' END + endNode(rels[i]).title 47 | ELSE 48 | ' with '+ startNode(rels[i]).name + ', who '+ CASE WHEN type(rels[i]) = 'ACTED_IN' THEN 'played '+ rels[i].role +' in ' 49 | ELSE 'directed ' 50 | END + endNode(rels[i]).title 51 | END 52 | ) AS pathBetweenPeople 53 | ``` 54 | 55 | Schema: 56 | {schema} 57 | 58 | Question: 59 | {question} 60 | """ 61 | 62 | cypher_prompt = PromptTemplate.from_template(CYPHER_GENERATION_TEMPLATE) 63 | 64 | cypher_qa = GraphCypherQAChain.from_llm( 65 | llm, 66 | graph=graph, 67 | verbose=True, 68 | cypher_prompt=cypher_prompt, 69 | allow_dangerous_requests=True 70 | ) -------------------------------------------------------------------------------- /solutions/tools/cypher-degrees.py: 
-------------------------------------------------------------------------------- 1 | from langchain_neo4j import GraphCypherQAChain 2 | from langchain.prompts.prompt import PromptTemplate 3 | 4 | from llm import llm 5 | from graph import graph 6 | 7 | 8 | # tag::prompt[] 9 | CYPHER_GENERATION_TEMPLATE = """ 10 | You are an expert Neo4j Developer translating user questions into Cypher to answer questions about movies and provide recommendations. 11 | Convert the user's question based on the schema. 12 | 13 | Use only the provided relationship types and properties in the schema. 14 | Do not use any other relationship types or properties that are not provided. 15 | 16 | Do not return entire nodes or embedding properties. 17 | 18 | Fine Tuning: 19 | 20 | For movie titles that begin with "The", move "the" to the end. For example "The 39 Steps" becomes "39 Steps, The" or "the matrix" becomes "Matrix, The". 21 | 22 | Example Cypher Statements: 23 | 24 | 1. To find who acted in a movie: 25 | ``` 26 | MATCH (p:Person)-[r:ACTED_IN]->(m:Movie {{title: "Movie Title"}}) 27 | RETURN p.name, r.role 28 | ``` 29 | 30 | 2. To find who directed a movie: 31 | ``` 32 | MATCH (p:Person)-[r:DIRECTED]->(m:Movie {{title: "Movie Title"}}) 33 | RETURN p.name 34 | ``` 35 | 36 | 3. 
How to find how many degrees of separation there are between two people: 37 | ``` 38 | MATCH path = shortestPath( 39 | (p1:Person {{name: "Actor 1"}})-[:ACTED_IN|DIRECTED*]-(p2:Person {{name: "Actor 2"}}) 40 | ) 41 | WITH path, p1, p2, relationships(path) AS rels 42 | RETURN 43 | p1 {{ .name, .born, link:'https://www.themoviedb.org/person/'+ p1.tmdbId }} AS start, 44 | p2 {{ .name, .born, link:'https://www.themoviedb.org/person/'+ p2.tmdbId }} AS end, 45 | reduce(output = '', i in range(0, length(path)-1) | 46 | output + CASE 47 | WHEN i = 0 THEN 48 | startNode(rels[i]).name + CASE WHEN type(rels[i]) = 'ACTED_IN' THEN ' played '+ rels[i].role +' in 'ELSE ' directed ' END + endNode(rels[i]).title 49 | ELSE 50 | ' with '+ startNode(rels[i]).name + ', who '+ CASE WHEN type(rels[i]) = 'ACTED_IN' THEN 'played '+ rels[i].role +' in ' 51 | ELSE 'directed ' 52 | END + endNode(rels[i]).title 53 | END 54 | ) AS pathBetweenPeople 55 | ``` 56 | 57 | Schema: 58 | {schema} 59 | 60 | Question: 61 | {question} 62 | """ 63 | # end::prompt[] 64 | 65 | cypher_prompt = PromptTemplate.from_template(CYPHER_GENERATION_TEMPLATE) 66 | 67 | cypher_qa = GraphCypherQAChain.from_llm( 68 | llm, 69 | graph=graph, 70 | verbose=True, 71 | cypher_prompt=cypher_prompt, 72 | allow_dangerous_requests=True 73 | ) -------------------------------------------------------------------------------- /solutions/agent-scoped.py: -------------------------------------------------------------------------------- 1 | from llm import llm 2 | from graph import graph 3 | from langchain_core.prompts import ChatPromptTemplate 4 | # tag::import_prompt[] 5 | from langchain_core.prompts import PromptTemplate 6 | # end::import_prompt[] 7 | from langchain.schema import StrOutputParser 8 | from langchain.tools import Tool 9 | from langchain_neo4j import Neo4jChatMessageHistory 10 | from langchain.agents import AgentExecutor, create_react_agent 11 | from langchain_core.runnables.history import RunnableWithMessageHistory 12 | 
# NOTE(review): `hub` appears unused in these solution modules (the agent
# prompt is defined inline rather than pulled from the LangChain hub) —
# presumably kept for parity with an earlier lesson; confirm before removing.
from langchain import hub
from utils import get_session_id

# Fallback conversational chain: plain prompt -> llm -> string output,
# used by the "General Chat" tool when no specialised tool applies.
chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a movie expert providing information about movies."),
        ("human", "{input}"),
    ]
)

movie_chat = chat_prompt | llm | StrOutputParser()

# Single-tool agent for this lesson: general chat only.
tools = [
    Tool.from_function(
        name="General Chat",
        description="For general movie chat not covered by other tools",
        func=movie_chat.invoke,
    )
]

def get_memory(session_id):
    """Return chat history for *session_id*, persisted in the Neo4j graph."""
    return Neo4jChatMessageHistory(session_id=session_id, graph=graph)

# tag::agent_prompt[]
# ReAct agent prompt, "scoped" to movies only. {tools}, {tool_names},
# {chat_history}, {input} and {agent_scratchpad} are filled in by the
# agent machinery at run time.
agent_prompt = PromptTemplate.from_template("""
You are a movie expert providing information about movies.
Be as helpful as possible and return as much information as possible.
Do not answer any questions that do not relate to movies, actors or directors.

Do not answer any questions using your pre-trained knowledge, only use the information provided in the context.

TOOLS:
------

You have access to the following tools:

{tools}

To use a tool, please use the following format:

```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

```
Thought: Do I need to use a tool? No
Final Answer: [your response here]
```

Begin!

Previous conversation history:
{chat_history}

New input: {input}
{agent_scratchpad}
""")
# end::agent_prompt[]

# tag::agent[]
agent = create_react_agent(llm, tools, agent_prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True
)

# Wrap the executor so per-session chat history (from get_memory) is
# injected as {chat_history} and updated after every turn.
chat_agent = RunnableWithMessageHistory(
    agent_executor,
    get_memory,
    input_messages_key="input",
    history_messages_key="chat_history",
)
# end::agent[]

def generate_response(user_input):
    """
    Create a handler that calls the Conversational agent
    and returns a response to be rendered in the UI
    """

    response = chat_agent.invoke(
        {"input": user_input},
        {"configurable": {"session_id": get_session_id()}},)

    return response['output']

# --------------------------------------------------------------------------------
# /solutions/agent-vector.py:
# --------------------------------------------------------------------------------
# Same agent as agent-scoped.py, plus a vector-search tool for movie plots.
from llm import llm
from graph import graph
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import PromptTemplate
from langchain.schema import StrOutputParser
from langchain.tools import Tool
from langchain_neo4j import Neo4jChatMessageHistory
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain import hub
from utils import get_session_id

# tag::import_get_movie_plot[]
from tools.vector import get_movie_plot
# end::import_get_movie_plot[]

chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a movie expert providing information about movies."),
        ("human", "{input}"),
    ]
)

movie_chat = chat_prompt | llm | StrOutputParser()

# tag::tools[]
# Two tools: general chat fallback and plot-based vector search.
tools = [
    Tool.from_function(
        name="General Chat",
        description="For general movie chat not covered by other tools",
        func=movie_chat.invoke,
    ),
    Tool.from_function(
        name="Movie Plot Search",
        description="For when you need to find information about movies based on a plot",
        func=get_movie_plot,
    )
]
# end::tools[]

def get_memory(session_id):
    """Return chat history for *session_id*, persisted in the Neo4j graph."""
    return Neo4jChatMessageHistory(session_id=session_id, graph=graph)

agent_prompt = PromptTemplate.from_template("""
You are a movie expert providing information about movies.
Be as helpful as possible and return as much information as possible.
Do not answer any questions that do not relate to movies, actors or directors.

Do not answer any questions using your pre-trained knowledge, only use the information provided in the context.

TOOLS:
------

You have access to the following tools:

{tools}

To use a tool, please use the following format:

```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

```
Thought: Do I need to use a tool? No
Final Answer: [your response here]
```

Begin!

Previous conversation history:
{chat_history}

New input: {input}
{agent_scratchpad}
""")

agent = create_react_agent(llm, tools, agent_prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True
)

chat_agent = RunnableWithMessageHistory(
    agent_executor,
    get_memory,
    input_messages_key="input",
    history_messages_key="chat_history",
)

def generate_response(user_input):
    """
    Create a handler that calls the Conversational agent
    and returns a response to be rendered in the UI
    """

    response = chat_agent.invoke(
        {"input": user_input},
        {"configurable": {"session_id": get_session_id()}},)

    return response['output']

# --------------------------------------------------------------------------------
# /solutions/agent.py:
# --------------------------------------------------------------------------------
# Final solution: agent with all three tools (chat, vector search, Cypher QA).
from llm import llm
from graph import graph
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import PromptTemplate
from langchain.schema import StrOutputParser
from langchain.tools import Tool
from langchain_neo4j import Neo4jChatMessageHistory
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain import hub
from utils import get_session_id

from tools.vector import get_movie_plot
from tools.cypher import cypher_qa

chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a movie expert providing information about movies."),
        ("human", "{input}"),
    ]
)

movie_chat = chat_prompt | llm | StrOutputParser()

tools = [
    Tool.from_function(
        name="General Chat",
        description="For general movie chat not covered by other tools",
        func=movie_chat.invoke,
    ),
    Tool.from_function(
        name="Movie Plot Search",
        description="For when you need to find information about movies based on a plot",
        func=get_movie_plot,
    ),
    Tool.from_function(
        name="Movie information",
        description="Provide information about movies questions using Cypher",
        # NOTE(review): the chain object itself is passed (invoked via its
        # __call__), unlike movie_chat.invoke above — confirm intentional.
        func = cypher_qa
    )
]

def get_memory(session_id):
    """Return chat history for *session_id*, persisted in the Neo4j graph."""
    return Neo4jChatMessageHistory(session_id=session_id, graph=graph)

agent_prompt = PromptTemplate.from_template("""
You are a movie expert providing information about movies.
Be as helpful as possible and return as much information as possible.
Do not answer any questions that do not relate to movies, actors or directors.

Do not answer any questions using your pre-trained knowledge, only use the information provided in the context.

TOOLS:
------

You have access to the following tools:

{tools}

To use a tool, please use the following format:

```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

```
Thought: Do I need to use a tool? No
Final Answer: [your response here]
```

Begin!

Previous conversation history:
{chat_history}

New input: {input}
{agent_scratchpad}
""")

agent = create_react_agent(llm, tools, agent_prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True
)

chat_agent = RunnableWithMessageHistory(
    agent_executor,
    get_memory,
    input_messages_key="input",
    history_messages_key="chat_history",
)

def generate_response(user_input):
    """
    Create a handler that calls the Conversational agent
    and returns a response to be rendered in the UI
    """

    response = chat_agent.invoke(
        {"input": user_input},
        {"configurable": {"session_id": get_session_id()}},)

    return response['output']

# --------------------------------------------------------------------------------
# /solutions/agent-cypher.py:
# --------------------------------------------------------------------------------
# Lesson variant of agent.py with asciidoc tags around the Cypher QA additions.
from llm import llm
from graph import graph
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import PromptTemplate
from langchain.schema import StrOutputParser
from langchain.tools import Tool
from langchain_neo4j import Neo4jChatMessageHistory
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain import hub
from utils import get_session_id

from tools.vector import get_movie_plot
# tag::import_cypher_qa[]
from tools.cypher import cypher_qa
# end::import_cypher_qa[]

chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a movie expert providing information about movies."),
        ("human", "{input}"),
    ]
)

movie_chat = chat_prompt | llm | StrOutputParser()

# tag::tools[]
tools = [
    Tool.from_function(
        name="General Chat",
        description="For general movie chat not covered by other tools",
        func=movie_chat.invoke,
    ),
    Tool.from_function(
        name="Movie Plot Search",
        description="For when you need to find information about movies based on a plot",
        func=get_movie_plot,
    ),
    Tool.from_function(
        name="Movie information",
        description="Provide information about movies questions using Cypher",
        func = cypher_qa
    )
]
# end::tools[]

def get_memory(session_id):
    """Return chat history for *session_id*, persisted in the Neo4j graph."""
    return Neo4jChatMessageHistory(session_id=session_id, graph=graph)

agent_prompt = PromptTemplate.from_template("""
You are a movie expert providing information about movies.
Be as helpful as possible and return as much information as possible.
Do not answer any questions that do not relate to movies, actors or directors.

Do not answer any questions using your pre-trained knowledge, only use the information provided in the context.

TOOLS:
------

You have access to the following tools:

{tools}

To use a tool, please use the following format:

```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

```
Thought: Do I need to use a tool? No
Final Answer: [your response here]
```

Begin!

Previous conversation history:
{chat_history}

New input: {input}
{agent_scratchpad}
""")

agent = create_react_agent(llm, tools, agent_prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True
)

chat_agent = RunnableWithMessageHistory(
    agent_executor,
    get_memory,
    input_messages_key="input",
    history_messages_key="chat_history",
)

def generate_response(user_input):
    """
    Create a handler that calls the Conversational agent
    and returns a response to be rendered in the UI
    """

    response = chat_agent.invoke(
        {"input": user_input},
        {"configurable": {"session_id": get_session_id()}},)

    return response['output']

# --------------------------------------------------------------------------------