├── .gitignore
├── README.md
├── backend
│   ├── agent.py
│   ├── database.py
│   ├── endpoints.py
│   ├── logger.py
│   ├── main.py
│   ├── movie_database_tool.py
│   └── run.py
├── data
│   └── imdb_top_1000.csv
├── frontend
│   ├── .gitignore
│   ├── package-lock.json
│   ├── package.json
│   ├── public
│   │   ├── favicon.ico
│   │   ├── index.html
│   │   ├── logo192.png
│   │   ├── logo512.png
│   │   ├── manifest.json
│   │   └── robots.txt
│   └── src
│       ├── App.css
│       ├── App.js
│       ├── App.test.js
│       ├── index.css
│       ├── index.js
│       ├── logo.svg
│       ├── reportWebVitals.js
│       └── setupTests.js
├── image
│   └── llm-imdb.gif
├── poetry.lock
└── pyproject.toml

/.gitignore:
--------------------------------------------------------------------------------
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*

# Mac
.DS_Store
.ruff_cache

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env
.env.test

# parcel-bundler cache (https://parceljs.org/)
.cache

# Next.js build output
.next

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and *not* Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
notebooks

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Poetry
.testenv/*

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# IMDB-LLM: Movie Query Made Simple

![IMDB-LLM Demo](./image/llm-imdb.gif)

Welcome to IMDB-LLM, a proof-of-concept app that demonstrates the power of [LangChain](https://github.com/hwchase17/langchain) and LLMs in extracting information from graphs! In roughly 10 hours it was possible to build a user-friendly application that lets users search for movie titles in the IMDB dataset graph or discover similar movies through natural language queries. The best part? If the LLM lacks specific information, it first searches on Google and then looks the title up in the database.

## Overview

IMDB-LLM's graph explorer is built with [LangChain](https://github.com/hwchase17/langchain) and LLMs, state-of-the-art technologies in natural language processing and machine learning. The application uses a graph representation of the IMDB dataset, encompassing movies, actors, directors, genres, and release years. Users can input queries in natural language, such as "Give me some drama movie options with Leonardo DiCaprio" or "Show me movies directed by Christopher Nolan", and the LLM retrieves the pertinent information from the graph.

Should the LLM be unable to provide an answer, it uses the Google Search API to find supplementary information, which it then uses to refine the graph search.
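
As a minimal example, once the backend is running (see Installation and Setup below), the agent can also be queried directly over its REST API; the host and port come from `backend/main.py` and the response fields from `backend/run.py`:

```bash
# Ask the agent a question through the FastAPI backend (served on 127.0.0.1:7860 by default)
curl -G "http://127.0.0.1:7860/predict" \
  --data-urlencode "message=Show me movies directed by Christopher Nolan"
```

The endpoint answers with a JSON object containing the agent's textual `response`, the matching `movies` (with poster, runtime, rating, and overview taken from the graph), and the intermediate `thought` trace that the frontend can display.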

## Features

- Search for movie titles within the graph
- Discover similar movies using natural language queries
- Automatic Google search for missing information

## Installation and Setup

1. Clone the repository:

   ```bash
   git clone https://github.com/ibiscp/LLM-IMDB.git
   ```

2. Navigate to the frontend directory and install the required dependencies:

   ```bash
   cd frontend
   npm install
   ```

3. Install the necessary dependencies for the backend:

   ```bash
   poetry install
   ```

4. Launch the frontend:

   ```bash
   npm run start
   ```

5. Set up the environment variables:

   ```bash
   export OPENAI_API_KEY=
   export SERPAPI_API_KEY=
   ```

6. Start the backend:

   ```bash
   python3 backend/main.py
   ```

7. Open the application in your browser at http://localhost:3000

--------------------------------------------------------------------------------
/backend/agent.py:
--------------------------------------------------------------------------------
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.llms import OpenAI
from langchain.utilities.serpapi import SerpAPIWrapper
from movie_database_tool import LLMGraphChain

ZERO_SHOT_FORMAT_INSTRUCTIONS = """
Use the following format:

1. If similarity is 1.0, you know the answer is exactly what the user asked for.
2. If similarities are not equal to 1.0, you need to present the user with a list of similar movies, saying "Here are the movies that are similar to what you asked for".
3. If you find the movie title directly in the Search tool, you always need to look in the Movies_chain to check if that movie is in the graph database.

Instructions:
a. Question: The input question you need to address.
b. Thought: Consider the appropriate course of action.
c. Action: Choose one of the available tools from [{tool_names}].
d. Action Input: Provide the input for the selected action.
e. Observation: Describe the outcome of the action.
(Note: Steps b to e may be repeated multiple times as needed.)

f. Thought: Indicate that the final answer is determined.
g. Final Answer: Provide the ultimate response to the original input question.
"""


class MovieAgent(AgentExecutor):
    """Movie agent"""

    @staticmethod
    def function_name():
        return "MovieAgent"

    @classmethod
    def initialize(cls, movie_graph, *args, **kwargs):
        llm = OpenAI(temperature=0)

        movie_tool = LLMGraphChain(llm=llm, graph=movie_graph, verbose=True)

        # Load the tool configs that are needed.
        search = SerpAPIWrapper()
        tools = [
            Tool(
                name="Movies_chain",
                func=movie_tool.run,
                description="Utilize this tool to search within a movie database, specifically designed to answer movie-related questions. The tool accepts inputs such as a clear title, genre, director, actor, or year, ensuring accurate and targeted results. Ideal for inquiries that require information from one or more of the following categories: title, genre, director, actor, or year. This specialized tool offers streamlined search capabilities to help you find the movie information you need with ease.",
            ),
            Tool(
                name="Search",
                func=search.run,
                description="Use this tool when you need to gather broader or non-movie-specific information, such as finding the year a particular event occurred, researching historical context, searching for the year of nominated movies, or seeking other related data. After obtaining the necessary details, you can then use the Movies_chain for more targeted movie information. This tool offers a wide range of search capabilities to help you find the answers you need on the internet.",
            ),
        ]

        agent = ZeroShotAgent.from_llm_and_tools(
            llm, tools, format_instructions=ZERO_SHOT_FORMAT_INSTRUCTIONS
        )

        return cls.from_agent_and_tools(agent=agent, tools=tools, verbose=True)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run(self, *args, **kwargs):
        return super().run(*args, **kwargs)

--------------------------------------------------------------------------------
/backend/database.py:
--------------------------------------------------------------------------------
from typing import List, Optional, Tuple

import networkx as nx
import pandas as pd


class MovieDatabase:
    def __init__(self, csv_file: str = "data/imdb_top_1000.csv"):
        """Initialize the movie database"""

        # Process the csv file
        df = self.process_csv(csv_file)

        # Get the features
        years, genres, directors, actors, titles = self.get_features(df)

        # Create the graph
        self.graph = self.create_graph(years, genres, directors, actors, titles)

    def process_csv(self, csv_file: str):
        """Process the csv file"""

        # Read csv file
        df = pd.read_csv(csv_file)
        df.index = pd.RangeIndex(start=0, stop=len(df))

        # Drop columns
        df = df.drop(columns=["Certificate", "Meta_score", "No_of_Votes", "Gross"])

        # Rename columns
        df = df.rename(
            columns={
                "Series_Title": "Title",
                "Released_Year": "Year",
                "IMDB_Rating": "Rating",
                "Poster_Link": "Poster",
            }
        )

        # Drop the row where Year holds the invalid value "PG"
        df = df[df["Year"] != "PG"]

        # Change runtime to only minutes and convert to int
        df["Runtime"] = df["Runtime"].str.replace("min", "")
        df["Runtime"] = df["Runtime"].astype(int)

        # Change year to int
        df["Year"] = df["Year"].astype(int)

        # Change rating to float
        df["Rating"] = df["Rating"].astype(float)

        # Change genre to list
        df["Genre"] = df["Genre"].str.split(", ")

        # Change director to list
        df["Director"] = df["Director"].str.split(", ")

        # Change actors to list
        df["Actors"] = df[["Star1", "Star2", "Star3", "Star4"]].values.tolist()

        # Drop columns
        df = df.drop(columns=["Star1", "Star2", "Star3", "Star4"])

        return df

    def get_features(self, df: pd.DataFrame):
        """Get the features from the dataframe"""

        def _extracted_from_get_features_8(df: pd.DataFrame, arg1: str):
            # Flatten a column of lists into a set of unique values
            result = []
            for genre in df[arg1]:
                result.extend(genre)
            return set(result)

        # Get all titles
        titles = df.to_dict(orient="index")

        # Get all genres, directors, and actors
        genres = _extracted_from_get_features_8(df, "Genre")
        directors = _extracted_from_get_features_8(df, "Director")
        actors = _extracted_from_get_features_8(df, "Actors")

        # Get all years
        years = set(df["Year"])

        return years, genres, directors, actors, titles

    def create_graph(
        self, years: set, genres: set, directors: set, actors: set, titles: dict
    ):
        """Create a graph from the dataframe"""

        # Initialize the graph
        G = nx.Graph()

        def add_nodes_to_graph(graph, items, node_type):
            for item in items:
                graph.add_node(item, type=node_type)

        def add_edges_to_graph(graph, title, items, edge_type):
            for item in items:
                graph.add_edge(title, item, type=edge_type)

        # Add years, genres, directors, and actors nodes
        add_nodes_to_graph(G, years, "year")
        add_nodes_to_graph(G, genres, "genre")
        add_nodes_to_graph(G, directors, "director")
        add_nodes_to_graph(G, actors, "actor")

        # Iterate through the movie records and add nodes and edges
        for movie in titles.values():
            genre = movie.pop("Genre")
            director = movie.pop("Director")
            actor = movie.pop("Actors")
            year = movie.pop("Year")
            title = movie.pop("Title")

            # Add title node
            G.add_node(title, type="title", attributes=movie)

            # Add year node and edge
            G.add_node(year, type="year")
            G.add_edge(title, year, type="title_year_edge")

            # Add edges for directors, actors, and genres
            add_edges_to_graph(G, title, director, "title_director_edge")
            add_edges_to_graph(G, title, actor, "title_actor_edge")
            add_edges_to_graph(G, title, genre, "title_genre_edge")

        return G

    def query_movies(
        self,
        title: Optional[str] = None,
        year: Optional[int] = None,
        genre: Optional[str] = None,
        director: Optional[str] = None,
        actor: Optional[str] = None,
        same_attributes_as: Optional[dict[str, str]] = None,
    ) -> List[Tuple[str, float]]:
        def get_neighbors_by_edge_type(node, edge_type):
            return [
                neighbor
                for neighbor, edge_attrs in self.graph[node].items()
                if edge_attrs["type"] == edge_type
            ]

        def matches_partial(queried, neighbor):
            if isinstance(queried, int) and isinstance(neighbor, int):
                return queried == neighbor
            elif isinstance(queried, str) and isinstance(neighbor, str):
                for q in queried.split():
                    if q.lower() in neighbor.lower():
                        return True
            return False

        def similarity_score(title, queried_attributes):
            matching_attributes = sum(
                any((matches_partial(attr, n) for n in self.graph.neighbors(title)))
                for attr in queried_attributes
            )
            return matching_attributes / len(queried_attributes)

        queried_attributes = []
        if year:
            queried_attributes.append(year)
        if genre:
            queried_attributes.append(genre)
        if director:
            queried_attributes.append(director)
        if actor:
            queried_attributes.append(actor)

        if same_attributes_as:
            for key, value in same_attributes_as.items():
                queried_attributes.extend(
                    get_neighbors_by_edge_type(value, f"title_{key}_edge")
                )

        if title:
            queried_attributes.extend(
                get_neighbors_by_edge_type(title, "title_year_edge")
            )
            queried_attributes.extend(
                get_neighbors_by_edge_type(title, "title_genre_edge")
            )
            queried_attributes.extend(
                get_neighbors_by_edge_type(title, "title_director_edge")
            )
            queried_attributes.extend(
                get_neighbors_by_edge_type(title, "title_actor_edge")
            )

        movie_scores = [
            (m, similarity_score(m, queried_attributes))
            for m, data in self.graph.nodes(data=True)
            if data["type"] == "title"
        ]
        movie_scores = [(m, s) for m, s in movie_scores if s > 0]
        movie_scores.sort(key=lambda x: x[1], reverse=True)

        # Keep only exact matches (score 1) if any exist
        if any(s == 1 for m, s in movie_scores):
            movie_scores = [(m, s) for m, s in movie_scores if s == 1]

        return movie_scores[:5]


if __name__ == "__main__":
    database = MovieDatabase()

    a = database.graph.nodes["The Dark Knight"]

    print(a)

--------------------------------------------------------------------------------
/backend/endpoints.py:
--------------------------------------------------------------------------------
import logging

from agent import MovieAgent
from database import MovieDatabase
from fastapi import APIRouter, HTTPException, Query
from run import get_result_and_thought_using_graph

# build router
router = APIRouter()
logger = logging.getLogger(__name__)
movie_graph = MovieDatabase()
agent_movie = MovieAgent.initialize(movie_graph=movie_graph)


@router.get("/predict")
def get_load(message: str = Query(...)):
    try:
        return get_result_and_thought_using_graph(agent_movie, movie_graph, message)
    except Exception as e:
        # Log stack trace
        logger.exception(e)
        raise HTTPException(status_code=500, detail=str(e)) from e

--------------------------------------------------------------------------------
/backend/logger.py:
--------------------------------------------------------------------------------
import logging
from pathlib import Path

from rich.logging import RichHandler

logger = logging.getLogger("imdb")


def configure(log_level: str = "INFO", log_file: Path = None):  # type: ignore
    log_format = "%(asctime)s - %(levelname)s - %(message)s"
    log_level_value = getattr(logging, log_level.upper(), logging.INFO)

    logging.basicConfig(
        level=log_level_value,
        format=log_format,
        datefmt="[%X]",
        handlers=[RichHandler(rich_tracebacks=True)],
    )

    if log_file:
        log_file = Path(log_file)
        log_file.parent.mkdir(parents=True, exist_ok=True)

        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(logging.Formatter(log_format))
        logger.addHandler(file_handler)

    logger.info(f"Logger set up with log level: {log_level_value}({log_level})")
    if log_file:
        logger.info(f"Log file: {log_file}")

--------------------------------------------------------------------------------
/backend/main.py:
--------------------------------------------------------------------------------
from endpoints import router as endpoints_router
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware


def create_app():
    """Create the FastAPI app and include the router."""
    app = FastAPI()

    origins = [
        "*",
    ]

    app.add_middleware(
        CORSMiddleware,
        allow_origins=origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    app.include_router(endpoints_router)
    return app


app = create_app()


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=7860)

--------------------------------------------------------------------------------
/backend/movie_database_tool.py:
--------------------------------------------------------------------------------
from typing import Dict, List

from database import MovieDatabase
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from pydantic import BaseModel, Extra

_PROMPT_TEMPLATE = """
You are helping to create a query for searching a graph database that finds similar movies based on specified parameters.
Your task is to translate the given question into a set of parameters for the query. Only include the information you were given.

The parameters are:
title (str, optional): The title of the movie
year (int, optional): The year the movie was released
genre (str, optional): The genre of the movie
director (str, optional): The director of the movie
actor (str, optional): The actor in the movie
same_attributes_as (optional): A dictionary of attributes to match the same attributes as another movie

Use the following format:
Question: "Question here"
Output: "Graph parameters here"

Example:
Question: "What is the title of the movie that was released in 2004 and directed by Steven Spielberg?"
Output:
year: 2004
director: Steven Spielberg

Question: "Movie with the same director as Eternal Sunshine of the Spotless Mind?"
Output:
same_attributes_as:
  director: Eternal Sunshine of the Spotless Mind

Begin!

Question: {question}
Output:
"""

PROMPT = PromptTemplate(input_variables=["question"], template=_PROMPT_TEMPLATE)


class LLMGraphChain(Chain, BaseModel):
    """Chain that translates a natural language question into graph query
    parameters and runs them against the movie database.

    Example:
        .. code-block:: python

            from database import MovieDatabase
            from langchain.llms import OpenAI
            llm_graph = LLMGraphChain(llm=OpenAI(), graph=MovieDatabase())
    """

    llm: BaseLLM
    """LLM wrapper to use."""
    prompt: BasePromptTemplate = PROMPT
    """Prompt to use to translate the question into graph query parameters."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:
    graph: MovieDatabase

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    def _process_llm_result(self, t: str) -> Dict[str, str]:
        import yaml

        self.callback_manager.on_text("\nQuery:\n", verbose=self.verbose)
        self.callback_manager.on_text(t, color="green", verbose=self.verbose)
        # Convert t to a dictionary
        t = yaml.safe_load(t)
        output = self.graph.query_movies(**t)
        self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
        self.callback_manager.on_text(output, color="yellow", verbose=self.verbose)
        return {self.output_key: "\n".join([f"{i[0]}: {i[1]}" for i in output])}

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_executor = LLMChain(
            prompt=self.prompt, llm=self.llm, callback_manager=self.callback_manager
        )
        self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
        t = llm_executor.predict(question=inputs[self.input_key], stop=["Output:"])
        return self._process_llm_result(t)

    @property
    def _chain_type(self) -> str:
        return "llm_movie_database"


if __name__ == "__main__":
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0.3)

    chain = LLMGraphChain(llm=llm, graph=MovieDatabase(), verbose=True)

    output = chain.run(
        "What is the title of the movie that was released in 2002 and directed by Steven Spielberg?"
    )

    print(output)

--------------------------------------------------------------------------------
/backend/run.py:
--------------------------------------------------------------------------------
import contextlib
import io
from typing import Dict

from logger import logger


def get_attributes_from_node(graph, title: str) -> Dict:
    """Get attributes from node"""
    attribute_dict = {"actors": [], "genre": [], "directors": [], "year": None}

    # Get attributes from node
    attribute_dict.update(**graph.nodes[title]["attributes"])

    # Add title
    attribute_dict["title"] = title

    for item, attr in graph[title].items():
        if attr["type"] == "title_year_edge":
            attribute_dict["year"] = item
        elif attr["type"] == "title_director_edge":
            attribute_dict["directors"].append(item)
        elif attr["type"] == "title_actor_edge":
            attribute_dict["actors"].append(item)
        elif attr["type"] == "title_genre_edge":
            attribute_dict["genre"].append(item)

    return attribute_dict


def get_result_and_thought_using_graph(
    langchain_object,
    database,
    message: str,
):
    """Get result and thought from extracted json"""
    try:
        if hasattr(langchain_object, "verbose"):
            langchain_object.verbose = True
        chat_input = None
        memory_key = ""
        if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
            memory_key = langchain_object.memory.memory_key

        for key in langchain_object.input_keys:
            if key not in [memory_key, "chat_history"]:
                chat_input = {key: message}

        langchain_object.return_intermediate_steps = True

        with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
            try:
                output = langchain_object(chat_input)
                # output = {
                #     'movies': [
                #         {
                #             'actors': ['Leonardo DiCaprio', 'Tom Hanks', 'Christopher Walken', 'Martin Sheen'],
                #             'genre': ['Biography', 'Crime', 'Drama'],
                #             'directors': ['Steven Spielberg'],
                #             'year': 2002,
                #             'Poster': 'https://m.media-amazon.com/images/M/MV5BMTY5MzYzNjc5NV5BMl5BanBnXkFtZTYwNTUyNTc2.V1_UX67_CR0,0,67,98_AL.jpg',
                #             'Runtime': 141,
                #             'Rating': 8.1,
                #             'Overview': 'Barely 21 yet, Frank is a skilled forger who has passed as a doctor, lawyer and pilot. FBI agent Carl becomes obsessed with tracking down the con man, who only revels in the pursuit.',
                #             'title': 'Catch Me If You Can'
                #         },
                #         {
                #             'actors': ['Tom Cruise', 'Colin Farrell', 'Samantha Morton', 'Max von Sydow'],
                #             'genre': ['Action', 'Crime', 'Mystery'],
                #             'directors': ['Steven Spielberg'],
                #             'year': 2002,
                #             'Poster': 'https://m.media-amazon.com/images/M/MV5BZTI3YzZjZjEtMDdjOC00OWVjLTk0YmYtYzI2MGMwZjFiMzBlXkEyXkFqcGdeQXVyMTQxNzMzNDI@.V1_UX67_CR0,0,67,98_AL.jpg',
                #             'Runtime': 145,
                #             'Rating': 7.6,
                #             'Overview': 'In a future where a special police unit is able to arrest murderers before they commit their crimes, an officer from that unit is himself accused of a future murder.',
                #             'title': 'Minority Report'
                #         }
                #     ],
                #     'response': 'The movie released in 2002 and directed by Steven Spielberg is Catch Me If You Can and Minority Report.',
                #     'thought': 'Thought'
                # }
                # return output
            except ValueError as exc:
                # make the error message more informative
                logger.debug(f"Error: {str(exc)}")
                output = langchain_object.run(chat_input)

            intermediate_steps = [
                action[1]
                for action in output["intermediate_steps"]
                if action[0].tool == "Movies_chain"
            ][0]

            movie_names = build_dict(intermediate_steps)

            movies = [
                get_attributes_from_node(database.graph, movie)
                for movie in movie_names.keys()
            ]

            thought = output_buffer.getvalue().strip()

    except Exception as exc:
        raise ValueError(f"Error: {str(exc)}") from exc

    return {"movies": movies, "response": output["output"], "thought": thought}


def build_dict(input):
    pairs = input.split("\n")

    # Create an empty dictionary to store the pairs
    my_dict = {}

    # Iterate over each pair and add it to the dictionary
    for pair in pairs:
        # Split the pair into movie and rating using the colon character
        movie, rating = pair.split(": ")

        # Convert the rating to a float and add it to the dictionary
        my_dict[movie] = float(rating)

    return my_dict


def format_intermediate_steps(intermediate_steps):
    formatted_chain = "> Entering new AgentExecutor chain...\n"
    for step in intermediate_steps:
        action = step[0]
        observation = step[1]

        formatted_chain += (
            f" {action.log}\nAction: {action.tool}\nAction Input: {action.tool_input}\n"
        )
        formatted_chain += f"Observation: {observation}\n"

    final_answer = f"Final Answer: {observation}\n"
    formatted_chain += f"Thought: I now know the final answer\n{final_answer}\n"
    formatted_chain += "> Finished chain.\n"

    return formatted_chain

--------------------------------------------------------------------------------
/frontend/.gitignore:
--------------------------------------------------------------------------------
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# production
/build

# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*

--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
{
  "name": "movies-chat",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@emotion/react": "^11.10.6",
    "@emotion/styled": "^11.10.6",
    "@fortawesome/react-fontawesome": "^0.2.0",
    "@testing-library/jest-dom": "^5.16.5",
    "@testing-library/react": "^13.4.0",
    "@testing-library/user-event": "^13.5.0",
    "ansi-to-html": "^0.7.2",
    "axios": "^1.3.5",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-icons": "^4.8.0",
    "react-scripts": "5.0.1",
    "socket.io-client": "^4.6.1",
    "web-vitals": "^2.1.4"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  }
}

--------------------------------------------------------------------------------
/frontend/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ibiscp/LLM-IMDB/d12cd5cc5d00721e90c68b697863b36365df3cae/frontend/public/favicon.ico

--------------------------------------------------------------------------------
/frontend/public/index.html:
--------------------------------------------------------------------------------
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <meta name="theme-color" content="#000000" />
    <meta name="description" content="Web site created using create-react-app" />
    <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
    <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
    <title>React App</title>
  </head>
  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
  </body>
</html>

--------------------------------------------------------------------------------
/frontend/public/logo192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ibiscp/LLM-IMDB/d12cd5cc5d00721e90c68b697863b36365df3cae/frontend/public/logo192.png

--------------------------------------------------------------------------------
/frontend/public/logo512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ibiscp/LLM-IMDB/d12cd5cc5d00721e90c68b697863b36365df3cae/frontend/public/logo512.png

--------------------------------------------------------------------------------
/frontend/public/manifest.json:
--------------------------------------------------------------------------------
{
  "short_name": "React App",
  "name": "Create React App Sample",
  "icons": [
    {
      "src": "favicon.ico",
      "sizes": "64x64 32x32 24x24 16x16",
      "type": "image/x-icon"
    },
    {
      "src": "logo192.png",
      "type": "image/png",
      "sizes": "192x192"
    },
    {
      "src": "logo512.png",
      "type": "image/png",
      "sizes": "512x512"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#000000",
  "background_color": "#ffffff"
}

--------------------------------------------------------------------------------
/frontend/public/robots.txt:
--------------------------------------------------------------------------------
# https://www.robotstxt.org/robotstxt.html
User-agent: *
Disallow:

--------------------------------------------------------------------------------
/frontend/src/App.css:
--------------------------------------------------------------------------------
body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
  background-color: #4b0082;
}

.App {
  text-align: center;
}

.chat-container {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  height: 100%;
}

.chat-box {
  width: 80%;
  max-width: 1200px;
  background-color: white;
  border-radius: 10px;
  padding: 20px;
  box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}

.message {
  display: flex;
  justify-content: flex-end;
}

.message-bot {
  text-align: left;
  background-color: #e0e0e0;
  border-radius: 10px;
  padding: 5px 10px;
  margin-bottom: 5px;
  max-width: 75%; /* Set a maximum width */
  word-wrap: break-word; /* Enable word wrapping */
}

.message-user {
  text-align: right;
  background-color: #4b0082;
  color: white;
  border-radius: 10px;
  padding: 5px 10px;
  margin-bottom: 5px;
  max-width: 75%; /* Set a maximum width */
  word-wrap: break-word; /* Enable word wrapping */
}

.message-bot-container,
.message-user-container {
  display: flex;
  width: 100%;
}

.message-bot-container {
  justify-content: flex-start;
}

.message-user-container {
  justify-content: flex-end;
}

.input-container {
  display: flex;
}

.input {
  flex-grow: 1;
  border: 1px solid #ddd;
  border-radius: 4px;
  padding: 6px;
}

.send-button {
  background-color: #4b0082;
  color: white;
  border: none;
  border-radius: 4px;
  padding: 6px 12px;
  margin-left: 6px;
  cursor: pointer;
}

.send-button[disabled] {
  cursor: default;
}

.message {
  margin-bottom: 10px;
}

.user {
  font-weight: bold;
  color: #4b0082;
}

.message-content {
  display: flex;
  flex-direction: column;
  align-items: center;
}

.movie-cards-container {
  display: flex;
  overflow-x: auto;
  gap: 10px;
  justify-content: center;
}

.movie-card {
  display: flex;
  flex-direction: column;
  background-color: white;
  border-radius: 10px;
  padding: 10px;
  box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
  min-width: 200px;
  max-width: 300px;
}

.movie-top-section {
  display: flex;
  flex-direction: row;
  align-items: flex-start;
  margin-bottom: 10px;
}

.movie-card h4 {
  font-size: 18px;
  margin: 5px 0;
}

.movie-card p {
  font-size: 14px;
  margin: 5px 0;
}

.movie-cover-container {
  flex: 0 0 auto;
  margin-right: 10px;
}

.movie-details {
  flex: 1 1 auto;
  display: flex;
  flex-direction: column;
}

.movie-cover {
  max-height: 100%;
  max-width: 100px;
  border-radius: 10px;
  object-fit: cover;
  margin-bottom: 10px;
}

.watch-now-button {
  background-color: indigo;
  color: white;
  border: none;
  border-radius: 5px;
  padding: 5px 10px;
  font-size: 14px;
  cursor: pointer;
  margin-top: auto;
}

.synopsis {
  font-size: 14px;
  margin: 10px 0;
  text-align: justify;
}

.genre {
  font-size: 14px;
  margin: 10px 0;
  text-align: center;
}

.toggle-thought-button {
  color: indigo;
  cursor: pointer;
  border: 0px;
  background-color: transparent;
  text-align: center;
  font-size: 1rem;
}

.thought-modal {
  position: fixed;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  background-color: rgba(0, 0, 0, 0.5);
  display: flex;
  align-items: center;
  justify-content: center;
}

.thought-modal-content {
  background-color: white;
  padding: 20px;
  border-radius: 5px;
  max-width: 80%;
  width: 800px;
  text-align: left;
}

.thought-icon {
  margin-right: 0.5rem;
  font-size: 1.2rem;
}

.close-modal-button {
  background-color: indigo;
  color: white;
  border: none;
  border-radius: 5px;
  padding: 5px 10px;
  cursor: pointer;
  display: block;
  margin: 0 auto;
}

--------------------------------------------------------------------------------
/frontend/src/App.js:
--------------------------------------------------------------------------------
import React, { useState, useEffect, useRef } from 'react';
import { BsFillGearFill } from "react-icons/bs";
import axios from 'axios';
import './App.css';
var Convert = require('ansi-to-html');
var convert = new Convert({newline: true});


function App() {
  const [input, setInput] = useState('');
  const [messages, setMessages] = useState([]);
  const [loading, setLoading] = useState(false);
  const messagesEndRef = useRef(null);
  const [currentThought, setCurrentThought] = useState(null);
  const [thought, setThought] = useState(null);
  const [isThoughtVisible, setIsThoughtVisible] = useState(false);

  const scrollToBottom = () => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
  };

  useEffect(scrollToBottom, [messages]);

  const sendMessage = async () => {
    if (input.trim() === '') return;

    setMessages([...messages, { text: input, user: 'You' }]);
    setInput('');

    try {
      setLoading(true);
      const response = await axios.get("http://127.0.0.1:7860/predict", {
        params: { message: input },
      });

      const answer = response.data.response;
      const movies = response.data.movies;
      const thought = response.data.thought;

      setThought(thought);
      setIsThoughtVisible(false);

      setMessages((prevMessages) => [
        ...prevMessages,
        {
          text: answer,
          user: "Agent",
          movies: movies,
          thought: thought,
        },
      ]);
    } catch (error) {
      console.error("Error fetching message:", error);
    } finally {
      setLoading(false);
    }
  };

  const ThoughtModal = ({ show, onClose, thought }) => {
    if (!show) {
      return null;
    }

    return (
      <div className="thought-modal">
        <div className="thought-modal-content">
          {/* <h2>
            <BsFillGearFill className="thought-icon" />
            Bot's Thought
          </h2> */}
          <div dangerouslySetInnerHTML={{ __html: convert.toHtml(thought) }} />
          <button className="close-modal-button" onClick={onClose}>
            Close
          </button>
        </div>
      </div>
    );
  };

  const MovieCard = ({ movie }) => {
    const { title, year, directors, Overview, Poster, genre } = movie;

    return (
      <div className="movie-card">
        <div className="movie-top-section">
          <div className="movie-cover-container">
            <img className="movie-cover" src={Poster} alt={title} />
          </div>
          <div className="movie-details">
            <h4>{title}</h4>
            <p>{year}</p>
            <p>{directors.join(', ')}</p>
          </div>
        </div>
        <p className="genre">{genre.join(', ')}</p>
        <p className="synopsis">{Overview}</p>
        <button className="watch-now-button">Watch Now</button>
      </div>
    );
  };

  const handleKeyPress = (event) => {
    if (event.key === 'Enter') {
      sendMessage();
    }
  };

  return (
    <div className="App">
      <ThoughtModal
        show={currentThought !== null}
        onClose={() => setCurrentThought(null)}
        thought={currentThought}
      />
      <div className="chat-container">
        <div className="chat-box">
          <div className="messages">
            {messages.map((message, index) => (
              <div
                key={index}
                className={
                  message.user === "Agent"
                    ? "message-bot-container"
                    : "message-user-container"
                }
              >
                <div
                  className={
                    message.user === "Agent" ? "message-bot" : "message-user"
                  }
                >
                  {message.text}
                  {message.user === "Agent" && message.thought && (
                    <button
                      className="toggle-thought-button"
                      onClick={() => setCurrentThought(message.thought)}
                    >
                      <BsFillGearFill className="thought-icon" />
                    </button>
                  )}
                </div>
                {message.movies && message.movies.length > 0 && (
                  <div className="movie-cards-container">
                    {message.movies.map((movie) => (
                      <MovieCard key={movie.title} movie={movie} />
                    ))}
                  </div>
                )}
              </div>
            ))}
            <div ref={messagesEndRef} />
          </div>
          <div className="input-container">
            <input
              className="input"
              value={input}
              onChange={(e) => setInput(e.target.value)}
              onKeyPress={handleKeyPress}
              disabled={loading}
            />
            <button
              className="send-button"
              onClick={sendMessage}
              disabled={loading}
            >
              Send
            </button>
          </div>
        </div>
      </div>
    </div>
  );
}

export default App;

--------------------------------------------------------------------------------
/frontend/src/App.test.js:
--------------------------------------------------------------------------------
import { render, screen } from '@testing-library/react';
import App from './App';

test('renders learn react link', () => {
  render(<App />);
  const linkElement = screen.getByText(/learn react/i);
  expect(linkElement).toBeInTheDocument();
});

--------------------------------------------------------------------------------
/frontend/src/index.css:
--------------------------------------------------------------------------------
body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

code {
  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
    monospace;
}

--------------------------------------------------------------------------------
/frontend/src/index.js:
--------------------------------------------------------------------------------
import React from 'react';
import ReactDOM from 'react-dom/client';
import './index.css';
import App from './App';
import reportWebVitals from './reportWebVitals';

const root = ReactDOM.createRoot(document.getElementById('root'));
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);

// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals();

--------------------------------------------------------------------------------
/frontend/src/logo.svg:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/frontend/src/reportWebVitals.js:
--------------------------------------------------------------------------------
const reportWebVitals = onPerfEntry => {
  if (onPerfEntry && onPerfEntry instanceof Function) {
    import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
      getCLS(onPerfEntry);
      getFID(onPerfEntry);
      getFCP(onPerfEntry);
      getLCP(onPerfEntry);
      getTTFB(onPerfEntry);
    });
  }
};

export default reportWebVitals;

--------------------------------------------------------------------------------
/frontend/src/setupTests.js:
--------------------------------------------------------------------------------
// jest-dom adds custom jest matchers for asserting on DOM nodes.
// allows you to do things like:
// expect(element).toHaveTextContent(/react/i)
// learn more: https://github.com/testing-library/jest-dom
import '@testing-library/jest-dom';

--------------------------------------------------------------------------------
/image/llm-imdb.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ibiscp/LLM-IMDB/d12cd5cc5d00721e90c68b697863b36365df3cae/image/llm-imdb.gif

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[tool.poetry]
name = "llm-imdb"
version = "0.1.0"
description = "Proof of concept app using LangChain and LLMs to retrieve information from graphs, built with the IMDB dataset"
authors = ["Ibis Prevedello"]
readme = "README.md"
packages = [{include = "llm_imdb"}]

[tool.poetry.dependencies]
python = "^3.11"
pandas = "^2.0.0"
langchain = "^0.0.136"
networkx = "^3.1"
fastapi = "^0.95.0"
rich = "^13.3.3"
openai = "^0.27.4"
google-search-results = "^2.4.2"
uvicorn = "^0.21.1"
setuptools = "^67.6.1"

[tool.poetry.group.dev.dependencies]
black = "^23.1.0"
ipykernel = "^6.21.2"
mypy = "^1.1.1"
ruff = "^0.0.254"
httpx = "^0.23.3"
pytest = "^7.2.2"
types-requests = "^2.28.11"
requests = "^2.28.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

--------------------------------------------------------------------------------