├── Unit2-Tools ├── area.txt ├── test.txt ├── images │ ├── ReAct_loop.png │ ├── tool-selection.png │ ├── easytool-simplify.png │ ├── function-calling.png │ └── react-agent-flow.png ├── OpenAI-Function-Calling.ipynb └── data │ └── tmdb_tool.json ├── Unit4-Planning ├── images │ ├── cot.jpeg │ ├── sot.jpg │ ├── tot.png │ ├── cot-sc.jpg │ ├── Reflexion.png │ ├── langgraph.jpg │ ├── plan-solve.png │ └── reflection.png ├── together_llm.py ├── gemini_llm.py ├── Basics_Langgraph.ipynb ├── Planning_with_task_decomposition.ipynb └── Skeleton_of_Thought_Generation.ipynb ├── Unit3-Memory ├── images │ ├── naive-rag.webp │ ├── buffer-memory.png │ ├── entity-example.png │ ├── summary-memory.png │ └── buffer-window-memory.png ├── knowledge-agent │ ├── knowledge-agent.png │ ├── current_profile.json │ ├── README.md │ ├── chatbot.py │ ├── together_llm.py │ └── KnowledgeAgent.py └── MultiModal_RAG.ipynb ├── Unit5-Agent_Examples ├── data │ └── recipes.pdf ├── images │ ├── movie-bot.png │ ├── agentic-rag.png │ ├── code-assistant.png │ └── agentic-rag-block.png ├── temp_test.py ├── chatbot.py ├── Movie_Recommendation_Agent │ ├── chatbot.py │ └── movie_recommendation_agent.py ├── together_llm.py └── movie_recommendation_agent.py ├── Unit1-Foundation_LLM_Agents ├── images │ ├── selfask.png │ └── key-agent-steps-first-agent.png └── My_First_Agent.ipynb ├── .env ├── requirements.txt ├── LICENSE └── README.md /Unit2-Tools/area.txt: -------------------------------------------------------------------------------- 1 | 20.999999999999996 -------------------------------------------------------------------------------- /Unit2-Tools/test.txt: -------------------------------------------------------------------------------- 1 | This is a test for the tool -------------------------------------------------------------------------------- /Unit4-Planning/images/cot.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/cot.jpeg -------------------------------------------------------------------------------- /Unit4-Planning/images/sot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/sot.jpg -------------------------------------------------------------------------------- /Unit4-Planning/images/tot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/tot.png -------------------------------------------------------------------------------- /Unit2-Tools/images/ReAct_loop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit2-Tools/images/ReAct_loop.png -------------------------------------------------------------------------------- /Unit4-Planning/images/cot-sc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/cot-sc.jpg -------------------------------------------------------------------------------- /Unit3-Memory/images/naive-rag.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit3-Memory/images/naive-rag.webp 
-------------------------------------------------------------------------------- /Unit4-Planning/images/Reflexion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/Reflexion.png -------------------------------------------------------------------------------- /Unit4-Planning/images/langgraph.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/langgraph.jpg -------------------------------------------------------------------------------- /Unit4-Planning/images/plan-solve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/plan-solve.png -------------------------------------------------------------------------------- /Unit4-Planning/images/reflection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit4-Planning/images/reflection.png -------------------------------------------------------------------------------- /Unit2-Tools/images/tool-selection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit2-Tools/images/tool-selection.png -------------------------------------------------------------------------------- /Unit3-Memory/images/buffer-memory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit3-Memory/images/buffer-memory.png -------------------------------------------------------------------------------- /Unit3-Memory/images/entity-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit3-Memory/images/entity-example.png -------------------------------------------------------------------------------- /Unit3-Memory/images/summary-memory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit3-Memory/images/summary-memory.png -------------------------------------------------------------------------------- /Unit5-Agent_Examples/data/recipes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit5-Agent_Examples/data/recipes.pdf -------------------------------------------------------------------------------- /Unit2-Tools/images/easytool-simplify.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit2-Tools/images/easytool-simplify.png -------------------------------------------------------------------------------- /Unit2-Tools/images/function-calling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit2-Tools/images/function-calling.png -------------------------------------------------------------------------------- /Unit2-Tools/images/react-agent-flow.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit2-Tools/images/react-agent-flow.png -------------------------------------------------------------------------------- /Unit5-Agent_Examples/images/movie-bot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit5-Agent_Examples/images/movie-bot.png -------------------------------------------------------------------------------- /Unit5-Agent_Examples/images/agentic-rag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit5-Agent_Examples/images/agentic-rag.png -------------------------------------------------------------------------------- /Unit1-Foundation_LLM_Agents/images/selfask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit1-Foundation_LLM_Agents/images/selfask.png -------------------------------------------------------------------------------- /Unit3-Memory/images/buffer-window-memory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit3-Memory/images/buffer-window-memory.png -------------------------------------------------------------------------------- /Unit5-Agent_Examples/images/code-assistant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit5-Agent_Examples/images/code-assistant.png -------------------------------------------------------------------------------- /Unit3-Memory/knowledge-agent/knowledge-agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit3-Memory/knowledge-agent/knowledge-agent.png -------------------------------------------------------------------------------- /Unit5-Agent_Examples/images/agentic-rag-block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit5-Agent_Examples/images/agentic-rag-block.png -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY='sk-' 2 | LANGCHAIN_API_KEY='' 3 | LANGCHAIN_HUB_API_KEY='' 4 | TAVILY_API_KEY='tvly-' 5 | ANTHROPIC_API_KEY='' 6 | GOOGLE_API_KEY='' 7 | TOGETHER_API_KEY='' 8 | -------------------------------------------------------------------------------- /Unit1-Foundation_LLM_Agents/images/key-agent-steps-first-agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rohanmistry231/LLM-Agents/main/Unit1-Foundation_LLM_Agents/images/key-agent-steps-first-agent.png -------------------------------------------------------------------------------- /Unit3-Memory/knowledge-agent/current_profile.json: -------------------------------------------------------------------------------- 1 | {"Emily": "Is 8 years old. Likes outdoor sports", "Ethan": "15 year old. Into Music. 
Likes to play Guitar.", "Dad": "Is into Wood Working."} -------------------------------------------------------------------------------- /Unit3-Memory/knowledge-agent/README.md: -------------------------------------------------------------------------------- 1 | ### Knowledge Agent 2 | 3 | The knowledge agent architecture is explained below: 4 | 5 | ![Knowledge-Agent](knowledge-agent.png) 6 | 7 | ### Instructions 8 | To run the Streamlit front-end, please run 9 | 10 | `streamlit run chatbot.py` 11 | 12 | If starting fresh, delete `current_profile.json` to clear existing profiles. -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | openai 2 | langchain 3 | langchain-community 4 | tavily-python 5 | langchain-openai 6 | langchainhub 7 | python-dotenv 8 | pyyaml 9 | arxiv 10 | mediawikiapi 11 | wikibase-rest-api-client 12 | pymupdf 13 | youtube-search 14 | termcolor 15 | langchain-anthropic 16 | torch 17 | transformers 18 | together 19 | sentence_transformers 20 | datasets 21 | chromadb 22 | langchain-google-genai 23 | tiktoken 24 | google-generativeai 25 | embedchain==0.1.111 26 | youtube_transcript_api 27 | pytube 28 | langgraph 29 | grandalf 30 | semantic-router 31 | ipykernel -------------------------------------------------------------------------------- /Unit5-Agent_Examples/temp_test.py: -------------------------------------------------------------------------------- 1 | def largest_common_substring(str1, str2): 2 | """Return the longest common contiguous substring of str1 and str2, found with dynamic programming.""" 3 | m = len(str1) 4 | n = len(str2) 5 | result = '' 6 | length = 0 7 | # dp[i][j] holds the length of the common suffix of str1[:i] and str2[:j] 8 | dp = [[0] * (n + 1) for _ in range(m + 1)] 9 | 10 | for i in range(1, m + 1): 11 | for j in range(1, n + 1): 12 | if str1[i - 1] == str2[j - 1]: 13 | dp[i][j] = dp[i - 1][j - 1] + 1 14 | # Track the longest match seen so far and slice it out of str1 15 | if dp[i][j] > length: 16 | length = dp[i][j] 17 | result = str1[i - length:i] 18 | else: 19 | dp[i][j] = 0 20 | 21 | return result 22 | result = largest_common_substring('abcdef', 'zabcf') 23 | print(result) # prints 'abc' -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Priyanka Dwivedi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /Unit5-Agent_Examples/chatbot.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from movie_recommendation_agent import Chatbot 3 | from langchain.memory import ChatMessageHistory 4 | st.title("Classical Movies Recommendation Agent") 5 | 6 | # Initialize chat history 7 | if "messages" not in st.session_state: 8 | st.session_state.messages = [] 9 | if "memory" not in st.session_state: 10 | st.session_state.memory = ChatMessageHistory(session_id="test-session") 11 | 12 | ## Initialize the agent: 13 | rec_agent = Chatbot(model='gpt-4o', temperature=0.3, memory=st.session_state.memory) 14 | 15 | # Display chat messages from history on app rerun 16 | for message in st.session_state.messages: 17 | with st.chat_message(message["role"]): 18 | st.markdown(message["content"]) 19 | 20 | # Accept user input 21 | if prompt := st.chat_input("Type your question here."): 22 | # Add user message to chat history 23 | st.session_state.messages.append({"role": "user", "content": prompt}) 24 | # Display user message in chat message container 25 | with st.chat_message("user"): 26 | st.markdown(prompt) 27 | 28 | ## Get response 29 | response = rec_agent.run(prompt) 30 | # Display assistant response in chat message container 31 | with st.chat_message("assistant"): 32 | st.markdown(response) 33 | # Add assistant response to chat history 34 | st.session_state.messages.append({"role": "assistant", "content": response}) 35 | -------------------------------------------------------------------------------- /Unit3-Memory/knowledge-agent/chatbot.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from KnowledgeAgent import Memory, Chatbot 3 | 4 | st.title("Gift Recommendation Agent") 5 | 6 | ## Initialize the agents: 7 | memory_agent = Memory(model='meta-llama/Llama-3-8b-chat-hf', temperature=0.2) 8 | rec_agent = Chatbot(model='gpt-4-turbo-preview', temperature=0.3) 9 | 10 | # Initialize chat history 11 | if "messages" not in st.session_state: 12 | st.session_state.messages = [] 13 | 14 | # Display chat messages from history on app rerun 15 | for message in st.session_state.messages: 16 | with st.chat_message(message["role"]): 17 | st.markdown(message["content"]) 18 | 19 | # Accept user input 20 | if prompt := st.chat_input("Type your question here."): 21 | # Add user message to chat history 22 | st.session_state.messages.append({"role": "user", "content": prompt}) 23 | # Display user message in chat message container 24 | with st.chat_message("user"): 25 | st.markdown(prompt) 26 | 27 | ### Add to memory 28 | mem_result = memory_agent.run(prompt) 29 | st.write("Memory agent output: ", mem_result['text']) 30 | ## Get response 31 | response = rec_agent.run(prompt) 32 | # Display assistant response in chat message container 33 | with st.chat_message("assistant"): 34 | st.markdown(response) 35 | # Add assistant response to chat history 36 | st.session_state.messages.append({"role": "assistant", "content": response}) 37 | -------------------------------------------------------------------------------- /Unit5-Agent_Examples/Movie_Recommendation_Agent/chatbot.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from movie_recommendation_agent
import Chatbot 3 | from langchain.memory import ChatMessageHistory 4 | st.title("Classical Movies Recommendation Agent") 5 | 6 | # Initialize chat history 7 | if "messages" not in st.session_state: 8 | st.session_state.messages = [] 9 | if "memory" not in st.session_state: 10 | st.session_state.memory = ChatMessageHistory(session_id="test-session") 11 | 12 | ## Initialize the agent: 13 | rec_agent = Chatbot(model='gpt-4o', temperature=0.3, memory=st.session_state.memory) 14 | 15 | # Display chat messages from history on app rerun 16 | for message in st.session_state.messages: 17 | with st.chat_message(message["role"]): 18 | st.markdown(message["content"]) 19 | 20 | # Accept user input 21 | if prompt := st.chat_input("Type your question here."): 22 | # Add user message to chat history 23 | st.session_state.messages.append({"role": "user", "content": prompt}) 24 | # Display user message in chat message container 25 | with st.chat_message("user"): 26 | st.markdown(prompt) 27 | 28 | ## Get response 29 | response = rec_agent.run(prompt) 30 | # Display assistant response in chat message container 31 | with st.chat_message("assistant"): 32 | st.markdown(response) 33 | # Add assistant response to chat history 34 | st.session_state.messages.append({"role": "assistant", "content": response}) 35 | -------------------------------------------------------------------------------- /Unit3-Memory/knowledge-agent/together_llm.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Iterator, List, Mapping, Optional 2 | from together import Together 3 | import os 4 | from langchain_core.callbacks.manager import CallbackManagerForLLMRun 5 | from langchain_core.language_models.llms import LLM 6 | from langchain_core.outputs import GenerationChunk 7 | 8 | 9 | class TogetherLLM(LLM): 10 | """ Runs an open source LLM from Together AI. 11 | """ 12 | 13 | def _call( 14 | self, 15 | prompt: str, 16 | stop: Optional[List[str]] = None, 17 | run_manager: Optional[CallbackManagerForLLMRun] = None, 18 | model: Optional[str] = 'mistralai/Mistral-7B-Instruct-v0.2', 19 | temperature: Optional[float] = 0.3, 20 | max_tokens: Optional[int] = 256, 21 | **kwargs: Any, 22 | ) -> str: 23 | """Run the LLM on the given input. 24 | 25 | Override this method to implement the LLM logic. 26 | 27 | Args: 28 | prompt: The prompt to generate from. 29 | stop: Stop words to use when generating. Model output is cut off at the 30 | first occurrence of any of the stop substrings. 31 | If stop tokens are not supported consider raising NotImplementedError. 32 | run_manager: Callback manager for the run. 33 | **kwargs: Arbitrary additional keyword arguments. These are usually passed 34 | to the model provider API call. 35 | 36 | Returns: 37 | The model output as a string. Actual completions SHOULD NOT include the prompt.
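Example (an illustrative sketch; assumes a valid TOGETHER_API_KEY is set in the environment): TogetherLLM().invoke('Say hello') returns a plain-text completion from the default Mistral model.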
38 | """ 39 | if stop is not None: 40 | raise ValueError("stop kwargs are not permitted.") 41 | together_client = Together(api_key=os.environ["TOGETHER_API_KEY"]) 42 | response = together_client.chat.completions.create( 43 | model=model, 44 | messages=[{"role": "user", "content": f"{prompt}"}], 45 | temperature=temperature, 46 | max_tokens=max_tokens 47 | ) 48 | return response.choices[0].message.content 49 | 50 | 51 | def _stream( 52 | self, 53 | prompt: str, 54 | stop: Optional[List[str]] = None, 55 | run_manager: Optional[CallbackManagerForLLMRun] = None, 56 | **kwargs: Any, 57 | ) -> Iterator[GenerationChunk]: 58 | """Stream the LLM on the given prompt. 59 | 60 | This method should be overridden by subclasses that support streaming. 61 | 62 | This wrapper does not implement streaming, so calling this method 63 | raises NotImplementedError; use invoke() to get the full completion 64 | as a single string. 65 | 66 | Args: 67 | prompt: The prompt to generate from. 68 | stop: Stop words to use when generating. Model output is cut off at the 69 | first occurrence of any of these substrings. 70 | run_manager: Callback manager for the run. 71 | **kwargs: Arbitrary additional keyword arguments. These are usually passed 72 | to the model provider API call. 73 | 74 | Returns: 75 | An iterator of GenerationChunks. 76 | """ 77 | raise NotImplementedError("Streaming is not implemented for this wrapper; use invoke() instead.") 78 | 79 | @property 80 | def _identifying_params(self) -> Dict[str, Any]: 81 | """Return a dictionary of identifying parameters.""" 82 | return { 83 | # The model name allows users to specify custom token counting 84 | # rules in LLM monitoring applications (e.g., in LangSmith users 85 | # can provide per token pricing for their model and monitor 86 | # costs for the given LLM.) 87 | "model_name": "Together open source LLM", 88 | } 89 | 90 | @property 91 | def _llm_type(self) -> str: 92 | """Get the type of language model used by this chat model. Used for logging purposes only.""" 93 | return "Together Open source LLM" -------------------------------------------------------------------------------- /Unit4-Planning/together_llm.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Iterator, List, Mapping, Optional 2 | import os 3 | from langchain_core.callbacks.manager import CallbackManagerForLLMRun 4 | from langchain_core.language_models.llms import LLM 5 | from langchain_core.outputs import GenerationChunk 6 | from together import Together 7 | 8 | 9 | class TogetherLLM(LLM): 10 | """ Runs an open source LLM from Together AI. """ 11 | 12 | model_name: str = "Together open source LLM" 13 | max_tokens: int = 1024 14 | temperature: float = 0.3 15 | 16 | def __init__(self, model: str = 'mistralai/Mistral-7B-Instruct-v0.3', max_tokens: Optional[int] = None, temperature: Optional[float] = None, **kwargs: Any): 17 | """ 18 | Initializes the TogetherLLM with specified parameters. 19 | 20 | Args: 21 | model: The name of the model to use. 22 | max_tokens: The maximum number of tokens to generate (optional). 23 | temperature: The temperature to use for sampling (optional). 24 | **kwargs: Additional keyword arguments to pass to the parent LLM class.
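Example (an illustrative sketch; assumes a valid TOGETHER_API_KEY is set in the environment): llm = TogetherLLM(model='mistralai/Mistral-7B-Instruct-v0.3', max_tokens=512), then llm.invoke('Write one line about LLM agents.') returns the completion as a string.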
25 | """ 26 | kwargs['model_name'] = model 27 | kwargs['max_tokens'] = max_tokens if max_tokens is not None else self.__class__.max_tokens 28 | kwargs['temperature'] = temperature if temperature is not None else self.__class__.temperature 29 | super().__init__(**kwargs) 30 | 31 | 32 | def _call( 33 | self, 34 | prompt: str, 35 | stop: Optional[List[str]] = None, 36 | run_manager: Optional[CallbackManagerForLLMRun] = None, 37 | **kwargs: Any, 38 | ) -> str: 39 | if stop is not None: 40 | raise ValueError("stop kwargs are not permitted.") 41 | 42 | together_client = Together(api_key=os.environ["TOGETHER_API_KEY"]) 43 | response = together_client.chat.completions.create( 44 | model=self.model_name, # Use the model specified in __init__ 45 | messages=[{"role": "user", "content": f"{prompt}"}], 46 | temperature=self.temperature, # Use the temperature specified in __init__ 47 | max_tokens=self.max_tokens, # Use the max_tokens specified in __init__ 48 | ) 49 | return response.choices[0].message.content 50 | 51 | 52 | def _stream( 53 | self, 54 | prompt: str, 55 | stop: Optional[List[str]] = None, 56 | run_manager: Optional[CallbackManagerForLLMRun] = None, 57 | **kwargs: Any, 58 | ) -> Iterator[GenerationChunk]: 59 | """Stream the LLM on the given prompt. 60 | 61 | This method should be overridden by subclasses that support streaming. 62 | 63 | This wrapper does not implement streaming, so calling this method 64 | raises NotImplementedError; use invoke() to get the full completion 65 | as a single string. 66 | 67 | Args: 68 | prompt: The prompt to generate from. 69 | stop: Stop words to use when generating. Model output is cut off at the 70 | first occurrence of any of these substrings. 71 | run_manager: Callback manager for the run. 72 | **kwargs: Arbitrary additional keyword arguments. These are usually passed 73 | to the model provider API call. 74 | 75 | Returns: 76 | An iterator of GenerationChunks. 77 | """ 78 | raise NotImplementedError("Streaming is not implemented for this wrapper; use invoke() instead.") 79 | 80 | @property 81 | def _identifying_params(self) -> Dict[str, Any]: 82 | """Return a dictionary of identifying parameters.""" 83 | return { 84 | # The model name allows users to specify custom token counting 85 | # rules in LLM monitoring applications (e.g., in LangSmith users 86 | # can provide per token pricing for their model and monitor 87 | # costs for the given LLM.) 88 | "model_name": "Together open source LLM", 89 | } 90 | 91 | @property 92 | def _llm_type(self) -> str: 93 | """Get the type of language model used by this chat model. Used for logging purposes only.""" 94 | return "Together Open source LLM" -------------------------------------------------------------------------------- /Unit5-Agent_Examples/together_llm.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Iterator, List, Mapping, Optional 2 | import os 3 | from langchain_core.callbacks.manager import CallbackManagerForLLMRun 4 | from langchain_core.language_models.llms import LLM 5 | from langchain_core.outputs import GenerationChunk 6 | from together import Together 7 | 8 | 9 | class TogetherLLM(LLM): 10 | """ Runs an open source LLM from Together AI. """ 11 | 12 | model_name: str = "Together open source LLM" 13 | max_tokens: int = 256 14 | temperature: float = 0.3 15 | 16 | def __init__(self, model: str = 'mistralai/Mistral-7B-Instruct-v0.3', max_tokens: Optional[int] = None, temperature: Optional[float] = None, **kwargs: Any): 17 | """ 18 | Initializes the TogetherLLM with specified parameters.
19 | 20 | Args: 21 | model: The name of the model to use. 22 | max_tokens: The maximum number of tokens to generate (optional). 23 | temperature: The temperature to use for sampling (optional). 24 | **kwargs: Additional keyword arguments to pass to the parent LLM class. 25 | """ 26 | kwargs['model_name'] = model 27 | kwargs['max_tokens'] = max_tokens if max_tokens is not None else self.__class__.max_tokens 28 | kwargs['temperature'] = temperature if temperature is not None else self.__class__.temperature 29 | super().__init__(**kwargs) 30 | 31 | 32 | def _call( 33 | self, 34 | prompt: str, 35 | stop: Optional[List[str]] = None, 36 | run_manager: Optional[CallbackManagerForLLMRun] = None, 37 | **kwargs: Any, 38 | ) -> str: 39 | if stop is not None: 40 | raise ValueError("stop kwargs are not permitted.") 41 | 42 | together_client = Together(api_key=os.environ["TOGETHER_API_KEY"]) 43 | response = together_client.chat.completions.create( 44 | model=self.model_name, # Use the model specified in __init__ 45 | messages=[{"role": "user", "content": f"{prompt}"}], 46 | temperature=self.temperature, # Use the temperature specified in __init__ 47 | max_tokens=self.max_tokens, # Use the max_tokens specified in __init__ 48 | ) 49 | return response.choices[0].message.content 50 | 51 | 52 | def _stream( 53 | self, 54 | prompt: str, 55 | stop: Optional[List[str]] = None, 56 | run_manager: Optional[CallbackManagerForLLMRun] = None, 57 | **kwargs: Any, 58 | ) -> Iterator[GenerationChunk]: 59 | """Stream the LLM on the given prompt. 60 | 61 | This method should be overridden by subclasses that support streaming. 62 | 63 | This wrapper does not implement streaming, so calling this method 64 | raises NotImplementedError; use invoke() to get the full completion 65 | as a single string. 66 | 67 | Args: 68 | prompt: The prompt to generate from. 69 | stop: Stop words to use when generating. Model output is cut off at the 70 | first occurrence of any of these substrings. 71 | run_manager: Callback manager for the run. 72 | **kwargs: Arbitrary additional keyword arguments. These are usually passed 73 | to the model provider API call. 74 | 75 | Returns: 76 | An iterator of GenerationChunks. 77 | """ 78 | raise NotImplementedError("Streaming is not implemented for this wrapper; use invoke() instead.") 79 | 80 | @property 81 | def _identifying_params(self) -> Dict[str, Any]: 82 | """Return a dictionary of identifying parameters.""" 83 | return { 84 | # The model name allows users to specify custom token counting 85 | # rules in LLM monitoring applications (e.g., in LangSmith users 86 | # can provide per token pricing for their model and monitor 87 | # costs for the given LLM.) 88 | "model_name": "Together open source LLM", 89 | } 90 | 91 | @property 92 | def _llm_type(self) -> str: 93 | """Get the type of language model used by this chat model.
Used for logging purposes only.""" 94 | return "Together Open source LLM" -------------------------------------------------------------------------------- /Unit4-Planning/gemini_llm.py: -------------------------------------------------------------------------------- 1 | 2 | from dotenv import load_dotenv 3 | from typing import Any, Dict, Iterator, List, Mapping, Optional 4 | import os 5 | load_dotenv() 6 | 7 | import google.generativeai as genai 8 | GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY') 9 | 10 | genai.configure(api_key=GOOGLE_API_KEY) 11 | 12 | from langchain_core.callbacks.manager import CallbackManagerForLLMRun 13 | from langchain_core.language_models.llms import LLM 14 | from langchain_core.outputs import GenerationChunk 15 | 16 | # Set up the model 17 | generation_config = { 18 | "top_p": 0.95, 19 | "top_k": 40, 20 | "max_output_tokens": 8192, 21 | # "response_mime_type": "application/json", 22 | } 23 | 24 | safety_settings = [ 25 | { 26 | "category": "HARM_CATEGORY_HARASSMENT", 27 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 28 | }, 29 | { 30 | "category": "HARM_CATEGORY_HATE_SPEECH", 31 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 32 | }, 33 | { 34 | "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", 35 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 36 | }, 37 | { 38 | "category": "HARM_CATEGORY_DANGEROUS_CONTENT", 39 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 40 | }, 41 | ] 42 | 43 | 44 | class GeminiLLM(LLM): 45 | """ Runs a Gemini model from Google. """ 46 | 47 | model_name: str = "Gemini LLM" 48 | temperature: float = 0.3 49 | 50 | def __init__(self, model: str = 'gemini-1.5-flash-latest', temperature: Optional[float] = None, response_mime_type: Optional[str] = None, **kwargs: Any): 51 | """ 52 | Initializes the GeminiLLM with specified parameters. 53 | 54 | Args: 55 | model: The name of the model to use. 56 | temperature: The temperature to use for sampling (optional). 57 | **kwargs: Additional keyword arguments to pass to the parent LLM class. 58 | """ 59 | super().__init__(**kwargs) 60 | self.model_name = model # Define model as an attribute 61 | generation_config['temperature'] = temperature if temperature is not None else self.__class__.temperature 62 | if response_mime_type: 63 | generation_config['response_mime_type'] = response_mime_type 64 | 65 | def _call( 66 | self, 67 | prompt: str, 68 | stop: Optional[List[str]] = None, 69 | run_manager: Optional[CallbackManagerForLLMRun] = None, 70 | **kwargs: Any, 71 | ) -> str: 72 | if stop is not None: 73 | raise ValueError("stop kwargs are not permitted.") 74 | gemini_model = genai.GenerativeModel(model_name=self.model_name, generation_config=generation_config) 75 | response = gemini_model.generate_content(prompt) 76 | response_list = response.text 77 | return response_list 78 | 79 | 80 | def _stream( 81 | self, 82 | prompt: str, 83 | stop: Optional[List[str]] = None, 84 | run_manager: Optional[CallbackManagerForLLMRun] = None, 85 | **kwargs: Any, 86 | ) -> Iterator[GenerationChunk]: 87 | """Stream the LLM on the given prompt. 88 | 89 | This method should be overridden by subclasses that support streaming. 90 | 91 | This wrapper does not implement streaming, so calling this method 92 | raises NotImplementedError; use invoke() to get the full completion 93 | as a single string. 94 | 95 | Args: 96 | prompt: The prompt to generate from. 97 | stop: Stop words to use when generating. Model output is cut off at the 98 | first occurrence of any of these substrings. 99 | run_manager: Callback manager for the run.
100 | **kwargs: Arbitrary additional keyword arguments. These are usually passed 101 | to the model provider API call. 102 | 103 | Returns: 104 | An iterator of GenerationChunks. 105 | """ 106 | raise NotImplementedError("Streaming is not implemented for this wrapper; use invoke() instead.") 107 | 108 | @property 109 | def _identifying_params(self) -> Dict[str, Any]: 110 | """Return a dictionary of identifying parameters.""" 111 | return { 112 | # The model name allows users to specify custom token counting 113 | # rules in LLM monitoring applications (e.g., in LangSmith users 114 | # can provide per token pricing for their model and monitor 115 | # costs for the given LLM.) 116 | "model_name": "Gemini LLM", 117 | } 118 | 119 | @property 120 | def _llm_type(self) -> str: 121 | """Get the type of language model used by this chat model. Used for logging purposes only.""" 122 | return "Gemini LLM" -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Welcome to the course on LLM-Agents 2 | This GitHub repo has all the coding exercises, organized by unit. 3 | 4 | ### Installation 5 | The installation steps are: 6 | 1. Git clone this repo - `git clone https://github.com/priya-dwivedi/udemy-llm-agents.git` 7 | 2. Move into the repo directory - `cd udemy-llm-agents` 8 | 3. If using conda - set up a new environment: 9 | `conda create -n llm_agents` 10 | 11 | `conda activate llm_agents` 12 | 13 | `conda install pip` 14 | 15 | 4. Install the dependencies - `pip install -r requirements.txt` 16 | 5. Add an environment file called `.env` 17 | 6. Add your API keys to the `.env` file 18 | 19 | ### Unit 1- Foundation of LLM agents 20 | This unit focuses on the components of an LLM agent and building your first simple agent. 21 | Coding Exercise for the first Self-Ask Agent: 22 | - Run Locally using Notebook: [My_First_Agent.ipynb](./Unit1-Foundation_LLM_Agents/My_First_Agent.ipynb) 23 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1TMRYeWTi7hN1vBH5Y8hn95sIF92rhEH-?usp=sharing) 24 | 25 | 26 | ### Unit 2- LLM Tools 27 | This unit focuses on integrating external tools into an agent. 28 | Coding Exercise for the second unit on Tools: 29 | 1. Langchain Tools 30 | - Run Locally using Notebook: [Langchain-tools.ipynb](./Unit2-Tools/Langchain-tools.ipynb) 31 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1yHxctap6bQeNgHJN3nLmHlyg7buFX3LA?usp=sharing) 32 | 33 | 2. LLM RestAPI tool selection 34 | - Run Locally using Notebook: [LLM_RestAPI_Selection.ipynb](./Unit2-Tools/LLM_RestAPI_Selection.ipynb) 35 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1f7rgCsAuNsWbvgd0BFuIHwYgU4MAziMh?usp=sharing) 36 | 37 | 3. OpenAI Function Calling 38 | - Run Locally using Notebook: [OpenAI-Function-Calling.ipynb](./Unit2-Tools/OpenAI-Function-Calling.ipynb) 39 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1rWZqczP4dBiYWSryHBstSVBkoGflhCJi?usp=sharing) 40 | 41 | ### Unit 3- Memory 42 | This unit focuses on different types of memory and integrating memory into the agent. 43 | Coding Exercise for the third unit on Memory: 44 | 1.
Langchain Short term Memory 45 | - Run Locally using Notebook: [Langchain-Short-term-Memory.ipynb](./Unit3-Memory/Langchain-Short-term-Memory.ipynb) 46 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1WAv7GwDHBrI4GZsGwrD0ssXGT8j7G4_q?usp=sharing) 47 | 48 | 2. RAG pipeline and RAG compared to Long Context LLMs 49 | - Run Locally using Notebook: [RAG_vs_LongContext.ipynb](./Unit3-Memory/RAG_vs_LongContext.ipynb) 50 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Vv39cL0DTwxy1WJB2w2VBFbvh7FuDjxi?usp=sharing) 51 | 52 | 3. Simple Multimodal RAG 53 | - Run Locally using Notebook: [MultiModal_RAG.ipynb](./Unit3-Memory/MultiModal_RAG.ipynb) 54 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1yX2r1u_euYuxODzTyoeFPhCtUgymq7Gh?usp=sharing) 55 | 56 | 4. Knowledge-Agent 57 | Can only be run locally. 58 | Instructions: 59 | * Navigate to the correct directory: `cd Unit3-Memory/knowledge-agent` 60 | * Optionally: delete the current profile: `rm current_profile.json` 61 | * Run the Streamlit interface: `streamlit run chatbot.py` 62 | 63 | ### Unit 4- Planning 64 | This unit focuses on different types of planning and how to improve the agent's accuracy and performance with planning. 65 | Coding Exercise for the fourth unit on Planning: 66 | 1. Planning with task decomposition 67 | - Run Locally using Notebook: [Planning_with_task_decomposition.ipynb](./Unit4-Planning/Planning_with_task_decomposition.ipynb) 68 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1EzQ8-3ubyChDXaX5VRie58Bw5z6hDnkK?usp=sharing) 69 | 70 | 2. Skeleton of Thought Generation 71 | - Run Locally using Notebook: [Skeleton_of_Thought_Generation.ipynb](./Unit4-Planning/Skeleton_of_Thought_Generation.ipynb) 72 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JjgbvuhJdna-6ZgT0MdDOP7iJ-ftcxKr?usp=sharing) 73 | 74 | 3. Basics of Langgraph 75 | - Run Locally using Notebook: [Basics_Langgraph.ipynb](./Unit4-Planning/Basics_Langgraph.ipynb) 76 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1UWYtytGMC4UtEE4LoFmVcMbrf0Wke0jn?usp=sharing) 77 | 78 | 4. Reflection Agent 79 | - Run Locally using Notebook: [Reflection_Agent.ipynb](./Unit4-Planning/Reflection_Agent.ipynb) 80 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/15hi4bAmZflP_Z7pS75FhbbsA8kVkQ2co?usp=sharing) 81 | 82 | 5. Reflexion Agent 83 | - Run Locally using Notebook: [Reflexion_Agent.ipynb](./Unit4-Planning/Reflexion_Agent.ipynb) 84 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1s0VCGf7nboqoBs-Kh-bi2n6Nlo4smQBc?usp=sharing) 85 | 86 | ### Unit 5- Agent Examples 87 | This unit focuses on building more complex agents, combining everything learned so far in the course. 88 | Coding Exercise for the fifth unit on Agent Examples: 89 | 1.
Agentic-RAG 90 | - Run Locally using Notebook: [Agentic-RAG.ipynb](./Unit5-Agent_Examples/Agentic-RAG.ipynb) 91 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/10V6iHz_qnfXwV2bCgLDeSwUNWw8GgzE6?usp=sharing) 92 | 93 | 2. Movie Recommendation Bot 94 | Can only be run locally. 95 | Instructions: 96 | * Navigate to the correct directory: `cd Unit5-Agent_Examples/Movie_Recommendation_Agent` 97 | * Run the Streamlit interface: `streamlit run chatbot.py` 98 | 99 | 3. Coding Assistant 100 | - Run Locally using Notebook: [Coding-Assistant.ipynb](./Unit5-Agent_Examples/Coding-Assistant.ipynb) 101 | - Run on Colab with Link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/122G2pJ9f_-01ROt00Hd9WEM_l25P_aN_?usp=sharing) 102 | 103 | -------------------------------------------------------------------------------- /Unit5-Agent_Examples/movie_recommendation_agent.py: -------------------------------------------------------------------------------- 1 | ### Langchain imports 2 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate 3 | from langchain.chains import LLMChain 4 | from langchain_community.tools import tool 5 | from langchain.agents import AgentExecutor, create_openai_tools_agent 6 | from langchain_openai.chat_models import ChatOpenAI 7 | from langchain.memory import ChatMessageHistory 8 | from langchain_core.messages import SystemMessage, AIMessage, HumanMessage 9 | from langchain_core.runnables.history import RunnableWithMessageHistory 10 | 11 | import pandas as pd 12 | 13 | 14 | class MovieSelectionTool(): 15 | 16 | @tool("Movie_Selector") 17 | def movie_selector(data): 18 | """Call this tool to get a list of movies that match the user's criteria. 19 | The input to this tool is the movie genres and the min and max year of the movie. 20 | All genres should be pipe separated and years should be / separated.
21 | Example if the user is looking for comedy and drama from 1990 to 2000, then input should be `Comedy|Drama/1990/2000` 22 | Example if the user is looking for Romance, Family and Animation from 1995 to 2005 then input should be: Romance|Family|Animation/1995/2005 23 | The output from this tool will be all the movies that match that criteria 24 | """ 25 | try: 26 | genres_piped, min_year, max_year = data.split('/') 27 | min_year = int(min_year) 28 | max_year = int(max_year) 29 | genres = genres_piped.split('|') 30 | print(f"Tool Run with inputs: Genres: {genres} Min Year: {min_year} Max Year: {max_year}") 31 | 32 | ## Step 1: Load CSV 33 | important_columns = ['title', 'movie_genres', 'year', 'rating', 'overview'] 34 | df = pd.read_csv('data/movie_ratings.csv') 35 | df = df.dropna() 36 | df = df[important_columns] 37 | 38 | ## Step 2: Filter based on criteria 39 | #### Filter by selected genres 40 | genres_filter = df['movie_genres'].apply(lambda x: any(item for item in genres if item in x)) 41 | df = df[genres_filter] 42 | #### Filter by year 43 | year_filter = (df['year'] >= min_year) & (df['year'] <= max_year) 44 | df = df[year_filter] 45 | ### Filter by rating 46 | min_rating = 3 47 | df = df[df['rating'] >= min_rating] 48 | 49 | 50 | ## Step 3: Sort DF by number of matched genres 51 | df['matched_genres'] = df['movie_genres'].apply(lambda x: len([item for item in genres if item in x])) 52 | df.sort_values(by='matched_genres', ascending=False, inplace=True) 53 | df = df.head(100) 54 | 55 | ## Step 4: Create data for loading in the model 56 | text_to_llm = '' 57 | for index, row in df.iterrows(): 58 | text_to_llm += f"--- Movie Title {row['title']} --- " 59 | text_to_llm += f"--- Movie Plot {row['overview']} --- " 60 | text_to_llm += f"--- Movie Genres {row['movie_genres']} ---" 61 | text_to_llm += f"--- Movie Release Year {row['year']} ---" 62 | text_to_llm += f"--- Movie Rating {row['rating']} ---" 63 | text_to_llm += '\n' 64 | 65 | return text_to_llm 66 | except Exception as e: 67 | print(e) 68 | return "Error with the input format for the tool." 69 | 70 | movie_tool = MovieSelectionTool.movie_selector 71 | 72 | class Chatbot: 73 | def __init__(self, model='gpt-4o', temperature=0.3, memory=None): 74 | self.llm_rec = ChatOpenAI(model=model, temperature=temperature) 75 | self.system_prompt = ''' You are a friendly assistant who will guide customers to select movies of their choice from a database I have. 76 | You will follow a multi-stage approach to finding the best movies for a customer 77 | 78 | Introduction: Introduce yourself as a movie recommendation bot who will help customers find the best movies from a wide selection of classical movies from 1980 to 2010. 79 | 80 | Stage 1: Understand the customer's interests 81 | Ask only one question at a time. Always end your responses with a question 82 | 1. Ask 2-3 questions to understand the customer's interests and what kind of movies they like 83 | 2. Infer the genre of the movie based on their preferences or explicitly ask it. I have movies from the following genres: 84 | 'Science Fiction', 'Fantasy', 'War', 'Adventure', 'Romance', 'Documentary', 'Family', 'Animation', 'Comedy', 'TV Movie', 'Thriller', 'Drama', 'Crime', 'Mystery', 'Music', 'Horror', 'Action', 'History' 85 | 3. I have movies from 1985-2005.
Understand which decade or range of years they are interested in 86 | 87 | Stage 2: Use the tool you have to get matching movies from the database 88 | The input to the tool is the genres and min and max years 89 | The tool takes the following inputs: 90 | Genres as a list. Genres should be from the above selections 91 | Min and max years as int 92 | All genres should be pipe separated and years should be / separated. 93 | Example if the user is looking for comedy and drama from 1990 to 2000, then input should be `Comedy|Drama/1990/2000` 94 | Example if the user is looking for Romance, Family and Animation from 1995 to 2005 then input should be: Romance|Family|Animation/1995/2005 95 | 96 | 97 | Stage 3: The tool will give you all the movies that match the criteria you shared and are well rated 98 | From this set, you now need to present the top 3 choices to the customer and explain your rationale 99 | Try and match the movies to the type they like 100 | 101 | Stage 4: Adjust your choices if the user doesn't like your recommendation 102 | Ask them about the kind of plot they would like to see and then look for movies with those plots 103 | Present the top 3 choices again 104 | 105 | General rules: 106 | 1. Ask one question at a time. Wait for the user's response before asking the next question 107 | 2. End your responses with questions so you can continue the conversation 108 | ''' 109 | self.memory = memory if memory else ChatMessageHistory(session_id="test-session") 110 | self.tools = [movie_tool] 111 | 112 | def run(self, input): 113 | 114 | 115 | prompt_rec = ChatPromptTemplate.from_messages( 116 | [ 117 | SystemMessage( 118 | content=self.system_prompt 119 | ), 120 | MessagesPlaceholder(variable_name="chat_history"), 121 | MessagesPlaceholder(variable_name="agent_scratchpad"), 122 | 123 | ("user", "{input}"), 124 | ] 125 | ) 126 | # Construct the OpenAI tools agent 127 | recommendation_agent = create_openai_tools_agent(self.llm_rec, self.tools, prompt_rec) 128 | agent_executor = AgentExecutor(agent=recommendation_agent, tools=self.tools) 129 | agent_with_chat_history = RunnableWithMessageHistory( 130 | agent_executor, 131 | # This is needed because in most real world scenarios, a session id is needed 132 | # It isn't really used here because we are using a simple in memory ChatMessageHistory 133 | lambda session_id: self.memory, 134 | input_messages_key="input", 135 | history_messages_key="chat_history", 136 | ) 137 | result = agent_with_chat_history.invoke({'input': input,}, config={"configurable": {"session_id": "1234"}}) 138 | # print("Memory: ", self.memory) 139 | # print("========") 140 | return result['output'] -------------------------------------------------------------------------------- /Unit5-Agent_Examples/Movie_Recommendation_Agent/movie_recommendation_agent.py: -------------------------------------------------------------------------------- 1 | ### Langchain imports 2 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate 3 | from langchain.chains import LLMChain 4 | from langchain_community.tools import tool 5 | from langchain.agents import AgentExecutor, create_openai_tools_agent 6 | from langchain_openai.chat_models import ChatOpenAI 7 | from langchain.memory import ChatMessageHistory 8 | from langchain_core.messages import SystemMessage, AIMessage, HumanMessage 9 | from langchain_core.runnables.history
import RunnableWithMessageHistory 10 | 11 | import pandas as pd 12 | 13 | 14 | class MovieSelectionTool(): 15 | 16 | @tool("Movie_Selector") 17 | def movie_selector(data): 18 | """Call this tool to get a list of movies that match the user's criteria. 19 | The input to this tool is the movie genres and the min and max year of the movie. 20 | All genres should be pipe separated and years should be / separated. 21 | Example if the user is looking for comedy and drama from 1990 to 2000, then input should be `Comedy|Drama/1990/2000` 22 | Example if the user is looking for Romance, Family and Animation from 1995 to 2005 then input should be: Romance|Family|Animation/1995/2005 23 | The output from this tool will be all the movies that match that criteria 24 | """ 25 | try: 26 | genres_piped, min_year, max_year = data.split('/') 27 | min_year = int(min_year) 28 | max_year = int(max_year) 29 | genres = genres_piped.split('|') 30 | print(f"Tool Run with inputs: Genres: {genres} Min Year: {min_year} Max Year: {max_year}") 31 | 32 | ## Step 1: Load CSV 33 | important_columns = ['title', 'movie_genres', 'year', 'rating', 'overview'] 34 | df = pd.read_csv('../data/movie_ratings.csv') 35 | df = df.dropna() 36 | df = df[important_columns] 37 | 38 | ## Step 2: Filter based on criteria 39 | #### Filter by selected genres 40 | genres_filter = df['movie_genres'].apply(lambda x: any(item for item in genres if item in x)) 41 | df = df[genres_filter] 42 | #### Filter by year 43 | year_filter = (df['year'] >= min_year) & (df['year'] <= max_year) 44 | df = df[year_filter] 45 | ### Filter by rating 46 | min_rating = 3 47 | df = df[df['rating'] >= min_rating] 48 | 49 | 50 | ## Step 3: Sort DF by number of matched genres 51 | df['matched_genres'] = df['movie_genres'].apply(lambda x: len([item for item in genres if item in x])) 52 | df.sort_values(by='matched_genres', ascending=False, inplace=True) 53 | df = df.head(100) 54 | 55 | ## Step 4: Create data for loading in the model 56 | text_to_llm = '' 57 | for index, row in df.iterrows(): 58 | text_to_llm += f"--- Movie Title {row['title']} --- " 59 | text_to_llm += f"--- Movie Plot {row['overview']} --- " 60 | text_to_llm += f"--- Movie Genres {row['movie_genres']} ---" 61 | text_to_llm += f"--- Movie Release Year {row['year']} ---" 62 | text_to_llm += f"--- Movie Rating {row['rating']} ---" 63 | text_to_llm += '\n' 64 | 65 | return text_to_llm 66 | except Exception as e: 67 | print(e) 68 | return "Error with the input format for the tool." 69 | 70 | movie_tool = MovieSelectionTool.movie_selector 71 | 72 | class Chatbot: 73 | def __init__(self, model='gpt-4o', temperature=0.3, memory=None): 74 | self.llm_rec = ChatOpenAI(model=model, temperature=temperature) 75 | self.system_prompt = ''' You are a friendly assistant who will guide customers to select movies of their choice from a database I have. 76 | You will follow a multi-stage approach to finding the best movies for a customer 77 | 78 | Introduction: Introduce yourself as a movie recommendation bot who will help customers find the best movies from a wide selection of classical movies from 1980 to 2010. 79 | 80 | Stage 1: Understand the customer's interests 81 | Ask only one question at a time. Always end your responses with a question 82 | 1. Ask 2-3 questions to understand the customer's interests and what kind of movies they like 83 | 2. Infer the genre of the movie based on their preferences or explicitly ask it.
I have movies from the following genres: 84 | 'Science Fiction', 'Fantasy', 'War', 'Adventure', 'Romance', 'Documentary', 'Family', 'Animation', 'Comedy', 'TV Movie', 'Thriller', 'Drama', 'Crime', 'Mystery', 'Music', 'Horror', 'Action', 'History' 85 | 3. I have movies from 1985-2005. Understand which decade or range of years they are interested in 86 | 87 | Stage 2: Use the tool you have to get matching movies from the database 88 | The input to the tool is the genres and min and max years 89 | The tool takes the following inputs: 90 | Genres as a list. Genres should be from the above selections 91 | Min and max years as int 92 | All genres should be pipe separated and years should be / separated. 93 | Example if the user is looking for comedy and drama from 1990 to 2000, then input should be `Comedy|Drama/1990/2000` 94 | Example if the user is looking for Romance, Family and Animation from 1995 to 2005 then input should be: Romance|Family|Animation/1995/2005 95 | 96 | 97 | Stage 3: The tool will give you all the movies that match the criteria you shared and are well rated 98 | From this set, you now need to present the top 3 choices to the customer and explain your rationale 99 | Try and match the movies to the type they like 100 | 101 | Stage 4: Adjust your choices if the user doesn't like your recommendation 102 | Ask them about the kind of plot they would like to see and then look for movies with those plots 103 | Present the top 3 choices again 104 | 105 | General rules: 106 | 1. Ask one question at a time. Wait for the user's response before asking the next question 107 | 2. End your responses with questions so you can continue the conversation 108 | ''' 109 | self.memory = memory if memory else ChatMessageHistory(session_id="test-session") 110 | self.tools = [movie_tool] 111 | 112 | def run(self, input): 113 | 114 | 115 | prompt_rec = ChatPromptTemplate.from_messages( 116 | [ 117 | SystemMessage( 118 | content=self.system_prompt 119 | ), 120 | MessagesPlaceholder(variable_name="chat_history"), 121 | MessagesPlaceholder(variable_name="agent_scratchpad"), 122 | 123 | ("user", "{input}"), 124 | ] 125 | ) 126 | # Construct the OpenAI tools agent 127 | recommendation_agent = create_openai_tools_agent(self.llm_rec, self.tools, prompt_rec) 128 | agent_executor = AgentExecutor(agent=recommendation_agent, tools=self.tools) 129 | agent_with_chat_history = RunnableWithMessageHistory( 130 | agent_executor, 131 | # This is needed because in most real world scenarios, a session id is needed 132 | # It isn't really used here because we are using a simple in memory ChatMessageHistory 133 | lambda session_id: self.memory, 134 | input_messages_key="input", 135 | history_messages_key="chat_history", 136 | ) 137 | result = agent_with_chat_history.invoke({'input': input,}, config={"configurable": {"session_id": "1234"}}) 138 | # print("Memory: ", self.memory) 139 | # print("========") 140 | return result['output'] -------------------------------------------------------------------------------- /Unit3-Memory/knowledge-agent/KnowledgeAgent.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import json 4 | from dotenv import load_dotenv 5 | load_dotenv() 6 | 7 | os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") ## Put your OpenAI API key here 8 | os.environ["TAVILY_API_KEY"] = os.getenv("TAVILY_API_KEY") ## Put
your Tavily Search API key here 9 | os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY") ## Put your Langsmith API key here 10 | os.environ["LANGCHAIN_HUB_API_KEY"] = os.getenv("LANGCHAIN_API_KEY") ## Put your Langsmith API key here 11 | os.environ["LANGCHAIN_TRACING_V2"] = 'true' ## Set this as True 12 | os.environ["LANGCHAIN_ENDPOINT"] = 'https://api.smith.langchain.com/' ## Set this as: https://api.smith.langchain.com/ 13 | os.environ["LANGCHAIN_HUB_API_URL"] = 'https://api.hub.langchain.com' ## Set this as : https://api.hub.langchain.com 14 | os.environ["LANGCHAIN_PROJECT"] = 'llm-agents-memory' 15 | 16 | ## Add Together API Key 17 | os.environ["TOGETHER_API_KEY"] = os.getenv("TOGETHER_API_KEY") 18 | 19 | ### import together 20 | from together_llm import TogetherLLM 21 | 22 | ### Langchain imports 23 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate 24 | from langchain.chains import LLMChain 25 | from langchain_community.tools.tavily_search import TavilySearchResults 26 | from langchain.agents import AgentExecutor, create_openai_tools_agent 27 | from langchain_openai.chat_models import ChatOpenAI 28 | from langchain.memory import ChatMessageHistory 29 | from langchain_core.messages import SystemMessage, AIMessage, HumanMessage 30 | from langchain_core.runnables.history import RunnableWithMessageHistory 31 | 32 | 33 | class Memory: 34 | def __init__(self, model='meta-llama/Llama-3-8b-chat-hf', temperature=0.1, max_tokens=256): 35 | self.llm_memory = TogetherLLM(model=model, temperature=temperature, max_tokens=max_tokens) 36 | self.system_message = ''' You are an expert at extracting relevant details from the user message and adding them to the profile. 37 | Your current profile has information about a person and their interests or dislikes. 38 | Example can be: 39 | Dad: Likes to do gardening. Loved the shovel I bought him last year 40 | 41 | When you get a new message, you can do one of the following: 42 | 43 | 1. Do nothing. No interesting information was presented 44 | 2. Update the current profile for a person. If someone's likes/dislikes change, then keep track of that 45 | 3. Add a new person to the Current Profile. If a new person is mentioned, then keep track of that 46 | 4. Remove a person from the Current Profile. If someone is mentioned as no longer being relevant, then remove them from the profile. 47 | 48 | Important: When you are done, return the Current Profile as a JSON in the format below. 49 | Below is an example of the profile. Not the actual one. 50 | {{ 51 | "Dad" : "Likes Gardening. Loved the shovel gifted last year", 52 | "Mom" : "Has birthday in July. loves hiking" 53 | }} 54 | VERY IMPORTANT: ONLY return the JSON output. If you have no profile return None. 
Don't return any text or explanations 55 | 56 | Current Profile: {current_profile} 57 | 58 | New Message: {input} 59 | ''' 60 | 61 | def load_json_file(self, file_path): 62 | """Load a JSON file from the given path; return an empty dict if the file is missing or invalid.""" 63 | if os.path.exists(file_path): 64 | try: 65 | with open(file_path, 'r') as file: 66 | return json.load(file) 67 | except (FileNotFoundError, json.JSONDecodeError) as e: 68 | print(f"Error loading JSON file: {e}") 69 | else: 70 | print(f"No file found at {file_path}") 71 | return {} 72 | 73 | def run(self, input): 74 | prompt = PromptTemplate( 75 | input_variables=["current_profile", "input"], template=self.system_message) 76 | 77 | # Create an LLMChain using the custom LLM 78 | memory_agent = LLMChain(llm=self.llm_memory, prompt=prompt) 79 | 80 | current_profile = self.load_json_file('current_profile.json') 81 | ## Run the chain and return the results 82 | result = memory_agent.invoke({'input':input, "current_profile": current_profile}) 83 | print("Memory Agent Output: ", result['text']) 84 | try: 85 | output = json.loads(result['text']) 86 | 87 | ## Save this to the profile file 88 | with open('current_profile.json', 'w') as file: 89 | json.dump(output, file) 90 | except json.JSONDecodeError: 91 | print("LLM output was not valid JSON. It is not being saved") 92 | return result 93 | 94 | 95 | class Chatbot: 96 | def __init__(self, model='gpt-4-turbo-preview', temperature=0.3): 97 | self.llm_rec = ChatOpenAI(model=model, temperature=temperature, model_kwargs={"response_format": {"type": "json_object"}}) 98 | self.system_prompt = ''' You are an expert at recommending gifts based on a person's profile of likes and dislikes. 99 | You use the web search tool you have to do a web search and select gifts based on likes/dislikes/budget. 100 | You also have access to everyone's current profile, which stores long-term information about each person. Use it if present; otherwise proceed on your own. 101 | You respond in JSON format with the key "response". Don't respond as a plain string. Example: 102 | {{"response": "If your dad likes gardening then buy him a nice shovel, which will cost around $25"}} 103 | Don't ask the user questions. Just respond with what you have. 104 | ''' 105 | self.memory = ChatMessageHistory(session_id="test-session") 106 | self.tools = [TavilySearchResults(max_results=1)] 107 | 108 | def load_json_file(self, file_path): 109 | """Load a JSON file from the given path; return an empty dict if the file is missing or invalid.""" 110 | if os.path.exists(file_path): 111 | try: 112 | with open(file_path, 'r') as file: 113 | return json.load(file) 114 | except (FileNotFoundError, json.JSONDecodeError) as e: 115 | print(f"Error loading JSON file: {e}") 116 | else: 117 | print(f"No file found at {file_path}") 118 | return {} 119 | 120 | def run(self, input): 121 | ## Load the current profile 122 | current_profile = self.load_json_file('current_profile.json') 123 | 124 | prompt_rec = ChatPromptTemplate.from_messages( 125 | [ 126 | SystemMessage( 127 | content=self.system_prompt 128 | ), 129 | MessagesPlaceholder(variable_name="chat_history"), 130 | MessagesPlaceholder(variable_name="agent_scratchpad"), 131 | 132 | ("user", " Current profile for everyone is: {current_profile}"), 133 | ("user", " Now help me with suggestions for: {input}.
Use the search tool provided to you to get information"), 134 | ] 135 | ) 136 | # Construct the OpenAI tools agent 137 | recommendation_agent = create_openai_tools_agent(self.llm_rec, self.tools, prompt_rec) 138 | agent_executor = AgentExecutor(agent=recommendation_agent, tools=self.tools) 139 | agent_with_chat_history = RunnableWithMessageHistory( 140 | agent_executor, 141 | # This is needed because in most real-world scenarios, a session id is needed 142 | # It isn't really used here because we are using a simple in-memory ChatMessageHistory 143 | lambda session_id: self.memory, 144 | input_messages_key="input", 145 | history_messages_key="chat_history", 146 | ) 147 | result = agent_with_chat_history.invoke({'input': input, 'current_profile': current_profile}, config={"configurable": {"session_id": "1234"}}) 148 | print("Result from the recommendation agent: ", result) 149 | return result['output'] 150 | 151 | if __name__=="__main__": 152 | memory_agent = Memory(model='meta-llama/Llama-3-8b-chat-hf', temperature=0.2) 153 | memory_agent.run("My mom likes to bake. She is passionate about making bread") 154 | 155 | rec_agent = Chatbot() 156 | result = rec_agent.run("Can you recommend some kitchen mixers for my mom") 157 | print(result) -------------------------------------------------------------------------------- /Unit4-Planning/Basics_Langgraph.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## LangGraph Basics\n", 8 | "\n", 9 | "![lang-graph](images/langgraph.jpg)\n", 10 | "\n", 11 | "What is LangGraph?\n", 12 | "\n", 13 | "LangGraph is a library built on top of LangChain, designed to add cyclic computational capabilities to your LLM applications. While LangChain allows you to define chains of computation (Directed Acyclic Graphs or DAGs), LangGraph introduces the ability to add cycles, enabling more complex, agent-like behaviors where you can call an LLM in a loop, asking it what action to take next.\n", 14 | "\n", 15 | "* Stateful Graph: LangGraph revolves around the concept of a stateful graph, where each node in the graph represents a step in your computation, and the graph maintains a state that is passed around and updated as the computation progresses.\n", 16 | "\n", 17 | "* Nodes: Nodes are the building blocks of your LangGraph. Each node represents a function or a computation step. You define nodes to perform specific tasks, such as processing input, making decisions, or interacting with external APIs.\n", 18 | "\n", 19 | "* Edges: Edges connect the nodes in your graph, defining the flow of computation. LangGraph supports conditional edges, allowing you to dynamically determine the next node to execute based on the current state of the graph.\n", 20 | "\n", 21 | "\n", 22 | "Source: https://medium.com/@cplog/introduction-to-langgraph-a-beginners-guide-14f9be027141\n", 23 | "\n", 24 | "\n", 25 | "In LangGraph, both MessageGraph and StateGraph are used to represent and manage the state of a graph during its execution.
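As a minimal sketch of the stateful idea (assuming the same langgraph package used in this notebook; the AgentState and answer_node names here are illustrative, not part of the notebook):

from typing import TypedDict
from langgraph.graph import StateGraph, END

class AgentState(TypedDict):
    question: str
    answer: str

def answer_node(state: AgentState) -> dict:
    # A node returns a partial update that gets merged into the shared state
    return {"answer": f"You asked: {state['question']}"}

builder = StateGraph(AgentState)
builder.add_node("answer", answer_node)
builder.set_entry_point("answer")
builder.add_edge("answer", END)
app = builder.compile()
print(app.invoke({"question": "What is LangGraph?"}))

Both graph types follow this same node-and-edge pattern.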
However, they differ in how they structure and update this state.\n", 26 | "\n", 27 | "### MessageGraph:\n", 28 | "\n", 29 | "* State Structure: The state is a simple list of messages.\n", 30 | "* Update Mechanism: Each node's output (typically a message) is appended to the state list after execution.\n", 31 | "* Ideal Use Case: Best suited for applications involving conversational AI or chatbots, where the state primarily consists of a history of messages exchanged between the user and the AI.\n", 32 | "\n", 33 | "### StateGraph:\n", 34 | "\n", 35 | "* State Structure: The state is a more general dictionary (or object), where different keys can store different types of information relevant to the graph's execution.\n", 36 | "* Update Mechanism: Each node can update any part of the state dictionary based on its output and logic.\n", 37 | "* Ideal Use Case: Offers greater flexibility, making it suitable for a wider range of applications where the state needs to store more complex information beyond just messages, such as user preferences, environment variables, or intermediate results.\n", 38 | "\n", 39 | "#### Key Difference:\n", 40 | "\n", 41 | "The main difference lies in the structure and flexibility of the state. MessageGraph is a specialized version of StateGraph, optimized for scenarios where the state is primarily a conversation history. StateGraph provides more flexibility for storing and updating arbitrary state information." 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "#### Basic Langgraph example" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 7, 54 | "metadata": {}, 55 | "outputs": [ 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "+-----------+ \n", 61 | "| __start__ | \n", 62 | "+-----------+ \n", 63 | " * \n", 64 | " * \n", 65 | " * \n", 66 | "+------------+ \n", 67 | "| greet_node | \n", 68 | "+------------+ \n", 69 | " * \n", 70 | " * \n", 71 | " * \n", 72 | "+-----------+ \n", 73 | "| user_node | \n", 74 | "+-----------+ \n", 75 | " * \n", 76 | " * \n", 77 | " * \n", 78 | " +---------+ \n", 79 | " | __end__ | \n", 80 | " +---------+ \n" 81 | ] 82 | } 83 | ], 84 | "source": [ 85 | "from langgraph.graph import Graph\n", 86 | "\n", 87 | "def greet(input):\n", 88 | " return \"Hello \"\n", 89 | "\n", 90 | "def user(input):\n", 91 | " return input + \"Sam!\"\n", 92 | " \n", 93 | "#Create a graph\n", 94 | "\n", 95 | "workflow = Graph()\n", 96 | "\n", 97 | "#Add python functions as nodes\n", 98 | "workflow.add_node(\"greet_node\", greet)\n", 99 | "workflow.add_node(\"user_node\", user)\n", 100 | "\n", 101 | "#Add an edge\n", 102 | "workflow.add_edge(\"greet_node\", \"user_node\")\n", 103 | "\n", 104 | "workflow.set_entry_point(\"greet_node\")\n", 105 | "workflow.set_finish_point(\"user_node\")\n", 106 | "\n", 107 | "app = workflow.compile()\n", 108 | "app.get_graph().print_ascii()\n" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": 8, 114 | "metadata": {}, 115 | "outputs": [ 116 | { 117 | "name": "stdout", 118 | "output_type": "stream", 119 | "text": [ 120 | "Output from node 'greet_node':\n", 121 | "---\n", 122 | "Hello \n", 123 | "\n", 124 | "---\n", 125 | "\n", 126 | "Output from node 'user_node':\n", 127 | "---\n", 128 | "Hello Sam!\n", 129 | "\n", 130 | "---\n", 131 | "\n" 132 | ] 133 | } 134 | ], 135 | "source": [ 136 | "input = 'Hi'\n", 137 | "for output in app.stream(input):\n", 138 | " for key, value in output.items():\n", 139 | " 
print(f\"Output from node '{key}':\")\n", 140 | " print(\"---\")\n", 141 | " print(value)\n", 142 | " print(\"\\n---\\n\")" 143 | ] 144 | }, 145 | { 146 | "cell_type": "markdown", 147 | "metadata": {}, 148 | "source": [ 149 | "### Langggraph example with LLM Model" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": 9, 155 | "metadata": {}, 156 | "outputs": [ 157 | { 158 | "data": { 159 | "text/plain": [ 160 | "True" 161 | ] 162 | }, 163 | "execution_count": 9, 164 | "metadata": {}, 165 | "output_type": "execute_result" 166 | } 167 | ], 168 | "source": [ 169 | "import os\n", 170 | "from dotenv import load_dotenv\n", 171 | "load_dotenv()" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 10, 177 | "metadata": {}, 178 | "outputs": [], 179 | "source": [ 180 | "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\") ## Put your OpenAI API key here\n", 181 | "os.environ[\"LANGCHAIN_API_KEY\"] = os.getenv(\"LANGCHAIN_API_KEY\") ## Put your Langsmith API key here\n", 182 | "os.environ[\"LANGCHAIN_HUB_API_KEY\"] = os.getenv(\"LANGCHAIN_API_KEY\") ## Put your Langsmith API key here\n", 183 | "os.environ[\"LANGCHAIN_TRACING_V2\"] = 'true' ## Set this as True\n", 184 | "os.environ[\"LANGCHAIN_ENDPOINT\"] = 'https://api.smith.langchain.com/' ## Set this as: https://api.smith.langchain.com/\n", 185 | "os.environ[\"LANGCHAIN_PROJECT\"] = 'llm-langgraph'" 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": 11, 191 | "metadata": {}, 192 | "outputs": [ 193 | { 194 | "name": "stdout", 195 | "output_type": "stream", 196 | "text": [ 197 | " +-----------+ \n", 198 | " | __start__ | \n", 199 | " +-----------+ \n", 200 | " * \n", 201 | " * \n", 202 | " * \n", 203 | " +-------------+ \n", 204 | " | Create_Joke | \n", 205 | " +-------------+ \n", 206 | " * \n", 207 | " * \n", 208 | " * \n", 209 | "+----------------+ \n", 210 | "| Criticize_Joke | \n", 211 | "+----------------+ \n", 212 | " * \n", 213 | " * \n", 214 | " * \n", 215 | " +---------+ \n", 216 | " | __end__ | \n", 217 | " +---------+ \n" 218 | ] 219 | } 220 | ], 221 | "source": [ 222 | "from langgraph.graph import Graph\n", 223 | "from langchain_openai import ChatOpenAI\n", 224 | "\n", 225 | "# Set the model as ChatOpenAI\n", 226 | "model = ChatOpenAI(temperature=0) \n", 227 | "\n", 228 | "def joker(input):\n", 229 | " prompt = f\"Tell me a joke about {input}\"\n", 230 | " response = model.invoke(prompt)\n", 231 | " return response.content\n", 232 | "\n", 233 | "def critic(input):\n", 234 | " prompt = f\"Rate and Criticize this joke: {input}\"\n", 235 | " response = model.invoke(prompt)\n", 236 | " return response.content\n", 237 | " \n", 238 | "#Create a graph\n", 239 | "\n", 240 | "workflow = Graph()\n", 241 | "\n", 242 | "workflow.add_node(\"Create_Joke\", joker)\n", 243 | "workflow.add_node(\"Criticize_Joke\", critic)\n", 244 | "\n", 245 | "workflow.add_edge(\"Create_Joke\", \"Criticize_Joke\")\n", 246 | "\n", 247 | "workflow.set_entry_point(\"Create_Joke\")\n", 248 | "workflow.set_finish_point(\"Criticize_Joke\")\n", 249 | "\n", 250 | "app = workflow.compile()\n", 251 | "app.get_graph().print_ascii()\n" 252 | ] 253 | }, 254 | { 255 | "cell_type": "code", 256 | "execution_count": 12, 257 | "metadata": {}, 258 | "outputs": [ 259 | { 260 | "name": "stdout", 261 | "output_type": "stream", 262 | "text": [ 263 | "Output from node 'Create_Joke':\n", 264 | "---\n", 265 | "Why did the Canadian cross the road?\n", 266 | "\n", 267 | "To get to the Tim Hortons on the other 
side, eh!\n", 268 | "\n", 269 | "---\n", 270 | "\n", 271 | "Output from node 'Criticize_Joke':\n", 272 | "---\n", 273 | "I would rate this joke a 6 out of 10. It plays on the stereotype of Canadians loving Tim Hortons, which can be funny to some people. However, it is a bit predictable and not very original. Adding a more unexpected punchline could make it funnier.\n", 274 | "\n", 275 | "---\n", 276 | "\n" 277 | ] 278 | } 279 | ], 280 | "source": [ 281 | "input = 'Canadians'\n", 282 | "for output in app.stream(input):\n", 283 | " for key, value in output.items():\n", 284 | " print(f\"Output from node '{key}':\")\n", 285 | " print(\"---\")\n", 286 | " print(value)\n", 287 | " print(\"\\n---\\n\")" 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count": null, 293 | "metadata": {}, 294 | "outputs": [], 295 | "source": [] 296 | } 297 | ], 298 | "metadata": { 299 | "kernelspec": { 300 | "display_name": "llm_agents", 301 | "language": "python", 302 | "name": "python3" 303 | }, 304 | "language_info": { 305 | "codemirror_mode": { 306 | "name": "ipython", 307 | "version": 3 308 | }, 309 | "file_extension": ".py", 310 | "mimetype": "text/x-python", 311 | "name": "python", 312 | "nbconvert_exporter": "python", 313 | "pygments_lexer": "ipython3", 314 | "version": "3.12.4" 315 | } 316 | }, 317 | "nbformat": 4, 318 | "nbformat_minor": 2 319 | } 320 | -------------------------------------------------------------------------------- /Unit3-Memory/MultiModal_RAG.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "data": { 10 | "text/plain": [ 11 | "True" 12 | ] 13 | }, 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "output_type": "execute_result" 17 | } 18 | ], 19 | "source": [ 20 | "import os\n", 21 | "from embedchain import App\n", 22 | "from embedchain.models.data_type import DataType\n", 23 | "\n", 24 | "### Load the envtt file\n", 25 | "from dotenv import load_dotenv\n", 26 | "load_dotenv()" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": {}, 32 | "source": [ 33 | "### EmbedChain Library\n", 34 | "\n", 35 | "Documentation: https://docs.embedchain.ai/get-started/quickstart\n", 36 | "\n", 37 | "EmbedChain is an open-source framework that makes it easy to build and deploy retrieval-augmented generation (RAG) applications powered by large language models (LLMs). Its “Conventional but Configurable” approach caters to both software and machine learning engineers.\n", 38 | "\n", 39 | "Key advantages of EmbedChain include:\n", 40 | "* Simplifies RAG Development: Building robust RAG pipelines involves complexities like data integration, chunking, indexing, vector storage, and more. 
EmbedChain streamlines this process.\n", 41 | "* Flexible Architecture: Choose components like LLMs, vector databases, data loaders, chunkers, and retrieval strategies to tailor the pipeline to your needs.\n", 42 | "* Efficient Data Handling: EmbedChain automatically loads data, generates embeddings for relevant chunks, and stores them in your chosen vector database.\n", 43 | "* User-Friendly APIs: Beginners can build LLM apps in just 4 lines of code, while advanced users can deeply customize the RAG pipeline.\n", 44 | "\n", 45 | "The core workflow is straightforward:\n", 46 | "* Add Data: Automatically load, chunk, embed, and index your data sources.\n", 47 | "* Query: Turn user questions into embeddings to retrieve relevant documents." 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "metadata": {}, 53 | "source": [ 54 | "### Config\n", 55 | "Set up your config below.\n", 56 | "You can define your vectordb, embedding, and llm" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": 2, 62 | "metadata": {}, 63 | "outputs": [], 64 | "source": [ 65 | "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\") ## Put your OpenAI API key here" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 3, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "config = {\n", 75 | " 'vectordb': {\n", 76 | " 'provider': 'chroma',\n", 77 | " 'config': {\n", 78 | " 'collection_name': 'rag-collection',\n", 79 | " 'dir': 'db',\n", 80 | " 'allow_reset': True \n", 81 | " }\n", 82 | " },\n", 83 | " 'embedder': {\n", 84 | " 'provider': 'openai',\n", 85 | " 'config': {\n", 86 | " 'model': 'text-embedding-3-small'\n", 87 | " }\n", 88 | " },\n", 89 | " 'llm': {\n", 90 | " 'provider': 'openai',\n", 91 | " 'config': {\n", 92 | " 'model': 'gpt-3.5-turbo-0125',\n", 93 | " 'temperature': 0.5,\n", 94 | " 'top_p': 1,\n", 95 | " 'stream': False,\n", 96 | " 'prompt': (\n", 97 | " \"Use the following pieces of context to answer the query at the end.\\n\"\n", 98 | " \"If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\"\n", 99 | " \"$context\\n\\nQuery: $query\\n\\nHelpful Answer:\"\n", 100 | " ),\n", 101 | " 'system_prompt': (\n", 102 | " \"You are an expert at looking at the provided context and answering user's query.\"\n", 103 | " ),\n", 104 | " }\n", 105 | " }\n", 106 | "}" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": {}, 112 | "source": [ 113 | "### Embed your documents\n", 114 | "\n", 115 | "* Supported Data Sources : https://docs.embedchain.ai/components/data-sources/overview\n", 116 | "* Supported LLM Models: https://docs.embedchain.ai/components/llms" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 4, 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "app = App.from_config(config=config)" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 5, 131 | "metadata": {}, 132 | "outputs": [], 133 | "source": [ 134 | "### Sources about the recently released Llama3 model\n", 135 | "youtube_sources = ['https://www.youtube.com/watch?v=cEHFzvU-pzk', 'https://www.youtube.com/watch?v=8Ul_0jddTU4']\n", 136 | "web_sources = ['https://www.theverge.com/2024/4/18/24134103/llama-3-benchmark-testing-ai-gemma-gemini-mistral']" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": 6, 142 | "metadata": {}, 143 | "outputs": [ 144 | { 145 | "name": "stderr", 146 | "output_type": "stream", 147 | "text": [ 148 | "Inserting 
batches in chromadb: 100%|██████████| 1/1 [00:00<00:00, 1.75it/s]\n" 149 | ] 150 | } 151 | ], 152 | "source": [ 153 | "## Add your sources to the app\n", 154 | "for video in youtube_sources:\n", 155 | " app.add(video, data_type=DataType.YOUTUBE_VIDEO)\n", 156 | "\n", 157 | "for pdf in web_sources:\n", 158 | " app.add(pdf, data_type=DataType.WEB_PAGE)" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": 7, 164 | "metadata": {}, 165 | "outputs": [ 166 | { 167 | "data": { 168 | "text/plain": [ 169 | "'The Llama 3 model is available in different sizes, including an 8 billion parameter model, a 70 billion parameter model, and there is a larger model in training with 405 billion parameters.'" 170 | ] 171 | }, 172 | "execution_count": 7, 173 | "metadata": {}, 174 | "output_type": "execute_result" 175 | } 176 | ], 177 | "source": [ 178 | "app.query(\"What different sizes is the Llama3 model avaialble in?\")" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 8, 184 | "metadata": {}, 185 | "outputs": [ 186 | { 187 | "data": { 188 | "text/plain": [ 189 | "'According to the provided context, Meta claims that in certain benchmarking tests, the Llama 3 8B model outperformed similarly sized models like Mistral 7B. Specifically, in the MMLU benchmark, Llama 3 8B performed significantly better than Gemma 7B and Mistral 7B. So, based on this information, the Llama3-8B model is reported to perform better than the Mistral 7B model in benchmarking tests.'" 190 | ] 191 | }, 192 | "execution_count": 8, 193 | "metadata": {}, 194 | "output_type": "execute_result" 195 | } 196 | ], 197 | "source": [ 198 | "app.query(\"How does Llama3-8B compare to Mistral 7B model?\")" 199 | ] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": {}, 204 | "source": [ 205 | "### Integrating an open source model\n", 206 | "\n", 207 | "Use Together AI to access open source models\n", 208 | "\n", 209 | "Available inference models: https://docs.together.ai/docs/inference-models" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 9, 215 | "metadata": {}, 216 | "outputs": [], 217 | "source": [ 218 | "os.environ[\"TOGETHER_API_KEY\"] = os.getenv(\"TOGETHER_API_KEY\") ## Put your Together API key here" 219 | ] 220 | }, 221 | { 222 | "cell_type": "code", 223 | "execution_count": 10, 224 | "metadata": {}, 225 | "outputs": [], 226 | "source": [ 227 | "### Change the LLM in the config\n", 228 | "\n", 229 | "config = {\n", 230 | " 'vectordb': {\n", 231 | " 'provider': 'chroma',\n", 232 | " 'config': {\n", 233 | " 'collection_name': 'rag-collection-opensource',\n", 234 | " 'dir': 'db',\n", 235 | " 'allow_reset': True \n", 236 | " }\n", 237 | " },\n", 238 | " 'embedder': {\n", 239 | " 'provider': 'openai',\n", 240 | " 'config': {\n", 241 | " 'model': 'text-embedding-3-small'\n", 242 | " }\n", 243 | " },\n", 244 | " 'llm': {\n", 245 | " 'provider': 'together',\n", 246 | " 'config': {\n", 247 | " 'model': 'mistralai/Mistral-7B-Instruct-v0.2',\n", 248 | " 'temperature': 0.5,\n", 249 | " 'top_p': 1,\n", 250 | " 'prompt': (\n", 251 | " \"Use the following pieces of context to answer the query at the end.\\n\"\n", 252 | " \"If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\"\n", 253 | " \"$context\\n\\nQuery: $query\\n\\nHelpful Answer:\"\n", 254 | " )\n", 255 | " }\n", 256 | " }\n", 257 | "}" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 11, 263 | "metadata": {}, 264 | "outputs": [], 265 | 
"source": [ 266 | "app_opensource = App.from_config(config=config)" 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": 12, 272 | "metadata": {}, 273 | "outputs": [ 274 | { 275 | "name": "stderr", 276 | "output_type": "stream", 277 | "text": [ 278 | "Inserting batches in chromadb: 100%|██████████| 1/1 [00:00<00:00, 2.23it/s]\n" 279 | ] 280 | } 281 | ], 282 | "source": [ 283 | "## Add your sources to the app\n", 284 | "for video in youtube_sources:\n", 285 | " app_opensource.add(video, data_type=DataType.YOUTUBE_VIDEO)\n", 286 | "\n", 287 | "for pdf in web_sources:\n", 288 | " app_opensource.add(pdf, data_type=DataType.WEB_PAGE)" 289 | ] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "execution_count": 13, 294 | "metadata": {}, 295 | "outputs": [ 296 | { 297 | "name": "stderr", 298 | "output_type": "stream", 299 | "text": [ 300 | "/Users/pdwivedi/miniconda3/envs/test/lib/python3.12/site-packages/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The class `Together` was deprecated in LangChain 0.0.12 and will be removed in 0.3. An updated version of the class exists in the langchain-together package and should be used instead. To use it run `pip install -U langchain-together` and import as `from langchain_together import Together`.\n", 301 | " warn_deprecated(\n" 302 | ] 303 | }, 304 | { 305 | "data": { 306 | "text/plain": [ 307 | "\" According to Meta's blog post, Llama3-8B performs significantly better than Mistral 7B in the MMLU benchmark, which typically measures general knowledge. However, it is important to note that benchmark testing has its limitations and should be considered alongside other evaluation methods. Human evaluators also marked Llama3-8B higher than Mistral 7B in certain use cases, according to Meta.\"" 308 | ] 309 | }, 310 | "execution_count": 13, 311 | "metadata": {}, 312 | "output_type": "execute_result" 313 | } 314 | ], 315 | "source": [ 316 | "app_opensource.query(\"How does Llama3-8B compare to Mistral 7B model?\")" 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": 14, 322 | "metadata": {}, 323 | "outputs": [ 324 | { 325 | "data": { 326 | "text/plain": [ 327 | "\" Llama3, which was released by Meta, features improvements over its predecessor, Llama2. The new model has a more diverse range of responses, fewer false refusals, better reasoning abilities, and enhanced instruction understanding. Additionally, Llama3 offers both text-based and potentially multimodal responses in the future. Two sizes of Llama3 have been released: an 8B and a 70B model. These models outperform similarly sized models like Google's Gemma and Gemini, Mistral 7B, and Anthropic's Claude 3 in benchmarking tests. Meta is currently training larger versions of Llama3, which will have over 400B parameters and be capable of more complex patterns than the smaller versions. However, Meta did not release a preview of these larger models or compare them to other big models like GPT-4.\"" 328 | ] 329 | }, 330 | "execution_count": 14, 331 | "metadata": {}, 332 | "output_type": "execute_result" 333 | } 334 | ], 335 | "source": [ 336 | "app_opensource.query(\"How does Llama3 architecture differ from Llama2?\")" 337 | ] 338 | }, 339 | { 340 | "cell_type": "markdown", 341 | "metadata": {}, 342 | "source": [ 343 | "## Exercise\n", 344 | "\n", 345 | "- Create your own RAG collection on a different topic. 
It can be anything like your favorite movie or a book\n", 346 | "- Integrate data from a few different sources like PDFs, Webpages, Videos. If there is code involved you can integrate Github too\n", 347 | "- Set an open source model as an LLM\n", 348 | "\n", 349 | "Test how your system does. Change configs for embeddings/retriever/different LLM and observe the difference \n" 350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": null, 355 | "metadata": {}, 356 | "outputs": [], 357 | "source": [] 358 | } 359 | ], 360 | "metadata": { 361 | "kernelspec": { 362 | "display_name": "llm_agents", 363 | "language": "python", 364 | "name": "python3" 365 | }, 366 | "language_info": { 367 | "codemirror_mode": { 368 | "name": "ipython", 369 | "version": 3 370 | }, 371 | "file_extension": ".py", 372 | "mimetype": "text/x-python", 373 | "name": "python", 374 | "nbconvert_exporter": "python", 375 | "pygments_lexer": "ipython3", 376 | "version": "3.12.4" 377 | } 378 | }, 379 | "nbformat": 4, 380 | "nbformat_minor": 2 381 | } 382 | -------------------------------------------------------------------------------- /Unit1-Foundation_LLM_Agents/My_First_Agent.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "kU02uIts0zQU" 7 | }, 8 | "source": [ 9 | "### Set your credentials" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import os\n", 19 | "from dotenv import load_dotenv" 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "### Set Credentials" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 2, 32 | "metadata": {}, 33 | "outputs": [ 34 | { 35 | "data": { 36 | "text/plain": [ 37 | "True" 38 | ] 39 | }, 40 | "execution_count": 2, 41 | "metadata": {}, 42 | "output_type": "execute_result" 43 | } 44 | ], 45 | "source": [ 46 | "load_dotenv()" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 3, 52 | "metadata": { 53 | "id": "8YSqbVwcXBeM" 54 | }, 55 | "outputs": [], 56 | "source": [ 57 | "\n", 58 | "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\") ## Put your OpenAI API key here\n", 59 | "os.environ[\"TAVILY_API_KEY\"] = os.getenv(\"TAVILY_API_KEY\") ## Put your Tavily Search API key here\n", 60 | "os.environ[\"LANGCHAIN_API_KEY\"] = os.getenv(\"LANGCHAIN_API_KEY\") ## Put your Langsmith API key here\n", 61 | "os.environ[\"LANGCHAIN_HUB_API_KEY\"] = os.getenv(\"LANGCHAIN_API_KEY\") ## Put your Langsmith API key here\n", 62 | "os.environ[\"LANGCHAIN_TRACING_V2\"] = 'true' ## Set this as True\n", 63 | "os.environ[\"LANGCHAIN_ENDPOINT\"] = 'https://api.smith.langchain.com/' ## Set this as: https://api.smith.langchain.com/\n", 64 | "os.environ[\"LANGCHAIN_HUB_API_URL\"] = 'https://api.hub.langchain.com' ## Set this as : https://api.hub.langchain.com\n", 65 | "os.environ[\"LANGCHAIN_PROJECT\"] = 'llm-agents-intro'" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": { 71 | "id": "ai053z993oT9" 72 | }, 73 | "source": [ 74 | "## Self Ask with Search Agent\n", 75 | "\n", 76 | "This agent breaks down a complex question into a series of simpler questions and uses a search tool to look up answers to the simpler questions in order to answer the original complex question. 
It's suitable for scenarios where the question is complex and needs to be broken down.\n", 77 | "\n", 78 | "This is the source paper for [Self-Ask](https://ofir.io/self-ask.pdf)\n", 79 | "\n", 80 | "\n" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": { 86 | "id": "LISYHlMJ5WWz" 87 | }, 88 | "source": [ 89 | "Difference between Chain of Thought and Self Ask\n", 90 | "\n", 91 | "![self-ask.png](images/selfask.png)\n" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": { 97 | "id": "2obobQ5G4tzE" 98 | }, 99 | "source": [ 100 | "Let's review [self-ask with search](https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search) as it's defined in Langchain." 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 4, 106 | "metadata": { 107 | "id": "QH0GCKgcAf5O" 108 | }, 109 | "outputs": [], 110 | "source": [ 111 | "from langchain_community.tools.tavily_search import TavilyAnswer\n", 112 | "from langchain.agents import AgentExecutor, create_self_ask_with_search_agent\n", 113 | "from langchain import hub" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": 5, 119 | "metadata": { 120 | "colab": { 121 | "base_uri": "https://localhost:8080/", 122 | "height": 137 123 | }, 124 | "id": "Z1Tv2Qqr5dVb", 125 | "outputId": "445e2367-2efc-4e6f-c20e-938b5d3293c5" 126 | }, 127 | "outputs": [ 128 | { 129 | "data": { 130 | "text/plain": [ 131 | "'Question: Who lived longer, Muhammad Ali or Alan Turing?\\nAre follow up questions needed here: Yes.\\nFollow up: How old was Muhammad Ali when he died?\\nIntermediate answer: Muhammad Ali was 74 years old when he died.\\nFollow up: How old was Alan Turing when he died?\\nIntermediate answer: Alan Turing was 41 years old when he died.\\nSo the final answer is: Muhammad Ali\\n\\nQuestion: When was the founder of craigslist born?\\nAre follow up questions needed here: Yes.\\nFollow up: Who was the founder of craigslist?\\nIntermediate answer: Craigslist was founded by Craig Newmark.\\nFollow up: When was Craig Newmark born?\\nIntermediate answer: Craig Newmark was born on December 6, 1952.\\nSo the final answer is: December 6, 1952\\n\\nQuestion: Who was the maternal grandfather of George Washington?\\nAre follow up questions needed here: Yes.\\nFollow up: Who was the mother of George Washington?\\nIntermediate answer: The mother of George Washington was Mary Ball Washington.\\nFollow up: Who was the father of Mary Ball Washington?\\nIntermediate answer: The father of Mary Ball Washington was Joseph Ball.\\nSo the final answer is: Joseph Ball\\n\\nQuestion: Are both the directors of Jaws and Casino Royale from the same country?\\nAre follow up questions needed here: Yes.\\nFollow up: Who is the director of Jaws?\\nIntermediate answer: The director of Jaws is Steven Spielberg.\\nFollow up: Where is Steven Spielberg from?\\nIntermediate answer: The United States.\\nFollow up: Who is the director of Casino Royale?\\nIntermediate answer: The director of Casino Royale is Martin Campbell.\\nFollow up: Where is Martin Campbell from?\\nIntermediate answer: New Zealand.\\nSo the final answer is: No\\n\\nQuestion: {input}\\nAre followup questions needed here:{agent_scratchpad}'" 132 | ] 133 | }, 134 | "execution_count": 5, 135 | "metadata": {}, 136 | "output_type": "execute_result" 137 | } 138 | ], 139 | "source": [ 140 | "# Get the prompt to use - you can modify this!\n", 141 | "prompt = hub.pull(\"hwchase17/self-ask-with-search\")\n", 142 | "prompt.template" 143 | ] 144 | }, 
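Before handing this prompt to LangChain's prebuilt agent below, it can help to see the loop the prompt implies. Here is a rough hand-rolled sketch, not the LangChain implementation; llm_complete and search_answer are hypothetical stand-ins for an LLM completion call and the search tool:

def self_ask(question, llm_complete, search_answer, max_steps=5):
    # The few-shot prompt above teaches the model to either ask a
    # "Follow up:" question or to emit "So the final answer is: ..."
    transcript = f"Question: {question}\nAre follow up questions needed here:"
    for _ in range(max_steps):
        step = llm_complete(transcript)  # generate up to the next follow-up or final answer
        transcript += step
        if "So the final answer is:" in step:
            return step.split("So the final answer is:")[-1].strip()
        if "Follow up:" in step:
            follow_up = step.split("Follow up:")[-1].strip()
            transcript += f"\nIntermediate answer: {search_answer(follow_up)}\n"
    return None  # no final answer within the step budget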
145 | { 146 | "cell_type": "markdown", 147 | "metadata": { 148 | "id": "HGuwxTytAKMc" 149 | }, 150 | "source": [ 151 | "### Set up your LLM" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 6, 157 | "metadata": { 158 | "id": "pClvcR0d5vuB" 159 | }, 160 | "outputs": [], 161 | "source": [ 162 | "from langchain_openai import ChatOpenAI\n", 163 | "llm = ChatOpenAI(model='gpt-3.5-turbo',\n", 164 | " temperature=0.3)" 165 | ] 166 | }, 167 | { 168 | "cell_type": "markdown", 169 | "metadata": { 170 | "id": "S9bGYfDI__7x" 171 | }, 172 | "source": [ 173 | "### Set up your Search Tools\n", 174 | "\n", 175 | "We will initialize the tools we want to use. This is a good tool because it gives us answers (not documents)\n", 176 | "\n", 177 | "For this agent, only one tool can be used and it needs to be named “Intermediate Answer”" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": 7, 183 | "metadata": { 184 | "id": "brhV4mXsAA7a" 185 | }, 186 | "outputs": [], 187 | "source": [ 188 | "tools = [TavilyAnswer(max_results=1, name=\"Intermediate Answer\")]" 189 | ] 190 | }, 191 | { 192 | "cell_type": "markdown", 193 | "metadata": { 194 | "id": "2QhopqtBAapV" 195 | }, 196 | "source": [ 197 | "### Setup and run your agent" 198 | ] 199 | }, 200 | { 201 | "cell_type": "markdown", 202 | "metadata": { 203 | "id": "pXFBflQhDN62" 204 | }, 205 | "source": [ 206 | "![agent-steps](images/key-agent-steps-first-agent.png)\n" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": 8, 212 | "metadata": { 213 | "id": "B0fSgBQaAZky" 214 | }, 215 | "outputs": [], 216 | "source": [ 217 | "# Construct the Self Ask With Search Agent\n", 218 | "agent = create_self_ask_with_search_agent(llm, tools, prompt)\n", 219 | "\n", 220 | "# Create an agent executor by passing in the agent and tools\n", 221 | "agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, max_iterations=8)" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": 9, 227 | "metadata": {}, 228 | "outputs": [ 229 | { 230 | "name": "stdout", 231 | "output_type": "stream", 232 | "text": [ 233 | "\n", 234 | "\n", 235 | "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", 236 | "\u001b[32;1m\u001b[1;3mCould not parse output: Yes.\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3mFollow up: Who is the founder of Anthropic?\u001b[0m\u001b[36;1m\u001b[1;3mThe founder of Anthropic is Dario Amodei, who is also the co-founder and CEO of the company. 
He is an Italian-American artificial intelligence researcher and entrepreneur.\u001b[0m\u001b[32;1m\u001b[1;3mCould not parse output: Follow up: When was Dario Amodei born?\n", 237 | "\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3mCould not parse output: Final answer: Invalid or incomplete response\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3mCould not parse output: Final answer: Invalid or incomplete response\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3mCould not parse output: Final answer: Invalid or incomplete response\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3mCould not parse output: Could not parse output: Final answer: Invalid or incomplete response\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3mCould not parse output: Could not parse output: Final answer: Invalid or incomplete response\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3m\u001b[0m\n", 238 | "\n", 239 | "\u001b[1m> Finished chain.\u001b[0m\n" 240 | ] 241 | } 242 | ], 243 | "source": [ 244 | "response = agent_executor.invoke({'input': 'Which year was the founder of Anthropic born in'})" 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": 10, 250 | "metadata": { 251 | "colab": { 252 | "base_uri": "https://localhost:8080/" 253 | }, 254 | "id": "24xNM35KEGyT", 255 | "outputId": "b54b4094-90eb-4733-bb71-72d4a48889ca" 256 | }, 257 | "outputs": [ 258 | { 259 | "name": "stdout", 260 | "output_type": "stream", 261 | "text": [ 262 | "\n", 263 | "\n", 264 | "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", 265 | "\u001b[32;1m\u001b[1;3mYes. \n", 266 | "Follow up: When was superconductivity discovered?\u001b[0m\u001b[36;1m\u001b[1;3mSuperconductivity was discovered in various materials at different temperatures over the years. Lead was discovered to exhibit superconductivity at 7 K in 1913, niobium at 10 K in the 1930s, and niobium nitride at 16 K in 1941. 
The discovery of high-temperature superconductors, such as the lanthanum-based cuprate perovskite material with a transition temperature of 35 K, was made in 1986.\u001b[0m\u001b[32;1m\u001b[1;3mFollow up: Who was the president of the US in 1986?\u001b[0m\u001b[36;1m\u001b[1;3mRonald Reagan was the President of the United States in 1986.\u001b[0m\u001b[32;1m\u001b[1;3mSo the final answer is: Ronald Reagan\u001b[0m\n", 267 | "\n", 268 | "\u001b[1m> Finished chain.\u001b[0m\n" 269 | ] 270 | }, 271 | { 272 | "data": { 273 | "text/plain": [ 274 | "{'input': 'Who was the president of the US when superconductivity was discovered?',\n", 275 | " 'output': 'Ronald Reagan'}" 276 | ] 277 | }, 278 | "execution_count": 10, 279 | "metadata": {}, 280 | "output_type": "execute_result" 281 | } 282 | ], 283 | "source": [ 284 | "agent_executor.invoke(\n", 285 | " {\"input\": \"Who was the president of the US when superconductivity was discovered?\"}\n", 286 | ")" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 11, 292 | "metadata": { 293 | "colab": { 294 | "base_uri": "https://localhost:8080/" 295 | }, 296 | "id": "p9j5NgtHAtmg", 297 | "outputId": "2cebcf88-636c-49f5-a0c7-e1061e13bf59" 298 | }, 299 | "outputs": [ 300 | { 301 | "name": "stdout", 302 | "output_type": "stream", 303 | "text": [ 304 | "\n", 305 | "\n", 306 | "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", 307 | "\u001b[32;1m\u001b[1;3mCould not parse output: No.\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3mFollow up: Which is the highest mountain in Asia?\u001b[0m\u001b[36;1m\u001b[1;3mMount Everest is the highest mountain in Asia, reaching an elevation of 29,032 feet (8,849 meters).\u001b[0m\u001b[32;1m\u001b[1;3mSo the final answer is: Mount Everest\u001b[0m\n", 308 | "\n", 309 | "\u001b[1m> Finished chain.\u001b[0m\n" 310 | ] 311 | }, 312 | { 313 | "data": { 314 | "text/plain": [ 315 | "{'input': 'I want to hike the highest mountain in Asia. How long will it take me to hike it?',\n", 316 | " 'output': 'Mount Everest'}" 317 | ] 318 | }, 319 | "execution_count": 11, 320 | "metadata": {}, 321 | "output_type": "execute_result" 322 | } 323 | ], 324 | "source": [ 325 | "agent_executor.invoke(\n", 326 | " {\"input\": \"I want to hike the highest mountain in Asia. How long will it take me to hike it?\"}\n", 327 | ")" 328 | ] 329 | }, 330 | { 331 | "cell_type": "markdown", 332 | "metadata": {}, 333 | "source": [ 334 | "### Exercise\n", 335 | "\n", 336 | "- Test out the self-ask-with-search agent on your own questions\n", 337 | "- Where does it work, where does it fail?\n", 338 | "- What are the limitations of this approach?"
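When probing where the agent fails, it can also help to capture the intermediate steps rather than just the final output. A small sketch using the agent and tools defined above together with AgentExecutor's return_intermediate_steps flag (the question shown is just an example):

debug_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=8,
    return_intermediate_steps=True,  # surface each (action, observation) pair
)
result = debug_executor.invoke({"input": "Which city hosted the first modern Olympics?"})
for action, observation in result["intermediate_steps"]:
    print(action.tool, "->", observation)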
339 | ] 340 | }, 341 | { 342 | "cell_type": "markdown", 343 | "metadata": {}, 344 | "source": [] 345 | } 346 | ], 347 | "metadata": { 348 | "colab": { 349 | "provenance": [] 350 | }, 351 | "kernelspec": { 352 | "display_name": "Python 3", 353 | "name": "python3" 354 | }, 355 | "language_info": { 356 | "codemirror_mode": { 357 | "name": "ipython", 358 | "version": 3 359 | }, 360 | "file_extension": ".py", 361 | "mimetype": "text/x-python", 362 | "name": "python", 363 | "nbconvert_exporter": "python", 364 | "pygments_lexer": "ipython3", 365 | "version": "3.12.4" 366 | } 367 | }, 368 | "nbformat": 4, 369 | "nbformat_minor": 0 370 | } 371 | -------------------------------------------------------------------------------- /Unit4-Planning/Planning_with_task_decomposition.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Planning with task decomposition\n", 8 | "\n", 9 | "In this notebook, we will look at several methods of planning that allow you to decompose a complex task into smaller pieces.\n", 10 | "\n", 11 | "Methods that we will cover include\n", 12 | "\n", 13 | "1. Chain of Thought prompting (COT)\n", 14 | "2. Tree of Thought prompting (TOT)\n", 15 | "3. Plan and Solve" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 1, 21 | "metadata": {}, 22 | "outputs": [ 23 | { 24 | "data": { 25 | "text/plain": [ 26 | "True" 27 | ] 28 | }, 29 | "execution_count": 1, 30 | "metadata": {}, 31 | "output_type": "execute_result" 32 | } 33 | ], 34 | "source": [ 35 | "import os\n", 36 | "from dotenv import load_dotenv\n", 37 | "load_dotenv()" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\") ## Put your OpenAI API key here\n", 47 | "os.environ[\"TAVILY_API_KEY\"] = os.getenv(\"TAVILY_API_KEY\") ## Put your Tavily Search API key here\n", 48 | "os.environ[\"LANGCHAIN_API_KEY\"] = os.getenv(\"LANGCHAIN_API_KEY\") ## Put your Langsmith API key here\n", 49 | "os.environ[\"LANGCHAIN_HUB_API_KEY\"] = os.getenv(\"LANGCHAIN_API_KEY\") ## Put your Langsmith API key here\n", 50 | "os.environ[\"LANGCHAIN_TRACING_V2\"] = 'true' ## Set this as True\n", 51 | "os.environ[\"LANGCHAIN_ENDPOINT\"] = 'https://api.smith.langchain.com/' ## Set this as: https://api.smith.langchain.com/\n", 52 | "os.environ[\"LANGCHAIN_HUB_API_URL\"] = 'https://api.hub.langchain.com' ## Set this as : https://api.hub.langchain.com\n", 53 | "os.environ[\"LANGCHAIN_PROJECT\"] = 'llm-agents-planning'" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "## Chain of Thought Prompting\n", 61 | "\n", 62 | "Learn More: https://deepgram.com/learn/chain-of-thought-prompting-guide\n", 63 | "\n", 64 | "![COT](images/cot.jpeg)" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 3, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "open_source_model = 'mistralai/Mistral-7B-Instruct-v0.3'" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 4, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "## Add Together API Key\n", 83 | "os.environ[\"TOGETHER_API_KEY\"] = os.getenv(\"TOGETHER_API_KEY\")\n", 84 | "### import together\n", 85 | "from together_llm import TogetherLLM\n", 86 | "## Langchain imports\n", 87 | "from langchain_openai import OpenAI\n" 
88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 5, 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "llm = TogetherLLM(model=open_source_model, temperature=0.3, max_tokens=800)" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": 6, 102 | "metadata": {}, 103 | "outputs": [ 104 | { 105 | "name": "stdout", 106 | "output_type": "stream", 107 | "text": [ 108 | " The two trains will meet 640 km away from City A.\n", 109 | "\n", 110 | "Here are the steps to solve the problem:\n", 111 | "\n", 112 | "1. Let x be the distance the faster train travels in the two hours before the slower train starts moving.\n", 113 | "2. In those two hours, the slower train remains stationary, so the combined distance the faster train travels is equal to the distance between the cities minus the distance the slower train needs to travel to meet the faster train.\n", 114 | "3. So, we have the equation: 120t + 120(2) = 960 - 80(t - 2), where t is the time in hours it takes for the trains to meet after both start moving.\n", 115 | "4. Simplifying the equation, we get: 120t + 240 = 960 - 80t + 160.\n", 116 | "5. Combining like terms, we get: 240 + 80t = 960 - 120t.\n", 117 | "6. Solving for t, we get: t = (960 - 240) / (120 + 80) = 720 / 200 = 3.6 hours.\n", 118 | "7. Now that we know the time it takes for the trains to meet, we can find the distance the faster train travels before the slower train starts moving: 120 * 3.6 = 432 km.\n", 119 | "8. Since the trains meet after traveling an additional 640 - 432 = 208 km together, they meet 208 km away from City A.\n" 120 | ] 121 | } 122 | ], 123 | "source": [ 124 | "from langchain.prompts import PromptTemplate\n", 125 | "\n", 126 | "\n", 127 | "# Template for the problem and the desired format of the answer\n", 128 | "template = \"\"\"Solve the following problem step by step. First give the answer and then the steps.\n", 129 | "\n", 130 | "{problem}\n", 131 | "\"\"\"\n", 132 | "prompt = PromptTemplate(template=template, input_variables=[\"problem\"])\n", 133 | "\n", 134 | "cot_chain = prompt | llm\n", 135 | "math_problem = '''\n", 136 | "A train leaves City A traveling towards City B at a speed of 120 km/h.\n", 137 | " Two hours later, a slower train leaves City B traveling towards City A at a speed of 80 km/h.\n", 138 | " If the distance between the two cities is 960 km, how far from City A will the two trains meet?\n", 139 | "'''\n", 140 | "\n", 141 | "# Use the chain to generate the result\n", 142 | "result = cot_chain.invoke({\"problem\": math_problem})\n", 143 | "print(result)\n" 144 | ] 145 | }, 146 | { 147 | "cell_type": "markdown", 148 | "metadata": {}, 149 | "source": [ 150 | "### Chain of Thought with Self Consistency\n", 151 | "- Run the problem several times and take the most common answer\n", 152 | "\n", 153 | "![COT-SC](images/cot-sc.jpg)" 154 | ] 155 | }, 156 | { 157 | "cell_type": "markdown", 158 | "metadata": {}, 159 | "source": [ 160 | "### COT with Examples:\n", 161 | "\n", 162 | "cot_prompt = '''Solve the following problem step-by-step, clearly showing your calculations and reasoning.\n", 163 | "Problem: {math_problem}\n", 164 | "\n", 165 | "Example of a math problem solved step by step\n", 166 | "\n", 167 | "\"Solve the following problem step-by-step, clearly showing your calculations and reasoning.\n", 168 | "Problem: A group of friends went to a restaurant. The total bill was $120, and they wanted to leave a 15% tip. 
How much should each person pay if there were 5 friends splitting the bill and tip evenly?\"\n", 169 | "\n", 170 | "Response:\n", 171 | "\n", 172 | "Step 1: Calculate the tip amount.\n", 173 | "\n", 174 | "15% of the $120 bill is (15/100) * $120 = $18.\n", 175 | "Step 2: Calculate the total cost including the tip.\n", 176 | "\n", 177 | "The bill plus tip is $120 + $18 = $138.\n", 178 | "Step 3: Divide the total cost by the number of friends.\n", 179 | "\n", 180 | "Each friend should pay $138 / 5 friends = $27.60.\n", 181 | "Therefore, each of the 5 friends should pay $27.60 to cover the bill and tip evenly..'''" 182 | ] 183 | }, 184 | { 185 | "cell_type": "markdown", 186 | "metadata": {}, 187 | "source": [ 188 | "## Plan and Solve\n", 189 | "\n", 190 | "Paper: https://arxiv.org/pdf/2305.04091v3\n", 191 | "\n", 192 | "![Plan_Solve](images/plan-solve.png)" 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": 7, 198 | "metadata": {}, 199 | "outputs": [ 200 | { 201 | "name": "stdout", 202 | "output_type": "stream", 203 | "text": [ 204 | " Step 1: Understand the problem\n", 205 | "- We have two trains traveling towards each other.\n", 206 | "- Train A leaves City A at a speed of 120 km/h.\n", 207 | "- Train B leaves City B (which is 960 km away from City A) at a speed of 80 km/h, 2 hours after Train A has left.\n", 208 | "- We need to find out how far from City A the two trains will meet.\n", 209 | "\n", 210 | "Step 2: Convert the time difference\n", 211 | "- Since Train B leaves 2 hours after Train A, we can say that Train A has traveled for 2 hours before Train B starts moving.\n", 212 | "\n", 213 | "Step 3: Calculate the distance covered by Train A in 2 hours\n", 214 | "- Distance = Speed * Time\n", 215 | "- Distance covered by Train A = 120 km/h * 2 hours = 240 km\n", 216 | "\n", 217 | "Step 4: Calculate the combined speed of both trains\n", 218 | "- Combined speed = Speed of Train A + Speed of Train B = 120 km/h + 80 km/h = 200 km/h\n", 219 | "\n", 220 | "Step 5: Calculate the remaining distance between the trains\n", 221 | "- Remaining distance = Total distance - Distance covered by Train A = 960 km - 240 km = 720 km\n", 222 | "\n", 223 | "Step 6: Calculate the time it takes for the trains to meet\n", 224 | "- Time = Remaining distance / Combined speed = 720 km / 200 km/h = 3.6 hours\n", 225 | "\n", 226 | "Step 7: Calculate the distance traveled by both trains together in the remaining 3.6 hours\n", 227 | "- Distance = Speed * Time = 200 km/h * 3.6 hours = 720 km\n", 228 | "\n", 229 | "Step 8: Add the distance covered by Train A and the distance traveled by both trains together to find the final meeting point\n", 230 | "- Final meeting point = Distance covered by Train A + Distance traveled by both trains together = 240 km + 720 km = 960 km\n", 231 | "\n", 232 | "However, this result is the total distance between the cities, not the distance from City A to the meeting point. 
Since the trains meet 3.6 hours after Train A leaves, they meet after traveling for 3.6 hours from City A.\n", 233 | "\n", 234 | "Step 9: Calculate the distance traveled by Train A in the 3.6 hours\n", 235 | "- Distance = Speed * Time = 120 km/h * 3.6 hours = 432 km\n", 236 | "\n", 237 | "Step 10: Subtract the distance traveled by Train A from the total distance to find the distance from City A to the meeting point\n", 238 | "- Distance from City A to the meeting point = Total distance - Distance traveled by Train A = 960 km - 432 km = 528 km\n", 239 | "\n", 240 | "So, the two trains will meet 528 km from City A.\n" 241 | ] 242 | } 243 | ], 244 | "source": [ 245 | "# Template for the problem and the desired format of the answer\n", 246 | "template = \"\"\"Solve the following problem by first devising a step by step plan for solving the problem. Then carry out the plan step by step\n", 247 | "\n", 248 | "{problem}\n", 249 | "\"\"\"\n", 250 | "prompt = PromptTemplate(template=template, input_variables=[\"problem\"])\n", 251 | "\n", 252 | "plan_solve_chain = prompt | llm\n", 253 | "math_problem = '''\n", 254 | "A train leaves City A traveling towards City B at a speed of 120 km/h.\n", 255 | " Two hours later, a slower train leaves City B traveling towards City A at a speed of 80 km/h.\n", 256 | " If the distance between the two cities is 960 km, how far from City A will the two trains meet?\n", 257 | "'''\n", 258 | "\n", 259 | "# Use the chain to generate the result\n", 260 | "result = plan_solve_chain.invoke({\"problem\": math_problem})\n", 261 | "print(result)" 262 | ] 263 | }, 264 | { 265 | "cell_type": "markdown", 266 | "metadata": {}, 267 | "source": [ 268 | "## Tree of Thought\n", 269 | "\n", 270 | "Read more about it here: https://www.promptingguide.ai/techniques/tot\n", 271 | "\n", 272 | "![TOT](images/tot.png)" 273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": 8, 278 | "metadata": {}, 279 | "outputs": [ 280 | { 281 | "name": "stdout", 282 | "output_type": "stream", 283 | "text": [ 284 | "Step1 :\n", 285 | " \n", 286 | "I have a problem related to {input}. Could you brainstorm three distinct solutions? Please consider a variety of factors such as {perfect_factors}\n", 287 | "A:\n", 288 | "Step 2:\n", 289 | "\n", 290 | "For each of the three proposed solutions, evaluate their potential. Consider their pros and cons, initial effort needed, implementation difficulty, potential challenges, and the expected outcomes. Assign a probability of success and a confidence level to each option based on these factors\n", 291 | "\n", 292 | "{solutions}\n", 293 | "\n", 294 | "A:\n", 295 | "Step 3:\n", 296 | "\n", 297 | "For each solution, deepen the thought process. Generate potential scenarios, strategies for implementation, any necessary partnerships or resources, and how potential obstacles might be overcome. Also, consider any potential unexpected outcomes and how they might be handled.\n", 298 | "\n", 299 | "{review}\n", 300 | "\n", 301 | "A:\n", 302 | "Step 4:\n", 303 | "\n", 304 | "Based on the evaluations and scenarios, rank the solutions in order of promise. 
Provide a justification for each ranking and offer any final thoughts or considerations for each solution\n", 305 | "{deepen_thought_process}\n", 306 | "\n", 307 | "A:\n" 308 | ] 309 | } 310 | ], 311 | "source": [ 312 | "# set the LANGCHAIN_API_KEY environment variable (create key in settings)\n", 313 | "from langchain import hub\n", 314 | "\n", 315 | "tot_step1 = hub.pull(\"rachnogstyle/nlw_jan24_cot_step1\")\n", 316 | "tot_step2 = hub.pull(\"rachnogstyle/nlw_jan24_cot_step2\")\n", 317 | "tot_step3 = hub.pull(\"rachnogstyle/nlw_jan24_cot_step3\")\n", 318 | "tot_step4 = hub.pull(\"rachnogstyle/nlw_jan24_cot_step4\")\n", 319 | "\n", 320 | "print(tot_step1.template)\n", 321 | "print(tot_step2.template)\n", 322 | "print(tot_step3.template)\n", 323 | "print(tot_step4.template)" 324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "execution_count": 11, 329 | "metadata": {}, 330 | "outputs": [], 331 | "source": [ 332 | "from langchain_openai import ChatOpenAI\n", 333 | "from langchain.schema.runnable import RunnablePassthrough\n", 334 | "\n", 335 | "\n", 336 | "llm_openai = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0.2)" 337 | ] 338 | }, 339 | { 340 | "cell_type": "code", 341 | "execution_count": 15, 342 | "metadata": {}, 343 | "outputs": [], 344 | "source": [ 345 | "\n", 346 | "prompt1 = PromptTemplate(template=tot_step1.template, input_variables=[\"input\", \"perfect_factors\"])\n", 347 | "prompt2 = PromptTemplate(template=tot_step2.template, input_variables=[\"solutions\"])\n", 348 | "prompt3 = PromptTemplate(template=tot_step3.template, input_variables=[\"review\"])\n", 349 | "prompt4 = PromptTemplate(template=tot_step4.template, input_variables=[\"deepen_thought_process\"])\n", 350 | "\n", 351 | "tot_complete_chain = (\n", 352 | " prompt1\n", 353 | " | llm_openai\n", 354 | " | {'solutions': RunnablePassthrough()}\n", 355 | " | prompt2\n", 356 | " | llm_openai\n", 357 | " | {'review': RunnablePassthrough()}\n", 358 | " | prompt3\n", 359 | " | llm_openai\n", 360 | " | {'deepen_thought_process': RunnablePassthrough()}\n", 361 | " | prompt4\n", 362 | " | llm_openai\n", 363 | " | {'ranked_solution': RunnablePassthrough()}\n", 364 | ")" 365 | ] 366 | }, 367 | { 368 | "cell_type": "code", 369 | "execution_count": 16, 370 | "metadata": {}, 371 | "outputs": [ 372 | { 373 | "name": "stdout", 374 | "output_type": "stream", 375 | "text": [ 376 | "{'ranked_solution': AIMessage(content='Ranking of Solutions in Order of Promise:\\n\\n1. Option 3: Establishing partnerships with other space agencies or private companies\\n2. Option 1: Developing sustainable habitats on Mars\\n3. Option 2: Creating a reliable transportation system between Earth and Mars\\n\\nJustification for Rankings:\\n\\nOption 3 is ranked the highest as it offers the most promising path forward for Mars colonization by leveraging the expertise and resources of multiple organizations. Collaboration with other space agencies and private companies can increase the likelihood of success and maximize shared knowledge and support.\\n\\nOption 1 is ranked second as developing sustainable habitats on Mars is crucial for long-term colonization. While it presents challenges such as construction and maintenance, the use of advanced technologies and partnerships with experts can overcome these obstacles.\\n\\nOption 2 is ranked last as creating a reliable transportation system, while important, may not be as immediately critical as establishing sustainable habitats or forming partnerships. 
However, advancements in propulsion technology and collaboration with aerospace companies can still lead to significant progress in Mars colonization.\\n\\nFinal Thoughts:\\n\\nEach solution has its own set of challenges and opportunities, and a combination of all three options may be necessary for a successful Mars colonization mission. It is important to consider the interplay between sustainable living, transportation, and collaboration in order to create a comprehensive and effective strategy for colonizing Mars.', response_metadata={'token_usage': {'completion_tokens': 263, 'prompt_tokens': 563, 'total_tokens': 826}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-c4b0a032-3f38-43e5-8c82-d2b18a4ce224-0')}\n" 377 | ] 378 | } 379 | ], 380 | "source": [ 381 | "results = tot_complete_chain.invoke( {\n", 382 | " \"input\": \"human colonization of Mars\",\n", 383 | " \"perfect_factors\": \"The distance between Earth and Mars is very large, making regular resupply difficult\"\n", 384 | " })\n", 385 | "print(results)" 386 | ] 387 | }, 388 | { 389 | "cell_type": "code", 390 | "execution_count": 18, 391 | "metadata": {}, 392 | "outputs": [ 393 | { 394 | "name": "stdout", 395 | "output_type": "stream", 396 | "text": [ 397 | "Ranking of Solutions in Order of Promise:\n", 398 | "\n", 399 | "1. Option 3: Establishing partnerships with other space agencies or private companies\n", 400 | "2. Option 1: Developing sustainable habitats on Mars\n", 401 | "3. Option 2: Creating a reliable transportation system between Earth and Mars\n", 402 | "\n", 403 | "Justification for Rankings:\n", 404 | "\n", 405 | "Option 3 is ranked the highest as it offers the most promising path forward for Mars colonization by leveraging the expertise and resources of multiple organizations. Collaboration with other space agencies and private companies can increase the likelihood of success and maximize shared knowledge and support.\n", 406 | "\n", 407 | "Option 1 is ranked second as developing sustainable habitats on Mars is crucial for long-term colonization. While it presents challenges such as construction and maintenance, the use of advanced technologies and partnerships with experts can overcome these obstacles.\n", 408 | "\n", 409 | "Option 2 is ranked last as creating a reliable transportation system, while important, may not be as immediately critical as establishing sustainable habitats or forming partnerships. However, advancements in propulsion technology and collaboration with aerospace companies can still lead to significant progress in Mars colonization.\n", 410 | "\n", 411 | "Final Thoughts:\n", 412 | "\n", 413 | "Each solution has its own set of challenges and opportunities, and a combination of all three options may be necessary for a successful Mars colonization mission. 
It is important to consider the interplay between sustainable living, transportation, and collaboration in order to create a comprehensive and effective strategy for colonizing Mars.\n" 414 | ] 415 | } 416 | ], 417 | "source": [ 418 | "print(results['ranked_solution'].content)" 419 | ] 420 | } 421 | ], 422 | "metadata": { 423 | "kernelspec": { 424 | "display_name": "langchain", 425 | "language": "python", 426 | "name": "python3" 427 | }, 428 | "language_info": { 429 | "codemirror_mode": { 430 | "name": "ipython", 431 | "version": 3 432 | }, 433 | "file_extension": ".py", 434 | "mimetype": "text/x-python", 435 | "name": "python", 436 | "nbconvert_exporter": "python", 437 | "pygments_lexer": "ipython3", 438 | "version": "3.12.4" 439 | } 440 | }, 441 | "nbformat": 4, 442 | "nbformat_minor": 2 443 | } 444 | -------------------------------------------------------------------------------- /Unit4-Planning/Skeleton_of_Thought_Generation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "U7oT5LEa-c54" 7 | }, 8 | "source": [ 9 | "## Skeleton of Thought method of generation\n", 10 | "\n", 11 | "LLM decoding is normally sequential. In this method of decoding, the LLM first generates a skeleton of the response. Then it elaborates on each point in the skeleton concurrently.\n", 12 | "\n", 13 | "This method of decoding resembles how humans approach a problem - First generate the outline of a solution and then do parallel processing\n", 14 | "\n", 15 | "You can read more in my blog here: https://generativeai.pub/skeleton-of-thought-processing-0980d9b75f52\n", 16 | "\n", 17 | "![skeleton-of-thought-process.png](images/sot.jpg)" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": { 23 | "id": "kw3l4YdP_4ga" 24 | }, 25 | "source": [ 26 | "## Skeleton of thought step by step" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 1, 32 | "metadata": {}, 33 | "outputs": [ 34 | { 35 | "data": { 36 | "text/plain": [ 37 | "True" 38 | ] 39 | }, 40 | "execution_count": 1, 41 | "metadata": {}, 42 | "output_type": "execute_result" 43 | } 44 | ], 45 | "source": [ 46 | "import os\n", 47 | "import json\n", 48 | "from dotenv import load_dotenv\n", 49 | "load_dotenv()" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 2, 55 | "metadata": { 56 | "id": "zFACeK_v_els" 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "from openai import OpenAI\n", 61 | "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\") ## Put your OpenAI API key here" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 3, 67 | "metadata": { 68 | "id": "nCRsDYhJACjY" 69 | }, 70 | "outputs": [], 71 | "source": [ 72 | "class Gpt4Turbo:\n", 73 | " def __init__(self):\n", 74 | " self.MODEL = 'gpt-3.5-turbo'\n", 75 | " self.TOKEN_LIMIT=4000\n", 76 | " self.client = OpenAI()\n", 77 | "\n", 78 | " def gptCall_json(self, temperature, messages: list):\n", 79 | " try:\n", 80 | " response = self.client.chat.completions.create(model=self.MODEL,\n", 81 | " messages=messages,\n", 82 | " temperature=temperature,\n", 83 | " max_tokens=self.TOKEN_LIMIT,\n", 84 | " stream=False,\n", 85 | " response_format={\"type\": \"json_object\"}) ## Enforce output format\n", 86 | "\n", 87 | " output = response.choices[0].message.content\n", 88 | " return output\n", 89 | "\n", 90 | " except Exception as e:\n", 91 | " print(e)\n", 92 | " return \"\"" 93 | ] 94 | }, 95 | { 96 | 
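As a quick sanity check of the JSON-mode wrapper defined above, a call like the following (hypothetical messages; assumes `OPENAI_API_KEY` is set via the `.env` file) should come back as a small JSON string:

```python
# Hypothetical smoke test for the Gpt4Turbo wrapper defined above.
bot = Gpt4Turbo()
msgs = [
    {"role": "system", "content": "You are a helpful assistant. You respond in JSON format."},
    {"role": "user", "content": 'Reply with exactly {"answer": "pong"}'},
]
print(bot.gptCall_json(0.0, msgs))  # expect something like: {"answer": "pong"}
```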
"cell_type": "markdown", 97 | "metadata": { 98 | "id": "Pdj0dnzqAGxu" 99 | }, 100 | "source": [ 101 | "## Prompt to generate the skeleon outline" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 4, 107 | "metadata": { 108 | "id": "L5LCRN5ZAGMG" 109 | }, 110 | "outputs": [], 111 | "source": [ 112 | "question = \"How can I improve my time management skills?\"\n", 113 | "outline_prompt = f'''\n", 114 | "You're an organizer responsible for only giving the skeleton (not the full content) for answering the question.\n", 115 | "Provide the skeleton as a JSON to answer the question. Instead of writing a full sentence, each skeleton point should\n", 116 | "be very short with only 2~5 words. Generally, the skeleton should have 3~10 points. The skeleton is an outline that would be expanded later.\n", 117 | "Don't elaborate on the point in the skeleton.\n", 118 | "Example:\n", 119 | "\\n\\nQuestion:\\nWhat are the typical types of Chinese dishes?: \\n Response: {{\"answer\" : [\"Dumplings\" , \"Noodles\" , \"Dim Sum\" , \"Hot Pot\" , \"Wonton\", \"Ma Po Tofu\", \"Char Siu\", \" Fried Rice\"]}}.\n", 120 | "\\n\\nQuestion:\\nWhat are some practical tips for individuals to reduce their carbon emissions?\\n Response: {{ \"answer\" :[\"Energy Conservation\", \"Efficient transportation\", \"Home Energy Efficiency\", \"Reduce Water Consumption\", \"Sustainable Diet\", \"Sustainable Travel\"]}}\n", 121 | "\n", 122 | " \\n\\nNow, please provide the skeleton for the following question.\\n{question}\\n Response: {{\"answer\": [...]}}\n", 123 | "'''" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 5, 129 | "metadata": { 130 | "colab": { 131 | "base_uri": "https://localhost:8080/" 132 | }, 133 | "id": "8Ru4OQ7DAO97", 134 | "outputId": "d201eb1d-51dc-42a8-d1df-62c60ba54657" 135 | }, 136 | "outputs": [ 137 | { 138 | "name": "stdout", 139 | "output_type": "stream", 140 | "text": [ 141 | "['Set goals', 'Prioritize tasks', 'Create schedule', 'Use tools', 'Avoid multitasking', 'Take breaks', 'Delegate tasks', 'Learn to say no']\n" 142 | ] 143 | } 144 | ], 145 | "source": [ 146 | "TEMPERATURE=0.5\n", 147 | "message=[]\n", 148 | "\n", 149 | "message.append({\"role\": \"system\", \"content\": \"You are a helpful assistant. You respond in JSON format.\"})\n", 150 | "message.append({\"role\": \"user\", \"content\": outline_prompt})\n", 151 | "\n", 152 | "\n", 153 | "final_output = []\n", 154 | "gpt4_turbo = Gpt4Turbo()\n", 155 | "result = gpt4_turbo.gptCall_json(TEMPERATURE,message)\n", 156 | "result = json.loads(result)['answer']\n", 157 | "print(result)" 158 | ] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": { 163 | "id": "xV-TNK0uAP2u" 164 | }, 165 | "source": [ 166 | "Nice! So we got the model to five us a skeleton of the output" 167 | ] 168 | }, 169 | { 170 | "cell_type": "markdown", 171 | "metadata": { 172 | "id": "JW2os7hAAXew" 173 | }, 174 | "source": [ 175 | "### Prompt to elaborate on a point" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 6, 181 | "metadata": { 182 | "id": "orO6bC4OAPM_" 183 | }, 184 | "outputs": [], 185 | "source": [ 186 | "point = result[0]\n", 187 | "point_prompt = f'''\n", 188 | "You help elaborate on the point user wants. Your input is a question and one possible answer from the question, also called . You will elaborate on the and give a 2-3 sentence response\n", 189 | "on how the helps answer the question. 
Start your response by mentioning the point and then a colon, like point: and then your response\n", 190 | "Your response will be in JSON format. Example: {{\"answer\": {point}: your response\"}}\n", 191 | "\\n\\nNow, please elaborate on the following point. Question: {question}\\n : {point} \\n Response: {{\"answer\": [...]}}\n", 192 | "'''" 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": 7, 198 | "metadata": { 199 | "colab": { 200 | "base_uri": "https://localhost:8080/" 201 | }, 202 | "id": "9VavyRWFAeP3", 203 | "outputId": "ce542c10-ee44-4c60-f9e7-3c11db8baf8a" 204 | }, 205 | "outputs": [ 206 | { 207 | "name": "stdout", 208 | "output_type": "stream", 209 | "text": [ 210 | "{\"answer\": \"Set goals: Setting clear and achievable goals is essential for improving time management skills. By establishing specific objectives, you can prioritize tasks, allocate time effectively, and track your progress. This helps you stay focused, organized, and motivated to manage your time more efficiently.\"}\n" 211 | ] 212 | } 213 | ], 214 | "source": [ 215 | "TEMPERATURE=0.3\n", 216 | "message=[]\n", 217 | "\n", 218 | "message.append({\"role\": \"system\", \"content\": \"You are a helpful assistant. You respond in JSON format.\"})\n", 219 | "message.append({\"role\": \"user\", \"content\": point_prompt})\n", 220 | "\n", 221 | "\n", 222 | "gpt4_turbo = Gpt4Turbo()\n", 223 | "result = gpt4_turbo.gptCall_json(TEMPERATURE,message)\n", 224 | "print(result)" 225 | ] 226 | }, 227 | { 228 | "cell_type": "markdown", 229 | "metadata": { 230 | "id": "XgBTtu76Ag0K" 231 | }, 232 | "source": [ 233 | "## Putting both together including concurrent calls" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": 8, 239 | "metadata": { 240 | "id": "er3HWOv8Aehk" 241 | }, 242 | "outputs": [], 243 | "source": [ 244 | "import concurrent.futures\n", 245 | "import json" 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": 9, 251 | "metadata": { 252 | "id": "Upccbz6rAopt" 253 | }, 254 | "outputs": [], 255 | "source": [ 256 | "class Gpt4Turbo:\n", 257 | " def __init__(self):\n", 258 | " self.MODEL = 'gpt-3.5-turbo-1106'\n", 259 | " self.TOKEN_LIMIT=4000\n", 260 | " self.client = OpenAI()\n", 261 | " self.temperature =0.3\n", 262 | " self.streaming = False\n", 263 | "\n", 264 | " def gptCall_json(self, temperature, messages: list):\n", 265 | " try:\n", 266 | " response = self.client.chat.completions.create(model=self.MODEL,\n", 267 | " messages=messages,\n", 268 | " temperature=temperature,\n", 269 | " max_tokens=self.TOKEN_LIMIT,\n", 270 | " stream=False,\n", 271 | " response_format={\"type\": \"json_object\"}) ## Enforce output format\n", 272 | "\n", 273 | "\n", 274 | " return response.choices[0].message.content\n", 275 | "\n", 276 | " except Exception as e:\n", 277 | " print(e)\n", 278 | " return \"\"\n", 279 | "\n", 280 | " def generate_skeleton(self):\n", 281 | " question = self.question\n", 282 | " outline_prompt = f'''\n", 283 | " You're an organizer responsible for only giving the skeleton (not the full content) for answering the question.\n", 284 | " Provide the skeleton as a JSON to answer the question. Instead of writing a full sentence, each skeleton point should\n", 285 | " be very short with only 2~5 words. Generally, the skeleton should have 3~10 points. 
The skeleton is an outline that would be expanded later.\n", 286 | " Don't elaborate on the point in the skeleton.\n", 287 | " Example:\n", 288 | " \\n\\nQuestion:\\nWhat are the typical types of Chinese dishes?: \\n Response: {{\"answer\" : [\"Dumplings\" , \"Noodles\" , \"Dim Sum\" , \"Hot Pot\" , \"Wonton\", \"Ma Po Tofu\", \"Char Siu\", \" Fried Rice\"]}}.\n", 289 | " \\n\\nQuestion:\\nWhat are some practical tips for individuals to reduce their carbon emissions?\\n Response: {{ \"answer\" :[\"Energy Conservation\", \"Efficient transportation\", \"Home Energy Efficiency\", \"Reduce Water Consumption\", \"Sustainable Diet\", \"Sustainable Travel\"]}}\n", 290 | "\n", 291 | " \\n\\nNow, please provide the skeleton for the following question.\\n{question}\\n Response: {{\"answer\": [...]}}\n", 292 | " '''\n", 293 | "\n", 294 | " ## Make the message\n", 295 | " message=[]\n", 296 | " message.append({\"role\": \"system\", \"content\": \"You are a helpful assistant. You respond in JSON format.\"})\n", 297 | " message.append({\"role\": \"user\", \"content\": outline_prompt})\n", 298 | "\n", 299 | " result = self.gptCall_json(self.temperature, message)\n", 300 | " result = json.loads(result)\n", 301 | " self.result = result['answer']\n", 302 | "\n", 303 | "\n", 304 | " def elaborate_point(self, point):\n", 305 | "\n", 306 | " question = self.question\n", 307 | "\n", 308 | " point_prompt = f'''\n", 309 | " You help elaborate on the point the user wants. Your input is a question and one possible answer from the question, also called the point. You will elaborate on the point and give a 2-3 sentence response\n", 310 | " on how the point helps answer the question. Start your response by mentioning the point and then a colon, like point: and then your response\n", 311 | " Your response will be in JSON format. Example: {{\"answer\": {point}: your response\"}}\n", 312 | " \\n\\nNow, please elaborate on the following point. Question: {question}\\n : {point} \\n Response: {{\"answer\": [...]}}\n", 313 | " '''\n", 314 | "\n", 315 | " ## Make the message\n", 316 | " message=[]\n", 317 | " message.append({\"role\": \"system\", \"content\": \"You are a helpful assistant. You respond in JSON format.\"})\n", 318 | " message.append({\"role\": \"user\", \"content\": point_prompt})\n", 319 | "\n", 320 | " result = self.gptCall_json(self.temperature, message)\n", 321 | " point_elaborate = json.loads(result)\n", 322 | " return point_elaborate['answer']\n", 323 | "\n", 324 | "\n", 325 | " def concurrent_results(self, question):\n", 326 | " self.question = question\n", 327 | " self.generate_skeleton()\n", 328 | " num_points = len(self.result)\n", 329 | " # Create a thread pool executor with one worker per skeleton point\n", 330 | " with concurrent.futures.ThreadPoolExecutor(max_workers=num_points) as executor:\n", 331 | " # Submit the API calls to the executor\n", 332 | " outputs = [executor.submit(self.elaborate_point, point) for point in self.result]\n", 333 | " # Wait for the API calls to complete and get the results\n", 334 | " results = [future.result() for future in concurrent.futures.as_completed(outputs)]\n", 335 | "\n", 336 | " # Use list comprehension to add enumeration and \"\\n\" to each record\n", 337 | " string_list = [f\"{i+1}. 
{record}\\n\" for i, record in enumerate(results)]\n", 338 | "\n", 339 | " # Join the string_list elements into a single string\n", 340 | " final_output = ''.join(string_list)\n", 341 | " return final_output" 342 | ] 343 | }, 344 | { 345 | "cell_type": "code", 346 | "execution_count": 10, 347 | "metadata": { 348 | "colab": { 349 | "base_uri": "https://localhost:8080/" 350 | }, 351 | "id": "ukvdqitZArcf", 352 | "outputId": "ef451e22-1738-428e-b3c5-feda498a6ce0" 353 | }, 354 | "outputs": [ 355 | { 356 | "name": "stdout", 357 | "output_type": "stream", 358 | "text": [ 359 | "1. Set goals: Setting goals helps you prioritize your tasks and allocate your time effectively. By defining clear objectives, you can focus on activities that align with your goals and avoid wasting time on less important tasks. This allows you to manage your time more efficiently and achieve better results.\n", 360 | "2. Delegate when possible: Delegating tasks to others can help free up your time to focus on more important or high-priority tasks. By assigning tasks to others who are capable, you can ensure that work is being completed efficiently and effectively, allowing you to better manage your time and workload.\n", 361 | "3. Take breaks: Taking breaks can help improve productivity and focus by allowing the brain to rest and recharge. It can also prevent burnout and help maintain a healthy work-life balance, ultimately leading to better time management.\n", 362 | "4. Limit distractions: By limiting distractions, you can create a focused environment that allows you to allocate your time more effectively. This helps in managing time by reducing interruptions and increasing productivity, enabling you to complete tasks more efficiently.\n", 363 | "5. Time blocking: Time blocking is a time management technique where you schedule specific blocks of time for different tasks or activities. This helps you focus on one task at a time, reduces multitasking, and allows for better prioritization of important activities. By allocating dedicated time slots for different tasks, you can effectively manage your time and improve productivity.\n", 364 | "6. Use tools and technology: By leveraging tools and technology such as time management apps, calendars, and project management software, you can streamline your tasks, set reminders, and prioritize your activities. This can help you stay organized, track your progress, and make more efficient use of your time.\n", 365 | "7. Reflect and adjust: Reflecting on how you currently manage your time and identifying areas for improvement allows you to make necessary adjustments. By evaluating your current time management strategies and making changes based on your reflections, you can continuously improve your approach to managing time effectively.\n", 366 | "8. Prioritize tasks: Prioritizing tasks helps you focus on the most important and urgent activities, allowing you to allocate your time and resources efficiently. 
By identifying and tackling high-priority tasks first, you can ensure that you make the most significant impact on your goals and overall productivity.\n", 367 | "\n", 368 | "CPU times: user 77.3 ms, sys: 9.34 ms, total: 86.7 ms\n", 369 | "Wall time: 2.46 s\n" 370 | ] 371 | } 372 | ], 373 | "source": [ 374 | "%%time\n", 375 | "gpt4_turbo = Gpt4Turbo()\n", 376 | "question = \"How do I best manage my time?\"\n", 377 | "result_sot = gpt4_turbo.concurrent_results(question)\n", 378 | "print(result_sot)" 379 | ] 380 | }, 381 | { 382 | "cell_type": "code", 383 | "execution_count": 11, 384 | "metadata": { 385 | "colab": { 386 | "base_uri": "https://localhost:8080/" 387 | }, 388 | "id": "kt8Zw_zEBOjC", 389 | "outputId": "cf244668-a8be-4bed-8369-5408341e17e7" 390 | }, 391 | "outputs": [ 392 | { 393 | "name": "stdout", 394 | "output_type": "stream", 395 | "text": [ 396 | "Number of tokens: 444\n" 397 | ] 398 | } 399 | ], 400 | "source": [ 401 | "import tiktoken\n", 402 | "encoding = tiktoken.encoding_for_model(\"gpt-3.5-turbo\")\n", 403 | "num_tokens = len(encoding.encode(result_sot))\n", 404 | "print(f\"Number of tokens: {num_tokens}\")" 405 | ] 406 | }, 407 | { 408 | "cell_type": "markdown", 409 | "metadata": { 410 | "id": "qNdeIf_QA7iU" 411 | }, 412 | "source": [ 413 | "It took about 2.5 s of wall time to generate an output of 444 tokens\n", 414 | "\n" 415 | ] 416 | }, 417 | { 418 | "cell_type": "markdown", 419 | "metadata": { 420 | "id": "9_4g9VEjlFX8" 421 | }, 422 | "source": [ 423 | "### General ChatGPT" 424 | ] 425 | }, 426 | { 427 | "cell_type": "code", 428 | "execution_count": 12, 429 | "metadata": { 430 | "colab": { 431 | "base_uri": "https://localhost:8080/" 432 | }, 433 | "id": "afRAzt56A6o1", 434 | "outputId": "db290bee-0eaf-4f99-b18c-e59848e775a2" 435 | }, 436 | "outputs": [ 437 | { 438 | "name": "stdout", 439 | "output_type": "stream", 440 | "text": [ 441 | "{'tips': ['Prioritize your tasks by importance and deadline to ensure you focus on the most critical ones first', 'Create a daily or weekly schedule to allocate specific time slots for different activities or projects', 'Use time management tools such as calendars, to-do lists, or apps to help you stay organized and on track', 'Break down larger tasks into smaller, manageable steps to avoid feeling overwhelmed and to make progress more achievable', 'Minimize distractions by setting specific periods for focused work and taking regular breaks to maintain productivity', 'Learn to say no to non-essential tasks or commitments to avoid overloading your schedule', 'Regularly review and adjust your time management strategies to identify areas for improvement and adapt to changing priorities', 'Consider seeking support or guidance from a mentor, coach, or time management expert to develop personalized strategies for optimizing your time']}\n", 442 | "CPU times: user 22 ms, sys: 2.84 ms, total: 24.9 ms\n", 443 | "Wall time: 1.99 s\n" 444 | ] 445 | } 446 | ], 447 | "source": [ 448 | "%%time\n", 449 | "gpt4_turbo = Gpt4Turbo()\n", 450 | "\n", 451 | "message=[]\n", 452 | "message.append({\"role\": \"system\", \"content\": \"You are a helpful assistant. Respond in JSON format\"})\n", 453 | "message.append({\"role\": \"user\", \"content\": f'Answer the user question below as a LONG answer of at least 8 sentences. Give the answer in bullets. Question: {question}. 
Answer: {{\"answer\" : ...}}'})\n", 454 | "\n", 455 | "single_result = gpt4_turbo.gptCall_json(temperature=0.3, messages=message)\n", 456 | "single_result = json.loads(single_result)\n", 457 | "print(single_result['answer'])" 458 | ] 459 | }, 460 | { 461 | "cell_type": "markdown", 462 | "metadata": { 463 | "id": "KuMFABpAl92L" 464 | }, 465 | "source": [ 466 | "It took 1.92 seconds to generate an output that is smaller" 467 | ] 468 | }, 469 | { 470 | "cell_type": "code", 471 | "execution_count": null, 472 | "metadata": { 473 | "id": "FGpTafZYwgSR" 474 | }, 475 | "outputs": [], 476 | "source": [] 477 | } 478 | ], 479 | "metadata": { 480 | "colab": { 481 | "provenance": [] 482 | }, 483 | "kernelspec": { 484 | "display_name": "Python 3", 485 | "name": "python3" 486 | }, 487 | "language_info": { 488 | "codemirror_mode": { 489 | "name": "ipython", 490 | "version": 3 491 | }, 492 | "file_extension": ".py", 493 | "mimetype": "text/x-python", 494 | "name": "python", 495 | "nbconvert_exporter": "python", 496 | "pygments_lexer": "ipython3", 497 | "version": "3.12.4" 498 | } 499 | }, 500 | "nbformat": 4, 501 | "nbformat_minor": 0 502 | } 503 | -------------------------------------------------------------------------------- /Unit2-Tools/OpenAI-Function-Calling.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "## Function Calling Notebook\n", 9 | "\n", 10 | "This notebook walks through the steps to run function calling through OpenAI. Function Calling mimics what Langchain does internally with tools and agents. The main difference is that Langchain executes the tool whereas OpenAI function calling comes back with the parameters to call the tool" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "outputs": [ 18 | { 19 | "data": { 20 | "text/plain": [ 21 | "True" 22 | ] 23 | }, 24 | "execution_count": 1, 25 | "metadata": {}, 26 | "output_type": "execute_result" 27 | } 28 | ], 29 | "source": [ 30 | "from dotenv import load_dotenv\n", 31 | "load_dotenv()" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "![Function Calling](images/function-calling.png)" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 2, 44 | "metadata": {}, 45 | "outputs": [ 46 | { 47 | "name": "stderr", 48 | "output_type": "stream", 49 | "text": [ 50 | "/Users/pdwivedi/miniconda3/envs/test/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 51 | " from .autonotebook import tqdm as notebook_tqdm\n" 52 | ] 53 | } 54 | ], 55 | "source": [ 56 | "import json\n", 57 | "import openai\n", 58 | "import os\n", 59 | "from openai import OpenAI\n", 60 | "openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n", 61 | "\n", 62 | "from langchain_community.tools.tavily_search import TavilySearchResults, TavilyAnswer\n", 63 | "\n", 64 | "from transformers import pipeline\n", 65 | "from termcolor import colored\n", 66 | "\n" 67 | ] 68 | }, 69 | { 70 | "attachments": {}, 71 | "cell_type": "markdown", 72 | "metadata": {}, 73 | "source": [ 74 | "#### Setting up OpenAI Calling from Python" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 3, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "class Chatbot:\n", 84 | " def __init__(self):\n", 85 | " ## reading config file\n", 86 | " self.client = OpenAI()\n", 87 | " self.GPT_MODEL = \"gpt-4-turbo-preview\"\n", 88 | "\n", 89 | " def call_openai(self, query, tools=None):\n", 90 | " ## Add User Query to Messages\n", 91 | " if tools:\n", 92 | " completion = self.client.chat.completions.create(\n", 93 | " model=self.GPT_MODEL,\n", 94 | " messages=query,\n", 95 | " response_format={\"type\": \"json_object\"},\n", 96 | " temperature=0.2,\n", 97 | " tools = tools,\n", 98 | " tool_choice = \"auto\"\n", 99 | " )\n", 100 | " else:\n", 101 | " completion = self.client.chat.completions.create(\n", 102 | " model=self.GPT_MODEL,\n", 103 | " messages=query,\n", 104 | " response_format={\"type\": \"json_object\"},\n", 105 | " temperature=0.4,\n", 106 | " )\n", 107 | " \n", 108 | " ## Let's look at the output we got\n", 109 | " # print(\"Output from the model: \", completion.choices[0].message)\n", 110 | " # print(\"\\n\")\n", 111 | " tools_response = []\n", 112 | "\n", 113 | " ## Extract any content\n", 114 | " result = completion.choices[0].message.content\n", 115 | " ## If content is present, load it in JSON\n", 116 | " if result is not None:\n", 117 | " result = json.loads(result)\n", 118 | " result = result['response']\n", 119 | "\n", 120 | " ## Extract any tool calls\n", 121 | " tools_output = completion.choices[0].message.tool_calls\n", 122 | " ## If tool_calls is in result iterate and extract all of them\n", 123 | " if tools_output is not None:\n", 124 | " for tool in tools_output:\n", 125 | " tools_response.append((tool.function.name, tool.function.arguments))\n", 126 | " return result, tools_response" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 4, 132 | "metadata": {}, 133 | "outputs": [ 134 | { 135 | "name": "stdout", 136 | "output_type": "stream", 137 | "text": [ 138 | "Spring in North America officially starts with the vernal equinox, which occurs on March 20th or 21st each year. []\n" 139 | ] 140 | } 141 | ], 142 | "source": [ 143 | "query = \"When does spring start in North America?\"\n", 144 | "messages = []\n", 145 | "messages.append({\"role\": \"system\", \"content\":\"You are a friendly chatbot who likes to chat with users and extract relevant information. You respond back in JSON format. 
Put your answer in the key response\"})\n", 146 | "messages.append({\"role\": \"user\", \"content\": query})\n", 147 | "\n", 148 | "\n", 149 | "chatbot = Chatbot()\n", 150 | "result, tools_output = chatbot.call_openai(messages)\n", 151 | "print(result, tools_output)" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "metadata": {}, 157 | "source": [ 158 | "### Define Functions using the Schema from OpenAI\n", 159 | "\n", 160 | "We are going to have 3 functions\n", 161 | "* Search - Tavily Search for the user query\n", 162 | "* Calculator - Function that would do the math calculation\n", 163 | "* Sentiment - Get text sentiment using transformer pipeline" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": 5, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "tools = [\n", 173 | " {\n", 174 | " \"type\": \"function\",\n", 175 | " \"function\": {\n", 176 | " \"name\": \"get_sentiment_text\",\n", 177 | " \"description\": \"Get the sentiment of the input text\",\n", 178 | " \"parameters\": {\n", 179 | " \"type\": \"object\",\n", 180 | " \"properties\": {\n", 181 | " \"query\": {\n", 182 | " \"type\": \"string\",\n", 183 | " \"description\": \"The text to analyze\"\n", 184 | " },\n", 185 | " },\n", 186 | " \"required\": [\"query\"]\n", 187 | " }\n", 188 | " }\n", 189 | " },\n", 190 | " {\n", 191 | " \"type\": \"function\",\n", 192 | " \"function\": {\n", 193 | " \"name\": \"get_search_results\",\n", 194 | " \"description\": \"Search for results on a given topic\",\n", 195 | " \"parameters\": {\n", 196 | " \"type\": \"object\",\n", 197 | " \"properties\": {\n", 198 | " \"query\": {\n", 199 | " \"type\": \"string\",\n", 200 | " \"description\": \"The search query\"\n", 201 | " },\n", 202 | " \"limit\": {\n", 203 | " \"type\": \"integer\",\n", 204 | " \"description\": \"The number of results to return\"\n", 205 | " }\n", 206 | " },\n", 207 | " \"required\": [\"query\"]\n", 208 | " }\n", 209 | " }\n", 210 | " },\n", 211 | " {\n", 212 | " \"type\": \"function\",\n", 213 | " \"function\": {\n", 214 | " \"name\": \"calculator\",\n", 215 | " \"description\": \"Calculate the input query. Useful when you have a math computation\",\n", 216 | " \"parameters\": {\n", 217 | " \"type\": \"object\",\n", 218 | " \"properties\": {\n", 219 | " \"query\": {\n", 220 | " \"type\": \"string\",\n", 221 | " \"description\": \"The calculation to be done\"\n", 222 | " }\n", 223 | " }\n", 224 | " }\n", 225 | " }\n", 226 | " }\n", 227 | " ]" 228 | ] 229 | }, 230 | { 231 | "cell_type": "code", 232 | "execution_count": 6, 233 | "metadata": {}, 234 | "outputs": [ 235 | { 236 | "name": "stdout", 237 | "output_type": "stream", 238 | "text": [ 239 | "None [('calculator', '{\"query\":\"15*6\"}')]\n" 240 | ] 241 | } 242 | ], 243 | "source": [ 244 | "query = \"What is the product of 15 and 6\"\n", 245 | "\n", 246 | "messages = []\n", 247 | "messages.append({\"role\": \"system\", \"content\":\"You are a friendly chatbot who likes to chat with users and extract relevant information. You respond back in JSON format. 
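With these schemas registered, the model replies with the chosen function's name plus its arguments serialized as a JSON *string*; the caller is responsible for parsing and validating them. A minimal sketch of that parse step (the tuple below mirrors the shape of `tools_output` used throughout this notebook; the values are illustrative):

```python
import json

# (name, raw_args) as returned by Chatbot.call_openai above; values are made up.
name, raw_args = "get_search_results", '{"query": "Dune 2 movie reviews", "limit": 2}'
args = json.loads(raw_args)            # arguments arrive as a JSON string
assert name in {"get_sentiment_text", "get_search_results", "calculator"}
print(name, args["query"], args.get("limit", 2))
```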
Put your answer in the key response\"})\n", 248 | "messages.append({\"role\": \"user\", \"content\": query})\n", 249 | "\n", 250 | "\n", 251 | "chatbot = Chatbot()\n", 252 | "result, tools_output = chatbot.call_openai(messages, tools)\n", 253 | "print(result, tools_output)\n" 254 | ] 255 | }, 256 | { 257 | "cell_type": "markdown", 258 | "metadata": {}, 259 | "source": [ 260 | "### Write the definition of functions" 261 | ] 262 | }, 263 | { 264 | "cell_type": "code", 265 | "execution_count": 7, 266 | "metadata": {}, 267 | "outputs": [], 268 | "source": [ 269 | "import re\n", 270 | "\n", 271 | "def safe_calculator(query):\n", 272 | " \"\"\"\n", 273 | " A simple calculator that evaluates basic arithmetic expressions from a string.\n", 274 | " Supports addition (+), subtraction (-), multiplication (*), and division (/).\n", 275 | "\n", 276 | " Parameters:\n", 277 | " - query: str, a mathematical expression as a string (e.g., '15*6')\n", 278 | "\n", 279 | " Returns:\n", 280 | " - The result of the arithmetic operation or an error message if the query is invalid.\n", 281 | " \"\"\"\n", 282 | " # Pattern to match a basic arithmetic expression\n", 283 | " pattern = r'^(\\d+(\\.\\d+)?)\\s*([\\+\\-\\*/])\\s*(\\d+(\\.\\d+)?)$'\n", 284 | " match = re.match(pattern, query)\n", 285 | " if not match:\n", 286 | " return \"Error: Invalid input format.\"\n", 287 | "\n", 288 | " # Extract operands and operator\n", 289 | " a, operator, b = float(match.group(1)), match.group(3), float(match.group(4))\n", 290 | "\n", 291 | " # Perform calculation\n", 292 | " if operator == '+':\n", 293 | " return a + b\n", 294 | " elif operator == '-':\n", 295 | " return a - b\n", 296 | " elif operator == '*':\n", 297 | " return a * b\n", 298 | " elif operator == '/':\n", 299 | " if b == 0:\n", 300 | " return \"Error: Division by zero is not allowed.\"\n", 301 | " return a / b\n", 302 | " else:\n", 303 | " return \"Error: Unsupported operation.\"\n", 304 | "\n", 305 | "\n", 306 | "def tavily_search(query, limit=2):\n", 307 | " \"\"\"Function to execute a Tavily search.\"\"\"\n", 308 | " try:\n", 309 | " tool = TavilyAnswer(max_results=limit)\n", 310 | " results = tool.invoke({\"query\":query})\n", 311 | " # results = \" \".join(item['content'] for item in search_results)\n", 312 | " except Exception as e:\n", 313 | " results = f\"query failed with error: {e}\"\n", 314 | " return results\n", 315 | "\n", 316 | "def math_calculator(query):\n", 317 | " \"\"\"Function to run Math Calculations.\"\"\"\n", 318 | " try:\n", 319 | " results = safe_calculator(query)\n", 320 | " except Exception as e:\n", 321 | " results = f\"query failed with error: {e}\"\n", 322 | " return results\n", 323 | "\n", 324 | "def text_sentiment(query):\n", 325 | " \"\"\"Function to get sentiment of a text.\"\"\"\n", 326 | " try:\n", 327 | " pipe = pipeline(\"sentiment-analysis\")\n", 328 | " results = pipe(query)\n", 329 | " except Exception as e:\n", 330 | " results = f\"query failed with error: {e}\"\n", 331 | " return results\n", 332 | "\n", 333 | "\n", 334 | "def function_executor(tools_response_LLM):\n", 335 | " \"\"\"Tie the above functions together so any of them can be executed\"\"\"\n", 336 | " name, params= tools_response_LLM[0]\n", 337 | " params = json.loads(params)\n", 338 | " if name == 'get_search_results':\n", 339 | " query = params['query']\n", 340 | " limit = params.get('limit', 2)\n", 341 | " results = tavily_search(query, limit)\n", 342 | " elif name == 'calculator':\n", 343 | " query = params['query']\n", 344 | " results = math_calculator(query)\n",
345 | " elif name == 'get_sentiment_text':\n", 346 | " query = params['query']\n", 347 | " results = text_sentiment(query)\n", 348 | " else:\n", 349 | " results = f\"Error: function {name} does not exist\"\n", 350 | " return results" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "execution_count": 8, 356 | "metadata": {}, 357 | "outputs": [ 358 | { 359 | "data": { 360 | "text/plain": [ 361 | "4.2" 362 | ] 363 | }, 364 | "execution_count": 8, 365 | "metadata": {}, 366 | "output_type": "execute_result" 367 | } 368 | ], 369 | "source": [ 370 | "math_calculator(\"21/5\")" 371 | ] 372 | }, 373 | { 374 | "cell_type": "code", 375 | "execution_count": 9, 376 | "metadata": {}, 377 | "outputs": [ 378 | { 379 | "name": "stderr", 380 | "output_type": "stream", 381 | "text": [ 382 | "No model was supplied, defaulted to distilbert/distilbert-base-uncased-finetuned-sst-2-english and revision af0f99b (https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english).\n", 383 | "Using a pipeline without specifying a model name and revision in production is not recommended.\n", 384 | "/Users/pdwivedi/miniconda3/envs/test/lib/python3.12/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", 385 | " warnings.warn(\n" 386 | ] 387 | }, 388 | { 389 | "data": { 390 | "text/plain": [ 391 | "[{'label': 'NEGATIVE', 'score': 0.9995974898338318}]" 392 | ] 393 | }, 394 | "execution_count": 9, 395 | "metadata": {}, 396 | "output_type": "execute_result" 397 | } 398 | ], 399 | "source": [ 400 | "text_sentiment(\"I hated the Dune 2 movie\")" 401 | ] 402 | }, 403 | { 404 | "cell_type": "code", 405 | "execution_count": 10, 406 | "metadata": {}, 407 | "outputs": [ 408 | { 409 | "data": { 410 | "text/plain": [ 411 | "'Robert Downey Jr. has won one Academy Award for Best Actor in a Supporting Role.'" 412 | ] 413 | }, 414 | "execution_count": 10, 415 | "metadata": {}, 416 | "output_type": "execute_result" 417 | } 418 | ], 419 | "source": [ 420 | "tavily_search(\"How many Oscars has Robert Downey Jr. won?\")" 421 | ] 422 | }, 423 | { 424 | "attachments": {}, 425 | "cell_type": "markdown", 426 | "metadata": {}, 427 | "source": [ 428 | "### First Call to OpenAI\n", 429 | "\n", 430 | "We will test the code by passing in the function arguments and making a first call to OpenAI" 431 | ] 432 | }, 433 | { 434 | "cell_type": "code", 435 | "execution_count": 11, 436 | "metadata": {}, 437 | "outputs": [ 438 | { 439 | "name": "stdout", 440 | "output_type": "stream", 441 | "text": [ 442 | "/n\n", 443 | "\u001b[33m[{'role': 'system', 'content': '\\nYou are a friendly chatbot who looks at the tools they have and selects the best giving one.\\nYou respond back in JSON format.\\nPut your response in the key response.\\nImportant: Choose only one tool at a time. \\n'}, {'role': 'user', 'content': '\\nWhat are the latest reviews for Dune 2 movie? What is the sentiment of that\\n'}]\u001b[0m\n", 444 | "\u001b[32m[('get_search_results', '{\"query\": \"Dune 2 movie reviews\", \"limit\": 5}'), ('get_sentiment_text', '{\"query\": \"Dune 2 movie reviews\"}')]\u001b[0m\n", 445 | "\u001b[34mBased on the data provided, \"Dune Part Two\" has received positive reviews. 
Rosalynn Try-Hane from Battle Royale With Cheese rated the movie 4/5 stars, mentioning that it is bolder and more bombastic than the first installment in terms of cinematography and direction. The review from RogerEbert.com compares the movie to \"The Lord of the Rings: The Two Towers,\" highlighting the character development of Paul Atreides and the continuous battle and danger depicted in the film. The portrayal of characters like Feyd-Rautha by Austin Butler is also praised for its intensity.\u001b[0m\n", 446 | "/n\n", 447 | "\u001b[33m[{'role': 'system', 'content': '\\nYou are a friendly chatbot who looks at the tools they have and selects the best giving one.\\nYou respond back in JSON format.\\nPut your response in the key response.\\nImportant: Choose only one tool at a time. \\n'}, {'role': 'user', 'content': '\\nWhat are the latest reviews for Dune 2 movie? What is the sentiment of that\\n'}, {'role': 'function', 'name': 'get_search_results', 'content': 'Based on the data provided, \"Dune Part Two\" has received positive reviews. Rosalynn Try-Hane from Battle Royale With Cheese rated the movie 4/5 stars, mentioning that it is bolder and more bombastic than the first installment in terms of cinematography and direction. The review from RogerEbert.com compares the movie to \"The Lord of the Rings: The Two Towers,\" highlighting the character development of Paul Atreides and the continuous battle and danger depicted in the film. The portrayal of characters like Feyd-Rautha by Austin Butler is also praised for its intensity.'}]\u001b[0m\n" 448 | ] 449 | }, 450 | { 451 | "name": "stderr", 452 | "output_type": "stream", 453 | "text": [ 454 | "No model was supplied, defaulted to distilbert/distilbert-base-uncased-finetuned-sst-2-english and revision af0f99b (https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english).\n", 455 | "Using a pipeline without specifying a model name and revision in production is not recommended.\n" 456 | ] 457 | }, 458 | { 459 | "name": "stdout", 460 | "output_type": "stream", 461 | "text": [ 462 | "\u001b[32m[('get_sentiment_text', '{\"query\":\"Based on the data provided, \\\\\"Dune Part Two\\\\\" has received positive reviews. Rosalynn Try-Hane from Battle Royale With Cheese rated the movie 4/5 stars, mentioning that it is bolder and more bombastic than the first installment in terms of cinematography and direction. The review from RogerEbert.com compares the movie to \\\\\"The Lord of the Rings: The Two Towers,\\\\\" highlighting the character development of Paul Atreides and the continuous battle and danger depicted in the film. The portrayal of characters like Feyd-Rautha by Austin Butler is also praised for its intensity.\"}')]\u001b[0m\n" 463 | ] 464 | }, 465 | { 466 | "name": "stderr", 467 | "output_type": "stream", 468 | "text": [ 469 | "/Users/pdwivedi/miniconda3/envs/test/lib/python3.12/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. 
If you want to force a new download, use `force_download=True`.\n", 470 | " warnings.warn(\n" 471 | ] 472 | }, 473 | { 474 | "name": "stdout", 475 | "output_type": "stream", 476 | "text": [ 477 | "\u001b[34m[{'label': 'POSITIVE', 'score': 0.9995766282081604}]\u001b[0m\n", 478 | "/n\n", 479 | "\u001b[33m[{'role': 'system', 'content': '\\nYou are a friendly chatbot who looks at the tools they have and selects the best giving one.\\nYou respond back in JSON format.\\nPut your response in the key response.\\nImportant: Choose only one tool at a time. \\n'}, {'role': 'user', 'content': '\\nWhat are the latest reviews for Dune 2 movie? What is the sentiment of that\\n'}, {'role': 'function', 'name': 'get_search_results', 'content': 'Based on the data provided, \"Dune Part Two\" has received positive reviews. Rosalynn Try-Hane from Battle Royale With Cheese rated the movie 4/5 stars, mentioning that it is bolder and more bombastic than the first installment in terms of cinematography and direction. The review from RogerEbert.com compares the movie to \"The Lord of the Rings: The Two Towers,\" highlighting the character development of Paul Atreides and the continuous battle and danger depicted in the film. The portrayal of characters like Feyd-Rautha by Austin Butler is also praised for its intensity.'}, {'role': 'function', 'name': 'get_sentiment_text', 'content': \"[{'label': 'POSITIVE', 'score': 0.9995766282081604}]\"}]\u001b[0m\n", 480 | "\u001b[31m{'search_results': 'Based on the data provided, \"Dune Part Two\" has received positive reviews. Rosalynn Try-Hane from Battle Royale With Cheese rated the movie 4/5 stars, mentioning that it is bolder and more bombastic than the first installment in terms of cinematography and direction. The review from RogerEbert.com compares the movie to \"The Lord of the Rings: The Two Towers,\" highlighting the character development of Paul Atreides and the continuous battle and danger depicted in the film. The portrayal of characters like Feyd-Rautha by Austin Butler is also praised for its intensity.', 'sentiment_analysis': {'label': 'POSITIVE', 'score': 0.9995766282081604}}\u001b[0m\n" 481 | ] 482 | } 483 | ], 484 | "source": [ 485 | "# user_request = \"\"\"\n", 486 | "# Find Harry Styles' age. What is their current age, multiplied by 2.1 ?\n", 487 | "# \"\"\"\n", 488 | "\n", 489 | "# user_request = \"\"\"\n", 490 | "# What is current US unemployment rate? What will it be if it doubles?\n", 491 | "# \"\"\"\n", 492 | "\n", 493 | "user_request = \"\"\"\n", 494 | "What are the latest reviews for Dune 2 movie? What is the sentiment of that\n", 495 | "\"\"\"\n", 496 | "\n", 497 | "\n", 498 | "system_message = '''\n", 499 | "You are a friendly chatbot who looks at the tools they have and selects the best giving one.\n", 500 | "You respond back in JSON format.\n", 501 | "Put your response in the key response.\n", 502 | "Important: Choose only one tool at a time. 
\n", 503 | "'''\n", 504 | "\n", 505 | "messages = []\n", 506 | "messages.append({\"role\": \"system\", \"content\":system_message})\n", 507 | "messages.append({\"role\": \"user\", \"content\": user_request})\n", 508 | "chatbot = Chatbot()\n", 509 | "\n", 510 | "keep_running = True\n", 511 | "while keep_running:\n", 512 | " print('/n')\n", 513 | " print(colored(messages, 'yellow'))\n", 514 | " result, tools_output = chatbot.call_openai(messages, tools=tools)\n", 515 | "\n", 516 | " if result is not None:\n", 517 | " messages.append({\"role\": \"assistant\", \"content\": result})\n", 518 | " print(colored(result, 'red'))\n", 519 | " keep_running = False\n", 520 | "\n", 521 | " if len(tools_output) >0:\n", 522 | " ## Run the tool request from LLM\n", 523 | " print(colored(tools_output, 'green'))\n", 524 | " function_results = function_executor(tools_output)\n", 525 | " print(colored(function_results, 'blue'))\n", 526 | "\n", 527 | " ### Pass the tool output back to LLM\n", 528 | " messages.append({\"role\": \"function\", \"name\": str(tools_output[0][0]), \"content\": str(function_results)})\n", 529 | " keep_running = True" 530 | ] 531 | }, 532 | { 533 | "cell_type": "code", 534 | "execution_count": 12, 535 | "metadata": {}, 536 | "outputs": [ 537 | { 538 | "name": "stdout", 539 | "output_type": "stream", 540 | "text": [ 541 | "/n\n", 542 | "\u001b[33m[{'role': 'system', 'content': '\\nYou are a friendly chatbot who looks at the tools they have and selects the best giving one.\\nYou respond back in JSON format.\\nPut your response in the key response.\\nImportant: Choose only one tool at a time. \\n'}, {'role': 'user', 'content': '\\nWhat is current US unemployment rate? What will it be if it doubles?\\n'}]\u001b[0m\n", 543 | "\u001b[32m[('get_search_results', '{\"query\":\"current US unemployment rate\",\"limit\":1}')]\u001b[0m\n", 544 | "\u001b[34mThe current US unemployment rate is 4.00% as of the most recent data available.\u001b[0m\n", 545 | "/n\n", 546 | "\u001b[33m[{'role': 'system', 'content': '\\nYou are a friendly chatbot who looks at the tools they have and selects the best giving one.\\nYou respond back in JSON format.\\nPut your response in the key response.\\nImportant: Choose only one tool at a time. \\n'}, {'role': 'user', 'content': '\\nWhat is current US unemployment rate? What will it be if it doubles?\\n'}, {'role': 'function', 'name': 'get_search_results', 'content': 'The current US unemployment rate is 4.00% as of the most recent data available.'}]\u001b[0m\n", 547 | "\u001b[32m[('calculator', '{\"query\":\"4.00 * 2\"}')]\u001b[0m\n", 548 | "\u001b[34m8.0\u001b[0m\n", 549 | "/n\n", 550 | "\u001b[33m[{'role': 'system', 'content': '\\nYou are a friendly chatbot who looks at the tools they have and selects the best giving one.\\nYou respond back in JSON format.\\nPut your response in the key response.\\nImportant: Choose only one tool at a time. \\n'}, {'role': 'user', 'content': '\\nWhat is current US unemployment rate? What will it be if it doubles?\\n'}, {'role': 'function', 'name': 'get_search_results', 'content': 'The current US unemployment rate is 4.00% as of the most recent data available.'}, {'role': 'function', 'name': 'calculator', 'content': '8.0'}]\u001b[0m\n", 551 | "\u001b[31m{'current_unemployment_rate': '4.00%', 'doubled_unemployment_rate': '8.00%'}\u001b[0m\n" 552 | ] 553 | } 554 | ], 555 | "source": [ 556 | "\n", 557 | "user_request = \"\"\"\n", 558 | "What is current US unemployment rate? 
What will it be if it doubles?\n", 558 | "\"\"\"\n", 559 | "\n", 560 | "\n", 561 | "system_message = '''\n", 562 | "You are a friendly chatbot who looks at the tools they have and selects the best giving one.\n", 563 | "You respond back in JSON format.\n", 564 | "Put your response in the key response.\n", 565 | "Important: Choose only one tool at a time. \n", 566 | "'''\n", 567 | "\n", 568 | "messages = []\n", 569 | "messages.append({\"role\": \"system\", \"content\":system_message})\n", 570 | "messages.append({\"role\": \"user\", \"content\": user_request})\n", 571 | "chatbot = Chatbot()\n", 572 | "\n", 573 | "keep_running = True\n", 574 | "while keep_running:\n", 575 | " print('\\n')\n", 576 | " print(colored(messages, 'yellow'))\n", 577 | " result, tools_output = chatbot.call_openai(messages, tools=tools)\n", 578 | "\n", 579 | " if result is not None:\n", 580 | " messages.append({\"role\": \"assistant\", \"content\": result})\n", 581 | " print(colored(result, 'red'))\n", 582 | " keep_running = False\n", 583 | "\n", 584 | " if len(tools_output) >0:\n", 585 | " ## Run the tool request from LLM\n", 586 | " print(colored(tools_output, 'green'))\n", 587 | " function_results = function_executor(tools_output)\n", 588 | " print(colored(function_results, 'blue'))\n", 589 | "\n", 590 | " ### Pass the tool output back to LLM\n", 591 | " messages.append({\"role\": \"function\", \"name\": str(tools_output[0][0]), \"content\": str(function_results)})\n", 592 | " keep_running = True" 593 | ] 594 | }, 595 | { 596 | "cell_type": "code", 597 | "execution_count": null, 598 | "metadata": {}, 599 | "outputs": [], 600 | "source": [] 601 | } 602 | ], 603 | "metadata": { 604 | "kernelspec": { 605 | "display_name": "llm_agents", 606 | "language": "python", 607 | "name": "python3" 608 | }, 609 | "language_info": { 610 | "codemirror_mode": { 611 | "name": "ipython", 612 | "version": 3 613 | }, 614 | "file_extension": ".py", 615 | "mimetype": "text/x-python", 616 | "name": "python", 617 | "nbconvert_exporter": "python", 618 | "pygments_lexer": "ipython3", 619 | "version": "3.12.4" 620 | }, 621 | "orig_nbformat": 4 622 | }, 623 | "nbformat": 4, 624 | "nbformat_minor": 2 625 | } 626 | -------------------------------------------------------------------------------- /Unit2-Tools/data/tmdb_tool.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ID": 1, 4 | "tool_name": "/movie/{movie_id}/keywords", 5 | "tool_description": "Get the keywords that have been added to a movie. You should first know the movie_id and thus this tool should be used after /search/movie.", 6 | "tool_usage": "GET /movie/{movie_id}/keywords", 7 | "Example": { 8 | "Scenario": "if you want to find out the keywords that have been added to a movie with movie_id 456.", 9 | "Parameters": { 10 | "input": "GET /movie/456/keywords" 11 | } 12 | } 13 | }, 14 | { 15 | "ID": 2, 16 | "tool_name": "/tv/popular", 17 | "tool_description": "Get a list of the current popular TV shows on TMDb.", 18 | "tool_usage": "GET /tv/popular", 19 | "Example": { 20 | "Scenario": "if you want to get a list of the current popular TV shows on TMDb.", 21 | "Parameters": { 22 | "input": "GET /tv/popular" 23 | } 24 | } 25 | }, 26 | { 27 | "ID": 3, 28 | "tool_name": "/person/{person_id}", 29 | "tool_description": "Get the primary person details by id. 
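One editorial note on the tool loop used in the two runs above: it relies on the model eventually returning content with no tool calls. A hedged variant that caps the number of tool rounds avoids a runaway loop (MAX_ROUNDS is an illustrative choice, not from the original notebook):

```python
MAX_ROUNDS = 5  # illustrative cap, not from the original notebook

for _ in range(MAX_ROUNDS):
    result, tools_output = chatbot.call_openai(messages, tools=tools)
    if tools_output:
        # Execute the requested tool and feed the result back to the model.
        function_results = function_executor(tools_output)
        messages.append({"role": "function",
                         "name": str(tools_output[0][0]),
                         "content": str(function_results)})
        continue
    if result is not None:
        messages.append({"role": "assistant", "content": result})
        break
```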
You should first know the person_id and thus this tool should be used after /search/person.", 30 | "tool_usage": "GET /person/{person_id}", 31 | "Example": { 32 | "Scenario": "if you want to know the primary details of person with person_id 456.", 33 | "Parameters": { 34 | "input": "GET /person/456" 35 | } 36 | } 37 | }, 38 | { 39 | "ID": 4, 40 | "tool_name": "/movie/{movie_id}/reviews", 41 | "tool_description": "Get the user reviews for a movie. You should first know the movie_id and thus this tool should be used after /search/movie.", 42 | "tool_usage": "GET /movie/{movie_id}/reviews", 43 | "Example": { 44 | "Scenario": "If you want to read the user reviews for a movie with movie_id 456.", 45 | "Parameters": { 46 | "input": "GET /movie/456/reviews" 47 | } 48 | } 49 | }, 50 | { 51 | "ID": 5, 52 | "tool_name": "/movie/{movie_id}/release_dates", 53 | "tool_description": "Get the release date along with the certification for a movie. You should first know the movie_id and thus this tool should be used after /search/movie.", 54 | "tool_usage": "GET /movie/{movie_id}/release_dates", 55 | "Example": { 56 | "Scenario": "if you want to know the release dates and certification of the movie with movie_id 123.", 57 | "Parameters": { 58 | "input": "GET /movie/123/release_dates" 59 | } 60 | } 61 | }, 62 | { 63 | "ID": 6, 64 | "tool_name": "/tv/{tv_id}/season/{season_number}/episode/{episode_number}/credits", 65 | "tool_description": "Get the credits (cast, crew and guest stars) for a TV episode. You should first know the tv_id and thus this tool should be used after /search/tv.", 66 | "tool_usage": "GET /tv/{tv_id}/season/{season_number}/episode/{episode_number}/credits", 67 | "Example": { 68 | "Scenario": "If you want to get the credits of a TV episode with tv_id 456, season_number 2, and episode_number 3.", 69 | "Parameters": { 70 | "input": "GET /tv/456/season/2/episode/3/credits" 71 | } 72 | } 73 | }, 74 | { 75 | "ID": 7, 76 | "tool_name": "/movie/{movie_id}/images", 77 | "tool_description": "Get the images that belong to a movie. 
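These specs are plain JSON, so an agent can load them and render a compact tool menu for prompting. A minimal sketch using the keys present in this file (the relative path assumes you run from the repo root):

```python
import json

# Load the TMDB tool specs from this file and render a short menu for an LLM prompt.
with open("Unit2-Tools/data/tmdb_tool.json") as f:
    specs = json.load(f)

menu = "\n".join(f"{s['ID']}. {s['tool_name']} - {s['tool_description']}" for s in specs)
print(menu.splitlines()[0])  # e.g. "1. /movie/{movie_id}/keywords - Get the keywords ..."
```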
You should first know the movie_id and thus this tool should be used after /search/movie.", 78 | "tool_usage": "GET /movie/{movie_id}/images", 79 | "Example": { 80 | "Scenario": "if you want to get the images of a movie with movie_id 456.", 81 | "Parameters": { 82 | "input": "GET /movie/456/images" 83 | } 84 | } 85 | }, 86 | { 87 | "ID": 8, 88 | "tool_name": "/search/tv", 89 | "tool_description": "Search for a TV show, which can obtain tv_id.", 90 | "tool_usage": "GET /search/tv", 91 | "Example": { 92 | "Scenario": "If you want to search for the id of a TV show with the name 'Friends'", 93 | "Parameters": { 94 | "input": "GET /search/tv?query=Friends" 95 | } 96 | } 97 | }, 98 | { 99 | "ID": 9, 100 | "tool_name": "/tv/{tv_id}/season/{season_number}/episode/{episode_number}", 101 | "tool_description": "Get the TV episode details by id.", 102 | "tool_usage": "GET /tv/{tv_id}/season/{season_number}/episode/{episode_number}", 103 | "Example": { 104 | "Scenario": "If you want to get details of the 3rd episode from the 2nd season of the TV show with tv_id 456.", 105 | "Parameters": { 106 | "input": "GET /tv/456/season/2/episode/3" 107 | } 108 | } 109 | }, 110 | { 111 | "ID": 10, 112 | "tool_name": "/network/{network_id}/images", 113 | "tool_description": "Get the TV network logos by id.", 114 | "tool_usage": "GET /network/{network_id}/images", 115 | "Example": { 116 | "Scenario": "if you want to retrieve the logos of a TV network with network_id 50.", 117 | "Parameters": { 118 | "input": "GET /network/50/images" 119 | } 120 | } 121 | }, 122 | { 123 | "ID": 11, 124 | "tool_name": "/genre/tv/list", 125 | "tool_description": "Get the list of official genres for TV shows.", 126 | "tool_usage": "GET /genre/tv/list", 127 | "Example": { 128 | "Scenario": "if you want to get the list of official genres for TV shows.", 129 | "Parameters": { 130 | "input": "GET /genre/tv/list" 131 | } 132 | } 133 | }, 134 | { 135 | "ID": 12, 136 | "tool_name": "/search/movie", 137 | "tool_description": "Search for movies, which can obtain movie_id.", 138 | "tool_usage": "GET /search/movie", 139 | "Example": { 140 | "Scenario": "If you want to search for the id of a movie with the name 'Avatar'", 141 | "Parameters": { 142 | "input": "GET /search/movie?query=Avatar" 143 | } 144 | } 145 | }, 146 | { 147 | "ID": 13, 148 | "tool_name": "/discover/movie", 149 | "tool_description": "Discover movies by different types of data like average rating, number of votes, genres and certifications", 150 | "tool_usage": "GET /discover/movie", 151 | "Example": { 152 | "Scenario": "If you want to discover movies by different types of data like average rating, number of votes, genres and certifications.", 153 | "Parameters": { 154 | "input": "GET /discover/movie" 155 | } 156 | } 157 | }, 158 | { 159 | "ID": 14, 160 | "tool_name": "/movie/upcoming", 161 | "tool_description": "Get a list of upcoming movies in theatres.", 162 | "tool_usage": "GET /movie/upcoming", 163 | "Example": { 164 | "Scenario": "if you want to get a list of upcoming movies in theatres.", 165 | "Parameters": { 166 | "input": "GET /movie/upcoming" 167 | } 168 | } 169 | }, 170 | { 171 | "ID": 15, 172 | "tool_name": "/credit/{credit_id}", 173 | "tool_description": "Get a movie or TV credit details by id.", 174 | "tool_usage": "GET /credit/{credit_id}", 175 | "Example": { 176 | "Scenario": "if you want to get the details of movie or TV credit with credit_id as 456.", 177 | "Parameters": { 178 | "input": "GET /credit/456" 179 | } 180 | } 181 | }, 182 | { 183 | "ID": 16, 184 | 
"tool_name": "/person/{person_id}/tv_credits", 185 | "tool_description": "Get the TV show credits for a person. You should first know the person_id and thus this tool should be used after /search/person.", 186 | "tool_usage": "GET /person/{person_id}/tv_credits", 187 | "Example": { 188 | "Scenario": "If you want to get the TV show credits of a person with person_id 456.", 189 | "Parameters": { 190 | "input": "GET /person/456/tv_credits" 191 | } 192 | } 193 | }, 194 | { 195 | "ID": 17, 196 | "tool_name": "/tv/latest", 197 | "tool_description": "Get the most newly created TV show.", 198 | "tool_usage": "GET /tv/latest", 199 | "Example": { 200 | "Scenario": "If you want to get the most newly created TV show.", 201 | "Parameters": { 202 | "input": "GET /tv/latest" 203 | } 204 | } 205 | }, 206 | { 207 | "ID": 18, 208 | "tool_name": "/company/{company_id}", 209 | "tool_description": "Get a companies details by id. You should first know the company_id and thus this tool should be used after /search/company.", 210 | "tool_usage": "GET /company/{company_id}", 211 | "Example": { 212 | "Scenario": "if you want to know the details of a company with company_id 456.", 213 | "Parameters": { 214 | "input": "GET /company/456" 215 | } 216 | } 217 | }, 218 | { 219 | "ID": 19, 220 | "tool_name": "/tv/{tv_id}/images", 221 | "tool_description": "Get the images that belong to a TV show. You should first know the tv_id and thus this tool should be used after /search/tv.", 222 | "tool_usage": "GET /tv/{tv_id}/images", 223 | "Example": { 224 | "Scenario": "if you want to get the images of a TV show with tv_id 456.", 225 | "Parameters": { 226 | "input": "GET /tv/456/images" 227 | } 228 | } 229 | }, 230 | { 231 | "ID": 20, 232 | "tool_name": "/tv/{tv_id}/season/{season_number}/credits", 233 | "tool_description": "Get the credits for TV season. You should first know the tv_id and thus this tool should be used after /search/tv.", 234 | "tool_usage": "GET /tv/{tv_id}/season/{season_number}/credits", 235 | "Example": { 236 | "Scenario": "if you want to get the credits for a TV season with tv_id 456 and season_number 2.", 237 | "Parameters": { 238 | "input": "GET /tv/456/season/2/credits" 239 | } 240 | } 241 | }, 242 | { 243 | "ID": 21, 244 | "tool_name": "/movie/now_playing", 245 | "tool_description": "Get a list of movies in theatres.", 246 | "tool_usage": "GET /movie/now_playing", 247 | "Example": { 248 | "Scenario": "If you want to get a list of movies currently playing in theatres.", 249 | "Parameters": { 250 | "input": "GET /movie/now_playing" 251 | } 252 | } 253 | }, 254 | { 255 | "ID": 22, 256 | "tool_name": "/review/{review_id}", 257 | "tool_description": "Get review from a reviewer", 258 | "tool_usage": "GET /review/{review_id}", 259 | "Example": { 260 | "Scenario": "If you want to get a review with review_id 456.", 261 | "Parameters": { 262 | "input": "GET /review/456" 263 | } 264 | } 265 | }, 266 | { 267 | "ID": 23, 268 | "tool_name": "/tv/on_the_air", 269 | "tool_description": "Get a list of shows that are currently on the air.", 270 | "tool_usage": "GET /tv/on_the_air", 271 | "Example": { 272 | "Scenario": "If you want to know the list of shows currently on air.", 273 | "Parameters": { 274 | "input": "GET /tv/on_the_air" 275 | } 276 | } 277 | }, 278 | { 279 | "ID": 24, 280 | "tool_name": "/movie/{movie_id}", 281 | "tool_description": "Get the primary information about a movie. 
    "tool_usage": "GET /movie/{movie_id}",
    "Example": {
      "Scenario": "If you want to get the primary information about a movie with movie_id 456.",
      "Parameters": {
        "input": "GET /movie/456"
      }
    }
  },
  {
    "ID": 25,
    "tool_name": "/tv/{tv_id}/season/{season_number}/images",
    "tool_description": "Get the images that belong to a TV season. You should first know the tv_id and thus this tool should be used after /search/tv.",
    "tool_usage": "GET /tv/{tv_id}/season/{season_number}/images",
    "Example": {
      "Scenario": "If you want to get the images of a specific season of a TV show with tv_id 456 and season_number 2.",
      "Parameters": {
        "input": "GET /tv/456/season/2/images"
      }
    }
  },
  {
    "ID": 26,
    "tool_name": "/company/{company_id}/images",
    "tool_description": "Get a company's logos by id. You should first know the company_id and thus this tool should be used after /search/company.",
    "tool_usage": "GET /company/{company_id}/images",
    "Example": {
      "Scenario": "If you want to fetch the logos of the company with company_id 789.",
      "Parameters": {
        "input": "GET /company/789/images"
      }
    }
  },
  {
    "ID": 27,
    "tool_name": "/tv/{tv_id}/reviews",
    "tool_description": "Get the reviews for a TV show. You should first know the tv_id and thus this tool should be used after /search/tv.",
    "tool_usage": "GET /tv/{tv_id}/reviews",
    "Example": {
      "Scenario": "If you want to read the reviews of a TV show with tv_id 567.",
      "Parameters": {
        "input": "GET /tv/567/reviews"
      }
    }
  },
  {
    "ID": 28,
    "tool_name": "/tv/{tv_id}/similar",
    "tool_description": "Get a list of similar TV shows. You should first know the tv_id and thus this tool should be used after /search/tv.",
    "tool_usage": "GET /tv/{tv_id}/similar",
    "Example": {
      "Scenario": "If you want to find similar TV shows to the one with tv_id 456.",
      "Parameters": {
        "input": "GET /tv/456/similar"
      }
    }
  },
  {
    "ID": 29,
    "tool_name": "/network/{network_id}",
    "tool_description": "Get the details of a network.",
    "tool_usage": "GET /network/{network_id}",
    "Example": {
      "Scenario": "If you want to know the details of a network with network_id 456.",
      "Parameters": {
        "input": "GET /network/456"
      }
    }
  },
  {
    "ID": 30,
    "tool_name": "/tv/{tv_id}/recommendations",
    "tool_description": "Get the list of TV show recommendations for this item. You should first know the tv_id and thus this tool should be used after /search/tv.",
    "tool_usage": "GET /tv/{tv_id}/recommendations",
    "Example": {
      "Scenario": "If you want to get recommendations for a TV show with tv_id 456.",
      "Parameters": {
        "input": "GET /tv/456/recommendations"
      }
    }
  },
  {
    "ID": 31,
    "tool_name": "/tv/{tv_id}/season/{season_number}/episode/{episode_number}/images",
    "tool_description": "Get the images that belong to a TV episode. You should first know the tv_id and thus this tool should be used after /search/tv.",
    "tool_usage": "GET /tv/{tv_id}/season/{season_number}/episode/{episode_number}/images",
    "Example": {
      "Scenario": "If you want to know the images of a TV episode with tv_id 456, season_number 2 and episode_number 3.",
      "Parameters": {
        "input": "GET /tv/456/season/2/episode/3/images"
      }
    }
  },
  {
    "ID": 32,
    "tool_name": "/movie/popular",
    "tool_description": "Get a list of the current popular movies on TMDb.",
    "tool_usage": "GET /movie/popular",
    "Example": {
      "Scenario": "If you want to get a list of the current popular movies on TMDb.",
      "Parameters": {
        "input": "GET /movie/popular"
      }
    }
  },
  {
    "ID": 33,
    "tool_name": "/tv/airing_today",
    "tool_description": "Get a list of TV shows that are airing today.",
    "tool_usage": "GET /tv/airing_today",
    "Example": {
      "Scenario": "If you want to get a list of TV shows that are airing today.",
      "Parameters": {
        "input": "GET /tv/airing_today"
      }
    }
  },
  {
    "ID": 34,
    "tool_name": "/tv/{tv_id}/keywords",
    "tool_description": "Get the keywords that have been added to a TV show. You should first know the tv_id and thus this tool should be used after /search/tv.",
    "tool_usage": "GET /tv/{tv_id}/keywords",
    "Example": {
      "Scenario": "If you want to know the keywords of a TV show with tv_id 456.",
      "Parameters": {
        "input": "GET /tv/456/keywords"
      }
    }
  },
  {
    "ID": 35,
    "tool_name": "/search/person",
    "tool_description": "Search for people, which can obtain person_id.",
    "tool_usage": "GET /search/person",
    "Example": {
      "Scenario": "If you want to search for the id of a person with the name 'Leonardo DiCaprio'.",
      "Parameters": {
        "input": "GET /search/person?query=Leonardo%20DiCaprio"
      }
    }
  },
  {
    "ID": 36,
    "tool_name": "/search/company",
    "tool_description": "Search for companies, which can obtain company_id.",
    "tool_usage": "GET /search/company",
    "Example": {
      "Scenario": "If you want to search for the id of a company with the name 'Disney'.",
      "Parameters": {
        "input": "GET /search/company?query=Disney"
      }
    }
  },
  {
    "ID": 37,
    "tool_name": "/discover/tv",
    "tool_description": "Discover TV shows by different types of data like average rating, number of votes, genres, the network they aired on and air dates.",
    "tool_usage": "GET /discover/tv",
    "Example": {
      "Scenario": "If you want to discover TV shows by their average rating, number of votes, genres, the network they aired on and air dates.",
      "Parameters": {
        "input": "GET /discover/tv"
      }
    }
  },
  {
    "ID": 38,
    "tool_name": "/movie/top_rated",
    "tool_description": "Get the top rated movies on TMDb.",
    "tool_usage": "GET /movie/top_rated",
    "Example": {
      "Scenario": "If you want to get the top rated movies on TMDb.",
      "Parameters": {
        "input": "GET /movie/top_rated"
      }
    }
  },
  {
    "ID": 39,
    "tool_name": "/movie/latest",
    "tool_description": "Get the most newly created movie.",
    "tool_usage": "GET /movie/latest",
    "Example": {
      "Scenario": "If you want to know the details of the most recently created movie.",
"Parameters": { 466 | "input": "GET /movie/latest" 467 | } 468 | } 469 | }, 470 | { 471 | "ID": 40, 472 | "tool_name": "/tv/top_rated", 473 | "tool_description": "Get a list of the top rated TV shows on TMDb.", 474 | "tool_usage": "GET /tv/top_rated", 475 | "Example": { 476 | "Scenario": "If you want to get a list of top rated TV shows on TMDb.", 477 | "Parameters": { 478 | "input": "GET /tv/top_rated" 479 | } 480 | } 481 | }, 482 | { 483 | "ID": 41, 484 | "tool_name": "/trending/{media_type}/{time_window}", 485 | "tool_description": "Get the daily or weekly trending items.", 486 | "tool_usage": "GET /trending/{media_type}/{time_window}", 487 | "Example": { 488 | "Scenario": "if you want to know the weekly trending movies.", 489 | "Parameters": { 490 | "input": "GET /trending/movie/week" 491 | } 492 | } 493 | }, 494 | { 495 | "ID": 42, 496 | "tool_name": "/genre/movie/list", 497 | "tool_description": "Get the list of official genres for movies.", 498 | "tool_usage": "GET /genre/movie/list", 499 | "Example": { 500 | "Scenario": "If you want to get the list of official genres for movies.", 501 | "Parameters": { 502 | "input": "GET /genre/movie/list" 503 | } 504 | } 505 | }, 506 | { 507 | "ID": 43, 508 | "tool_name": "/tv/{tv_id}/season/{season_number}", 509 | "tool_description": "Get the TV season details by id. You should first know the tv_id and thus this tool should be used after /search/tv.", 510 | "tool_usage": "GET /tv/{tv_id}/season/{season_number}", 511 | "Example": { 512 | "Scenario": "if you want to know the details of season 2 from the TV series with tv_id 456.", 513 | "Parameters": { 514 | "input": "GET /tv/456/season/2" 515 | } 516 | } 517 | }, 518 | { 519 | "ID": 44, 520 | "tool_name": "/collection/{collection_id}", 521 | "tool_description": "Get collection details by id. You should first know the collection_id and thus this tool should be used after /search/collection.", 522 | "tool_usage": "GET /collection/{collection_id}", 523 | "Example": { 524 | "Scenario": "if you want to get details of a collection with collection_id 456.", 525 | "Parameters": { 526 | "input": "GET /collection/456" 527 | } 528 | } 529 | }, 530 | { 531 | "ID": 45, 532 | "tool_name": "/person/{person_id}/images", 533 | "tool_description": "Get the images for a person. You should first know the person_id and thus this tool should be used after /search/person.", 534 | "tool_usage": "GET /person/{person_id}/images", 535 | "Example": { 536 | "Scenario": "if you want to get the images of a person with person_id 456.", 537 | "Parameters": { 538 | "input": "GET /person/456/images" 539 | } 540 | } 541 | }, 542 | { 543 | "ID": 46, 544 | "tool_name": "/tv/{tv_id}", 545 | "tool_description": "Get the primary TV show details by id. 
    "tool_usage": "GET /tv/{tv_id}",
    "Example": {
      "Scenario": "If you want to retrieve the primary details of a TV show with tv_id 789.",
      "Parameters": {
        "input": "GET /tv/789"
      }
    }
  },
  {
    "ID": 47,
    "tool_name": "/person/popular",
    "tool_description": "Get the list of popular people on TMDb.",
    "tool_usage": "GET /person/popular",
    "Example": {
      "Scenario": "If you want to fetch the list of popular people on TMDb.",
      "Parameters": {
        "input": "GET /person/popular"
      }
    }
  },
  {
    "ID": 48,
    "tool_name": "/collection/{collection_id}/images",
    "tool_description": "Get the images for a collection by id. You should first know the collection_id and thus this tool should be used after /search/collection.",
    "tool_usage": "GET /collection/{collection_id}/images",
    "Example": {
      "Scenario": "If you want to get the images of a collection with collection_id 456.",
      "Parameters": {
        "input": "GET /collection/456/images"
      }
    }
  },
  {
    "ID": 49,
    "tool_name": "/tv/{tv_id}/credits",
    "tool_description": "Get the credits (cast and crew) that have been added to a TV show. You should first know the tv_id and thus this tool should be used after /search/tv.",
    "tool_usage": "GET /tv/{tv_id}/credits",
    "Example": {
      "Scenario": "If you want to know the credits of a TV show with tv_id 456.",
      "Parameters": {
        "input": "GET /tv/456/credits"
      }
    }
  },
  {
    "ID": 50,
    "tool_name": "/person/{person_id}/movie_credits",
    "tool_description": "Get the movie credits for a person; the results contain information such as popularity and release date. You should first know the person_id and thus this tool should be used after /search/person.",
    "tool_usage": "GET /person/{person_id}/movie_credits",
    "Example": {
      "Scenario": "If you want to get the movie credits for a person with person_id 456.",
      "Parameters": {
        "input": "GET /person/456/movie_credits"
      }
    }
  },
  {
    "ID": 51,
    "tool_name": "/movie/{movie_id}/recommendations",
    "tool_description": "Get a list of recommended movies for a movie. You should first know the movie_id and thus this tool should be used after /search/movie.",
    "tool_usage": "GET /movie/{movie_id}/recommendations",
    "Example": {
      "Scenario": "If you want to get a list of recommended movies for the movie with movie_id 456.",
      "Parameters": {
        "input": "GET /movie/456/recommendations"
      }
    }
  },
  {
    "ID": 52,
    "tool_name": "/search/collection",
    "tool_description": "Search for collections, which can obtain collection_id.",
    "tool_usage": "GET /search/collection",
    "Example": {
      "Scenario": "If you want to search for the collection id of Star Wars.",
      "Parameters": {
        "input": "GET /search/collection?query=Star%20Wars"
      }
    }
  },
  {
    "ID": 53,
    "tool_name": "/movie/{movie_id}/credits",
    "tool_description": "Get the cast and crew for a movie. You should first know the movie_id and thus this tool should be used after /search/movie.",
    "tool_usage": "GET /movie/{movie_id}/credits",
    "Example": {
      "Scenario": "If you want to know the cast and crew of a movie with movie_id 789.",
      "Parameters": {
        "input": "GET /movie/789/credits"
      }
    }
  },
  {
    "ID": 54,
    "tool_name": "/movie/{movie_id}/similar",
    "tool_description": "Get a list of similar movies. You should first know the movie_id and thus this tool should be used after /search/movie.",
    "tool_usage": "GET /movie/{movie_id}/similar",
    "Example": {
      "Scenario": "If you want to find movies similar to the movie with movie_id 456.",
      "Parameters": {
        "input": "GET /movie/456/similar"
      }
    }
  }
]
--------------------------------------------------------------------------------
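Note: tmdb_tool.json is a tool catalogue of GET-only TMDB v3 endpoints, where search tools (/search/movie, /search/tv, /search/person, /search/company, /search/collection) resolve names to ids that the dependent tools then consume. A minimal Python sketch of how an agent might load this catalogue and execute one such chained call follows; it is an illustration, not the repository's agent code. The helper names load_tools and call_tmdb are hypothetical, and it assumes a TMDB v3 API key is available in a TMDB_API_KEY environment variable (TMDB v3 accepts the key as an api_key query parameter).

import json
import os

import requests  # third-party HTTP client: pip install requests

BASE_URL = "https://api.themoviedb.org/3"  # TMDB v3 API root


def load_tools(path="Unit2-Tools/data/tmdb_tool.json"):
    """Load the tool catalogue and index it by tool_name for lookup."""
    with open(path) as f:
        tools = json.load(f)
    return {tool["tool_name"]: tool for tool in tools}


def call_tmdb(input_str, api_key=None):
    """Execute a tool invocation string such as 'GET /movie/456/similar'.

    The string format matches the "input" field of each tool's Example.
    """
    api_key = api_key or os.environ["TMDB_API_KEY"]  # assumed env var
    method, path = input_str.split(" ", 1)
    assert method == "GET"  # every tool in this catalogue is a GET endpoint
    # Preserve any query string already present in the tool input
    # (e.g. /search/movie?query=Avatar) when appending the API key.
    sep = "&" if "?" in path else "?"
    resp = requests.get(f"{BASE_URL}{path}{sep}api_key={api_key}")
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    tools = load_tools()
    # Chained usage, as the tool descriptions prescribe: call /search/movie
    # first to obtain movie_id, then the dependent /movie/{movie_id}/similar.
    hits = call_tmdb("GET /search/movie?query=Avatar")
    movie_id = hits["results"][0]["id"]
    similar = call_tmdb(f"GET /movie/{movie_id}/similar")
    print([m["title"] for m in similar["results"][:5]])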