├── .env
├── LICENSE
├── README.md
├── chatbot.py
├── requirements.txt
└── utils.py

/.env:
--------------------------------------------------------------------------------
OPENAI_API_KEY=''
ACTIVELOOP_TOKEN=''
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Priyanka Dwivedi

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## Chat-with-your-code

This project is a codebase chatbot that lets users interact with their codebase through an OpenAI large language model (LLM). It uses a Streamlit app to provide a user-friendly chat interface.

### Features
* Users enter their OpenAI key and the link to their GitHub repository.
* The repository is cloned, chunked and embedded; LangChain is used to build a QA retriever so users can chat with their code.
* The chat interface lets users ask questions about and interact with the codebase.
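Under the hood, the app follows the standard LangChain retrieval pattern. The sketch below is a simplified, illustrative version of what `utils.py` does; the file path, dataset path and question are placeholders rather than values from this repo:

```python
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import DeepLake
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain

# Load one source file and split it into ~1000-character chunks
docs = TextLoader("cloned_repo/example.py", encoding="utf-8").load_and_split()
chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(docs)

# Embed the chunks and store them in a Deep Lake vector store
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
db = DeepLake(dataset_path="hub://<your-org>/<dataset-name>", embedding=embeddings)
db.add_documents(chunks)

# Build a conversational QA chain over the retriever and ask a question
qa = ConversationalRetrievalChain.from_llm(
    ChatOpenAI(model_name="gpt-3.5-turbo-0125"),
    retriever=db.as_retriever(),
)
print(qa.invoke({"question": "What does this repo do?", "chat_history": []})["answer"])
```
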
### Usage

To use this codebase chatbot, follow these steps:

1. Clone the repository:

```git clone https://github.com/example/repository.git```

2. Install the required dependencies:

```pip install -r requirements.txt```

3. Set your environment variables in the `.env` file:
   * Get your OpenAI API key and add it as `OPENAI_API_KEY`
   * Set up a free account on [Activeloop](https://www.activeloop.ai), which hosts the Deep Lake vector store, and add its token as `ACTIVELOOP_TOKEN`

4. Run the Streamlit app:

```streamlit run chatbot.py```

Access the chat interface by opening your web browser and navigating to http://localhost:8501.

Enter your OpenAI key and the link to your GitHub repository in the provided input fields.

The codebase will be chunked and embedded, and the chat interface will be displayed.

Ask questions or give instructions in natural language, and the chatbot will respond accordingly.

### Limitations
* The codebase chatbot relies on the OpenAI language model and its capabilities.
* Large codebases or repositories with complex structures may take longer to chunk and embed.
* The accuracy and quality of responses depend on the language model and the code embeddings.

### Future Improvements
* Integrate with external tools and services to provide more advanced codebase analysis and insights.

### Contributing
Contributions to this codebase chatbot project are welcome. If you find any issues or have suggestions for improvements, please open an issue or submit a pull request.

### License
This project is licensed under the MIT License.

### Acknowledgements
This project was inspired by the power of OpenAI's language models, LangChain, and the need for a more interactive and user-friendly codebase analysis tool.
Special thanks to the contributors and maintainers of the libraries and frameworks used in this project.
--------------------------------------------------------------------------------
/chatbot.py:
--------------------------------------------------------------------------------
import streamlit as st
import utils
import os

local = False
if local:
    from dotenv import load_dotenv
    load_dotenv()

st.title("Chat-with-your-codebase")

## Get some user inputs
if local:
    user_key = os.environ.get('OPENAI_API_KEY')
else:
    user_key = st.text_input("Enter your OpenAI Key", "")
    if user_key:
        os.environ['OPENAI_API_KEY'] = user_key

user_repo = st.text_input("Github Link to your public codebase", "https://github.com/facebookresearch/segment-anything.git")
if user_repo:
    st.write("You entered:", user_repo)

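# Streamlit re-executes this script from top to bottom on every interaction,
# so the expensive clone/parse/embed step below is wrapped in st.cache_resource:
# it runs once per repo link and the cached Embedder is reused across reruns.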
## Load the Github repo, then chunk and embed it
@st.cache_resource
def load_embedder(repo_link):
    embedder = utils.Embedder(repo_link)
    embedder.clone_repo()
    embedder.load_db()
    return embedder

with st.spinner("Cloning the repo, then parsing and embedding its content. This may take some time"):
    embedder = load_embedder(user_repo)
st.write("Done loading. Ready to take your questions")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("Type your question here."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Retrieve the assistant's answer from the QA chain
    response = embedder.retrieve_results(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
# Install these dependencies with:
# $ pip install -r requirements.txt
langchain
deeplake
streamlit
python-dotenv
openai
sentence-transformers
langchain-community
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import os
from queue import Queue

import deeplake
import git
from langchain.chains import ConversationalRetrievalChain
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.chat_models import ChatOpenAI
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import DeepLake

local = False
if local:
    from dotenv import load_dotenv
    load_dotenv()

model_name = "sentence-transformers/all-MiniLM-L6-v2"
model_kwargs = {"device": "cpu"}
allowed_extensions = ['.py', '.ipynb', '.md']


class Embedder:
    def __init__(self, git_link) -> None:
        self.git_link = git_link
        last_name = self.git_link.split('/')[-1]
        self.clone_path = last_name.split('.')[0]
        # Replace 'priyadwivedi' with your own Activeloop organization id
        self.deeplake_path = f"hub://priyadwivedi/{self.clone_path}"
        self.model = ChatOpenAI(model_name="gpt-3.5-turbo-0125")  # switch to 'gpt-4' for better answers
        self.hf = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
        # Keep only the two most recent (question, answer) pairs as chat history
        self.MyQueue = Queue(maxsize=2)

    def add_to_queue(self, value):
        if self.MyQueue.full():
            self.MyQueue.get()
        self.MyQueue.put(value)

    def clone_repo(self):
        if not os.path.exists(self.clone_path):
            # Clone the repository
            git.Repo.clone_from(self.git_link, self.clone_path)

    def extract_all_files(self):
        root_dir = self.clone_path
        self.docs = []
        for dirpath, dirnames, filenames in os.walk(root_dir):
            for file in filenames:
                file_extension = os.path.splitext(file)[1]
                if file_extension in allowed_extensions:
                    try:
                        loader = TextLoader(os.path.join(dirpath, file), encoding='utf-8')
                        self.docs.extend(loader.load_and_split())
                    except Exception:
                        # Skip files that cannot be read or decoded
                        pass

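    # CharacterTextSplitter below breaks each loaded document into ~1000-character
    # chunks with no overlap; each chunk is embedded separately in embed_deeplake.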
    def chunk_files(self):
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        self.texts = text_splitter.split_documents(self.docs)
        self.num_texts = len(self.texts)

    def embed_deeplake(self):
        # Alternative: pass OpenAIEmbeddings() here instead of the HuggingFace model
        db = DeepLake(dataset_path=self.deeplake_path, embedding=self.hf)
        db.add_documents(self.texts)
        ## Remove data from the cloned path
        self.delete_directory(self.clone_path)
        return db

    def delete_directory(self, path):
        if os.path.exists(path):
            for root, dirs, files in os.walk(path, topdown=False):
                for file in files:
                    file_path = os.path.join(root, file)
                    os.remove(file_path)
                for dir in dirs:
                    dir_path = os.path.join(root, dir)
                    os.rmdir(dir_path)
            os.rmdir(path)

    def load_db(self):
        exists = deeplake.exists(self.deeplake_path)
        if exists:
            ## Just load the existing DB
            self.db = DeepLake(
                dataset_path=self.deeplake_path,
                read_only=True,
                embedding=self.hf,
            )
        else:
            ## Create the DB, then load it
            self.extract_all_files()
            self.chunk_files()
            self.db = self.embed_deeplake()

        self.retriever = self.db.as_retriever()
        self.retriever.search_kwargs['distance_metric'] = 'cos'
        self.retriever.search_kwargs['fetch_k'] = 100
        self.retriever.search_kwargs['k'] = 3

    def retrieve_results(self, query):
        chat_history = list(self.MyQueue.queue)
        qa = ConversationalRetrievalChain.from_llm(
            self.model,
            chain_type="stuff",
            retriever=self.retriever,
            condense_question_llm=ChatOpenAI(temperature=0, model='gpt-3.5-turbo'),
        )
        result = qa.invoke({"question": query, "chat_history": chat_history})
        self.add_to_queue((query, result["answer"]))
        return result['answer']
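
if __name__ == "__main__":
    # Hypothetical smoke test, not part of the original file. It assumes
    # OPENAI_API_KEY and ACTIVELOOP_TOKEN are set and that deeplake_path
    # above points at a Deep Lake organization you can write to.
    embedder = Embedder("https://github.com/example/repository.git")
    embedder.clone_repo()
    embedder.load_db()
    print(embedder.retrieve_results("What does this repo do?"))
--------------------------------------------------------------------------------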