├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── LICENSE-2.0.txt ├── app.py ├── coderag ├── __init__.py ├── config.py ├── embeddings.py ├── index.py ├── monitor.py └── search.py ├── example.env ├── main.py ├── prompt_flow.py ├── readme.rst ├── requirements.txt ├── scripts ├── initialize_index.py └── run_monitor.py └── tests └── test_faiss.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore Python bytecode and cache 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # Ignore virtual environment directories 6 | .venv/ 7 | env/ 8 | venv/ 9 | 10 | # Ignore environment variables file 11 | .env 12 | 13 | # Ignore node_modules 14 | node_modules/ 15 | 16 | # Ignore FAISS index file 17 | *.faiss 18 | 19 | # Ignore Git directory 20 | .git/ 21 | 22 | # Ignore Streamlit temporary files 23 | .streamlit/ 24 | 25 | # Ignore logs and temporary files 26 | *.log 27 | *.tmp 28 | plan.md 29 | metadata.npy 30 | -------------------------------------------------------------------------------- /LICENSE-2.0.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from openai import OpenAI 3 | from coderag.config import OPENAI_API_KEY, OPENAI_CHAT_MODEL 4 | from prompt_flow import execute_rag_flow 5 | 6 | # Initialize the OpenAI client 7 | client = OpenAI(api_key=OPENAI_API_KEY) 8 | 9 | st.title("CodeRAG: Your Coding Assistant") 10 | 11 | # Initialize chat history 12 | if "messages" not in st.session_state: 13 | st.session_state.messages = [] 14 | 15 | # Display chat history 16 | for message in st.session_state.messages: 17 | with st.chat_message(message["role"]): 18 | st.markdown(message["content"]) 19 | 20 | # Chat input 21 | if prompt := st.chat_input("What is your coding question?"): 22 | st.session_state.messages.append({"role": "user", "content": prompt}) 23 | with st.chat_message("user"): 24 | st.markdown(prompt) 25 | 26 | with st.chat_message("assistant"): 27 | message_placeholder = st.empty() 28 | full_response = "" 29 | 30 | try: 31 | response = execute_rag_flow(prompt) 32 | message_placeholder.markdown(response) 33 | full_response = response 34 | except Exception as e: 35 | error_message = f"Error in RAG flow execution: {str(e)}" 36 | st.error(error_message) 37 | full_response = error_message 38 | 39 | st.session_state.messages.append({"role": "assistant", "content": full_response}) -------------------------------------------------------------------------------- /coderag/__init__.py: -------------------------------------------------------------------------------- 1 | # __init__.py -------------------------------------------------------------------------------- /coderag/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | 4 | # Load environment variables from the .env file 5 | load_dotenv() 6 | 7 | # === Environment Variables === 8 | # OpenAI API key and model settings (loaded from .env) 9 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 10 | OPENAI_EMBEDDING_MODEL = os.getenv("OPENAI_EMBEDDING_MODEL", "text-embedding-ada-002") # Default to ada-002 11 | OPENAI_CHAT_MODEL = os.getenv("OPENAI_CHAT_MODEL", "gpt-4") # Default to GPT-4 12 | 13 | # Embedding dimension (from .env or fallback) 14 | EMBEDDING_DIM = int(os.getenv("EMBEDDING_DIM", 1536)) # Default to 1536 if not in .env 15 | 16 | # Project directory (from .env) 17 | WATCHED_DIR = os.getenv("WATCHED_DIR", os.path.join(os.getcwd(), 'CodeRAG')) 18 | 19 | # Path to FAISS index (from .env or fallback) 20 | FAISS_INDEX_FILE = os.getenv("FAISS_INDEX_FILE", os.path.join(WATCHED_DIR, 'coderag_index.faiss')) 21 | 22 | # === Project-Specific Configuration === 23 | # Define the root directory of the project 24 | ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) 25 | 26 | # Additional directories to ignore during indexing (these can remain static) 27 | IGNORE_PATHS = [ 28 | os.path.join(WATCHED_DIR, ".venv"), 29 | os.path.join(WATCHED_DIR, "node_modules"), 30 | os.path.join(WATCHED_DIR, "__pycache__"), 31 | os.path.join(WATCHED_DIR, ".git"), 32 | os.path.join(WATCHED_DIR, "tests"), 33 | ] 34 | -------------------------------------------------------------------------------- /coderag/embeddings.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | import numpy as np 3 | from coderag.config import OPENAI_API_KEY, OPENAI_EMBEDDING_MODEL 4 | 5 | # Initialize the OpenAI client 6 | client = OpenAI(api_key=OPENAI_API_KEY) 7 | 8 | def generate_embeddings(text): 9 | """Generate embeddings using the updated OpenAI API.""" 10 | try: 11 | response = client.embeddings.create( 12 | model=OPENAI_EMBEDDING_MODEL, 13 | input=[text] # Input should be a list of strings 14 | ) 15 | # Extract the embedding from the response 16 | embeddings = response.data[0].embedding 17 | return np.array(embeddings).astype('float32').reshape(1, -1) 18 | except Exception as e: 19 | print(f"Error generating embeddings with OpenAI: {e}") 20 | return None -------------------------------------------------------------------------------- /coderag/index.py: -------------------------------------------------------------------------------- 1 | import os 2 | import faiss 3 | import numpy as np 4 | from coderag.config import EMBEDDING_DIM, FAISS_INDEX_FILE, WATCHED_DIR 5 | 6 | index = faiss.IndexFlatL2(EMBEDDING_DIM) 7 | metadata = [] 8 | 9 | def clear_index(): 10 | """Delete the FAISS index and metadata files if they exist, and reinitialize the index.""" 11 | global index, metadata 12 | 13 | # Delete the FAISS index file 14 | if os.path.exists(FAISS_INDEX_FILE): 15 | os.remove(FAISS_INDEX_FILE) 16 | print(f"Deleted FAISS index file: {FAISS_INDEX_FILE}") 17 | 18 | # Delete the metadata file 19 | metadata_file = "metadata.npy" 20 | if os.path.exists(metadata_file): 21 | os.remove(metadata_file) 22 | print(f"Deleted metadata file: {metadata_file}") 23 | 24 | # Reinitialize the FAISS index and metadata 25 | index = faiss.IndexFlatL2(EMBEDDING_DIM) 26 | metadata = [] 27 | print("FAISS index and metadata cleared and reinitialized.") 28 | 29 | def add_to_index(embeddings, full_content, filename, filepath): 30 | global index, metadata 31 | 32 | if embeddings.shape[1] != index.d: 33 | raise ValueError(f"Embedding dimension {embeddings.shape[1]} does not match FAISS index dimension {index.d}") 34 | 35 | # Convert absolute filepath to relative path 36 | relative_filepath = os.path.relpath(filepath, WATCHED_DIR) 37 | 38 | index.add(embeddings) 39 | metadata.append({ 40 | "content": full_content, 41 | "filename": filename, 42 | "filepath": relative_filepath # Store relative filepath 43 | }) 44 | 45 | def save_index(): 46 | faiss.write_index(index, FAISS_INDEX_FILE) 47 | with open("metadata.npy", "wb") as f: 48 | np.save(f, metadata) 49 | 50 | def load_index(): 51 | global index, metadata 52 | index = faiss.read_index(FAISS_INDEX_FILE) 53 | with open("metadata.npy", "rb") as f: 54 | metadata = np.load(f, allow_pickle=True).tolist() 55 | return index 56 | 57 | def get_metadata(): 58 | return metadata 59 | 60 | def retrieve_vectors(n=5): 61 | n = min(n, index.ntotal) 62 | vectors = np.zeros((n, EMBEDDING_DIM), dtype=np.float32) 63 | for i in range(n): 64 | vectors[i] = index.reconstruct(i) 65 | return vectors 66 | 67 | def inspect_metadata(n=5): 68 | metadata = get_metadata() 69 | print(f"Inspecting the first {n} metadata entries:") 70 | for i, data in enumerate(metadata[:n]): 71 | print(f"Entry {i}:") 72 | print(f"Filename: {data['filename']}") 73 | print(f"Filepath: {data['filepath']}") 74 | print(f"Content: {data['content'][:100]}...") # Show the first 100 characters 75 | print() 76 | -------------------------------------------------------------------------------- /coderag/monitor.py: -------------------------------------------------------------------------------- 1 | import time 2 | import os 3 | from watchdog.observers import Observer 4 | from watchdog.events import FileSystemEventHandler 5 | from coderag.index import add_to_index, save_index 6 | from coderag.embeddings import generate_embeddings 7 | from coderag.config import WATCHED_DIR, IGNORE_PATHS 8 | 9 | def should_ignore_path(path): 10 | """Check if the given path should be ignored based on the IGNORE_PATHS list.""" 11 | for ignore_path in IGNORE_PATHS: 12 | if path.startswith(ignore_path): 13 | return True 14 | return False 15 | 16 | class CodeChangeHandler(FileSystemEventHandler): 17 | def on_modified(self, event): 18 | if event.is_directory or should_ignore_path(event.src_path): 19 | return 20 | 21 | if event.src_path.endswith(".py"): 22 | print(f"Detected change in file: {event.src_path}") 23 | with open(event.src_path, 'r', encoding='utf-8') as f: 24 | full_content = f.read() 25 | embeddings = generate_embeddings(full_content) 26 | if embeddings is not None and len(embeddings) > 0: 27 | filename = os.path.basename(event.src_path) 28 | add_to_index(embeddings, full_content, filename, event.src_path) 29 | save_index() 30 | print(f"Updated FAISS index for file: {event.src_path}") 31 | 32 | def start_monitoring(): 33 | event_handler = CodeChangeHandler() 34 | observer = Observer() 35 | observer.schedule(event_handler, path=WATCHED_DIR, recursive=True) 36 | observer.start() 37 | print(f"Started monitoring {WATCHED_DIR}...") 38 | 39 | try: 40 | while True: 41 | time.sleep(1) 42 | except KeyboardInterrupt: 43 | observer.stop() 44 | observer.join() 45 | -------------------------------------------------------------------------------- /coderag/search.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from coderag.index import load_index, get_metadata 3 | from coderag.embeddings import generate_embeddings 4 | 5 | def search_code(query, k=5): 6 | """Search the FAISS index using a text query.""" 7 | index = load_index() # Load the FAISS index 8 | query_embedding = generate_embeddings(query) # Generate embedding for the query 9 | 10 | if query_embedding is None: 11 | print("Failed to generate query embedding.") 12 | return [] 13 | 14 | # Perform the search in FAISS 15 | distances, indices = index.search(query_embedding, k) 16 | 17 | results = [] 18 | for i, idx in enumerate(indices[0]): # Iterate over the search results 19 | if idx < len(get_metadata()): # Ensure the index is within bounds 20 | file_data = get_metadata()[idx] 21 | results.append({ 22 | "filename": file_data["filename"], 23 | "filepath": file_data["filepath"], 24 | "content": file_data["content"], 25 | "distance": distances[0][i] # Access distance using the correct index 26 | }) 27 | else: 28 | print(f"Warning: Index {idx} is out of bounds for metadata with length {len(get_metadata())}") 29 | return results 30 | -------------------------------------------------------------------------------- /example.env: -------------------------------------------------------------------------------- 1 | # OpenAI API Configuration 2 | OPENAI_API_KEY=sk-1234567890abcdefghijklmnopqrstuvwxyz1234 3 | OPENAI_EMBEDDING_MODEL=text-embedding-ada-002 4 | OPENAI_CHAT_MODEL=gpt-4 5 | 6 | # Project Directory Configuration 7 | WATCHED_DIR=/home/user/projects/my_codebase 8 | 9 | # FAISS Configuration 10 | FAISS_INDEX_FILE=/home/user/projects/coderag/faiss_index.bin 11 | EMBEDDING_DIM=1536 -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import atexit 4 | import warnings 5 | from coderag.index import clear_index, add_to_index, save_index 6 | from coderag.embeddings import generate_embeddings 7 | from coderag.config import WATCHED_DIR 8 | from coderag.monitor import start_monitoring, should_ignore_path 9 | 10 | # Configure logging 11 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 12 | 13 | # Suppress transformers warnings 14 | warnings.filterwarnings("ignore", category=FutureWarning, module="transformers.tokenization_utils_base") 15 | 16 | def full_reindex(): 17 | """Perform a full reindex of the entire codebase.""" 18 | logging.info("Starting full reindexing of the codebase...") 19 | files_processed = 0 20 | for root, _, files in os.walk(WATCHED_DIR): 21 | if should_ignore_path(root): # Check if the directory should be ignored 22 | logging.info(f"Ignoring directory: {root}") 23 | continue 24 | 25 | for file in files: 26 | filepath = os.path.join(root, file) 27 | if should_ignore_path(filepath): # Check if the file should be ignored 28 | logging.info(f"Ignoring file: {filepath}") 29 | continue 30 | 31 | if file.endswith(".py"): 32 | logging.info(f"Processing file: {filepath}") 33 | try: 34 | with open(filepath, 'r', encoding='utf-8') as f: 35 | full_content = f.read() 36 | 37 | embeddings = generate_embeddings(full_content) # Generate embeddings 38 | if embeddings is not None: 39 | add_to_index(embeddings, full_content, file, filepath) 40 | else: 41 | logging.warning(f"Failed to generate embeddings for {filepath}") 42 | files_processed += 1 43 | except Exception as e: 44 | logging.error(f"Error processing file {filepath}: {e}") 45 | 46 | save_index() 47 | logging.info(f"Full reindexing completed. {files_processed} files processed.") 48 | 49 | def main(): 50 | # Completely clear the FAISS index and metadata 51 | clear_index() 52 | 53 | # Perform a full reindex of the codebase 54 | full_reindex() 55 | 56 | # Start monitoring the directory for changes 57 | start_monitoring() 58 | 59 | if __name__ == "__main__": 60 | main() -------------------------------------------------------------------------------- /prompt_flow.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | from coderag.config import OPENAI_API_KEY, OPENAI_CHAT_MODEL 3 | from coderag.search import search_code 4 | 5 | client = OpenAI(api_key=OPENAI_API_KEY) 6 | 7 | SYSTEM_PROMPT = """ 8 | You are an expert coding assistant. Your task is to help users with their question. Use the retrieved code context to inform your responses, but feel free to suggest better solutions if appropriate. 9 | """ 10 | 11 | PRE_PROMPT = """ 12 | Based on the user's query and the following code context, provide a helpful response. If improvements can be made, suggest them with explanations. 13 | 14 | User Query: {query} 15 | 16 | Retrieved Code Context: 17 | {code_context} 18 | 19 | Your response: 20 | """ 21 | 22 | def execute_rag_flow(user_query): 23 | try: 24 | # Perform code search 25 | search_results = search_code(user_query) 26 | 27 | if not search_results: 28 | return "No relevant code found for your query." 29 | 30 | # Prepare code context 31 | code_context = "\n\n".join([ 32 | f"File: {result['filename']}\n{result['content']}" 33 | for result in search_results[:3] # Limit to top 3 results 34 | ]) 35 | 36 | # Construct the full prompt 37 | full_prompt = PRE_PROMPT.format(query=user_query, code_context=code_context) 38 | 39 | # Generate response using OpenAI 40 | response = client.chat.completions.create( 41 | model=OPENAI_CHAT_MODEL, 42 | messages=[ 43 | {"role": "system", "content": SYSTEM_PROMPT}, 44 | {"role": "user", "content": full_prompt} 45 | ], 46 | temperature=0.3, 47 | max_tokens=4000 48 | ) 49 | 50 | return response.choices[0].message.content.strip() 51 | 52 | except Exception as e: 53 | return f"Error in RAG flow execution: {e}" -------------------------------------------------------------------------------- /readme.rst: -------------------------------------------------------------------------------- 1 | Important Note 2 | ================== 3 | 4 | This POC was nice for it's time. However tools like Cursor and Windsurf are now applying this principle embedded in the IDE. 5 | 6 | Project Motivation 7 | ================== 8 | 9 | This project came from a simple idea: what if you could provide an entire codebase to an LLM instead of just small pieces? 10 | Most coding assistants, like co-pilots, work on a limited scope, but I wanted something that could handle the full context of a project. 11 | 12 | By integrating the full codebase with Retrieval-Augmented Generation (RAG), this POC aims to improve the quality and relevance of 13 | code suggestions. The goal is to see how having the complete code available for real-time querying can enhance productivity. 14 | 15 | CodeRAG 16 | ======= 17 | CodeRAG is an AI-powered code retrieval and augmentation tool that leverages OpenAI's models (such as ``gpt-4`` or ``gpt-3.5-turbo``) for real-time codebase querying, indexing, and improvement. This project integrates a Retrieval-Augmented Generation (RAG) system to help developers seamlessly search through code, receive suggestions, and implement improvements. 18 | 19 | Features 20 | -------- 21 | 22 | - **Real-time Codebase Indexing**: Automatically indexes code files upon changes, with real-time updates. 23 | - **Vector Database Search**: Utilizes FAISS or a similar vector database for fast, efficient code search using embeddings. 24 | - **Conversational Coding Assistance**: Integrates OpenAI's GPT models to provide contextual code suggestions, improvements, and fixes. 25 | - **Configurable Settings**: Environment-specific settings are managed using a ``.env`` file for API keys, model selection, and directories. 26 | 27 | Tech Stack 28 | ---------- 29 | 30 | - **OpenAI API**: Leverages GPT-4o (or any other OpenAI model) for conversational and coding improvements. 31 | - **Python**: Core functionality and API interactions. 32 | - **FAISS (Facebook AI Similarity Search)**: For vector-based searching. 33 | - **python-dotenv**: For managing environment variables. 34 | - **Retrieval-Augmented Generation (RAG)**: Combines search and generative models. 35 | 36 | Setup Instructions 37 | ------------------ 38 | 39 | Prerequisites 40 | ^^^^^^^^^^^^^ 41 | 42 | - **Python 3.8+** 43 | - **OpenAI API Key** (You can get one `here `_) 44 | - **FAISS** 45 | 46 | Step 1: Clone the Repository 47 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 48 | 49 | .. code-block:: bash 50 | 51 | git clone https://github.com/yourusername/CodeRAG.git 52 | cd CodeRAG 53 | 54 | Step 2: Install Dependencies 55 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 56 | 57 | Create a virtual environment (recommended): 58 | 59 | .. code-block:: bash 60 | 61 | python3 -m venv venv 62 | source venv/bin/activate # On Windows use `venv\Scripts\activate` 63 | 64 | Install required packages: 65 | 66 | .. code-block:: bash 67 | 68 | pip install -r requirements.txt 69 | 70 | Step 3: Configure Environment Variables 71 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 72 | 73 | Create a ``.env`` file in the root of the project and add the following variables: 74 | 75 | .. code-block:: bash 76 | 77 | OPENAI_API_KEY=your_openai_api_key 78 | OPENAI_EMBEDDING_MODEL=text-embedding-ada-002 79 | OPENAI_CHAT_MODEL=gpt-4o 80 | WATCHED_DIR=path_to_your_code_directory 81 | FAISS_INDEX_FILE=path_to_faiss_index 82 | EMBEDDING_DIM=1536 # Modify if you're using a different embedding model 83 | 84 | Step 4: Run the Application 85 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 86 | 87 | 1. **Start the Backend**: 88 | 89 | To start the backend (indexing, embeddings, and monitoring): 90 | 91 | .. code-block:: bash 92 | 93 | python main.py 94 | 95 | 2. **Start the Frontend**: 96 | 97 | To launch the Streamlit UI: 98 | 99 | .. code-block:: bash 100 | 101 | streamlit run app.py 102 | 103 | Usage 104 | ----- 105 | 106 | 1. **Ask a Question**: Type your question or code request into the interface. The model will search the indexed codebase and provide suggestions or improvements. 107 | 2. **Review Suggestions**: You'll receive a merged or fixed version of the code based on the model's analysis. 108 | 3. **Conversational History**: The system keeps track of your queries and the AI responses for better context in future interactions. 109 | 110 | Project Structure 111 | ----------------- 112 | 113 | - ``main.py``: The main script to run the application. 114 | - ``prompt_flow.py``: Handles querying OpenAI's API and manages the search and conversational history. 115 | - ``coderag/config.py``: Stores configuration and environment variables. 116 | - ``coderag/search.py``: Manages vector database (FAISS) searches for relevant code snippets. 117 | - ``.env``: Holds environment-specific settings (OpenAI API keys, model configuration, etc.). 118 | - ``requirements.txt``: Lists the Python dependencies needed to run the project. 119 | 120 | Contributing 121 | ------------ 122 | 123 | Feel free to fork this repository, open issues, and submit pull requests. 124 | 125 | 1. Fork the repository. 126 | 2. Create your feature branch (``git checkout -b feature/your-feature``). 127 | 3. Commit your changes (``git commit -am 'Add new feature'``). 128 | 4. Push to the branch (``git push origin feature/your-feature``). 129 | 5. Open a pull request. 130 | 131 | License 132 | ------- 133 | 134 | This project is licensed under the Apache License. See the LICENSE file for details. 135 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Neverdecel/CodeRAG/83977e32cba106fbd4abc31a9e2470961414cb50/requirements.txt -------------------------------------------------------------------------------- /scripts/initialize_index.py: -------------------------------------------------------------------------------- 1 | from coderag.index import save_index 2 | 3 | def initialize_index(): 4 | save_index() 5 | print("FAISS index initialized and saved.") 6 | 7 | if __name__ == "__main__": 8 | initialize_index() 9 | -------------------------------------------------------------------------------- /scripts/run_monitor.py: -------------------------------------------------------------------------------- 1 | from coderag.monitor import start_monitoring 2 | 3 | if __name__ == "__main__": 4 | start_monitoring() 5 | -------------------------------------------------------------------------------- /tests/test_faiss.py: -------------------------------------------------------------------------------- 1 | import faiss 2 | from coderag.index import load_index, retrieve_vectors, inspect_metadata, add_to_index, save_index, clear_index 3 | from coderag.embeddings import generate_embeddings 4 | import os 5 | 6 | def test_faiss_index(): 7 | # Clear the index before testing 8 | clear_index() 9 | 10 | # Example text to generate embeddings 11 | example_text = "This is a test document to be indexed." 12 | 13 | # Generate embeddings 14 | embeddings = generate_embeddings(example_text) 15 | if embeddings is None: 16 | print("Embedding generation failed.") 17 | return 18 | 19 | # Add to index 20 | add_to_index(embeddings, example_text, "test_file.py", "test_file.py") 21 | save_index() 22 | 23 | # Load the index 24 | index = load_index() 25 | 26 | # Check if index has vectors 27 | assert index.ntotal > 0, "FAISS index is empty. No vectors found!" 28 | print(f"FAISS index has {index.ntotal} vectors.") 29 | 30 | # Retrieve and inspect vectors 31 | vectors = retrieve_vectors(5) 32 | print(f"Retrieved {len(vectors)} vectors from the index.") 33 | 34 | # Inspect metadata 35 | inspect_metadata(5) 36 | 37 | if __name__ == "__main__": 38 | test_faiss_index() 39 | --------------------------------------------------------------------------------