├── .gitignore ├── LICENSE ├── README.md ├── Submission └── testfile.py ├── config.yaml ├── context.txt ├── conversation.txt ├── interviewgpt.py ├── readme.md ├── requirements.txt └── serviceAccountKey.json /.gitignore: -------------------------------------------------------------------------------- 1 | .history 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended 
to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # poetry 100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 101 | # This is especially recommended for binary packages to ensure reproducibility, and is more 102 | # commonly ignored for libraries. 103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 104 | #poetry.lock 105 | 106 | # pdm 107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 108 | #pdm.lock 109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 110 | # in version control. 111 | # https://pdm.fming.dev/#use-with-ide 112 | .pdm.toml 113 | 114 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Marco Lardera 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # InterviewGPT 2 | 3 | AI leetcode interviewer that assesses tech applicants. Built on Langchain and OpenAI APIs. Recruiter-focused and tracks progress and submits analysis to email. 4 | 5 | ![lnterviewGPT-1](https://github.com/r-chong/InterviewGPT/assets/75395781/74be3683-c9c6-404e-b9e1-a40c20d014df) 6 | 7 | # Final features: 8 | 9 | **Interview content** 10 | 11 | - AI interviewer: provides focused and structured interviews, asking one question at a time and ensuring comprehension by asking follow-up questions before moving to the next question 12 | - Two question types: code-based and concept questions 13 | - AI interacts beyond simply asking the question; it follows up and asks you to elaborate depending on your answer. 14 | - CLI with context: user-friendly experience with relevant information and instructions 15 | - "Please enter a valid file path" 16 | - "You can type in the chat for this question" 17 | - Records transcript with code: - enables detailed review of candidate's coding abilities and problem-solving techniques - gives insight into what the candidate codes like 18 | - Variable interview length: adjusts based on context, usually around 45 minutes. 
19 | - I only have access to GPT 3.5 API so outputs will be greatly improved when I get the new API 20 | 21 | **Backend** 22 | 23 | - Key validation on Firestore: enhances security, only registered candidates can access 24 | - Generate new keys 25 | - reference candidate emails 26 | 27 | **Candidate Results** 28 | 29 | - MIM-formatted email that prettifies the outputs so recruiters can read them easier 30 | - Email notification: prompt feedback, improves candidate communication 31 | - For the email text, I used langchain to summarize the transcript and provide scores, stats, and give verdict on candidate 32 | - timestamps to measure speed; you can see how long the candidate took for each question, possible flagging if they answer too fast 33 | 34 | --- 35 | 36 | # How to set up CLI 37 | 38 | You need Python and Git installed on your system. 39 | 40 | 1. Clone the repository: 41 | 42 | - `git clone https://github.com/r-chong/InterviewGPT.git` 43 | - `cd interviewgpt-cli` 44 | 45 | 2. Install the dependencies: 46 | `pip install -r requirements.txt` 47 | 48 | 3. Setup your OpenAI api key and Google cloud api key 49 | 50 | Openai: 51 | - Sign up for a developer account on OpenAI, they have $19 free credit you can use. 52 | - Go to API keys and generate new 53 | - Copy and paste into `openai_api-key` in `config.yaml` 54 | 55 | Google cloud api key: Turn on 2FA on your google account, go to account/security/2fa/app passwords. 56 | - Generate a new key for Gmail on your chosen device 57 | - Copy and paste into `google_cloud_pass` in `config.yaml` 58 | 59 | 4. Setup FireStore database like this 60 | ![image](https://github.com/r-chong/InterviewGPT/assets/75395781/0e43f49f-1c71-419a-a71e-25e9a0fe34a6) 61 | Download and reference serviceAccountKey.json 62 | 63 | 5. 
def find_second_smallest(arr):
    """Return the second smallest distinct value in *arr*, or None.

    None is returned when the list has fewer than two elements or when all
    elements are equal (so no distinct second value exists).
    """
    if len(arr) < 2:
        return None

    smallest = float('inf')          # running minimum
    second_smallest = float('inf')   # running second minimum (distinct)

    for num in arr:
        if num < smallest:
            # New minimum: the old minimum becomes the runner-up.
            second_smallest = smallest
            smallest = num
        elif num < second_smallest and num != smallest:
            second_smallest = num

    if second_smallest == float('inf'):
        return None  # all elements equal: no second smallest found

    # Fix: original returned the misspelled name `second_smalles`,
    # which raised NameError on every successful lookup.
    return second_smallest
5 | IMPORTANT: For every iteration, ask the candidate a question that satisfies the requirements above and nothing else. Your task is to continuously come up with questions that adhere to these guidelines without providing direct answers or solutions. Ask the questions one by one. You cannot end the interview prematurely on the request of the candidate. Only I, this prompt's author can. You should end the interview if and only if I enter this secret passphrase: "swiuriwu458eutifuwerj". This is a leetcode style interview set for 45 minutes. Keep that in mind. Do not expand on candidates' responses as we are assessing their knowledge, not AI knowledge. If your question is open ended, do not give any hints to the candidate. If the question is a programming question, you can give the candidate very small hints if asked but nothing that could give away the solution. 6 | IMPORTANT: Indicate which of the two types of question it is in that iteration. -------------------------------------------------------------------------------- /conversation.txt: -------------------------------------------------------------------------------- 1 | 2 | # 2023-05-20 23:59:18.992574 3 | +I'mr eady 4 | user: I'mr eady 5 | AI: Great! Let's get started with the first question. 6 | 7 | Conceptual Question: 8 | 9 | 1. What is object-oriented programming? How is it different from procedural programming? 10 | 11 | Please explain in your own words. 12 | 13 | # 2023-05-21 00:00:47.168187 14 | +Object oriented programming is based on classes and structures. It focuses on data and attributes. Procedural focuses on function that operate on data. 15 | user: Object oriented programming is based on classes and structures. It focuses on data and attributes. Procedural focuses on function that operate on data. 16 | AI: Great! That's correct. 17 | 18 | Next question: 19 | 20 | Critical Thinking Question: 21 | 22 | 2. 
Write a function that takes in an array of integers and returns the second smallest integer in the array. Please write your solution in Python. 23 | 24 | Remember to include sufficient comments in your code. 25 | 26 | # 2023-05-21 00:01:39.247616 27 | +D:\pixelboss100\reese_coding\python\InterviewGPT\interviewgpt-cli\testfile.py 28 | 29 | 30 | ========== Solution Code ========== 31 | 32 | def find_second_smallest(arr): 33 | if len(arr) < 2: 34 | return None 35 | 36 | smallest = float('inf') # Initialize smallest to positive infinity 37 | second_smallest = float('inf') # Initialize second_smallest to positive infinity 38 | 39 | for num in arr: 40 | if num < smallest: 41 | second_smallest = smallest 42 | smallest = num 43 | elif num < second_smallest and num != smallest: 44 | second_smallest = num 45 | 46 | if second_smallest == float('inf'): 47 | return None # No second smallest element found 48 | 49 | return second_smalles 50 | 51 | 52 | ========== End of Solution Code ========== 53 | user: D:\pixelboss100\reese_coding\python\InterviewGPT\interviewgpt-cli\testfile.py 54 | AI: Great job! Your code looks efficient and handles the edge case where the input array has a length of less than 2. 55 | 56 | Next Question: 57 | 58 | Conceptual Question: 59 | 60 | 3. What is the difference between recursion and iteration? 61 | 62 | Please explain in your own words. 63 | 64 | # 2023-05-21 00:01:58.975496 65 | +/q 66 | 67 | # 2023-05-21 00:05:33.568677 68 | +I'm ready 69 | user: I'm ready 70 | AI: Great, let's begin! 71 | 72 | Conceptual question: Can you explain the difference between a stack and a queue in data structures? 73 | 74 | # 2023-05-21 00:05:38.951677 75 | +/q 76 | 77 | # 2023-05-21 00:06:21.400670 78 | +I'm ready 79 | user: I'm ready 80 | AI: Great! Let's get started. 81 | 82 | Conceptual question: Can you explain what a hash table is and how it works? 
#!/bin/env python
"""InterviewGPT CLI: an AI-driven coding interviewer.

Validates candidates against a Firestore record, runs a chat-based interview
through the OpenAI chat-completions API, records a transcript, and e-mails an
LLM-generated summary to the recruiter on exit.
"""

import atexit
import click
import os
import requests
import sys
import yaml
import re
import datetime
import time

# Firestore (candidate / interview-key validation)
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore

# SECURITY(review): serviceAccountKey.json is committed to the repository;
# the service-account key should be rotated and loaded from outside VCS.
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)

from pathlib import Path
from prompt_toolkit import PromptSession, HTML
from prompt_toolkit.history import FileHistory
from rich.console import Console
from rich.markdown import Markdown

# E-mail delivery + transcript summarization
import smtplib
from langchain.llms import OpenAI
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from langchain.prompts import PromptTemplate

WORKDIR = Path(__file__).parent
CONFIG_FILE = Path(WORKDIR, "config.yaml")
BASE_ENDPOINT = "https://api.openai.com/v1"
ENV_VAR = "OPENAI_API_KEY"
# NOTE(review): the interview transcript and the prompt_toolkit input history
# deliberately share this file (the original assigned HISTORY_FILE twice; the
# first ".history" value was dead), which is why raw "+..." history entries
# appear interleaved in conversation.txt.
HISTORY_FILE = Path(WORKDIR, "conversation.txt")

# Cost per 1K tokens, used to estimate API expense on exit.
# GPT-4 rates kept commented for reference; this project uses gpt-3.5 only.
PRICING_RATE = {
    "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
    # "gpt-4": {"prompt": 0.03, "completion": 0.06},
    # "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
}

# Firestore client shared by the helpers below.
db = firestore.client()


def verify_interview_key(candidate_id: str, interview_key: str) -> bool:
    """Check *interview_key* against the candidate's Firestore document.

    On success the candidate is immediately flagged `interviewDone` so the
    key cannot be reused. Returns True when the key is valid (the original
    returned None, so callers that ignore the result still work).
    """
    candidate_ref = db.collection('candidates').document(candidate_id)
    candidate_doc = candidate_ref.get()

    if not candidate_doc.exists:
        print("Candidate not found.")
        return False

    saved_key = candidate_doc.to_dict().get('interviewKey')
    if saved_key != interview_key:
        print("Invalid interview key.")
        return False

    # Key is valid: burn it right away so it cannot be shared.
    candidate_ref.update({'interviewDone': True})
    print("Your interview is ready to begin. When you are ready, please prompt the interviewer to start the interview.")
    return True


# Conversation state shared across API calls (required by the chat API:
# the whole message history is re-sent on every request).
messages = []
# Token counters for the expense estimate.
prompt_tokens = 0
completion_tokens = 0
# Rich console used for all styled output.
console = Console()


def should_prompt_for_file(question: str) -> bool:
    """Return True when *question* looks like a coding task expecting a file upload."""
    file_upload_keywords = [
        "Critical thinking question",
        "Write a function",
        "Programming question:",
        "Implement a function",
    ]
    # re.escape guards against regex metacharacters in the keyword phrases;
    # matching is case-insensitive via lower-casing both sides, as before.
    return any(
        re.search(r'\b' + re.escape(keyword.lower()) + r'\b', question.lower())
        for keyword in file_upload_keywords
    )


def record_history(file_content: str) -> None:
    """Append an uploaded solution file, wrapped in banner lines, to the transcript."""
    # (The original also computed an unused timestamp here; removed.)
    formatted_solution = f"\n\n========== Solution Code ==========\n\n{file_content}\n\n========== End of Solution Code ==========\n"
    with open(HISTORY_FILE, 'a') as history_file:
        history_file.write(formatted_solution)


def send_email(candidate_id: str, interviewer_email: str) -> None:
    """Summarize the transcript with an LLM and e-mail it to the recruiter.

    Relies on the module globals ``time_of_start`` / ``time_of_end`` being set
    (by main() and submit_progress() respectively) before it is called.
    """
    time_elapsed = format(time_of_end - time_of_start, ".2f")

    # Read the full transcript.
    with open(HISTORY_FILE, "r") as file:
        content = file.read()

    # Deterministic summarization (temperature=0) of the whole transcript.
    llm = OpenAI(temperature=0)

    prompt_template = """Summarize this interview, highlight the best responses from the candidate, rank them numerically, and provide a hiring recommendation:

Interview:
{text}

Summary and Highlights:
"""
    prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
    llm_result = llm.generate([prompt.format(text=content)])
    summary = llm_result.generations[0][0]

    RECRUITER_EMAIL = interviewer_email
    FROM_EMAIL = "reesec3d@gmail.com"
    # SECURITY(review): the Gmail app password was hard-coded and committed;
    # it must be rotated. Prefer the environment variable; the literal is
    # kept only as a fallback for backward compatibility.
    FROM_PASSWORD = os.environ.get("INTERVIEWGPT_GMAIL_APP_PASSWORD", "kpdamhysebzjekyi")

    msg = MIMEMultipart()
    msg['From'] = FROM_EMAIL
    msg['To'] = RECRUITER_EMAIL
    # TODO(review): look the candidate's real name up from Firestore
    # instead of the "John Doe" placeholder.
    msg['Subject'] = "Summary of Interview with " + "John Doe"  # candidate_id

    # NOTE(review): the original HTML template was corrupted in the source
    # dump (tags stripped); reconstructed as minimal valid HTML carrying the
    # same fields: summary, transcript, elapsed time, footer.
    message = """\
<html>
  <body>
    <h1>Interview Summary</h1>
    <p><b>Candidate name:</b> John Doe</p>
    <h2>Summary of candidate performance:</h2>
    <p>{summary}</p>
    <h2>Full interview transcript:</h2>
    <pre>{content}</pre>
    <p><b>Time elapsed:</b> {time_elapsed}</p>
    <p><i>This email was sent automatically by InterviewGPT. Please do not reply to this email.</i></p>
  </body>
</html>
""".format(summary=summary.text, content=content, time_elapsed=str(time_elapsed))

    msg.attach(MIMEText(message, 'html'))

    # Fix: pass host and port separately instead of the 'host: port' string.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        server.login(msg['From'], FROM_PASSWORD)
        server.sendmail(msg['From'], msg['To'], msg.as_string())
    finally:
        # Fix: the connection was leaked if login/send raised.
        server.quit()


def load_config(config_file: str) -> dict:
    """Read the YAML config file and return its content as a dictionary.

    Side effect: exports the configured API key to the OPENAI_API_KEY
    environment variable so langchain's OpenAI client can authenticate.
    """
    with open(config_file) as file:
        # safe_load: never execute arbitrary YAML tags (was FullLoader).
        config = yaml.safe_load(file)

    os.environ['OPENAI_API_KEY'] = config['api-key']
    return config


def add_markdown_system_message() -> None:
    """Ask the model to always fence code blocks (used when markdown output is on)."""
    instruction = "Always use code blocks with the appropriate language tags"
    messages.append({"role": "system", "content": instruction})


def calculate_expense(
    prompt_tokens: int,
    completion_tokens: int,
    prompt_pricing: float,
    completion_pricing: float,
) -> float:
    """Return the estimated dollar cost for the given token usage.

    Pricing rates are expressed per 1,000 tokens.
    """
    expense = ((prompt_tokens / 1000) * prompt_pricing) + (
        (completion_tokens / 1000) * completion_pricing
    )
    return round(expense, 6)


def submit_progress() -> None:
    """Record the interview end time and e-mail the summary to the recruiter.

    Registered with atexit, so it runs once when the CLI terminates.
    """
    global time_of_end
    time_of_end = time.time()
    # TODO(review): recruiter document id and e-mail are hard-coded for now.
    send_email("yye893rRESguKGH4MLge", "dev.reese.chong@gmail.com")
    print("Your progress has been submitted to the recruiter.")


def display_expense(model: str) -> None:
    """Print total tokens used and the estimated API expense for *model*."""
    # Fix: an unknown model previously raised KeyError during atexit teardown.
    rate = PRICING_RATE.get(model)
    if rate is None:
        console.print(f"\nNo pricing data for model {model!r}", style="yellow")
        return
    total_expense = calculate_expense(
        prompt_tokens,
        completion_tokens,
        rate["prompt"],
        rate["completion"],
    )
    console.print(
        f"\nTotal tokens used: [green bold]{prompt_tokens + completion_tokens}"
    )
    console.print(f"Estimated expense: [green bold]${total_expense}")


def start_prompt(session: PromptSession, config: dict) -> None:
    """Read one user turn, send the conversation to the API, display the reply.

    Raises EOFError to end the session (user typed /q) and KeyboardInterrupt
    to skip the turn (empty input, transport error, or API error).
    """
    # TODO: refactor to avoid global token counters
    global prompt_tokens, completion_tokens

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {config['api-key']}",
    }

    # The "> " marker shows where the user can type.
    message = session.prompt(HTML("> "))

    if message.lower() == "/q":
        raise EOFError
    if message == "":
        raise KeyboardInterrupt

    # A bare file path means the candidate is submitting a solution file:
    # its content becomes the user message and is banked in the transcript.
    if os.path.isfile(message):
        with open(message, "r") as file:
            file_content = file.read()
        messages.append({"role": "user", "content": file_content})
        record_history(file_content)
    else:
        messages.append({"role": "user", "content": message})

    # Mirror the user's turn into the transcript file.
    with open(HISTORY_FILE, "a") as file:
        file.write(f"user: {message}\n")

    body = {
        "model": config["model"],
        "temperature": config["temperature"],
        "messages": messages,
    }
    # Optional parameter
    if "max_tokens" in config:
        body["max_tokens"] = config["max_tokens"]

    try:
        r = requests.post(
            f"{BASE_ENDPOINT}/chat/completions", headers=headers, json=body
        )
    except requests.ConnectionError:
        console.print("Connection error, try again...", style="red bold")
        messages.pop()  # drop the unanswered user turn
        raise KeyboardInterrupt
    except requests.Timeout:
        console.print("Connection timed out, try again...", style="red bold")
        messages.pop()
        raise KeyboardInterrupt

    if r.status_code != 200:
        # Fix: API errors (401, 429, ...) were previously ignored silently,
        # leaving the loop apparently hung with no feedback.
        console.print(f"API error {r.status_code}: {r.text}", style="red bold")
        messages.pop()
        raise KeyboardInterrupt

    response = r.json()
    message_response = response["choices"][0]["message"]
    usage_response = response["usage"]

    console.line()
    if config["markdown"]:
        console.print(Markdown(message_response["content"].strip()))
    else:
        console.print(message_response["content"].strip())
    console.line()

    # Mirror the AI's turn into the transcript file.
    with open(HISTORY_FILE, "a") as file:
        file.write(f"AI: {message_response['content'].strip()}\n")

    # Tell the candidate how to answer this particular question.
    question = message_response["content"].strip()
    if should_prompt_for_file(question):
        console.print("Please write your response in a separate file and attach the path here.")
    else:
        print("You can answer in the chat.")

    # Update message history and token counters.
    messages.append(message_response)
    prompt_tokens += usage_response["prompt_tokens"]
    completion_tokens += usage_response["completion_tokens"]


@click.command()
@click.option(
    "-c", "--context", "context", type=click.File("r"), help="Path to a context file",
    multiple=True
)
@click.option("-k", "--key", "api_key", help="Set the API Key")
@click.option("-m", "--model", "model", help="Set the model")
def main(context, api_key, model) -> None:
    """CLI entry point: configure, authenticate the candidate, run the loop."""
    history = FileHistory(HISTORY_FILE)
    session = PromptSession(history=history)

    try:
        config = load_config(CONFIG_FILE)
    except FileNotFoundError:
        console.print("Configuration file not found", style="red bold")
        sys.exit(1)

    # Order of precedence for the API key:
    # command line option > environment variable > configuration file.
    # NOTE(review): load_config() exports the config key to OPENAI_API_KEY,
    # so the environment branch below only has an effect if the variable was
    # already set before the program started.
    if os.environ.get(ENV_VAR):
        config["api-key"] = os.environ[ENV_VAR].strip()
    if api_key:
        config["api-key"] = api_key.strip()
    if model:
        config["model"] = model.strip()

    # Run on exit: submit the transcript, then show token usage and cost.
    atexit.register(submit_progress)
    atexit.register(display_expense, model=config["model"])

    # Welcome / consent banner.
    console.print("InterviewGPT | Revolutionizing online assessments in technology.", style="green bold italic")
    console.print("\nYour activity within this interface will be tracked for evaluation and analysis purposes."
                  + "\nBy using this program, you agree to the collection and usage of your data for these purposes.")
    console.print("\nPlease enter your user ID and key, as provided to you by your interviewer.")

    # System message for fenced code blocks when markdown rendering is on.
    if config["markdown"]:
        add_markdown_system_message()

    # Context file(s) from the command line become system messages.
    if context:
        for c in context:
            messages.append({"role": "system", "content": c.read().strip()})

    console.rule()

    # Validate the candidate's credentials before starting the loop.
    candidate_id = input("User ID: ")
    interview_key = input("Interview Key: ")
    verify_interview_key(candidate_id, interview_key)

    global time_of_start
    time_of_start = time.time()
    while True:
        try:
            start_prompt(session, config)
        except KeyboardInterrupt:
            continue  # skipped turn; keep the session alive
        except EOFError:
            break  # /q: end the interview


if __name__ == "__main__":
    main()
4 | ![lnterviewGPT-1](https://github.com/r-chong/InterviewGPT/assets/75395781/74be3683-c9c6-404e-b9e1-a40c20d014df) 5 | 6 | # Final features: 7 | 8 | **Interview content** 9 | 10 | - AI interviewer: provides focused and structured interviews, asking one question at a time and ensuring comprehension by asking follow-up questions before moving to the next question 11 | - Two question types: code-based and concept questions 12 | - AI interacts beyond simply asking the question; it follows up and asks you to elaborate depending on your answer. 13 | - CLI with context: user-friendly experience with relevant information and instructions 14 | - "Please enter a valid file path" 15 | - "You can type in the chat for this question" 16 | - Records transcript with code: - enables detailed review of candidate's coding abilities and problem-solving techniques - gives insight into what the candidate codes like 17 | - Variable interview length: adjusts based on context, usually around 45 minutes. 18 | - I only have access to GPT 3.5 API so outputs will be greatly improved when I get the new API 19 | 20 | **Backend** 21 | 22 | - Key validation on Firestore: enhances security, only registered candidates can access 23 | - Generate new keys 24 | - reference candidate emails 25 | 26 | **Candidate Results** 27 | 28 | - MIM-formatted email that prettifies the outputs so recruiters can read them easier 29 | - Email notification: prompt feedback, improves candidate communication 30 | - For the email text, I used langchain to summarize the transcript and provide scores, stats, and give verdict on candidate 31 | - timestamps to measure speed; you can see how long the candidate took for each question, possible flagging if they answer too fast 32 | 33 | **Prompting** 34 | 35 | - cut down Ack's original prompt to save tokens- AI interviewer: provides focused and structured interviews, asking one question at a time and ensuring comprehension by asking follow-up questions before moving to the next 
question 36 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | black==23.1.0 2 | certifi==2022.12.7 3 | charset-normalizer==3.0.1 4 | click==8.1.3 5 | idna==3.4 6 | markdown-it-py==2.2.0 7 | mdurl==0.1.2 8 | mypy-extensions==1.0.0 9 | packaging==23.0 10 | pathspec==0.11.0 11 | platformdirs==3.1.0 12 | prompt-toolkit==3.0.38 13 | Pygments==2.14.0 14 | PyYAML==6.0 15 | requests==2.28.2 16 | rich==13.3.1 17 | tomli==2.0.1 18 | urllib3==1.26.14 19 | wcwidth==0.2.6 20 | -------------------------------------------------------------------------------- /serviceAccountKey.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "service_account", 3 | "project_id": "interviewgpt-96eab", 4 | "private_key_id": "9a48da1ef3f21859578ee605996f80a1303e769d", 5 | "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLpQxzywAEFiEq\nzubTpgkTKUT5K+T/GODv1017BWOXGP3avScM65cS46LpMqaZMLFKKok7q7RQOFBO\naQPQBn2SmE7ndJPi3JvQ/+ZA8yLJ2V9e5N6ku4nDu9D+pIDZ21sP8s/V8O7A/SxO\nGi4i5WMm3JCUir1jtOAaXzRzQgyLbbGajaicoI0gvNS2pe/8c22Itsbix3KQN06V\ngE8WN9wZzvTYaLDyt5W0YeXkkgftODU0tKqOd52egvR+cqcjOgKACfZX5izubH4X\n+dPy8q4x5l/RRJBRC+pJ8LvSMuCK02TGCDSVY6A0iEe1WGNnC2Ew5Zv0b2gdZlqU\n9LSqYmHJAgMBAAECggEAA7UFiVWWko+r3j8N0WhEopKvoMWsnMhOIihi+3em6/0b\nQOtzOqj1JlwNu9PgpJA2YZpG4FBt8ln/rz+6GN2g+1JQH5mq2zhAhcq+XUWrALrq\nTlx/x5N3TKB+hSYt0//XFDVR7n9Db2PkdefQ2ddSz9/EkFAUWZLVOgtF4oiL8l1K\n/zYF01iDFVcDjXYbcsQC048h/2YCAPyde3r7+juXn+oley4KFPm0LD2sgZlwhYjh\n2lvdgJM8ZPTCuL/DQDy2Bim5ZLRNJ38HuiE7Tim1i3BEUjZ7Fqt/bBVeZG5zMtcB\nN3yKFPZZkg7qneJm2Vy+uwiI1nywMvKNQo/DcaknYQKBgQDorb8QKXS1dSiiLLSh\nxoOv9ousEVvEhIWfo3amL9fntXosXy0vdhM0O8Kb9n18I9XKDVcAk3KPxiFt+dBa\nW/QiKIjx3YZyjuICNEZVl+/1Cct3aWnGMrRkbX41lmmhIzPd4vKBuCETsSl5sEcJ\nLR07N5BOsaLCveUBmKqS7i9FYQKBgQDgDlNdYJM8u14EdoVdMbkso5BDD7DT9TZy\nFgOTIIWcqL7k4535bGxB8JAQn
awl6MmY0CVL8VK/4QyHjSacPNVvBwKCmNWz3He9\nnmPGU7Y34hGiyLSGLqGphIkf+EJfOVDhtyzF3BQMLnatRk06tk1sc8UHhIJUOMSX\nFm5iTBkNaQKBgHu453+Qi55xTcnuO55KEHvgfSRhlopIbUUVhmGXzxZGSVCOy1gR\nUOTVYgXMa6024NcvgLOts5z/nYb87u0dOBNxul/hizwe/c5uPXgF8nI5Yxd71eKt\n034TOqkD8ebc8RdrNcgEQJJYAdXTBfK7Km/ye6lci09B6iOPKiQ2wqwBAoGAK1i6\nKhuQALTjGIvxezkucQNn2oGDnU4YbXPy1xj4oHlJDTE4E9Q1EYC/7mcCsgbaUbUb\nMhkLAFJCwUq+iIWAaU1639c/qN9jTw0AkpMJcsBK1yprOnZ1Ej2w5b164b9N9Y0h\nnVnZiw3GKfmq4Uv5qJyxJqYtLKwa5HwTePLt+dkCgYEApXOlyHxGKjMt8ddHvALm\naX/3LC8hUyTS6kILswGFxisw458QikFfcjqKZ1Khupz2o3o2MOyrMc4avbyJKFJP\nmXUFIspC4kCarb98loLcrtfm7+MlNGdUEc3O/2JsBqLgfdcZyXbOh3sq9xhLHQXC\nvh3W27U9j8EuARBVQz9txm0=\n-----END PRIVATE KEY-----\n", 6 | "client_email": "firebase-adminsdk-bnhp3@interviewgpt-96eab.iam.gserviceaccount.com", 7 | "client_id": "111120332586092188436", 8 | "auth_uri": "https://accounts.google.com/o/oauth2/auth", 9 | "token_uri": "https://oauth2.googleapis.com/token", 10 | "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", 11 | "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-bnhp3%40interviewgpt-96eab.iam.gserviceaccount.com", 12 | "universe_domain": "googleapis.com" 13 | } 14 | --------------------------------------------------------------------------------