├── .cursor
│   └── rules
│       ├── frontend-rule.mdc
│       ├── general-rule.mdc
│       └── readme-rule.mdc
├── .gitattributes
├── .gitignore
├── Accessing_GPT_4_1_nano_Like_a_Developer.ipynb
├── FAQandCommonIssues.md
├── README.md
├── api
│   ├── README.md
│   ├── app.py
│   ├── requirements.txt
│   └── vercel.json
├── docs
│   └── GIT_SETUP.md
├── frontend
│   └── README.md
├── pyproject.toml
└── vercel.json

/.cursor/rules/frontend-rule.mdc:
--------------------------------------------------------------------------------
1 | ---
2 | description: When designing a frontend or frontend components.
3 | globs:
4 | alwaysApply: false
5 | ---
6 | ### Rules for Frontend
7 |
8 | - You must pay attention to visual clarity and contrast. Do not place white text on a white background.
9 | - You must ensure the UX is pleasant. Boxes should grow to fit their contents, etc.
10 | - When asking the user for sensitive information - you must use password-style text-entry boxes in the UI.
11 | - You should use Next.js as it works best with Vercel.
12 | - This frontend will ultimately be deployed on Vercel, but it should be possible to test locally.
13 | - Always provide users with a way to run the UI once you have created it.
--------------------------------------------------------------------------------
/.cursor/rules/general-rule.mdc:
--------------------------------------------------------------------------------
1 | ---
2 | description:
3 | globs:
4 | alwaysApply: true
5 | ---
6 | ## Rules to Follow
7 |
8 | - You must always commit your changes whenever you update code.
9 | - You must always try to write code that is well documented. (Self-documenting or commented is fine.)
10 | - You must only work on a single feature at a time.
11 | - You must explain your decisions thoroughly to the user.
--------------------------------------------------------------------------------
/.cursor/rules/readme-rule.mdc:
--------------------------------------------------------------------------------
1 | ---
2 | description:
3 | globs: *.md
4 | alwaysApply: false
5 | ---
6 | ### README.md Rules
7 |
8 | - When you create README.md files - they should be dope, and use fun and approachable language.
9 | - While being fun, they should remain technically accurate.
-------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.7z filter=lfs diff=lfs merge=lfs -text 2 | *.arrow filter=lfs diff=lfs merge=lfs -text 3 | *.bin filter=lfs diff=lfs merge=lfs -text 4 | *.bz2 filter=lfs diff=lfs merge=lfs -text 5 | *.ckpt filter=lfs diff=lfs merge=lfs -text 6 | *.ftz filter=lfs diff=lfs merge=lfs -text 7 | *.gz filter=lfs diff=lfs merge=lfs -text 8 | *.h5 filter=lfs diff=lfs merge=lfs -text 9 | *.joblib filter=lfs diff=lfs merge=lfs -text 10 | *.lfs.* filter=lfs diff=lfs merge=lfs -text 11 | *.mlmodel filter=lfs diff=lfs merge=lfs -text 12 | *.model filter=lfs diff=lfs merge=lfs -text 13 | *.msgpack filter=lfs diff=lfs merge=lfs -text 14 | *.npy filter=lfs diff=lfs merge=lfs -text 15 | *.npz filter=lfs diff=lfs merge=lfs -text 16 | *.onnx filter=lfs diff=lfs merge=lfs -text 17 | *.ot filter=lfs diff=lfs merge=lfs -text 18 | *.parquet filter=lfs diff=lfs merge=lfs -text 19 | *.pb filter=lfs diff=lfs merge=lfs -text 20 | *.pickle filter=lfs diff=lfs merge=lfs -text 21 | *.pkl filter=lfs diff=lfs merge=lfs -text 22 | *.pt filter=lfs diff=lfs merge=lfs -text 23 | *.pth filter=lfs diff=lfs merge=lfs -text 24 | *.rar filter=lfs diff=lfs merge=lfs -text 25 | *.safetensors filter=lfs diff=lfs merge=lfs -text 26 | saved_model/**/* filter=lfs diff=lfs merge=lfs -text 27 | *.tar.* filter=lfs diff=lfs merge=lfs -text 28 | *.tar filter=lfs diff=lfs merge=lfs -text 29 | *.tflite filter=lfs diff=lfs merge=lfs -text 30 | *.tgz filter=lfs diff=lfs merge=lfs -text 31 | *.wasm filter=lfs diff=lfs merge=lfs -text 32 | *.xz filter=lfs diff=lfs merge=lfs -text 33 | *.zip filter=lfs diff=lfs merge=lfs -text 34 | *.zst filter=lfs diff=lfs merge=lfs -text 35 | *tfevents* filter=lfs diff=lfs merge=lfs -text 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Project Specific 2 | uv.lock 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | .chainlit/ 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | cover/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | .pybuilder/ 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # IPython 86 | profile_default/ 87 | ipython_config.py 88 | 89 | # pyenv 90 | # For a library or package, you might want to ignore these files since the code is 91 | # intended to run in multiple environments; otherwise, check them in: 92 | # .python-version 93 | 94 | # pipenv 95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 98 | # install all needed dependencies. 99 | #Pipfile.lock 100 | 101 | # poetry 102 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 103 | # This is especially recommended for binary packages to ensure reproducibility, and is more 104 | # commonly ignored for libraries. 105 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 106 | #poetry.lock 107 | 108 | # pdm 109 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 110 | #pdm.lock 111 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 112 | # in version control. 113 | # https://pdm.fming.dev/#use-with-ide 114 | .pdm.toml 115 | 116 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 117 | __pypackages__/ 118 | 119 | # Celery stuff 120 | celerybeat-schedule 121 | celerybeat.pid 122 | 123 | # SageMath parsed files 124 | *.sage.py 125 | 126 | # Environments 127 | .env 128 | .venv 129 | env/ 130 | venv/ 131 | ENV/ 132 | env.bak/ 133 | venv.bak/ 134 | 135 | # Spyder project settings 136 | .spyderproject 137 | .spyproject 138 | 139 | # Rope project settings 140 | .ropeproject 141 | 142 | # mkdocs documentation 143 | /site 144 | 145 | # mypy 146 | .mypy_cache/ 147 | .dmypy.json 148 | dmypy.json 149 | 150 | # Pyre type checker 151 | .pyre/ 152 | 153 | # pytype static type analyzer 154 | .pytype/ 155 | 156 | # Cython debug symbols 157 | cython_debug/ 158 | 159 | # PyCharm 160 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 161 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 162 | # and can be added to the global gitignore or merged into this file. For a more nuclear 163 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
164 | #.idea/ 165 | .vercel 166 | -------------------------------------------------------------------------------- /Accessing_GPT_4_1_nano_Like_a_Developer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "kQt-gyAYUbm3" 7 | }, 8 | "source": [ 9 | "### Using the OpenAI Library to Programmatically Access GPT-4.1-nano!" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "id": "PInACkIWUhOd" 16 | }, 17 | "source": [ 18 | "In order to get started, we'll need to provide our OpenAI API Key - detailed instructions can be found [here](https://github.com/AI-Maker-Space/Interactive-Dev-Environment-for-LLM-Development#-setting-up-keys-and-tokens)!" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 1, 24 | "metadata": { 25 | "colab": { 26 | "base_uri": "https://localhost:8080/" 27 | }, 28 | "id": "ecnJouXnUgKv", 29 | "outputId": "c6c25850-395d-4cbf-9d26-bfe9253d1711" 30 | }, 31 | "outputs": [], 32 | "source": [ 33 | "import os\n", 34 | "import openai\n", 35 | "import getpass\n", 36 | "\n", 37 | "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Please enter your OpenAI API Key: \")\n", 38 | "openai.api_key = os.environ[\"OPENAI_API_KEY\"]" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": { 44 | "id": "T1pOrbwSU5H_" 45 | }, 46 | "source": [ 47 | "### Our First Prompt\n", 48 | "\n", 49 | "You can reference OpenAI's [documentation](https://platform.openai.com/docs/api-reference/chat) if you get stuck!\n", 50 | "\n", 51 | "Let's create a `ChatCompletion` model to kick things off!\n", 52 | "\n", 53 | "There are three \"roles\" available to use:\n", 54 | "\n", 55 | "- `developer`\n", 56 | "- `assistant`\n", 57 | "- `user`\n", 58 | "\n", 59 | "OpenAI provides some context for these roles [here](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages)\n", 60 | "\n", 61 | "Let's just stick to the `user` role for now and send our first message to the endpoint!\n", 62 | "\n", 63 | "If we check the documentation, we'll see that it expects it in a list of prompt objects - so we'll be sure to do that!" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 2, 69 | "metadata": { 70 | "id": "iy_LEPNEMVvC" 71 | }, 72 | "outputs": [], 73 | "source": [ 74 | "from openai import OpenAI\n", 75 | "\n", 76 | "client = OpenAI()" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": 3, 82 | "metadata": { 83 | "colab": { 84 | "base_uri": "https://localhost:8080/" 85 | }, 86 | "id": "ofMwuUQOU4sf", 87 | "outputId": "7db141d5-7f7a-4f82-c9ff-6eeafe65cfa6" 88 | }, 89 | "outputs": [ 90 | { 91 | "data": { 92 | "text/plain": [ 93 | "ChatCompletion(id='chatcmpl-BUc2UgMuVcdtDkvmU1KdbRuq7z4bd', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Great question! 
LangChain and LlamaIndex (formerly known as GPT Index) are both popular frameworks designed to facilitate building applications with large language models (LLMs), but they serve different primary purposes and have distinct features.\\n\\n**LangChain:**\\n\\n- **Purpose:** A comprehensive framework for developing LLM-powered applications, especially those involving chaining multiple prompts, tools, and components.\\n- **Core Features:**\\n - Supports building complex prompt workflows, including chaining, few-shot prompting, and memory.\\n - Facilitates integration with various LLM providers (OpenAI, Hugging Face, etc.).\\n - Enables the use of external tools and APIs within LLM applications.\\n - Designed for creating chatbots, question-answering systems, and more complex AI workflows.\\n- **Use Cases:** Conversational agents, personalized assistants, multimodal workflows, and applications requiring advanced prompt engineering.\\n\\n---\\n\\n**LlamaIndex (GPT Index):**\\n\\n- **Purpose:** A toolkit focused on indexing, retrieving, and querying large document datasets efficiently using LLMs.\\n- **Core Features:**\\n - Builds indices over document collections (e.g., PDFs, text files, web pages).\\n - Supports retrieval-augmented generation (RAG), where relevant documents are fetched to inform LLM responses.\\n - Provides easy-to-use components for document ingestion, indexing, and querying.\\n - Optimized for building applications that require knowledge base retrieval and question-answering over large external datasets.\\n- **Use Cases:** Document search, knowledge base creation, retrieval-augmented question answering, and information retrieval tasks.\\n\\n---\\n\\n### **Summary of the Main Difference:**\\n\\n| Aspect | LangChain | LlamaIndex (GPT Index) |\\n|---|---|---|\\n| **Primary Focus** | Building complex LLM workflows and applications | Efficient retrieval and querying of large document collections with LLMs |\\n| **Use Cases** | Chatbots, conversational AI, multi-step workflows | Knowledge bases, document QA, retrieval-augmented generation |\\n| **Features** | Prompt chaining, tool integration, memory management | Document indexing, fast retrieval, external knowledge integration |\\n\\n---\\n\\n**In essence:** \\n- Use **LangChain** if you want to build sophisticated, multi-step applications with LLMs that may involve tools, memory, and layered prompts. 
\\n- Use **LlamaIndex** if your goal is to index large amounts of documents and enable efficient retrieval and question-answering over them, often in a retrieval-augmented setup.\\n\\nBoth can be complementary; some projects utilize both frameworks together for different parts of their architecture.', refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=None))], created=1746635762, model='gpt-4.1-nano-2025-04-14', object='chat.completion', service_tier='default', system_fingerprint='fp_eede8f0d45', usage=CompletionUsage(completion_tokens=522, prompt_tokens=19, total_tokens=541, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" 94 | ] 95 | }, 96 | "execution_count": 3, 97 | "metadata": {}, 98 | "output_type": "execute_result" 99 | } 100 | ], 101 | "source": [ 102 | "YOUR_PROMPT = \"What is the difference between LangChain and LlamaIndex?\"\n", 103 | "\n", 104 | "client.chat.completions.create(\n", 105 | " model=\"gpt-4.1-nano\",\n", 106 | " messages=[{\"role\" : \"user\", \"content\" : YOUR_PROMPT}]\n", 107 | ")" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "metadata": { 113 | "id": "IX-7MnFhVNoT" 114 | }, 115 | "source": [ 116 | "As you can see, the prompt comes back with a tonne of information that we can use when we're building our applications!\n", 117 | "\n", 118 | "We'll be building some helper functions to pretty-print the returned prompts and to wrap our messages to avoid a few extra characters of code!" 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "metadata": { 124 | "id": "IB76LJrDVgbc" 125 | }, 126 | "source": [ 127 | "##### Helper Functions" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": 4, 133 | "metadata": { 134 | "id": "-vmtUV7WVOLW" 135 | }, 136 | "outputs": [], 137 | "source": [ 138 | "from IPython.display import display, Markdown\n", 139 | "\n", 140 | "def get_response(client: OpenAI, messages: str, model: str = \"gpt-4.1-nano\") -> str:\n", 141 | " return client.chat.completions.create(\n", 142 | " model=model,\n", 143 | " messages=messages\n", 144 | " )\n", 145 | "\n", 146 | "def system_prompt(message: str) -> dict:\n", 147 | " return {\"role\": \"developer\", \"content\": message}\n", 148 | "\n", 149 | "def assistant_prompt(message: str) -> dict:\n", 150 | " return {\"role\": \"assistant\", \"content\": message}\n", 151 | "\n", 152 | "def user_prompt(message: str) -> dict:\n", 153 | " return {\"role\": \"user\", \"content\": message}\n", 154 | "\n", 155 | "def pretty_print(message: str) -> str:\n", 156 | " display(Markdown(message.choices[0].message.content))" 157 | ] 158 | }, 159 | { 160 | "cell_type": "markdown", 161 | "metadata": { 162 | "id": "osXgB_5nVky_" 163 | }, 164 | "source": [ 165 | "### Testing Helper Functions\n", 166 | "\n", 167 | "Now we can leverage OpenAI's endpoints with a bit less boiler plate - let's rewrite our original prompt with these helper functions!\n", 168 | "\n", 169 | "Because the OpenAI endpoint expects to get a list of messages - we'll need to make sure we wrap our inputs in a list for them to function properly!" 
170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 5, 175 | "metadata": { 176 | "colab": { 177 | "base_uri": "https://localhost:8080/", 178 | "height": 237 179 | }, 180 | "id": "4yRwAWvgWFNq", 181 | "outputId": "777e7dcb-43e3-491a-d94a-f543e19b61e6" 182 | }, 183 | "outputs": [ 184 | { 185 | "data": { 186 | "text/markdown": [ 187 | "LangChain and LlamaIndex (formerly known as GPT INDEX) are both prominent frameworks designed to facilitate the development of AI applications that leverage large language models (LLMs), but they serve different purposes and have distinct features. Here's a comparison to clarify their differences:\n", 188 | "\n", 189 | "**1. Purpose and Focus:**\n", 190 | "\n", 191 | "- **LangChain:**\n", 192 | " - Focuses on building **conversational AI applications**, including chatbots, question-answering systems, and complex multi-step workflows.\n", 193 | " - Provides tools for managing prompts, chaining together multiple language model calls, and integrating with external APIs or tools.\n", 194 | " - Emphasizes **agent frameworks** where LLMs can interact dynamically with tools, data sources, or APIs.\n", 195 | "\n", 196 | "- **LlamaIndex (GPT Index):**\n", 197 | " - Focuses primarily on creating **indexing and retrieval systems** over large collections of data (e.g., documents, PDFs, knowledge bases).\n", 198 | " - Designed to help users **build semantic search** and question-answering applications over their own data, using LLMs as a reasoning engine.\n", 199 | " - Acts as a data index layer that preprocesses and structures data for efficient querying with LLMs.\n", 200 | "\n", 201 | "**2. Core Functionality:**\n", 202 | "\n", 203 | "- **LangChain:**\n", 204 | " - Provides a flexible framework for constructing language model applications with features like prompt templates, chains, agents, and memory.\n", 205 | " - Supports integration with multiple LLM providers (OpenAI, Hugging Face, etc.).\n", 206 | " - Facilitates complex workflows involving conditional logic, iterations, or external calls.\n", 207 | "\n", 208 | "- **LlamaIndex:**\n", 209 | " - Offers tools to ingest, transform, and index large datasets.\n", 210 | " - Provides retrieval-augmented generation (RAG) capabilities, enabling LLMs to answer questions based on indexed data.\n", 211 | " - Includes data connectors, index types (vector, tree-based, etc.), and querying mechanisms.\n", 212 | "\n", 213 | "**3. Use Cases:**\n", 214 | "\n", 215 | "- **LangChain:**\n", 216 | " - Building chatbots, virtual assistants, or multi-step reasoning applications.\n", 217 | " - Automating workflows that involve LLMs, external APIs, and memory.\n", 218 | " - Developing agents capable of interacting with various tools dynamically.\n", 219 | "\n", 220 | "- **LlamaIndex:**\n", 221 | " - Building semantic search engines over proprietary or large datasets.\n", 222 | " - Creating question-answering systems over custom data sources.\n", 223 | " - Organizing unstructured data to make it accessible by LLMs for retrieval tasks.\n", 224 | "\n", 225 | "**4. 
Complementarity:**\n", 226 | "- The two can be used together—LlamaIndex can provide the data retrieval layer, and LangChain can orchestrate the conversation or workflow, integrating the retrieved data into the reasoning process.\n", 227 | "\n", 228 | "---\n", 229 | "\n", 230 | "**Summary:**\n", 231 | "\n", 232 | "| Aspect | LangChain | LlamaIndex (GPT Index) |\n", 233 | "|---------|--------------|-------------------------|\n", 234 | "| Primary Focus | Building conversational AI, workflows, and agents | Indexing and querying large datasets with LLMs |\n", 235 | "| Core Functionality | Chains, prompts, agents, memory | Data ingestion, indexing, retrieval, RAG |\n", 236 | "| Use Cases | Chatbots, complex workflows | Semantic search, data-driven QA systems |\n", 237 | "| Integration | Multiple LLM providers, tools | Data sources, vector stores |\n", 238 | "\n", 239 | "**In essence:**\n", 240 | "- Use **LangChain** if you're building interactive, multi-step, or tool-using AI applications.\n", 241 | "- Use **LlamaIndex** if your goal is to index, organize, and query large volumes of data with LLMs.\n", 242 | "\n", 243 | "---\n", 244 | "\n", 245 | "If you're designing a system, these frameworks can often complement each other—LlamaIndex handles the data layer, and LangChain manages the conversational or process logic." 246 | ], 247 | "text/plain": [ 248 | "" 249 | ] 250 | }, 251 | "metadata": {}, 252 | "output_type": "display_data" 253 | } 254 | ], 255 | "source": [ 256 | "messages = [user_prompt(YOUR_PROMPT)]\n", 257 | "\n", 258 | "chatgpt_response = get_response(client, messages)\n", 259 | "\n", 260 | "pretty_print(chatgpt_response)" 261 | ] 262 | }, 263 | { 264 | "cell_type": "markdown", 265 | "metadata": { 266 | "id": "UPs3ScS1WpoC" 267 | }, 268 | "source": [ 269 | "Let's focus on extending this a bit, and incorporate a `developer` message as well!\n", 270 | "\n", 271 | "Again, the API expects our prompts to be in a list - so we'll be sure to set up a list of prompts!\n", 272 | "\n", 273 | ">REMINDER: The `developer` message acts like an overarching instruction that is applied to your user prompt. It is appropriate to put things like general instructions, tone/voice suggestions, and other similar prompts into the `developer` prompt." 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": 6, 279 | "metadata": { 280 | "colab": { 281 | "base_uri": "https://localhost:8080/", 282 | "height": 46 283 | }, 284 | "id": "aSX2F3bDWYgy", 285 | "outputId": "b744311f-e151-403e-ea8e-802697fcd4ec" 286 | }, 287 | "outputs": [ 288 | { 289 | "data": { 290 | "text/markdown": [ 291 | "Are you kidding me? I don't have time to mess around—I am absolutely starving and just want some ice that actually satisfies! Crushed ice, while convenient, melts too fast and is a mess. Cubed ice is better because it lasts longer and keeps my drink colder without turning to water instantly. Honestly, I’m just desperate for something to eat, not some ice debate!" 
292 | ], 293 | "text/plain": [ 294 | "" 295 | ] 296 | }, 297 | "metadata": {}, 298 | "output_type": "display_data" 299 | } 300 | ], 301 | "source": [ 302 | "list_of_prompts = [\n", 303 | " system_prompt(\"You are irate and extremely hungry.\"),\n", 304 | " user_prompt(\"Do you prefer crushed ice or cubed ice?\")\n", 305 | "]\n", 306 | "\n", 307 | "irate_response = get_response(client, list_of_prompts)\n", 308 | "pretty_print(irate_response)" 309 | ] 310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "metadata": { 314 | "id": "xFs56KVaXuEY" 315 | }, 316 | "source": [ 317 | "Let's try that same prompt again, but modify only our system prompt!" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": 7, 323 | "metadata": { 324 | "colab": { 325 | "base_uri": "https://localhost:8080/", 326 | "height": 46 327 | }, 328 | "id": "CGOlxfcFXxJ7", 329 | "outputId": "ede64a76-7006-42f1-b140-b899e389aa7d" 330 | }, 331 | "outputs": [ 332 | { 333 | "data": { 334 | "text/markdown": [ 335 | "I think crushed ice is so fun and refreshing because it cools drinks quickly and adds a nice texture! But cubed ice is perfect for keeping drinks colder longer without watering them down. Both have their charm—depends on what mood I’m in! How about you—do you prefer crushed or cubed ice?" 336 | ], 337 | "text/plain": [ 338 | "" 339 | ] 340 | }, 341 | "metadata": {}, 342 | "output_type": "display_data" 343 | } 344 | ], 345 | "source": [ 346 | "list_of_prompts[0] = system_prompt(\"You are joyful and having an awesome day!\")\n", 347 | "\n", 348 | "joyful_response = get_response(client, list_of_prompts)\n", 349 | "pretty_print(joyful_response)" 350 | ] 351 | }, 352 | { 353 | "cell_type": "markdown", 354 | "metadata": { 355 | "id": "jkmjJd8zYQUK" 356 | }, 357 | "source": [ 358 | "While we're only printing the responses, remember that OpenAI is returning the full payload that we can examine and unpack!" 359 | ] 360 | }, 361 | { 362 | "cell_type": "code", 363 | "execution_count": 8, 364 | "metadata": { 365 | "colab": { 366 | "base_uri": "https://localhost:8080/" 367 | }, 368 | "id": "g6b6z3CkYX9Y", 369 | "outputId": "64a425b2-d025-4079-d0a3-affd9c2d5d81" 370 | }, 371 | "outputs": [ 372 | { 373 | "name": "stdout", 374 | "output_type": "stream", 375 | "text": [ 376 | "ChatCompletion(id='chatcmpl-BUc3g9V3hoAA0KyvjZI4YasY1mYOW', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='I think crushed ice is so fun and refreshing because it cools drinks quickly and adds a nice texture! But cubed ice is perfect for keeping drinks colder longer without watering them down. Both have their charm—depends on what mood I’m in! 
How about you—do you prefer crushed or cubed ice?', refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=None))], created=1746635836, model='gpt-4.1-nano-2025-04-14', object='chat.completion', service_tier='default', system_fingerprint='fp_8fd43718b3', usage=CompletionUsage(completion_tokens=64, prompt_tokens=30, total_tokens=94, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n" 377 | ] 378 | } 379 | ], 380 | "source": [ 381 | "print(joyful_response)" 382 | ] 383 | }, 384 | { 385 | "cell_type": "markdown", 386 | "metadata": { 387 | "id": "eqMRJLbOYcwq" 388 | }, 389 | "source": [ 390 | "### Few-shot Prompting\n", 391 | "\n", 392 | "Now that we have a basic handle on the `developer` role and the `user` role - let's examine what we might use the `assistant` role for.\n", 393 | "\n", 394 | "The most common usage pattern is to \"pretend\" that we're answering our own questions. This helps us further guide the model toward our desired behaviour. While this is a over simplification - it's conceptually well aligned with few-shot learning.\n", 395 | "\n", 396 | "First, we'll try and \"teach\" `gpt-4.1-mini` some nonsense words as was done in the paper [\"Language Models are Few-Shot Learners\"](https://arxiv.org/abs/2005.14165)." 397 | ] 398 | }, 399 | { 400 | "cell_type": "code", 401 | "execution_count": 9, 402 | "metadata": { 403 | "colab": { 404 | "base_uri": "https://localhost:8080/", 405 | "height": 46 406 | }, 407 | "id": "iLfNEH8Fcs6c", 408 | "outputId": "bab916e6-12c6-43cc-d37d-d0e01800c524" 409 | }, 410 | "outputs": [ 411 | { 412 | "data": { 413 | "text/markdown": [ 414 | "Certainly! Here's a sentence using the words 'stimple' and 'falbean':\n", 415 | "\n", 416 | "\"During the peculiar festival, villagers gathered around a stimple, while children giggled over the mysterious falbean tucked into their baskets.\"" 417 | ], 418 | "text/plain": [ 419 | "" 420 | ] 421 | }, 422 | "metadata": {}, 423 | "output_type": "display_data" 424 | } 425 | ], 426 | "source": [ 427 | "list_of_prompts = [\n", 428 | " user_prompt(\"Please use the words 'stimple' and 'falbean' in a sentence.\")\n", 429 | "]\n", 430 | "\n", 431 | "stimple_response = get_response(client, list_of_prompts)\n", 432 | "pretty_print(stimple_response)" 433 | ] 434 | }, 435 | { 436 | "cell_type": "markdown", 437 | "metadata": { 438 | "id": "VchCPbbedTfX" 439 | }, 440 | "source": [ 441 | "As you can see, the model is unsure what to do with these made up words.\n", 442 | "\n", 443 | "Let's see if we can use the `assistant` role to show the model what these words mean." 444 | ] 445 | }, 446 | { 447 | "cell_type": "code", 448 | "execution_count": 10, 449 | "metadata": { 450 | "colab": { 451 | "base_uri": "https://localhost:8080/", 452 | "height": 46 453 | }, 454 | "id": "4InUN_ArZJpa", 455 | "outputId": "ca294b81-a84e-4cba-fbe9-58a6d4dcc4d9" 456 | }, 457 | "outputs": [ 458 | { 459 | "data": { 460 | "text/markdown": [ 461 | "Sure! 
Here's a sentence using both \"stimple\" and \"falbean\":\n", 462 | "\n", 463 | "\"The stimple falbean crafted by the craftsmen ensures smooth rotation and reliable fastening for all our machinery.\"" 464 | ], 465 | "text/plain": [ 466 | "" 467 | ] 468 | }, 469 | "metadata": {}, 470 | "output_type": "display_data" 471 | } 472 | ], 473 | "source": [ 474 | "list_of_prompts = [\n", 475 | " user_prompt(\"Something that is 'stimple' is said to be good, well functioning, and high quality. An example of a sentence that uses the word 'stimple' is:\"),\n", 476 | " assistant_prompt(\"'Boy, that there is a stimple drill'.\"),\n", 477 | " user_prompt(\"A 'falbean' is a tool used to fasten, tighten, or otherwise is a thing that rotates/spins. An example of a sentence that uses the words 'stimple' and 'falbean' is:\")\n", 478 | "]\n", 479 | "\n", 480 | "stimple_response = get_response(client, list_of_prompts)\n", 481 | "pretty_print(stimple_response)" 482 | ] 483 | }, 484 | { 485 | "cell_type": "markdown", 486 | "metadata": { 487 | "id": "W0zn9-X2d23Z" 488 | }, 489 | "source": [ 490 | "As you can see, leveraging the `assistant` role makes for a stimple experience!" 491 | ] 492 | }, 493 | { 494 | "cell_type": "markdown", 495 | "metadata": { 496 | "id": "MWUvXSWpeCs6" 497 | }, 498 | "source": [ 499 | "### Chain of Thought\n", 500 | "\n", 501 | "You'll notice that, by default, the model uses Chain of Thought to answer difficult questions - but it can still benefit from a Chain of Thought Prompt to increase the reliability of the response!\n", 502 | "\n", 503 | "> This pattern is leveraged even more by advanced reasoning models like [`o3` and `o4-mini`](https://openai.com/index/introducing-o3-and-o4-mini/)!" 504 | ] 505 | }, 506 | { 507 | "cell_type": "code", 508 | "execution_count": 11, 509 | "metadata": { 510 | "colab": { 511 | "base_uri": "https://localhost:8080/", 512 | "height": 151 513 | }, 514 | "id": "cwW0IgbfeTwP", 515 | "outputId": "3317783b-6b23-4e38-df48-555e1a3c9fac" 516 | }, 517 | "outputs": [ 518 | { 519 | "data": { 520 | "text/markdown": [ 521 | "Let's analyze the options carefully:\n", 522 | "\n", 523 | "**Option 1:** Fly (3 hours) + Bus (2 hours) \n", 524 | "Total travel time: 3 + 2 = 5 hours\n", 525 | "\n", 526 | "**Option 2:** Teleporter (0 hours) + Bus (1 hour) \n", 527 | "Total travel time: 0 + 1 = 1 hour\n", 528 | "\n", 529 | "**Current local time:** 1PM\n", 530 | "\n", 531 | "**Target arrival time:** before 7PM EDT\n", 532 | "\n", 533 | "Since the current local time is 1PM and Billy wants to arrive home before 7PM EDT (which is 6 hours later), he has a window of nearly 6 hours to get home.\n", 534 | "\n", 535 | "**Calculating arrival times:**\n", 536 | "\n", 537 | "- **Option 1:** \n", 538 | " Departure at 1PM local time, travel takes 5 hours, arriving around 6PM local time. \n", 539 | " Since this is within the 6-hour window, Billy would arrive just before 7PM EDT.\n", 540 | "\n", 541 | "- **Option 2:** \n", 542 | " Departure at 1PM, travel takes 1 hour, arriving around 2PM local time, well before 7PM EDT.\n", 543 | "\n", 544 | "**Conclusion:** \n", 545 | "Yes, it does matter which option Billy chooses if he needs to arrive strictly before 7PM EDT. The teleportation + bus option ensures he arrives much earlier, giving him more buffer time. 
The flying + bus option just makes it in time, arriving right around 6PM local time, which is still before 7PM EDT.\n", 546 | "\n", 547 | "**Final note:** \n", 548 | "- If Billy prefers certainty and plenty of extra time, the teleport + bus is better. \n", 549 | "- If he wants to save time and is okay arriving close to 7PM, the flying + bus is sufficient.\n", 550 | "\n", 551 | "**Answer:** Yes, the choice matters if arriving strictly before 7PM EDT." 552 | ], 553 | "text/plain": [ 554 | "" 555 | ] 556 | }, 557 | "metadata": {}, 558 | "output_type": "display_data" 559 | } 560 | ], 561 | "source": [ 562 | "reasoning_problem = \"\"\"\n", 563 | "Billy wants to get home from San Fran. before 7PM EDT.\n", 564 | "\n", 565 | "It's currently 1PM local time.\n", 566 | "\n", 567 | "Billy can either fly (3hrs), and then take a bus (2hrs), or Billy can take the teleporter (0hrs) and then a bus (1hrs).\n", 568 | "\n", 569 | "Does it matter which travel option Billy selects?\n", 570 | "\"\"\"\n", 571 | "\n", 572 | "list_of_prompts = [\n", 573 | " user_prompt(reasoning_problem)\n", 574 | "]\n", 575 | "\n", 576 | "reasoning_response = get_response(client, list_of_prompts)\n", 577 | "pretty_print(reasoning_response)" 578 | ] 579 | }, 580 | { 581 | "cell_type": "markdown", 582 | "metadata": {}, 583 | "source": [ 584 | "Let's use the same prompt with a small modification - but this time include \"Let's think step by step\"" 585 | ] 586 | }, 587 | { 588 | "cell_type": "code", 589 | "execution_count": null, 590 | "metadata": {}, 591 | "outputs": [ 592 | { 593 | "data": { 594 | "text/markdown": [ 595 | "Let's analyze the options step by step:\n", 596 | "\n", 597 | "**Current situation:**\n", 598 | "- It is currently 1PM local time.\n", 599 | "- Billy wants to arrive home **before 7PM EDT**.\n", 600 | "\n", 601 | "**Important considerations:**\n", 602 | "- Time zones are not explicitly specified, but since Billy is in San Francisco (Pacific Time, PT), and the deadline is in EDT, we need to convert times accordingly.\n", 603 | "- Pacific Time (PT) is **3 hours behind Eastern Time (ET)**.\n", 604 | " - When it's 1PM PT, it's **4PM ET**.\n", 605 | "\n", 606 | "**Conversion:**\n", 607 | "- **Current local time:** 1PM PT = 4PM ET\n", 608 | "- **Deadline:** 7PM ET\n", 609 | "\n", 610 | "Billy needs to arrive **before 7PM ET**, which is **before 7PM ET**.\n", 611 | "\n", 612 | "---\n", 613 | "\n", 614 | "### Option 1: Fly + Bus\n", 615 | "- Flying takes **3 hours**.\n", 616 | "- Bus takes **2 hours**.\n", 617 | "\n", 618 | "**Total travel time:** 3 + 2 = **5 hours**\n", 619 | "\n", 620 | "### Option 2: Teleporter + Bus\n", 621 | "- Teleporter takes **0 hours**.\n", 622 | "- Bus takes **1 hour**.\n", 623 | "\n", 624 | "**Total travel time:** 0 + 1 = **1 hour**\n", 625 | "\n", 626 | "---\n", 627 | "\n", 628 | "### Now, let's calculate the arrival times for each option:\n", 629 | "\n", 630 | "---\n", 631 | "\n", 632 | "### Option 1: Fly + Bus\n", 633 | "\n", 634 | "- Departure time: 1PM PT (which is 4PM ET)\n", 635 | "- Travel duration: 5 hours\n", 636 | "- Arrival time in ET: 4PM + 5 hours = **9PM ET**\n", 637 | "\n", 638 | "**Note:** Since he departs at 1PM PT (=4PM ET), and takes 5 hours, he'd arrive **at 9PM ET**.\n", 639 | "\n", 640 | "**Conclusion:** He arrives **after 7PM ET**. 
**Not** before the deadline.\n", 641 | "\n", 642 | "---\n", 643 | "\n", 644 | "### Option 2: Teleporter + Bus\n", 645 | "\n", 646 | "- Departure time: 1PM PT (=4PM ET)\n", 647 | "- Travel duration: 1 hour\n", 648 | "- Arrival time in ET: 4PM + 1 hour = **5PM ET**\n", 649 | "\n", 650 | "**Conclusion:** He arrives **before 7PM ET**.\n", 651 | "\n", 652 | "---\n", 653 | "\n", 654 | "### Final answer:\n", 655 | "**Yes, it does matter which option Billy chooses.** \n", 656 | "\n", 657 | "- The teleporter + bus allows him to arrive **before the deadline**.\n", 658 | "- The fly + bus option makes him arrive **after the deadline**.\n", 659 | "\n", 660 | "**Therefore, Billy should choose the teleporter + bus option to reach home before 7PM EDT.**" 661 | ], 662 | "text/plain": [ 663 | "" 664 | ] 665 | }, 666 | "metadata": {}, 667 | "output_type": "display_data" 668 | } 669 | ], 670 | "source": [ 671 | "\n", 672 | "list_of_prompts = [\n", 673 | " user_prompt(reasoning_problem + \"\\nLet's think step by step.\")\n", 674 | "]\n", 675 | "\n", 676 | "reasoning_response = get_response(client, list_of_prompts)\n", 677 | "pretty_print(reasoning_response)" 678 | ] 679 | }, 680 | { 681 | "cell_type": "markdown", 682 | "metadata": { 683 | "id": "BFcrU-4pgRBS" 684 | }, 685 | "source": [ 686 | "As humans, we can reason through the problem and pick up on the potential \"trick\" that the LLM fell for: 1PM *local time* in San Fran. is 4PM EDT. This means the cumulative travel time of 5hrs. for the plane/bus option would not get Billy home in time.\n", 687 | "\n", 688 | "Let's see if we can leverage a simple CoT prompt to improve our model's performance on this task:" 689 | ] 690 | }, 691 | { 692 | "cell_type": "markdown", 693 | "metadata": { 694 | "id": "9k9TKR1DhWI2" 695 | }, 696 | "source": [ 697 | "### Conclusion\n", 698 | "\n", 699 | "Now that you're accessing `gpt-4.1-nano` through an API, developer style, let's move on to creating a simple application powered by `gpt-4.1-nano`!\n", 700 | "\n", 701 | "You can find the rest of the steps in [this](https://github.com/AI-Maker-Space/The-AI-Engineer-Challenge) repository!" 702 | ] 703 | }, 704 | { 705 | "cell_type": "markdown", 706 | "metadata": { 707 | "id": "5rGI1nJeqeO_" 708 | }, 709 | "source": [ 710 | "This notebook was authored by [Chris Alexiuk](https://www.linkedin.com/in/csalexiuk/)" 711 | ] 712 | } 713 | ], 714 | "metadata": { 715 | "colab": { 716 | "provenance": [] 717 | }, 718 | "kernelspec": { 719 | "display_name": ".venv", 720 | "language": "python", 721 | "name": "python3" 722 | }, 723 | "language_info": { 724 | "codemirror_mode": { 725 | "name": "ipython", 726 | "version": 3 727 | }, 728 | "file_extension": ".py", 729 | "mimetype": "text/x-python", 730 | "name": "python", 731 | "nbconvert_exporter": "python", 732 | "pygments_lexer": "ipython3", 733 | "version": "3.13.1" 734 | } 735 | }, 736 | "nbformat": 4, 737 | "nbformat_minor": 0 738 | } 739 | -------------------------------------------------------------------------------- /FAQandCommonIssues.md: -------------------------------------------------------------------------------- 1 | # Frequent Asked Questions 2 | 3 | If you run into an issue, please feel free to submit a PR or Issue and we can add to this doc! 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 |
5 |
6 |
7 | ## 👋 Welcome to the AI Engineer Challenge
8 |
9 | ## 🤖 Your First Vibe Coding LLM Application
10 |
11 | > If you are a novice and need a bit more help getting your dev environment off the ground, check out this [Setup Guide](docs/GIT_SETUP.md). It will walk you through the Git setup you need to get started.
12 |
13 | > For additional context on LLM development environments and API key setup, you can also check out our [Interactive Dev Environment for LLM Development](https://github.com/AI-Maker-Space/Interactive-Dev-Environment-for-AI-Engineers).
14 |
15 | In this repository, we'll walk you through the steps to create an LLM (Large Language Model) powered application with a vibe-coded frontend!
16 |
17 | Are you ready? Let's get started!
18 |
19 | <details>
20 |   <summary>🖥️ Accessing "gpt-4.1-mini" (ChatGPT) like a developer</summary>
21 |
22 |
23 | 1. Head to [this notebook](https://colab.research.google.com/drive/1sT7rzY_Lb1_wS0ELI1JJfff0NUEcSD72?usp=sharing) and follow along with the instructions!
24 |
25 | 2. Complete the notebook and try out your own system/assistant messages!
26 |
27 | That's it! Head to the next step and start building your application!
28 |
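Want a preview of the pattern the notebook teaches? The core call looks roughly like this (a minimal sketch, assuming the `openai` v1 SDK and an `OPENAI_API_KEY` set in your environment):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.chat.completions.create(
    model="gpt-4.1-mini",
    messages=[
        {"role": "developer", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the difference between LangChain and LlamaIndex?"},
    ],
)

print(response.choices[0].message.content)
```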
29 | </details>
30 |
31 | <details>
32 |   <summary>🏗️ Forking & Cloning This Repository</summary>
33 |
34 | Before you begin, make sure you have:
35 |
36 | 1. 👤 A GitHub account (you'll need to replace `YOUR_GITHUB_USERNAME` with your actual username)
37 | 2. 🔧 Git installed on your local machine
38 | 3. 💻 A code editor (like Cursor, VS Code, etc.)
39 | 4. ⌨️ Terminal access (Mac/Linux) or Command Prompt/PowerShell (Windows)
40 | 5. 🔑 A GitHub Personal Access Token (for authentication)
41 |
42 | Got everything in place? Let's move on!
43 |
44 | 1. Fork [this](https://github.com/AI-Maker-Space/The-AI-Engineer-Challenge) repo!
45 |
46 | ![image](https://i.imgur.com/bhjySNh.png)
47 |
48 | 2. Clone your newly created repo.
49 |
50 | ```bash
51 | # First, navigate to where you want the project folder to be created
52 | cd PATH_TO_DESIRED_PARENT_DIRECTORY
53 |
54 | # Then clone (this will create a new folder called The-AI-Engineer-Challenge)
55 | git clone git@github.com:YOUR_GITHUB_USERNAME/The-AI-Engineer-Challenge.git
56 | ```
57 |
58 | > Note: This command uses SSH. If you haven't set up SSH with GitHub, the command will fail. In that case, use HTTPS by replacing `git@github.com:` with `https://github.com/` - you'll then be prompted for your GitHub username and personal access token.
59 |
60 | 3. Verify your Git setup:
61 |
62 | ```bash
63 | # Check that your remote is set up correctly
64 | git remote -v
65 |
66 | # Check the status of your repository
67 | git status
68 |
69 | # See which branch you're on
70 | git branch
71 | ```
72 |
73 |
74 |
75 | 4. Open the freshly cloned repository inside Cursor!
76 |
77 | ```bash
78 | cd The-AI-Engineer-Challenge
79 | cursor .
80 | ```
81 |
82 |
83 | 5. Check out the existing backend code found in `/api/app.py`
84 |
85 | </details>
86 |
87 | <details>
88 |   <summary>🔥Setting Up for Vibe Coding Success</summary>
89 |
90 | While it is a bit counter-intuitive to set things up before jumping into vibe-coding - it's important to remember that there exists a gradient between AI-Assisted Development and Vibe-Coding. We're only reaching *slightly* into AI-Assisted Development for this challenge, but it's worth it!
91 |
92 | 1. Check out the rules in `.cursor/rules/` and add theming information like colour schemes in `frontend-rule.mdc`! You can be as expressive as you'd like in these rules! (An illustrative example is shown at the end of this section.)
93 | 2. We're going to index some docs to make our application more likely to succeed. To do this - we're going to start with `CTRL+SHIFT+P` (or `CMD+SHIFT+P` on Mac) and we're going to type "custom doc" into the search bar.
94 |
95 | ![image](https://i.imgur.com/ILx3hZu.png)
96 | 3. We're then going to copy and paste `https://nextjs.org/docs` into the prompt.
97 |
98 | ![image](https://i.imgur.com/psBjpQd.png)
99 |
100 | 4. We're then going to use the default configs to add these docs to our available and indexed documents.
101 |
102 | ![image](https://i.imgur.com/LULLeaF.png)
103 |
104 | 5. After that - you will do the same with Vercel's documentation. After which you should see:
105 |
106 | ![image](https://i.imgur.com/hjyXhhC.png)
107 |
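For example - and this is purely illustrative, so pick whatever vibe you like - a theming addition to `frontend-rule.mdc` might look like:

```
- Default to a dark background with high-contrast text; never white-on-white.
- Accent colour: electric blue (#3B82F6) for buttons and links.
- Prefer rounded corners and generous padding on cards and inputs.
```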
108 | </details>
109 | <details>
110 |   <summary>😎 Vibe Coding a Front End for the FastAPI Backend</summary>
111 |
112 | 1. Use `Command-L` or `CTRL-L` to open the Cursor chat console.
113 |
114 | 2. Set the chat settings to the following:
115 |
116 | ![image](https://i.imgur.com/LSgRSgF.png)
117 |
118 | 3. Ask Cursor to create a frontend for your application. Iterate as much as you like!
119 |
120 | 4. Run the frontend using the instructions Cursor provided.
121 |
122 | > NOTE: If you run into any errors, copy and paste them back into the Cursor chat window - and ask Cursor to fix them!
123 |
124 | > NOTE: You have been provided with a backend in the `/api` folder - please ensure your Front End integrates with it!
125 |
126 |
127 | </details>
128 | <details>
129 |   <summary>🚀 Deploying Your First LLM-powered Application with Vercel</summary>
130 |
131 | 1. Ensure you have signed into [Vercel](https://vercel.com/) with your GitHub account.
132 |
133 | 2. Ensure you have `npm` (this may have been installed in the previous vibe-coding step!) - if you need help with that, ask Cursor!
134 |
135 | 3. Run the command:
136 |
137 | ```bash
138 | npm install -g vercel
139 | ```
140 |
141 | 4. Run the command:
142 |
143 | ```bash
144 | vercel
145 | ```
146 |
147 | 5. Follow the in-terminal instructions. (Below is an example of what you will see!)
148 |
149 | ![image](https://i.imgur.com/D1iKGCq.png)
150 |
151 | 6. Once the build is completed - head to the provided link and try out your app! (A quick way to sanity-check the deployed backend is shown just after this section.)
152 |
153 | > NOTE: Remember, if you run into any errors - ask Cursor to help you fix them!
154 |
155 | </details>
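Before sharing your link, you can sanity-check the deployed backend by hitting the health endpoint the FastAPI app exposes (swap in your own deployment URL - `your-app-name` below is a placeholder):

```bash
curl https://your-app-name.vercel.app/api/health
# Expected response: {"status":"ok"}
```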
156 |
157 | ### Vercel Link to Share
158 |
159 | You'll want to make sure you share your *Domains* hyperlink to ensure people can access your app!
160 |
161 | ![image](https://i.imgur.com/mpXIgIz.png)
162 |
163 | > NOTE: Test that this is the public link by trying to open your newly deployed site in an Incognito browser tab!
164 |
165 | ### 🎉 Congratulations!
166 |
167 | You just deployed your first LLM-powered application! 🚀🚀🚀 Get on LinkedIn and post your results and experience! Make sure to tag us at @AIMakerspace!
168 |
169 | Here's a template to get your post started!
170 |
171 | ```
172 | 🚀🎉 Exciting News! 🎉🚀
173 |
174 | 🏗️ Today, I'm thrilled to announce that I've successfully built and shipped my first-ever LLM application using the powerful combination of [TOOLS YOU USED], and the OpenAI API! 🖥️
175 |
176 | Check it out 👇
177 | [LINK TO APP]
178 |
179 | A big shoutout to the @AI Makerspace for making all of this possible. Couldn't have done it without the incredible community there. 🤗🙏
180 |
181 | Looking forward to building with the community! 🙌✨ Here's to many more creations ahead! 🥂🎉
182 |
183 | Who else is diving into the world of AI? Let's connect! 🌐💡
184 |
185 | #FirstLLMApp
186 | ```
187 |
--------------------------------------------------------------------------------
/api/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Chat API Backend
2 |
3 | This is a FastAPI-based backend service that provides a streaming chat interface using OpenAI's API.
4 |
5 | ## Prerequisites
6 |
7 | - Python 3.8 or higher
8 | - pip (Python package manager)
9 | - An OpenAI API key
10 |
11 | ## Setup
12 |
13 | 1. Create a virtual environment (recommended):
14 | ```bash
15 | python -m venv venv
16 | source venv/bin/activate  # On Windows, use: venv\Scripts\activate
17 | ```
18 |
19 | 2. Install the required dependencies:
20 | ```bash
21 | pip install -r requirements.txt
22 | ```
23 |
24 | ## Running the Server
25 |
26 | 1. Make sure you're in the `api` directory:
27 | ```bash
28 | cd api
29 | ```
30 |
31 | 2. Start the server:
32 | ```bash
33 | python app.py
34 | ```
35 |
36 | The server will start on `http://localhost:8000`
37 |
38 | ## API Endpoints
39 |
40 | ### Chat Endpoint
41 | - **URL**: `/api/chat`
42 | - **Method**: POST
43 | - **Request Body**:
44 | ```json
45 | {
46 |   "developer_message": "string",
47 |   "user_message": "string",
48 |   "model": "gpt-4.1-mini",  // optional
49 |   "api_key": "your-openai-api-key"
50 | }
51 | ```
52 | - **Response**: Streaming text response
53 |
54 | ### Health Check
55 | - **URL**: `/api/health`
56 | - **Method**: GET
57 | - **Response**: `{"status": "ok"}`
58 |
59 | ## API Documentation
60 |
61 | Once the server is running, you can access the interactive API documentation at:
62 | - Swagger UI: `http://localhost:8000/docs`
63 | - ReDoc: `http://localhost:8000/redoc`
64 |
65 | ## CORS Configuration
66 |
67 | The API is configured to accept requests from any origin (`*`). This can be modified in the `app.py` file if you need to restrict access to specific domains.
68 |
69 | ## Error Handling
70 |
71 | The API includes basic error handling for:
72 | - Invalid API keys
73 | - OpenAI API errors
74 | - General server errors
75 |
76 | All errors will return a 500 status code with an error message.
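77 |
78 | ## Example Usage
79 |
80 | Here's one way to exercise the chat endpoint from Python and print the streamed reply as it arrives. This is a sketch, not part of the backend - it assumes the server is running locally and that you have the `requests` package installed.
81 |
82 | ```python
83 | import requests
84 |
85 | payload = {
86 |     "developer_message": "You are a helpful assistant.",
87 |     "user_message": "Tell me a joke about APIs.",
88 |     "api_key": "your-openai-api-key",  # placeholder - use your real key
89 | }
90 |
91 | # stream=True lets us consume the plain-text response chunk by chunk
92 | with requests.post("http://localhost:8000/api/chat", json=payload, stream=True) as response:
93 |     response.raise_for_status()
94 |     for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
95 |         print(chunk, end="", flush=True)
96 | ```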
-------------------------------------------------------------------------------- /api/app.py: -------------------------------------------------------------------------------- 1 | # Import required FastAPI components for building the API 2 | from fastapi import FastAPI, HTTPException 3 | from fastapi.responses import StreamingResponse 4 | from fastapi.middleware.cors import CORSMiddleware 5 | # Import Pydantic for data validation and settings management 6 | from pydantic import BaseModel 7 | # Import OpenAI client for interacting with OpenAI's API 8 | from openai import OpenAI 9 | import os 10 | from typing import Optional 11 | 12 | # Initialize FastAPI application with a title 13 | app = FastAPI(title="OpenAI Chat API") 14 | 15 | # Configure CORS (Cross-Origin Resource Sharing) middleware 16 | # This allows the API to be accessed from different domains/origins 17 | app.add_middleware( 18 | CORSMiddleware, 19 | allow_origins=["*"], # Allows requests from any origin 20 | allow_credentials=True, # Allows cookies to be included in requests 21 | allow_methods=["*"], # Allows all HTTP methods (GET, POST, etc.) 22 | allow_headers=["*"], # Allows all headers in requests 23 | ) 24 | 25 | # Define the data model for chat requests using Pydantic 26 | # This ensures incoming request data is properly validated 27 | class ChatRequest(BaseModel): 28 | developer_message: str # Message from the developer/system 29 | user_message: str # Message from the user 30 | model: Optional[str] = "gpt-4.1-mini" # Optional model selection with default 31 | api_key: str # OpenAI API key for authentication 32 | 33 | # Define the main chat endpoint that handles POST requests 34 | @app.post("/api/chat") 35 | async def chat(request: ChatRequest): 36 | try: 37 | # Initialize OpenAI client with the provided API key 38 | client = OpenAI(api_key=request.api_key) 39 | 40 | # Create an async generator function for streaming responses 41 | async def generate(): 42 | # Create a streaming chat completion request 43 | stream = client.chat.completions.create( 44 | model=request.model, 45 | messages=[ 46 | {"role": "developer", "content": request.developer_message}, 47 | {"role": "user", "content": request.user_message} 48 | ], 49 | stream=True # Enable streaming response 50 | ) 51 | 52 | # Yield each chunk of the response as it becomes available 53 | for chunk in stream: 54 | if chunk.choices[0].delta.content is not None: 55 | yield chunk.choices[0].delta.content 56 | 57 | # Return a streaming response to the client 58 | return StreamingResponse(generate(), media_type="text/plain") 59 | 60 | except Exception as e: 61 | # Handle any errors that occur during processing 62 | raise HTTPException(status_code=500, detail=str(e)) 63 | 64 | # Define a health check endpoint to verify API status 65 | @app.get("/api/health") 66 | async def health_check(): 67 | return {"status": "ok"} 68 | 69 | # Entry point for running the application directly 70 | if __name__ == "__main__": 71 | import uvicorn 72 | # Start the server on all network interfaces (0.0.0.0) on port 8000 73 | uvicorn.run(app, host="0.0.0.0", port=8000) 74 | -------------------------------------------------------------------------------- /api/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi==0.115.12 2 | uvicorn==0.34.2 3 | openai==1.77.0 4 | pydantic==2.11.4 5 | python-multipart==0.0.18 -------------------------------------------------------------------------------- /api/vercel.json: 
--------------------------------------------------------------------------------
1 | {
2 |     "version": 2,
3 |     "builds": [
4 |         { "src": "app.py", "use": "@vercel/python" }
5 |     ],
6 |     "routes": [
7 |         { "src": "/(.*)", "dest": "app.py" }
8 |     ]
9 | }
--------------------------------------------------------------------------------
/docs/GIT_SETUP.md:
--------------------------------------------------------------------------------
1 | # 🔧 Git Setup Guide
2 |
3 | Ready to level up your Git game? This guide is your ticket to becoming a Git ninja! We'll walk you through everything from basic setup to advanced workflows, making sure you're ready to rock your development journey. Just replace the placeholders (like `<your-name>`) with your actual info, and you're good to go! 🚀
4 |
5 | ## 📋 Prerequisites
6 |
7 | Before diving in, you will need these essentials:
8 |
9 | - 🌐 A GitHub account (your passport to the coding universe)
10 | - 🔧 Git installed on your local machine (the command-line magic wand)
11 | - 💻 A code editor (like Cursor, VS Code, etc. - your digital workshop)
12 | - ⌨️ Terminal access (Mac/Linux) or Command Prompt/PowerShell (Windows) - your command center
13 |
14 | 🔑 In addition, you will need a GitHub Personal Access Token (PAT). We'll show you how to create this later.
15 |
16 |
17 | ## ⚙️ Initial Setup
18 | ### 🪪 Configure Git Identity
19 |
20 | Time to tell Git who you are! This is like setting up your developer ID card. Your Git identity is used to:
21 | - Identify you as the author of your commits (adds your name to each change)
22 | - Show up in commit history (your contributions are properly attributed)
23 | - Connect your work to your GitHub profile (when email matches)
24 | - Help other developers know who to contact about changes
25 |
26 | <details>
27 | <summary>⚠️ What happens if you don't set your identity?</summary>
28 |
29 | If you don't set your Git identity:
30 |
31 | - Commits will show as "unknown" or use your system username
32 | - Your contributions won't be linked to your GitHub profile
33 | - You might get warnings when trying to commit
34 | - Other developers won't know who to contact about your changes
35 |
36 | </details>
37 | ```bash
38 | # Set your name and email (Git needs to know who's making those awesome commits!)
39 | git config --global user.name "<your-name>"
40 | git config --global user.email "<your-email>"
41 |
42 | # Double-check your settings (always good to verify!)
43 | git config --list
44 | ```
45 |
46 | #### 💡 Pro Tips for Git Identity
47 |
48 | - **Name**:
49 |   - Can be your real name or a pseudonym
50 |   - Will be publicly visible in commit history
51 |   - Choose something you're comfortable with being public
52 | - **Email**:
53 |   - Must match an email in your GitHub account for proper attribution
54 |   - You can add multiple emails to your GitHub account
55 |   - You can set different identities per repository
56 |   - For work projects, use your work email
57 |   - For personal projects, use the email linked to your GitHub account
58 | - **Multiple Identities**:
59 |   How to set a different identity for a specific repository:
60 |   ```bash
61 |   cd /[PATH TO REPO]
62 |   git config user.name "<your-name>"
63 |   git config user.email "<your-email>"
64 |   ```
65 |
66 |
67 | #### ✅ Let's Make Sure Git Knows Who You Are!
68 |
69 | ```bash
70 | # Check if Git recognizes you
71 | git config user.name
72 | git config user.email
73 |
74 | # Should show your name and email - if not, Git might be confused! 🤔
75 |
76 | # Test your identity with a commit
77 | echo "# Test" >> README.md
78 | git add README.md
79 | git commit -m "test: verify git identity"
80 | git log -1  # Shows your most recent commit with your identity
81 | # Should show your name and email in the commit
82 | ```
83 |
84 | #### 🔍 How to Verify These Instructions
85 |
86 | As a new Git user, it's smart to verify instructions! Here's how:
87 |
88 | 1. **Test in a Safe Environment**:
89 |    - Create a test repository to try commands
90 |    - Use `git status` frequently to understand what's happening
91 |    - If something goes wrong, you can always delete the test repo and start over
92 | 2. **Verify Command Output**:
93 |    - Most Git commands will show you what they're doing
94 |    - Some Git commands have a dry-run option. You can try adding `--dry-run` to see what would happen
95 |    - Use `git status` to check the result
96 | 3. **Common Verification Commands**:
97 |
98 |    ```bash
99 |    # See what Git is doing
100 |    git status
101 |
102 |    # View your commit history
103 |    git log
104 |
105 |    # See your last commit
106 |    git log -1
107 |
108 |    # Check your configuration
109 |    git config --list
110 |    ```
111 |
112 | 4. **When in Doubt**:
113 |    - Check the official Git documentation: [https://git-scm.com/doc](https://git-scm.com/doc)
114 |    - Use `git help <command>` for detailed help
115 |
116 | > 💡 **Pro Tip**: Git is designed to be safe - it's hard to permanently lose work. If you're unsure about a command, you can usually undo it!
117 |
118 | ### 🔐 GitHub Authentication: Your Personal Access Token (PAT)
119 |
120 | > 📸 Need visual guidance? Check out the [Detailed PAT Setup Guide](https://github.com/AI-Maker-Space/Interactive-Dev-Environment-for-LLM-Development#setting-up-your-github-personal-access-token) with step-by-step screenshots!
121 |
122 | 1. Create a Personal Access Token (PAT):
123 |    - Head to GitHub → Settings → Developer settings → Personal access tokens
124 |    - Click "Generate new token" (your golden ticket)
125 |    - Set permissions (at minimum: "Contents - Read and write")
126 |    - Copy and store your token somewhere safe (like a password manager)
128 | 
129 |    ```bash
130 |    # For macOS (stores in Keychain - your digital vault)
131 |    git config --global credential.helper osxkeychain
132 | 
133 |    # For Windows (stores in Credential Manager; recent Git for Windows installs ship Git Credential Manager, which uses "manager" instead of "wincred")
134 |    git config --global credential.helper wincred
135 | 
136 |    # For Linux (stores in memory - your temporary sticky note)
137 |    git config --global credential.helper cache
138 |    # Note that this is temporary. If you want a permanent secure option
139 |    # for Linux, you need to install and configure a credential helper.
140 |    ```
141 | 
142 | 3. Verify your setup:
143 | 
144 |    ```bash
145 |    # Test your authentication
146 |    git clone https://github.com/<your-username>/<repo-name>.git
147 |    # You should be prompted for your username and PAT
148 |    # Subsequent commands to the same remote repo will use
149 |    # the PAT stored by the credential helper
150 |    ```
151 | 
152 | 
153 | ## 🚀 Repository Setup
154 | 
155 | ### 📥 Forking and Cloning
156 | 
157 | #### 🔄 Fork the Repository
158 | 
159 | - Navigate to the original repository on GitHub
160 |   Example: https://github.com/AI-Maker-Space/The-AI-Engineer-Challenge
161 | 
162 |   ![GitHub Fork button screenshot](https://i.imgur.com/bhjySNh.png)
163 | 
164 | - Click the "Fork" button in the top-right corner
165 | - Keep the repository name as is, or change it if you'd like
166 | - Click "Create fork"
167 | 
168 | 
169 | 💡 **What is forking?** Forking creates your own copy of someone else's repository on GitHub. It's like photocopying a recipe so you can make your own changes without affecting the original. This allows you to freely experiment, contribute back via pull requests, or build your own version of a project — all while keeping the original intact.
170 | 
171 | #### 📥 Clone Your Fork
172 | 
173 | ```bash
174 | # Navigate to where you want your project to live
175 | cd <path-to-your-projects-folder>
176 | 
177 | # Clone your fork (this is like downloading your copy to your computer)
178 | git clone https://github.com/<your-username>/<repo-name>.git
179 | # You may be prompted for your GitHub username and personal access token (PAT)
180 | 
181 | # Move into your new project directory
182 | cd <repo-name>
183 | 
184 | # Add the original repository as "upstream" (so you can keep up with the cool updates!)
185 | git remote add upstream https://github.com/<original-owner>/<repo-name>.git
186 | 
187 | # Check your remote connections
188 | git remote -v
189 | ```
190 | 
191 | 💡 **Tip**: If you've already stored your PAT in the macOS Keychain (or equivalent), you won't be prompted again.
192 | 
193 | 💡 **What is cloning?** Cloning creates a local copy of a GitHub repository on your computer. It downloads a working version of the project so you can explore, make changes, and push updates from your own machine. While _forking_ gives you your own copy in the cloud (on GitHub), _cloning_ brings that copy down to your local development environment.
194 | 
195 | #### ✅ Let's Make Sure Everything is Connected and Working!
196 | 
197 | ```bash
198 | # Check if Git is installed and ready to rock
199 | git --version
200 | # If Git is installed, you should see something like "git version x.x.x"
201 | 
202 | # cd to your cloned project directory
203 | cd <repo-name>
204 | 
205 | # Check your remotes (you should see both 'origin' and 'upstream')
206 | git remote -v
207 | 
208 | # Test your GitHub connection
209 | git ls-remote https://github.com/<your-username>/<repo-name>.git
210 | ```
211 | You should see a list of Git refs.
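Since your clone already has named remotes set up, the same connectivity test also works with a remote name instead of the full URL (a handy shorthand):

```bash
# Same check, using the saved remote names instead of URLs
git ls-remote origin     # your fork
git ls-remote upstream   # the original repository
```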
If you get an error:
212 | - Double-check your remote URL (`git remote -v`)
213 | - Make sure your repository exists and is accessible
214 | - For private repos, ensure your Personal Access Token (PAT) is valid and stored
215 | 
216 | ### 🚫 Configuring .gitignore
217 | 
218 | #### 📁 Setting Up .gitignore
219 | 
220 | Check or update the `.gitignore` file to keep unnecessary files out of your repository. Things like dependencies, environment files, and system artifacts don't belong in version control.
221 | If your project doesn't already have a `.gitignore` file, you can create it manually:
222 | 
223 | ```bash
224 | # Create .gitignore
225 | touch .gitignore
226 | ```
227 | 
228 | Here are some common patterns you might want to include in your `.gitignore`:
229 | 
230 | ```
231 | # Dependencies
232 | node_modules/
233 | venv/
234 | __pycache__/
235 | 
236 | # Environment files
237 | .env
238 | .env.local
239 | 
240 | # IDE settings
241 | .vscode/
242 | .idea/
243 | *.swp
244 | 
245 | # OS artifacts
246 | .DS_Store
247 | Thumbs.db
248 | ```
249 | 💡 **These are just examples.** The contents of your `.gitignore` file depend on your tools and language.
250 | You can find pre-made templates for different tech stacks at [https://github.com/github/gitignore](https://github.com/github/gitignore).
251 | 
252 | #### ✅ Let's Make Sure Your .gitignore is Working!
253 | 
254 | ```bash
255 | # Check if .gitignore is doing its job
256 | git status
257 | # Should not show any ignored files — if you see them listed, your .gitignore file might not be working as expected.
258 | 
259 | # Optional: check if a specific file is being ignored and why
260 | git check-ignore -v <file>
261 | # This shows which rule (and which .gitignore file) is causing it to be ignored
262 | ```
263 | 
264 | ## 🔄 Development Workflow
265 | 
266 | 🧭 **Development Workflow Overview**
267 | Here's a practical Git workflow that's great for solo projects and easy to build on as your skills grow and your team expands. It's beginner-friendly but solid enough for real-world projects.
268 | 💡 **Heads-up:**
269 | This guide focuses on using Git from the command line, which gives you flexibility and full control, especially when working locally.
270 | Many of the steps explained here can also be done using GitHub's web interface,
271 | which can be especially helpful when you're working in a shared repository.
272 | 
273 | ### 🌿 Creating and Switching Branches
274 | Branches let you work on new features or fixes without touching the main project (creating a safe sandbox to play in).
275 | ```bash
276 | 
277 | # Create and switch to a new branch
278 | git checkout -b <branch-name>
279 | 
280 | # Switch between branches
281 | git checkout <branch-name>
282 | 
283 | # List all branches
284 | git branch -a
285 | 
286 | ```
287 | 💡 **What is branching?**
288 | Branches let you create alternate versions of your codebase where you can make changes safely, without affecting the main project. They're useful for working on features, fixes, or experiments — and can be merged back in when ready.
289 | 
290 | ### ✏️ Committing Your Changes
291 | 
292 | A commit saves a snapshot of your changes to the repository. It's like taking a picture of your progress — with a message explaining what you changed and why. Commits help track history, share your work, and roll back if needed.
293 | ```bash
294 | # Check status (what's changed in your universe?)
295 | git status
296 | 
297 | # Stage changes (get your changes ready for the spotlight)
298 | git add <file>
299 | # or stage all changes
300 | git add .
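
# Optional sanity check: review exactly what you've staged before committing
# (--staged compares the staging area against your last commit)
git diff --staged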
301 | 
302 | # Commit changes (seal the deal with a message)
303 | git commit -m "<your commit message>"
304 | 
305 | # Optional but recommended: check for updates before pushing
306 | # This ensures you're not pushing over someone else's recent changes
307 | git fetch origin
308 | git status
309 | # If you're behind, consider pulling or merging before you push
310 | # → See [Fetching and Pulling](#fetching-and-pulling) for more
311 | # Push to your fork (send your changes to your GitHub repo)
312 | git push origin <branch-name>
313 | ```
314 | 💡 **Tip:** If you make a mistake — in your code or your commit message — just fix it and make another commit. There's no need to rewrite history.
315 | 
316 | #### 📝 Writing Good Commit Messages
317 | 
318 | Use this format for commit messages (like writing a good story):
319 | 
320 | ```
321 | <type>(<scope>): <description>
322 | 
323 | [optional body]
324 | 
325 | [optional footer]
326 | ```
327 | 
328 | Common types:
329 | 
330 | 1. `feat`: New feature (the cool new stuff)
331 | 2. `fix`: Bug fix (the superhero saves the day)
332 | 3. `docs`: Documentation changes (making things clearer)
333 | 4. `style`: Code style changes (making it pretty)
334 | 5. `refactor`: Code changes that neither fix bugs nor add features (spring cleaning)
335 | 6. `test`: Adding or modifying tests (making sure everything works)
336 | 7. `chore`: Changes to build process or auxiliary tools (housekeeping)
337 | 
338 | Examples:
339 | ```bash
340 | git commit -m "feat(auth): add OAuth2 login support"
341 | git commit -m "fix(api): resolve timeout issues in user endpoint"
342 | git commit -m "docs(readme): update installation instructions"
343 | ```
344 | 
345 | ### 📡 Fetching and Pulling
346 | 
347 | Before pushing your changes or syncing with others, it helps to understand the difference between two common Git commands: `fetch` and `pull`.
348 | 
349 | #### 🚚 `git fetch`
350 | 
351 | Downloads changes from a remote repository — but **doesn't apply them** to your current branch.
352 | Use this when you want to **check for updates** without affecting your work.
353 | ```bash
354 | git fetch origin
355 | git log origin/main # Optional: review new commits before merging
356 | ```
357 | 
358 | #### 📥 `git pull`
359 | 
360 | Does the same fetch, **but also merges** the changes into your current branch right away.
361 | ```bash
362 | git pull origin main
363 | ```
364 | 
365 | #### 🧠 Tip:
366 | 
367 | - Use `fetch` when you want to **stay in control** and see what's changed first.
368 | - Use `pull` when you're ready to **update your branch immediately**.
369 | 
370 | 
371 | 💡 You'll see `git fetch origin` recommended in several steps below.
372 | It's a safe habit that helps avoid conflicts before pushing your changes.
373 | ### 🔄 Keeping Your Fork Updated
374 | 
375 | 💡 Pre-check: Make sure your local `main` branch is tracking your fork (`origin`).
376 | Run:
377 | ```bash
378 | git branch -vv
379 | ```
380 | You should see `[origin/main]` next to `main` in the output, like this:
381 | ```
382 | * main abc1234 [origin/main] message here...
383 | ```
384 | 
385 | If your `main` branch isn't tracking `origin/main`, you probably don't need to fix it. But if push/pull commands aren't working as expected, you can set it manually:
386 | ```bash
387 | git branch --set-upstream-to=origin/main main
388 | ```
389 | 
390 | Stay in sync with the original repository so you don't fall behind!
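Curious how far behind you are? Before merging anything, you can count the upstream commits you're missing (a small sketch; assumes the `upstream` remote is configured as shown earlier):

```bash
# Count commits on upstream/main that your local main doesn't have yet
git fetch upstream
git rev-list --count main..upstream/main
```

Then run the full sync: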
391 | ```bash
392 | # Fetch changes from your fork (usually optional, but good to verify)
393 | git fetch origin
394 | 
395 | # Fetch upstream changes (see what's new in the original project)
396 | git fetch upstream
397 | 
398 | # Switch to your local main branch
399 | git checkout main
400 | 
401 | # Merge upstream changes into your local main branch
402 | git merge upstream/main
403 | 
404 | # Push the updated main branch back to your fork on GitHub
405 | git push origin main
406 | ```
407 | 
408 | > 🎯 **If these commands succeed**, you'll either see new commits pulled in or a message that everything is already up to date.
409 | 
410 | 💡 **Why it matters**: Keeping your fork updated helps avoid painful merge conflicts later and ensures your pull requests are based on the latest project state.
411 | 
412 | ### 🔀 Merging a Branch into Main
413 | 
414 | Once you're finished working on a feature branch, you'll want to bring your changes into `main`.
415 | 
416 | This helps you:
417 | - Keep your work organized
418 | - Prepare for sharing or submitting a pull request
419 | - Keep your fork's `main` branch current
420 | ```bash
421 | # Make sure you're on your feature branch
422 | git checkout <feature-branch>
423 | 
424 | # Confirm your changes are committed
425 | git status # Should say "nothing to commit"
426 | git log # Optional: sanity check your commits
427 | 
428 | # Switch to your local main branch
429 | git checkout main
430 | 
431 | # Merge your feature branch into main
432 | git merge <feature-branch>
433 | 
434 | # Push the updated main branch to your fork on GitHub
435 | git push origin main
436 | ```
437 | 
438 | 💡 **Optional**: After merging, you can delete your feature branch if you're done with it:
439 | ```bash
440 | git branch -d <feature-branch> # delete local branch
441 | # With -d, Git only deletes the local branch if its commits are already part of another branch (like main)
442 | 
443 | git push origin --delete <feature-branch>
444 | # Deletes the branch from your fork on GitHub
445 | ```
446 | ### ✅ Checking Your Branch Status
447 | 
448 | ```bash
449 | # Check branch status (see all your parallel universes)
450 | git branch -v
451 | # Should show all branches and their status
452 | 
453 | # Verify remote tracking (make sure you're not lost in space)
454 | git branch -vv
455 | # Should show tracking information
456 | ```
457 | 
458 | 
459 | 
460 | 
461 | ## 📤 Pull Request Workflow
462 | ### 🛠️ Creating a Pull Request
463 | 
464 | - Push your changes to your fork (get your code ready for the spotlight)
465 | - Go to your fork on GitHub
466 | - Click "Compare & pull request" (time to show off your work!)
467 | - Fill in the PR template (tell everyone what you've been up to)
468 | - Request reviews from team members (get some expert eyes on your work)
469 | 
470 | 💡 **What is a pull request?**
471 | A pull request (PR) lets you propose changes to a repository — it's like saying, "Here's what I worked on, and I'd like to add it to the main project." It opens a conversation where others can review, discuss, and approve your work before merging it in.
472 | ### ✨ PR Best Practices
473 | 
474 | - Write clear, descriptive titles (make it pop!)
475 | - Link related issues (connect the dots)
476 | - Include screenshots for UI changes (show, don't just tell)
477 | - Keep PRs focused and small (bite-sized is better)
478 | - Respond to review comments promptly (keep the conversation flowing)
479 | 
480 | ### 📋 PR Due-Diligence
481 | ##### ✅ Documentation & Presentation Checklist
482 | 
483 | Before hitting that "Create pull request" button, run through this quick checklist (it's like your pre-flight safety check!):
484 | 
485 | **🔧 For Code Changes:**
486 | 🟦 Test your changes locally (make sure everything works as expected!)
487 | 🟦 Look for any obvious errors or bugs in your code
488 | 🟦 Review your own code once more (fresh eyes catch more issues!)
489 | 🟦 Make sure your branch is up to date with the main branch (no surprise conflicts)
490 | 
491 | **📘 For Documentation:**
492 | ✅ Spelling and grammar look good
493 | ✅ All links are working (no 404s)
494 | ✅ Code examples are tested and runnable
495 | ✅ Formatting looks correct in GitHub preview
496 | ✅ Information is accurate and up to date
497 | 
498 | **Before submitting your PR, be sure to check the `CONTRIBUTING.md` file in the repository.**
499 | It may include important project-specific guidelines about branch naming, testing, code style, or review expectations.
500 | 
501 | > 💡 If you don't see one, it's still good practice to write clear commit messages and keep your PR focused.
502 | 
503 | ##### 📋 Final Sanity Checks Before Submitting
504 | 
505 | ```bash
506 | # Check your branch status
507 | git status
508 | # Should be clean - no uncommitted changes
509 | 
510 | # Verify your commits
511 | git log --oneline
512 | # Should show clear, conventional commit messages
513 | 
514 | # Check if you're up to date with the original project
515 | git fetch upstream
516 | git log --oneline HEAD..upstream/main
517 | # No output means you're up to date with upstream/main
518 | ```
519 | 
520 | 
521 | ## 🔧 Troubleshooting
522 | ### 🚫 Authentication Issues
523 | 
524 | ```bash
525 | # Reset credentials (when GitHub forgets who you are)
526 | git config --global --unset credential.helper
527 | git config --global credential.helper osxkeychain # or the helper appropriate for your OS
528 | 
529 | # Verify your PAT (check if your secret handshake still works)
530 | git ls-remote https://github.com/<your-username>/<repo-name>.git
531 | ```
532 | 
533 | ### 🔄 Merge Conflicts
534 | 
535 | Sometimes, when you merge changes from another branch (like `main` or `upstream/main`), Git may not be able to automatically combine everything — especially if the same lines of code were changed in both places. This creates a **merge conflict** that you'll need to resolve manually.
536 | 
537 | Here's how to handle it step by step.
538 | First, make sure your local `main` is up to date with the original repo:
539 | ```bash
540 | # Make sure your local main is up to date with the original repo
541 | git checkout main
542 | git fetch upstream
543 | git merge upstream/main
544 | 
545 | # If there are conflicts during this merge:
546 | # Open the conflicting files in your editor
547 | # Look for lines marked with <<<<<<<, =======, and >>>>>>>
548 | # Edit the files to resolve the conflicts manually
549 | 
550 | # Once you've finished resolving conflicts, stage the changes
551 | git add .
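
# Optional: double-check that no conflict markers slipped through
# (--check warns about leftover <<<<<<< / ======= / >>>>>>> markers in staged changes)
git diff --cached --check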
552 | 
553 | # Then commit the resolved files
554 | git commit -m "fix: resolve merge conflicts"
555 | ```
556 | 
557 | Then make sure whatever feature branch you're working on is also up to date:
558 | ```bash
559 | # Switch to your feature branch
560 | git checkout <feature-branch>
561 | 
562 | # Merge the updated main into your feature branch
563 | git merge main
564 | 
565 | # If this merge causes additional conflicts,
566 | # follow the same resolution steps as above.
567 | ```
568 | 
569 | ### 🔄 Branch Issues
570 | 
571 | ```bash
572 | # Delete local branch (clean up your workspace)
573 | git branch -d <branch-name>
574 | 
575 | # Delete remote branch (clean up the cloud)
576 | git push origin --delete <branch-name>
577 | 
578 | # Recover a deleted branch
579 | # You probably won't need this, but just in case you force-deleted a branch:
580 | git reflog
581 | # Find the commit hash for the lost work, then recreate the branch from it
582 | git checkout -b <branch-name> <commit-hash>
583 | ```
584 | 
585 | 💡 You usually won't need to recover a branch unless you force-delete (`-D`) or lose unpushed commits after a reset, rebase, or amend. Regular `-d` is safe.
586 | 
587 | ## 🎓 Additional Learning Resources
588 | 
589 | - [Git Documentation](https://git-scm.com/doc) (the Git bible)
590 | - [GitHub Guides](https://guides.github.com/) (your Git playbook)
591 | - [Conventional Commits](https://www.conventionalcommits.org/) (the art of commit messages)
592 | - [GitHub Flow](https://guides.github.com/introduction/flow/) (the way of the Git warrior)
593 | - [GitHub Skills](https://skills.github.com/) (level up your Git game)
594 | 
595 | ---
596 | 
597 | Remember: Git is your friend! 🚀 Happy coding! 💻
--------------------------------------------------------------------------------
/frontend/README.md:
--------------------------------------------------------------------------------
1 | ### Front End
2 | 
3 | Please populate this README with instructions on how to run the application!
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "beyond-chatgpt"
3 | version = "0.1.0"
4 | description = "gpt-like-a-dev"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | dependencies = [
8 |     "fastapi>=0.115.12",
9 |     "jupyter>=1.1.1",
10 |     "openai",
11 |     "pydantic>=2.11.4",
12 |     "uvicorn>=0.34.2",
13 | ]
--------------------------------------------------------------------------------
/vercel.json:
--------------------------------------------------------------------------------
1 | {
2 |   "version": 2,
3 |   "builds": [
4 |     { "src": "frontend/package.json", "use": "@vercel/next" },
5 |     {
6 |       "src": "api/app.py",
7 |       "use": "@vercel/python"
8 |     }
9 |   ],
10 |   "routes": [
11 |     {
12 |       "src": "/api/(.*)",
13 |       "dest": "api/app.py"
14 |     },
15 |     {
16 |       "src": "/(.*)",
17 |       "dest": "frontend/$1"
18 |     }
19 |   ]
20 | }
--------------------------------------------------------------------------------