├── .gitignore ├── Guide to Jupyter.ipynb ├── Intermediate Python.ipynb ├── LICENSE ├── README.md ├── SETUP-PC.md ├── SETUP-mac.md ├── business.jpg ├── choose.jpg ├── diagnostics.ipynb ├── diagnostics.py ├── environment.yml ├── important.jpg ├── intro └── lab1.ipynb ├── outputs ├── assistant-output.ipynb ├── expert-output.ipynb ├── lab1-output.ipynb └── prototype-output.ipynb ├── pickme.png ├── project1 - Airline AI Assistant └── assistant.ipynb ├── project2 - Expert knowledge worker ├── expert.ipynb └── knowledge-base │ ├── company │ ├── about.md │ ├── careers.md │ └── overview.md │ ├── contracts │ ├── .md │ ├── Contract with Apex Reinsurance for Rellm - AI-Powered Enterprise Reinsurance Solution.md │ ├── Contract with Belvedere Insurance for Markellm.md │ ├── Contract with BrightWay Solutions for Markellm.md │ ├── Contract with EverGuard Insurance for Rellm - AI-Powered Enterprise Reinsurance Solution.md │ ├── Contract with GreenField Holdings for Markellm.md │ ├── Contract with GreenValley Insurance for Homellm.md │ ├── Contract with Greenstone Insurance for Homellm.md │ ├── Contract with Pinnacle Insurance Co. for Homellm.md │ ├── Contract with Roadway Insurance Inc. for Carllm.md │ ├── Contract with Stellar Insurance Co. for Rellm.md │ ├── Contract with TechDrive Insurance for Carllm.md │ └── Contract with Velocity Auto Solutions for Carllm.md │ ├── employees │ ├── Alex Chen.md │ ├── Alex Harper.md │ ├── Alex Thomson.md │ ├── Avery Lancaster.md │ ├── Emily Carter.md │ ├── Emily Tran.md │ ├── Jordan Blake.md │ ├── Jordan K. 
Bishop.md │ ├── Maxine Thompson.md │ ├── Oliver Spencer.md │ ├── Samantha Greene.md │ └── Samuel Trenton.md │ └── products │ ├── Carllm.md │ ├── Homellm.md │ ├── Markellm.md │ └── Rellm.md ├── project3 - Price intelligence └── data.ipynb ├── project4 - Resume parser ├── my_resume.txt ├── parser.ipynb └── sample.json ├── project5 - Code generator ├── curator.ipynb ├── trades_claude.py ├── trades_gemini.py └── trades_gpt-4o.py ├── project6 - simple RAG example with FAISS ├── expert.ipynb └── product.md ├── project7 - llamacpp local inference └── llamacpp.ipynb ├── project8 - connect4 ├── board.py ├── board_view.py ├── c4.py ├── game.py ├── llm.py ├── player.py └── prototype.ipynb ├── requirements.txt ├── resources.jpg └── troubleshooting.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | # Github's default gitignore for Python 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # poetry 100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 101 | # This is especially recommended for binary packages to ensure reproducibility, and is more 102 | # commonly ignored for libraries. 103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 104 | #poetry.lock 105 | 106 | # pdm 107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
108 | #pdm.lock 109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 110 | # in version control. 111 | # https://pdm.fming.dev/#use-with-ide 112 | .pdm.toml 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | choose/ 133 | choose.bak/ 134 | 135 | # Spyder project settings 136 | .spyderproject 137 | .spyproject 138 | 139 | # Rope project settings 140 | .ropeproject 141 | 142 | # mkdocs documentation 143 | /site 144 | 145 | # mypy 146 | .mypy_cache/ 147 | .dmypy.json 148 | dmypy.json 149 | 150 | # Pyre type checker 151 | .pyre/ 152 | 153 | # pytype static type analyzer 154 | .pytype/ 155 | 156 | # Cython debug symbols 157 | cython_debug/ 158 | 159 | # PyCharm 160 | .idea/ 161 | 162 | # Added this to ignore models downloaded from HF 163 | model_cache/ 164 | # Ignore finder files 165 | .DS_Store 166 | /.DS_Store 167 | 168 | # Ignore Chroma vector database 169 | vector_db/ 170 | 171 | # Ignore diagnostics report 172 | report.txt 173 | -------------------------------------------------------------------------------- /Guide to Jupyter.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c291475-8c7c-461c-9b12-545a887b2432", 6 | "metadata": {}, 7 | "source": [ 8 | "# Jupyter Lab\n", 9 | "\n", 10 | "## A Quick Start Guide\n", 11 | "\n", 12 | "Welcome to the wonderful world of Jupyter lab! \n", 13 | "This is a Data Science playground where you can easily write code and investigate the results. 
It's an ideal environment for: \n", 14 | "- Research & Development\n", 15 | "- Prototyping\n", 16 | "- Learning (that's us!)\n", 17 | "\n", 18 | "It's not typically used for shipping production code, and in Week 8 we'll explore the bridge between Jupyter and python code.\n", 19 | "\n", 20 | "A file in Jupyter Lab, like this one, is called a **Notebook**.\n", 21 | "\n", 22 | "A long time ago, Jupyter used to be called \"IPython\", and so the extensions of notebooks are \".ipynb\" which stands for \"IPython Notebook\".\n", 23 | "\n", 24 | "On the left is a File Browser that lets you navigate around the directories and choose different notebooks. But you probably know that already, or you wouldn't have got here!\n", 25 | "\n", 26 | "The notebook consists of a series of square boxes called \"cells\". Some of them contain text, like this cell, and some of them contain code, like the cell below.\n", 27 | "\n", 28 | "Click in a cell with code and press `Shift + Return` (or `Shift + Enter`) to run the code and print the output.\n", 29 | "\n", 30 | "Do that now for the cell below this:" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": null, 36 | "id": "33d37cd8-55c9-4e03-868c-34aa9cab2c80", 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "# Click anywhere in this cell and press Shift + Return\n", 41 | "\n", 42 | "2 + 2" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "id": "9e95df7b-55c6-4204-b8f9-cae83360fc23", 48 | "metadata": {}, 49 | "source": [ 50 | "## Congrats!\n", 51 | "\n", 52 | "Now run the next cell which sets a value, followed by the cells after it to print the value" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "id": "585eb9c1-85ee-4c27-8dc2-b4d8d022eda0", 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "# Set a value for a variable\n", 63 | "\n", 64 | "favorite_fruit = \"bananas\"" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": null, 70 
| "id": "07792faa-761d-46cb-b9b7-2bbf70bb1628", 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "# The result of the last statement is shown after you run it\n", 75 | "\n", 76 | "favorite_fruit" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "id": "a067d2b1-53d5-4aeb-8a3c-574d39ff654a", 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "# Use the variable\n", 87 | "\n", 88 | "print(f\"My favorite fruit is {favorite_fruit}\")" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "id": "4c5a4e60-b7f4-4953-9e80-6d84ba4664ad", 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "# Now change the variable\n", 99 | "\n", 100 | "favorite_fruit = f\"anything but {favorite_fruit}\"" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "id": "9442d5c9-f57d-4839-b0af-dce58646c04f", 106 | "metadata": {}, 107 | "source": [ 108 | "## Now go back and rerun the cell with the print statement, two cells back\n", 109 | "\n", 110 | "See how it prints something different, even though favorite_fruit was changed further down in the notebook? \n", 111 | "\n", 112 | "The order that code appears in the notebook doesn't matter. What matters is the order that the code is **executed**. There's a python process sitting behind this notebook in which the variables are being changed.\n", 113 | "\n", 114 | "This catches some people out when they first use Jupyter." 
115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "id": "8e5ec81d-7c5b-4025-bd2e-468d67b581b6", 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "# Then run this cell twice, and see if you understand what's going on\n", 125 | "\n", 126 | "print(f\"My favorite fruit is {favorite_fruit}\")\n", 127 | "\n", 128 | "favorite_fruit = \"apples\"" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "id": "a29dab2d-bab9-4a54-8504-05e62594cc6f", 134 | "metadata": {}, 135 | "source": [ 136 | "# Explaining the 'kernel'\n", 137 | "\n", 138 | "Sitting behind this notebook is a Python process which executes each cell when you run it. That Python process is known as the Kernel. Each notebook has its own separate Kernel.\n", 139 | "\n", 140 | "You can go to the Kernel menu and select \"Restart Kernel\".\n", 141 | "\n", 142 | "If you then try to run the next cell, you'll get an error, because favorite_fruit is no longer defined. You'll need to run the cells from the top of the notebook again. Then the next cell should run fine." 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "id": "84b1e410-5eda-4e2c-97ce-4eebcff816c5", 149 | "metadata": {}, 150 | "outputs": [], 151 | "source": [ 152 | "print(f\"My favorite fruit is {favorite_fruit}\")" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "id": "4d4188fc-d9cc-42be-8b4e-ae8630456764", 158 | "metadata": {}, 159 | "source": [ 160 | "# Adding and moving cells\n", 161 | "\n", 162 | "Click in this cell, then click the \\[+\\] button in the toolbar above to create a new cell immediately below this one. Copy and paste in the code in the prior cell, then run it! 
There are also icons in the top right of the selected cell to delete it (bin), duplicate it, and move it up and down.\n" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": null, 168 | "id": "ce258424-40c3-49a7-9462-e6fa25014b03", 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [] 172 | }, 173 | { 174 | "cell_type": "markdown", 175 | "id": "30e71f50-8f01-470a-9d7a-b82a6cef4236", 176 | "metadata": {}, 177 | "source": [ 178 | "# Cell output\n", 179 | "\n", 180 | "When you execute a cell, the standard output and the result of the last statement is written to the area immediately under the code, known as the 'cell output'. When you save a Notebook from the file menu (or command+S), the output is also saved, making it a useful record of what happened.\n", 181 | "\n", 182 | "You can clean this up by going to Edit menu >> Clear Outputs of All Cells, or Kernel menu >> Restart Kernel and Clear Outputs of All Cells." 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": null, 188 | "id": "a4d021e2-c284-411f-8ab1-030530cfbe72", 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [ 192 | "spams = [\"spam\"] * 1000\n", 193 | "print(spams)\n", 194 | "\n", 195 | "# Might be worth clearing output after running this!" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "id": "eac060f2-7a71-46e7-8235-b6ad0a76f5f8", 201 | "metadata": {}, 202 | "source": [ 203 | "# Using markdown\n", 204 | "\n", 205 | "So what's going on with these areas with writing in them, like this one? Well, there's actually a different kind of cell called a 'Markdown' cell for adding explanations like this. Click the + button to add a cell. 
Then in the toolbar, click where it says 'Code' and change it to 'Markdown'.\n", 206 | "\n", 207 | "Add some comments using Markdown format, perhaps copying and pasting from here:\n", 208 | "\n", 209 | "```\n", 210 | "# This is a heading\n", 211 | "## This is a sub-head\n", 212 | "### And a sub-sub-head\n", 213 | "\n", 214 | "I like Jupyter Lab because it's\n", 215 | "- Easy\n", 216 | "- Flexible\n", 217 | "- Satisfying\n", 218 | "```\n", 219 | "\n", 220 | "And to turn this into formatted text simply with Shift+Return in the cell.\n", 221 | "Click in the cell and press the Bin icon if you want to remove it." 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": null, 227 | "id": "e1586320-c90f-4f22-8b39-df6865484950", 228 | "metadata": {}, 229 | "outputs": [], 230 | "source": [] 231 | }, 232 | { 233 | "cell_type": "markdown", 234 | "id": "1330c83c-67ac-4ca0-ac92-a71699e0c31b", 235 | "metadata": {}, 236 | "source": [ 237 | "# The exclamation point\n", 238 | "\n", 239 | "There's a super useful feature of jupyter labs; you can type a command with a ! 
in front of it in a code cell, like:\n", 240 | "\n", 241 | "!pip install \\[some_package\\]\n", 242 | "\n", 243 | "And it will run it at the command line (as if in Windows Powershell or Mac Terminal) and print the result" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": null, 249 | "id": "82042fc5-a907-4381-a4b8-eb9386df19cd", 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [ 253 | "# list the current directory\n", 254 | "\n", 255 | "!ls" 256 | ] 257 | }, 258 | { 259 | "cell_type": "code", 260 | "execution_count": null, 261 | "id": "4fc3e3da-8a55-40cc-9706-48bf12a0e20e", 262 | "metadata": {}, 263 | "outputs": [], 264 | "source": [ 265 | "# ping cnn.com - press the stop button in the toolbar when you're bored\n", 266 | "\n", 267 | "!ping cnn.com" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": null, 273 | "id": "a58e9462-89a2-4b4f-b4aa-51c4bd9f796b", 274 | "metadata": {}, 275 | "outputs": [], 276 | "source": [ 277 | "# This is a useful command that ensures your Anaconda environment \n", 278 | "# is up to date with any new upgrades to packages;\n", 279 | "# But it might take a minute and will print a lot to output\n", 280 | "\n", 281 | "!conda env update -f ../environment.yml --prune" 282 | ] 283 | }, 284 | { 285 | "cell_type": "markdown", 286 | "id": "4688baaf-a72c-41b5-90b6-474cb24790a7", 287 | "metadata": {}, 288 | "source": [ 289 | "# Minor things we encounter on the course\n", 290 | "\n", 291 | "This isn't necessarily a feature of Jupyter, but it's a nice package to know about that is useful in Jupyter Labs, and I use it in the course.\n", 292 | "\n", 293 | "The package `tqdm` will print a nice progress bar if you wrap any iterable." 
294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": null, 299 | "id": "2646a4e5-3c23-4aee-a34d-d623815187d2", 300 | "metadata": {}, 301 | "outputs": [], 302 | "source": [ 303 | "# Here's some code with no progress bar\n", 304 | "# It will take 10 seconds while you wonder what's happening...\n", 305 | "\n", 306 | "import time\n", 307 | "\n", 308 | "spams = [\"spam\"] * 1000\n", 309 | "\n", 310 | "for spam in spams:\n", 311 | " time.sleep(0.01)" 312 | ] 313 | }, 314 | { 315 | "cell_type": "code", 316 | "execution_count": null, 317 | "id": "6e96be3d-fa82-42a3-a8aa-b81dd20563a5", 318 | "metadata": {}, 319 | "outputs": [], 320 | "source": [ 321 | "# And now, with a nice little progress bar:\n", 322 | "\n", 323 | "import time\n", 324 | "from tqdm import tqdm\n", 325 | "\n", 326 | "spams = [\"spam\"] * 1000\n", 327 | "\n", 328 | "for spam in tqdm(spams):\n", 329 | " time.sleep(0.01)" 330 | ] 331 | }, 332 | { 333 | "cell_type": "code", 334 | "execution_count": null, 335 | "id": "63c788dd-4618-4bb4-a5ce-204411a38ade", 336 | "metadata": {}, 337 | "outputs": [], 338 | "source": [ 339 | "# On a different topic, here's a useful way to print output in markdown\n", 340 | "\n", 341 | "from IPython.display import Markdown, display\n", 342 | "\n", 343 | "display(Markdown(\"# This is a big heading!\\n\\n- And this is a bullet-point\\n- So is this\\n- Me, too!\"))\n" 344 | ] 345 | }, 346 | { 347 | "cell_type": "markdown", 348 | "id": "9d14c1fb-3321-4387-b6ca-9af27676f980", 349 | "metadata": {}, 350 | "source": [ 351 | "# That's it! You're up to speed on Jupyter Lab.\n", 352 | "\n", 353 | "## Want to be even more advanced?\n", 354 | "\n", 355 | "If you want to become a pro at Jupyter Lab, you can read their tutorial [here](https://jupyterlab.readthedocs.io/en/latest/). But this isn't required for our course; just a good technique for hitting Shift + Return and enjoying the result!" 
356 | ] 357 | } 358 | ], 359 | "metadata": { 360 | "kernelspec": { 361 | "display_name": "Python 3 (ipykernel)", 362 | "language": "python", 363 | "name": "python3" 364 | }, 365 | "language_info": { 366 | "codemirror_mode": { 367 | "name": "ipython", 368 | "version": 3 369 | }, 370 | "file_extension": ".py", 371 | "mimetype": "text/x-python", 372 | "name": "python", 373 | "nbconvert_exporter": "python", 374 | "pygments_lexer": "ipython3", 375 | "version": "3.11.10" 376 | } 377 | }, 378 | "nbformat": 4, 379 | "nbformat_minor": 5 380 | } 381 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Ed Donner 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Choosing the Right LLM 2 | ### How to select, train and apply state-of-the-art LLMs to real-world business use cases. 3 | 4 | ![Choosing the right LLM](pickme.png) 5 | 6 | This repo has companion code for my class on comparing and training LLMs. 7 | 8 | ### Resources to accompany the class 9 | 10 | The resources are [here](https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/) 11 | 12 | ### A note before you begin 13 | 14 | I'm here to help you be most successful with your learning! If you hit any snafus, or if you have any ideas on how I can improve the course, please do reach out in the platform or by emailing me direct (ed@edwarddonner.com). It's always great to connect with people on LinkedIn to build up the community - you'll find me here: 15 | https://www.linkedin.com/in/eddonner/ 16 | 17 | If you'd like to go more deeply into LLMs and Agents: 18 | - I'm running a number of [Live Events](https://www.oreilly.com/search/?q=author%3A%20%22Ed%20Donner%22) with O'Reilly and Pearson 19 | - I also have a comprehensive, hands-on 8-week [Mastering LLM engineering](https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models/?referralCode=35EB41EBB11DD247CF54) course that builds an entire Agentic AI platform from the ground up, including RAG and fine-tuning. 20 | 21 | ## For the introduction section - using Ollama 22 | 23 | In the first section, we use Ollama to run a model locally 24 | 1. Download and install Ollama from https://ollama.com noting that on a PC you might need to have administrator permissions for the install to work properly 25 | 2. On a PC, start a Command prompt / Powershell (Press Win + R, type `cmd`, and press Enter). On a Mac, start a Terminal (Applications > Utilities > Terminal). 26 | 3. 
Run `ollama run llama3.2` or for smaller machines try `ollama run llama3.2:1b` 27 | 4. If this doesn't work, you may need to run `ollama serve` in another Powershell (Windows) or Terminal (Mac), and try step 3 again 28 | 5. And if that doesn't work on your box, I've set up this on the cloud. This is on Google Colab, which will need you to have a Google account to sign in, but is free: https://colab.research.google.com/drive/1-_f5XZPsChvfU1sJ0QqCePtIuc55LSdu?usp=sharing 29 | 30 | Any problems, please contact me! 31 | 32 | Now on to the main setup: 33 | 34 | ## Setup instructions 35 | 36 | Hopefully I've done a decent job of making these guides bulletproof - but please contact me right away if you hit roadblocks: 37 | 38 | - PC people please follow the instructions in [SETUP-PC.md](SETUP-PC.md) 39 | - Mac people please follow the instructions in [SETUP-mac.md](SETUP-mac.md) 40 | - Linux people, the Mac instructions should be close enough! 41 | 42 | ### An important point on API costs (which are optional! No need to spend if you don't wish) 43 | 44 | During this example project, I'll suggest you try out the leading models at the forefront of progress, known as the Frontier models. These services have some charges, but I'll keep cost minimal - like, a few cents at a time. And I'll provide alternatives if you'd prefer not to use them. 45 | 46 | Please do monitor your API usage to ensure you're comfortable with spend; I've included links below. There's no need to spend anything more than a couple of dollars. Some AI providers such as OpenAI require a minimum credit like \$5 or local equivalent; we should only spend a fraction of it, and you'll have plenty of opportunity to put it to good use in your own projects. But it's not necessary in the least; the important part is that you focus on learning. 47 | 48 | ### The most important part 49 | 50 | The best way to learn is by **DOING**. I don't type all the code during the workshop; I execute it for you to see the results. 
You should work through afterwards, running each cell, inspecting the objects to get a detailed understanding of what's happening. Then tweak the code and make it your own. 51 | 52 | ### Monitoring API charges 53 | 54 | You can keep your API spend very low; you can monitor spend at the dashboards: [here](https://platform.openai.com/usage) for OpenAI, [here](https://console.anthropic.com/settings/cost) for Anthropic and [here](https://console.cloud.google.com/apis/api/generativelanguage.googleapis.com/cost) for Google Gemini. 55 | 56 | The charges for the exercises in this course should always be quite low, but if you'd prefer to keep them minimal, then be sure to always choose the cheapest versions of models: 57 | 1. For OpenAI: Always use model `gpt-4o-mini` in the code instead of `gpt-4o` 58 | 2. For Anthropic: Always use model `claude-3-haiku-20240307` in the code instead of the other Claude models 59 | 60 | Please do message me or email me at ed@edwarddonner.com if this doesn't work or if I can help with anything. I can't wait to hear how you get on. 61 | 62 | 63 | 64 | 67 | 74 | 75 |
65 | 66 | 68 |

Other resources

69 | I've put together this webpage with useful resources.
70 | https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/
71 | Please keep this bookmarked, and I'll continue to add more useful links there over time. 72 |
73 |
76 | -------------------------------------------------------------------------------- /SETUP-PC.md: -------------------------------------------------------------------------------- 1 | ## Setup instructions for Windows 2 | 3 | Welcome, PC people! 4 | 5 | I should confess up-front: setting up a powerful environment to work at the forefront of AI is not as simple as I'd like. For most people these instructions will go great; but in some cases, for whatever reason, you'll hit a problem. Please don't hesitate to reach out - I am here to get you up and running quickly. There's nothing worse than feeling _stuck_. Message me, email me or LinkedIn message me and I will unstick you quickly! 6 | 7 | Email: ed@edwarddonner.com 8 | LinkedIn: https://www.linkedin.com/in/eddonner/ 9 | 10 | I use a platform called Anaconda to set up your environment. It's a powerful tool that builds a complete science environment. Anaconda ensures that you're working with the right version of Python and all your packages are compatible with mine, even if our systems are completely different. It takes more time to set up, and it uses more hard drive space (5+ GB) but it's very reliable once its working. 11 | 12 | Having said that: if you have any problems with Anaconda, I've provided an alternative approach. It's faster and simpler and should have you running quickly, with less of a guarantee around compatibility. 13 | 14 | ### Part 1: Clone the Repo 15 | 16 | This gets you a local copy of the code on your box. 17 | 18 | 1. **Install Git** (if not already installed): 19 | 20 | - Download Git from https://git-scm.com/download/win 21 | - Run the installer and follow the prompts, using default options (press OK lots of times!) 22 | 23 | 2. **Open Command Prompt:** 24 | 25 | - Press Win + R, type `cmd`, and press Enter 26 | 27 | 3. **Navigate to your projects folder:** 28 | 29 | If you have a specific folder for projects, navigate to it using the cd command. 
For example: 30 | `cd C:\Users\YourUsername\Documents\Projects` 31 | Replacing YourUsername with your actual Windows user 32 | 33 | If you don't have a projects folder, you can create one: 34 | ``` 35 | mkdir C:\Users\YourUsername\Documents\Projects 36 | cd C:\Users\YourUsername\Documents\Projects 37 | ``` 38 | 39 | 4. **Clone the repository:** 40 | 41 | Enter this in the command prompt in the Projects folder: 42 | 43 | `git clone https://github.com/ed-donner/choose_llm.git` 44 | 45 | This creates a new directory `choose_llm` within your Projects folder and downloads the code for the class. Do `cd choose_llm` to go into it. This `choose_llm` directory is known as the "project root directory". 46 | 47 | ### Part 2: Install Anaconda environment 48 | 49 | If this Part 2 gives you any problems, there is an alternative Part 2B below that can be used instead. 50 | 51 | 1. **Install Anaconda:** 52 | 53 | - Download Anaconda from https://docs.anaconda.com/anaconda/install/windows/ 54 | - Run the installer and follow the prompts. Note that it takes up several GB and take a while to install, but it will be a powerful platform for you to use in the future. 55 | 56 | 2. **Set up the environment:** 57 | 58 | - Open **Anaconda Prompt** (search for it in the Start menu) 59 | - Navigate to the "project root directory" by entering something like `cd C:\Users\YourUsername\Documents\Projects\choose_llm` using the actual path to your choose_llm project root directory. Do a `dir` and check you can see subdirectories for each week of the course. 60 | - Create the environment: `conda env create -f environment.yml` 61 | - Wait for a few minutes for all packages to be installed - in some cases, this can literally take 20-30 minutes if you've not used Anaconda before, and even longer depending on your internet connection. Important stuff is happening! If this runs for more than 1 hour 15 mins, or gives you other problems, please go to Part 2B instead. 
62 | - You have now built an isolated, dedicated AI environment for engineering LLMs, running vector datastores, and so much more! You now need to **activate** it using this command: `conda activate choose` 63 | 64 | You should see `(choose)` in your prompt, which indicates you've activated your new environment. 65 | 66 | 3. **Start Jupyter Lab:** 67 | 68 | - In the Anaconda Prompt, from within the `choose_llm` folder, type: `jupyter lab` 69 | 70 | ...and Jupyter Lab should open up in a browser. If you've not seen Jupyter Lab before, I'll explain it in a moment! Now close the jupyter lab browser tab, and close the Anaconda prompt, and move on to Part 3. 71 | 72 | ### Part 2B - Alternative to Part 2 if Anaconda gives you trouble 73 | 74 | 1. **Open Command Prompt** 75 | 76 | Press Win + R, type `cmd`, and press Enter 77 | 78 | Run `python --version` to find out which python you're on. Ideally you'd be using a version of Python 3.11, so we're completely in sync. 79 | If not, it's not a big deal, but we might need to come back to this later if you have compatibility issues. 80 | You can download python here: 81 | https://www.python.org/downloads/ 82 | 83 | 2. Navigate to the "project root directory" by entering something like `cd C:\Users\YourUsername\Documents\Projects\choose_llm` using the actual path to your choose_llm project root directory. Do a `dir` and check you can see subdirectories for each week of the course. 84 | 85 | Then, create a new virtual environment with this command: 86 | `python -m venv choose` 87 | 88 | 3. Activate the virtual environment with 89 | `choose\Scripts\activate` 90 | You should see (choose) in your command prompt, which is your sign that things are going well. 91 | 92 | 4. Run `pip install -r requirements.txt` 93 | This may take a few minutes to install. 94 | 95 | 5. **Start Jupyter Lab:** 96 | 97 | From within the `choose_llm` folder, type: `jupyter lab` 98 | ...and Jupyter Lab should open up, ready for you to get started. 
Open the `intro` folder and double click on `joke.ipynb`. Success! Now close down jupyter lab and move on to Part 3. 99 | 100 | If there are any problems, contact me! 101 | 102 | ### Part 3 - OpenAI key (OPTIONAL but recommended) 103 | 104 | You'll be writing code to call the APIs of Frontier models (models at the forefront of AI). 105 | 106 | You only need OpenAI initially, and you can add the others if you wish later on. 107 | 108 | 1. Create an OpenAI account if you don't have one by visiting: 109 | https://platform.openai.com/ 110 | 111 | 2. OpenAI asks for a minimum credit to use the API. For me in the US, it's \$5. The API calls will spend against this \$5. On this course, we'll only use a small portion of this. I do recommend you make the investment as you'll be able to put it to excellent use. But if you'd prefer not to pay for the API, I give you an alternative using Ollama. 112 | 113 | You can add your credit balance to OpenAI at Settings > Billing: 114 | https://platform.openai.com/settings/organization/billing/overview 115 | 116 | I recommend you disable the automatic recharge! 117 | 118 | 3. Create your API key 119 | 120 | The webpage where you set up your OpenAI key is at https://platform.openai.com/api-keys - press the green 'Create new secret key' button and press 'Create secret key'. Keep a record of the API key somewhere private; you won't be able to retrieve it from the OpenAI screens in the future. It should start `sk-proj-`. 121 | 122 | Optionally: we will also set up keys for Anthropic and Google: 123 | - Claude API at https://console.anthropic.com/ from Anthropic 124 | - Gemini API at https://ai.google.dev/gemini-api from Google 125 | 126 | You'll be using the fabulous HuggingFace platform; an account is available for free at https://huggingface.co - you can create an API token from the Avatar menu >> Settings >> Access Tokens. 
127 | 128 | And optionally you'll be using the terrific Weights & Biases at https://wandb.ai to watch over your training batches. Accounts are also free, and you can set up a token in a similar way. 129 | 130 | ### PART 4 - .env file 131 | 132 | When you have these keys, please create a new file called `.env` in your project root directory. The filename needs to be exactly the four characters ".env" rather than "my-keys.env" or ".env.txt". Here's how to do it: 133 | 134 | 1. Open the Notepad (Windows + R to open the Run box, enter `notepad`) 135 | 136 | 2. In the Notepad, type this, replacing xxxx with your API key (starting `sk-proj-`). 137 | 138 | ``` 139 | OPENAI_API_KEY=xxxx 140 | ``` 141 | 142 | If you have other keys, you can add them too: 143 | ``` 144 | GOOGLE_API_KEY=xxxx 145 | ANTHROPIC_API_KEY=xxxx 146 | HF_TOKEN=xxxx 147 | ``` 148 | 149 | Double check there are no spaces before or after the `=` sign, and no spaces at the end of the key. 150 | 151 | 3. Go to File > Save As. In the "Save as type" dropdown, select All Files. In the "File name" field, type exactly **.env** as the filename. Choose to save this in the project root directory (the folder called `choose_llm`) and click Save. 152 | 153 | 4. Navigate to the folder where you saved the file in Explorer and ensure it was saved as ".env" not ".env.txt" - if necessary rename it to ".env" - you might need to ensure that "Show file extensions" is set to "On" so that you see the file extensions. Message or email me if that doesn't make sense! 154 | 155 | This file won't appear in Jupyter Lab because jupyter hides files starting with a dot. This file is listed in the `.gitignore` file, so it won't get checked in and your keys stay safe. 156 | 157 | ### Part 5 - Showtime!! 
158 | 159 | - Open **Anaconda Prompt** (search for it in the Start menu) if you used Anaconda, otherwise open a Powershell if you used the alternative approach in Part 2B 160 | 161 | - Navigate to the "project root directory" by entering something like `cd C:\Users\YourUsername\Documents\Projects\choose_llm` using the actual path to your choose_llm project root directory. Do a `dir` and check you can see subdirectories for each week of the course. 162 | 163 | - Activate your environment with `conda activate choose` if you used Anaconda or `choose\Scripts\activate` if you used the alternative approach in Part 2B 164 | 165 | - You should see (choose) in your prompt which is your sign that all is well. And now, type: `jupyter lab` and Jupyter Lab should open up, ready for you to get started. Open the `intro` folder and double click on `lab1.ipynb`. 166 | 167 | And you're off to the races! 168 | 169 | Note that any time you start jupyter lab in the future, you'll need to follow these Part 5 instructions to start it from within the `choose_llm` directory with the `choose` environment activated. 170 | 171 | For those new to Jupyter Lab / Jupyter Notebook, it's a delightful Data Science environment where you can simply hit shift+return in any cell to run it; start at the top and work your way down! There's a notebook with a [Guide to Jupyter Lab](Guide%20to%20Jupyter.ipynb), and an [Intermediate Python](Intermediate%20Python.ipynb) tutorial, if that would be helpful. 172 | 173 | If you have any problems, I've included a notebook called [troubleshooting.ipynb](troubleshooting.ipynb) to figure it out. 174 | 175 | Please do message me or email me at ed@edwarddonner.com if this doesn't work or if I can help with anything. I can't wait to hear how you get on. 
-------------------------------------------------------------------------------- /SETUP-mac.md: -------------------------------------------------------------------------------- 1 | ## Setup instructions for Mac 2 | 3 | Welcome, Mac people! 4 | 5 | I should confess up-front: setting up a powerful environment to work at the forefront of AI is not as simple as I'd like. For most people these instructions will go great; but in some cases, for whatever reason, you'll hit a problem. Please don't hesitate to reach out - I am here to get you up and running quickly. There's nothing worse than feeling _stuck_. Message me, email me or LinkedIn message me and I will unstick you quickly! 6 | 7 | Email: ed@edwarddonner.com 8 | LinkedIn: https://www.linkedin.com/in/eddonner/ 9 | 10 | I use a platform called Anaconda to set up your environment. It's a powerful tool that builds a complete science environment. Anaconda ensures that you're working with the right version of Python and all your packages are compatible with mine, even if our systems are completely different. It takes more time to set up, and it uses more hard drive space (5+ GB) but it's very reliable once it's working. 11 | 12 | Having said that: if you have any problems with Anaconda, I've provided an alternative approach. It's faster and simpler and should have you running quickly, with less of a guarantee around compatibility. 13 | 14 | ### Part 1: Clone the Repo 15 | 16 | This gets you a local copy of the code on your box. 17 | 18 | 1. **Install Git** if not already installed (it will be in most cases) 19 | 20 | - Open Terminal (Applications > Utilities > Terminal) 21 | - Type `git --version` If not installed, you'll be prompted to install it 22 | 23 | 2. **Navigate to your projects folder:** 24 | 25 | If you have a specific folder for projects, navigate to it using the cd command. 
For example: 26 | `cd ~/Documents/Projects` 27 | 28 | If you don't have a projects folder, you can create one: 29 | ``` 30 | mkdir ~/Documents/Projects 31 | cd ~/Documents/Projects 32 | ``` 33 | 34 | 3. **Clone the repository:** 35 | 36 | Enter this in the terminal in the Projects folder: 37 | 38 | `git clone https://github.com/ed-donner/choose_llm.git` 39 | 40 | This creates a new directory `choose_llm` within your Projects folder and downloads the code for the class. Do `cd choose_llm` to go into it. This `choose_llm` directory is known as the "project root directory". 41 | 42 | ### Part 2: Install Anaconda environment 43 | 44 | If this Part 2 gives you any problems, there is an alternative Part 2B below that can be used instead. 45 | 46 | 1. **Install Anaconda:** 47 | 48 | - Download Anaconda from https://docs.anaconda.com/anaconda/install/mac-os/ 49 | - Double-click the downloaded file and follow the installation prompts. Note that it takes up several GB and takes a while to install, but it will be a powerful platform for you to use in the future. 50 | 51 | 2. **Set up the environment:** 52 | 53 | - Open a new Terminal (Applications > Utilities > Terminal) 54 | - Navigate to the "project root directory" using `cd ~/Documents/Projects/choose_llm` (replace this path as needed with the actual path to the choose_llm directory, your locally cloned version of the repo). Do `ls` and check you can see subdirectories for each week of the course. 55 | - Create the environment: `conda env create -f environment.yml` 56 | - Wait for a few minutes for all packages to be installed - in some cases, this can literally take 20-30 minutes if you've not used Anaconda before, and even longer depending on your internet connection. Important stuff is happening! If this runs for more than 1 hour 15 mins, or gives you other problems, please go to Part 2B instead. 
57 | - You have now built an isolated, dedicated AI environment for engineering LLMs, running vector datastores, and so much more! You now need to **activate** it using this command: `conda activate choose` 58 | 59 | You should see `(choose)` in your prompt, which indicates you've activated your new environment. 60 | 61 | 3. **Start Jupyter Lab:** 62 | 63 | - In the Terminal window, from within the `choose_llm` folder, type: `jupyter lab` 64 | 65 | ...and Jupyter Lab should open up in a browser. If you've not seen Jupyter Lab before, I'll explain it in a moment! Now close the jupyter lab browser tab, and close the Terminal, and move on to Part 3. 66 | 67 | ### Part 2B - Alternative to Part 2 if Anaconda gives you trouble 68 | 69 | 1. **Open a new Terminal** (Applications > Utilities > Terminal) 70 | 71 | Run `python --version` to find out which python you're on. Ideally you'd be using a version of Python 3.11, so we're completely in sync. 72 | If not, it's not a big deal, but we might need to come back to this later if you have compatibility issues. 73 | You can download python here: 74 | https://www.python.org/downloads/ 75 | 76 | 2. Navigate to the "project root directory" using `cd ~/Documents/Projects/choose_llm` (replace this path with the actual path to the choose_llm directory, your locally cloned version of the repo). Do `ls` and check you can see subdirectories for each week of the course. 77 | 78 | Then, create a new virtual environment with this command: 79 | `python -m venv choose` 80 | 81 | 3. Activate the virtual environment with 82 | `source choose/bin/activate` 83 | You should see (choose) in your command prompt, which is your sign that things are going well. 84 | 85 | 4. Run `pip install -r requirements.txt` 86 | This may take a few minutes to install. 87 | 88 | 5. **Start Jupyter Lab:** 89 | 90 | From within the `choose_llm` folder, type: `jupyter lab` 91 | ...and Jupyter Lab should open up, ready for you to get started. 
Open the `intro` folder and double click on `lab1.ipynb`. Success! Now close down jupyter lab and move on to Part 3. 92 | 93 | If there are any problems, contact me! 94 | 95 | ### Part 3 - OpenAI key (OPTIONAL but recommended) 96 | 97 | You'll be writing code to call the APIs of Frontier models (models at the forefront of AI). 98 | 99 | You only need OpenAI initially, and you can add the others if you wish later on. 100 | 101 | 1. Create an OpenAI account if you don't have one by visiting: 102 | https://platform.openai.com/ 103 | 104 | 2. OpenAI asks for a minimum credit to use the API. For me in the US, it's \$5. The API calls will spend against this \$5. On this course, we'll only use a small portion of this. I do recommend you make the investment as you'll be able to put it to excellent use. But if you'd prefer not to pay for the API, I give you an alternative using Ollama. 105 | 106 | You can add your credit balance to OpenAI at Settings > Billing: 107 | https://platform.openai.com/settings/organization/billing/overview 108 | 109 | I recommend you disable the automatic recharge! 110 | 111 | 3. Create your API key 112 | 113 | The webpage where you set up your OpenAI key is at https://platform.openai.com/api-keys - press the green 'Create new secret key' button and press 'Create secret key'. Keep a record of the API key somewhere private; you won't be able to retrieve it from the OpenAI screens in the future. It should start `sk-proj-`. 114 | 115 | Optionally: we will also set up keys for Anthropic and Google: 116 | - Claude API at https://console.anthropic.com/ from Anthropic 117 | - Gemini API at https://ai.google.dev/gemini-api from Google 118 | 119 | You'll also be using the fabulous HuggingFace platform; an account is available for free at https://huggingface.co - you can create an API token from the Avatar menu >> Settings >> Access Tokens. 
120 | 121 | And optionally you can use the terrific Weights & Biases at https://wandb.ai to watch over your training batches. Accounts are also free, and you can set up a token in a similar way. 122 | 123 | ### PART 4 - .env file 124 | 125 | When you have these keys, please create a new file called `.env` in your project root directory. The filename needs to be exactly the four characters ".env" rather than "my-keys.env" or ".env.txt". Here's how to do it: 126 | 127 | 1. Open Terminal (Applications > Utilities > Terminal) 128 | 129 | 2. Navigate to the "project root directory" using `cd ~/Documents/Projects/choose_llm` (replace this path with the actual path to the choose_llm directory, your locally cloned version of the repo). 130 | 131 | 3. Create the .env file with 132 | 133 | nano .env 134 | 135 | 4. Then type your API keys into nano, replacing xxxx with your API key (starting `sk-proj-`). 136 | 137 | ``` 138 | OPENAI_API_KEY=xxxx 139 | ``` 140 | 141 | If you have other keys, you can add them too: 142 | ``` 143 | GOOGLE_API_KEY=xxxx 144 | ANTHROPIC_API_KEY=xxxx 145 | HF_TOKEN=xxxx 146 | ``` 147 | 148 | 5. Save the file: 149 | 150 | Control + O 151 | Enter (to confirm save the file) 152 | Control + X to exit the editor 153 | 154 | 6. Use this command to list files in your project root directory: 155 | 156 | `ls -a` 157 | 158 | And confirm that the `.env` file is there. 159 | 160 | This file won't appear in Jupyter Lab because jupyter hides files starting with a dot. This file is listed in the `.gitignore` file, so it won't get checked in and your keys stay safe. 161 | 162 | ### Part 5 - Showtime!! 163 | 164 | - Open Terminal (Applications > Utilities > Terminal) 165 | 166 | - Navigate to the "project root directory" using `cd ~/Documents/Projects/choose_llm` (replace this path with the actual path to the choose_llm directory, your locally cloned version of the repo). Do `ls` and check you can see subdirectories for each week of the course. 
167 | 168 | - Activate your environment with `conda activate choose` (or `source choose/bin/activate` if you used the alternative approach in Part 2B) 169 | 170 | - You should see (choose) in your prompt which is your sign that all is well. And now, type: `jupyter lab` and Jupyter Lab should open up, ready for you to get started. Open the `intro` folder and double click on `lab1.ipynb`. 171 | 172 | And you're off to the races! 173 | 174 | Note that any time you start jupyter lab in the future, you'll need to follow these Part 5 instructions to start it from within the `choose_llm` directory with the `choose` environment activated. 175 | 176 | For those new to Jupyter Lab / Jupyter Notebook, it's a delightful Data Science environment where you can simply hit shift+return in any cell to run it; start at the top and work your way down! There's a notebook with a [Guide to Jupyter Lab](Guide%20to%20Jupyter.ipynb), and an [Intermediate Python](Intermediate%20Python.ipynb) tutorial, if that would be helpful. 177 | 178 | If you have any problems, I've included a notebook called [troubleshooting.ipynb](troubleshooting.ipynb) to figure it out. 179 | 180 | Please do message me or email me at ed@edwarddonner.com if this doesn't work or if I can help with anything. I can't wait to hear how you get on. 
-------------------------------------------------------------------------------- /business.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ed-donner/choose_llm/398f96625f3cae34600dda9d4ca32dfa3896cfd3/business.jpg -------------------------------------------------------------------------------- /choose.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ed-donner/choose_llm/398f96625f3cae34600dda9d4ca32dfa3896cfd3/choose.jpg -------------------------------------------------------------------------------- /diagnostics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "73287ed4-81e3-496a-9e47-f0e8c3770ce9", 6 | "metadata": {}, 7 | "source": [ 8 | "# Gathering Essential Diagnostic information\n", 9 | "\n", 10 | "## Please run this next cell to gather some important data\n", 11 | "\n", 12 | "Please run the next cell; it should take a minute or so to run (mostly the network test).\n", 13 | "Then email me the output of the last cell to ed@edwarddonner.com. \n", 14 | "Alternatively: this will create a file called report.txt - just attach the file to your email." 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "id": "ed8056e8-efa2-4b6f-a4bb-e7ceb733c517", 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "# Run my diagnostics report to collect key information for debugging\n", 25 | "# Please email me the results. 
Either copy & paste the output, or attach the file report.txt\n", 26 | "\n", 27 | "!pip install -q requests speedtest-cli psutil setuptools\n", 28 | "from diagnostics import Diagnostics\n", 29 | "Diagnostics().run()" 30 | ] 31 | } 32 | ], 33 | "metadata": { 34 | "kernelspec": { 35 | "display_name": "Python 3 (ipykernel)", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "language_info": { 40 | "codemirror_mode": { 41 | "name": "ipython", 42 | "version": 3 43 | }, 44 | "file_extension": ".py", 45 | "mimetype": "text/x-python", 46 | "name": "python", 47 | "nbconvert_exporter": "python", 48 | "pygments_lexer": "ipython3", 49 | "version": "3.11.10" 50 | } 51 | }, 52 | "nbformat": 4, 53 | "nbformat_minor": 5 54 | } 55 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: choose 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - python=3.11 7 | - pip 8 | - python-dotenv 9 | - requests 10 | - beautifulsoup4 11 | - pydub 12 | - numpy 13 | - pandas 14 | - scipy 15 | - pytorch 16 | - jupyterlab 17 | - ipywidgets 18 | - pyarrow 19 | - anthropic 20 | - google-generativeai 21 | - matplotlib 22 | - scikit-learn 23 | - chromadb 24 | - langchain 25 | - langchain-text-splitters 26 | - langchain-openai 27 | - langchain-experimental 28 | - langchain-chroma 29 | - faiss-cpu 30 | - tiktoken 31 | - jupyter-dash 32 | - plotly 33 | - duckdb 34 | - feedparser 35 | - pip: 36 | - transformers 37 | - sentence-transformers 38 | - datasets 39 | - accelerate 40 | - sentencepiece 41 | - bitsandbytes 42 | - openai 43 | - gradio 44 | - gensim 45 | - modal 46 | - ollama 47 | - psutil 48 | - setuptools 49 | - speedtest-cli 50 | - groq 51 | -------------------------------------------------------------------------------- /important.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ed-donner/choose_llm/398f96625f3cae34600dda9d4ca32dfa3896cfd3/important.jpg -------------------------------------------------------------------------------- /pickme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ed-donner/choose_llm/398f96625f3cae34600dda9d4ca32dfa3896cfd3/pickme.png -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/company/about.md: -------------------------------------------------------------------------------- 1 | # About Insurellm 2 | 3 | Insurellm was founded by Avery Lancaster in 2015 as an insurance tech startup designed to disrupt an industry in need of innovative products. Its first product was Markellm, the marketplace connecting consumers with insurance providers. 4 | It rapidly expanded, adding new products and clients, reaching 200 employees by 2024 with 12 offices across the US. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/company/careers.md: -------------------------------------------------------------------------------- 1 | # Careers at Insurellm 2 | 3 | Insurellm is hiring! We are looking for talented software engineers, data scientists and account executives to join our growing team. Come be a part of our movement to disrupt the insurance sector. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/company/overview.md: -------------------------------------------------------------------------------- 1 | # Overview of Insurellm 2 | 3 | Insurellm is an innovative insurance tech firm with 200 employees across the US. 
4 | Insurellm offers 4 insurance software products: 5 | - Carllm, a portal for auto insurance companies 6 | - Homellm, a portal for home insurance companies 7 | - Rellm, an enterprise platform for the reinsurance sector 8 | - Markellm, a marketplace for connecting consumers with insurance providers 9 | 10 | Insurellm has more than 300 clients worldwide. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/.md: -------------------------------------------------------------------------------- 1 | 2 | # Contract Agreement 3 | 4 | **Client Name**: Premier Auto Insurance Co. 5 | **Product Name**: Carllm 6 | 7 | --- 8 | 9 | ## Terms 10 | 11 | 1. **Agreement Duration**: This contract is effective from January 1, 2025, and shall remain in effect for a period of twelve (12) months, concluding on December 31, 2025. 12 | 2. **Payment**: The Client agrees to pay Insurellm a subscription fee of $2,500 per month for the duration of the contract, payable within 30 days of the invoice date. 13 | 3. **Scope of Services**: The services provided under this contract include access to the Carllm platform, integration support, AI-powered risk assessment tools, customizable coverage plans, and automated customer support. 14 | 4. **Data Security**: Insurellm commits to implementing industry-standard security measures to protect Client data, in accordance with applicable privacy laws. 15 | 16 | --- 17 | 18 | ## Renewal 19 | 20 | 1. **Automatic Renewal**: This contract will automatically renew for successive one-year terms unless either party provides written notice of termination at least thirty (30) days prior to the end of the current term. 21 | 2. **Renewal Terms**: Upon renewal, the subscription fee may be subject to adjustments based on changes in the Consumer Price Index or any significant value additions to the service. 22 | 3. 
**Review Period**: Prior to the renewal, both parties shall engage in a service review to ensure the satisfaction of both parties with the terms and performance of Carllm. 23 | 24 | --- 25 | 26 | ## Features 27 | 28 | 1. **AI-Powered Risk Assessment**: Comprehensive tools that analyze driver behavior and vehicle conditions. 29 | 2. **Instant Quoting**: Near-instant quotes provided for enhanced customer experience. 30 | 3. **Customizable Coverage Plans**: Flexibility to create tailored insurance packages. 31 | 4. **Fraud Detection**: Advanced analytics to help identify potentially fraudulent claims. 32 | 5. **Customer Insights Dashboard**: Access to deep insights for informed decision-making. 33 | 6. **Mobile Integration**: Mobile app compatibility for policy management on the go. 34 | 7. **Automated Customer Support**: 24/7 customer service via AI chatbots. 35 | 36 | --- 37 | 38 | ## Support 39 | 40 | 1. **Customer Support Availability**: Insurellm shall provide technical support to the Client via phone and email during standard business hours (9 AM - 5 PM ET, Monday to Friday). 41 | 2. **Emergency Support**: Emergency support will be available 24/7 for critical issues impacting the Client’s operations, response time not to exceed 2 hours. 42 | 3. **Training and Resources**: Insurellm will provide training materials and sessions to ensure successful deployment and use of Carllm. 43 | 4. **Feedback and Updates**: The Client will have regular opportunities to provide feedback on service performance. Updates and new features will be communicated promptly. 44 | 45 | --- 46 | 47 | This contract represents the complete understanding between the parties concerning the subject matter herein and supersedes all prior agreements and understandings, whether written or oral. 
48 | 49 | **Authorized Signatures**: 50 | 51 | *Premier Auto Insurance Co.* 52 | _________________________ 53 | Name: [Client Representative Name] 54 | Title: [Client Representative Title] 55 | Date: ________________ 56 | 57 | *Insurellm* 58 | _________________________ 59 | Name: [Insurellm Representative Name] 60 | Title: [Insurellm Representative Title] 61 | Date: ________________ 62 | 63 | --- 64 | -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with Apex Reinsurance for Rellm - AI-Powered Enterprise Reinsurance Solution.md: -------------------------------------------------------------------------------- 1 | # Contract with Apex Reinsurance for Rellm: AI-Powered Enterprise Reinsurance Solution 2 | 3 | ## Terms 4 | 5 | 1. **Parties Involved**: This contract (“Agreement”) is entered into between Insurellm, Inc. (“Provider”) and Apex Reinsurance (“Client”) on this [Date]. 6 | 7 | 2. **Scope of Services**: Provider agrees to deliver the Rellm solution, which includes AI-driven analytics, seamless integrations, risk assessment modules, customizable dashboards, regulatory compliance tools, and client and broker portals as described in the product summary. 8 | 9 | 3. **Payment Terms**: Client shall pay the Provider the sum of $10,000 per month for the duration of this agreement. Payments are due on the first day of each month and will be processed via electronic funds transfer. 10 | 11 | 4. **Contract Duration**: This Agreement shall commence on [Start Date] and shall remain in effect for a period of twelve (12) months unless terminated earlier in accordance with the terms set forth herein. 12 | 13 | ## Renewal 14 | 15 | 1. **Automatic Renewal**: This Agreement will automatically renew for successive one-year terms unless either party provides a written notice of intent to terminate at least thirty (30) days prior to the expiration of the current term. 16 | 17 | 2. 
**Renewal Pricing**: Upon renewal, the pricing may be subject to adjustment by the Provider. The Provider will give a minimum of sixty (60) days’ notice of any changes in pricing. 18 | 19 | ## Features 20 | 21 | 1. **AI-Driven Analytics**: The Rellm platform will utilize AI algorithms to provide predictive insights into risk exposures, allowing the Client to make informed decisions with real-time data analysis. 22 | 23 | 2. **Seamless Integrations**: The architecture of Rellm allows for easy integration with existing systems used by the Client, including policy management and claims processing. 24 | 25 | 3. **Customizable Dashboard**: The dashboard will be tailored to display metrics specific to the Client's operational needs, enhancing productivity and facilitating more efficient data access. 26 | 27 | 4. **Regulatory Compliance**: The solution will include compliance tracking features to assist the Client in maintaining adherence to relevant regulations. 28 | 29 | 5. **Dedicated Client Portal**: A portal for the Client will facilitate real-time communication and document sharing, ensuring seamless collaboration throughout the partnership. 30 | 31 | ## Support 32 | 33 | 1. **Technical Support**: Provider shall offer dedicated technical support to the Client via phone, email, and a ticketing system during business hours (Monday to Friday, 9 AM to 5 PM EST). 34 | 35 | 2. **Training and Onboarding**: Provider will deliver comprehensive onboarding training for up to ten (10) members of the Client's staff to ensure effective use of the Rellm solution. 36 | 37 | 3. **Updates and Maintenance**: Provider is responsible for providing updates to the Rellm platform to improve functionality and security, at no additional cost to the Client. 38 | 39 | 4. **Escalation Protocol**: Issues that cannot be resolved at the first level of support will be escalated to the senior support team, ensuring that critical concerns are addressed promptly. 
40 | 41 | --- 42 | 43 | **Acceptance of Terms**: By signing below, both parties agree to the Terms, Renewal, Features, and Support outlined in this Agreement. 44 | 45 | **Insurellm, Inc.** 46 | _____________________________ 47 | Authorized Signature 48 | Date: ___________________ 49 | 50 | **Apex Reinsurance** 51 | _____________________________ 52 | Authorized Signature 53 | Date: ___________________ 54 | -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with Belvedere Insurance for Markellm.md: -------------------------------------------------------------------------------- 1 | # Contract with Belvedere Insurance for Markellm 2 | 3 | ## Terms 4 | This Contract ("Agreement") is made and entered into as of [Date] by and between Insurellm, Inc., a corporation registered in the United States, ("Provider") and Belvedere Insurance, ("Client"). 5 | 6 | 1. **Service Commencement**: The services described herein will commence on [Start Date]. 7 | 2. **Contract Duration**: This Agreement shall remain in effect for a period of 1 year from the Commencement Date, unless terminated earlier in accordance with the termination clause of this Agreement. 8 | 3. **Fees**: Client agrees to pay a Basic Listing Fee of $199/month for accessing the Markellm platform along with a performance-based pricing of $25 per lead generated. 9 | 4. **Payment Terms**: Payments shall be made monthly, in advance, with invoices issued on the 1st of each month, payable within 15 days of receipt. 10 | 11 | ## Renewal 12 | 1. **Renewal Terms**: This Agreement may be renewed for additional one-year terms upon mutual written consent of both parties no later than 30 days before the end of the current term. 13 | 2. **Fee Adjustments**: Any changes to the fees or terms will be communicated in writing at least 60 days prior to the renewal date. 14 | 15 | ## Features 16 | 1. 
**AI-Powered Matching**: Belvedere Insurance will benefit from Markellm's AI-powered matching, ensuring the best-fit customers are identified and connected. 17 | 2. **Real-Time Quotes**: Access to real-time quotes will enhance the customer acquisition process, facilitating timely and informed decision-making. 18 | 3. **Data Insights**: Client shall have access to Markellm's analytics dashboard, allowing insights into consumer behavior and market trends. 19 | 4. **Customization Options**: Belvedere Insurance can leverage optional premium features and analytics upon payment of an additional $9.99/month. 20 | 5. **Customer Support**: Insurellm will provide dedicated support to Belvedere Insurance, ensuring any issues or queries are promptly addressed. 21 | 22 | ## Support 23 | 1. **Technical Support**: Technical support will be available from 9 AM to 7 PM EST, Monday through Friday via email and phone. 24 | 2. **Response Times**: Insurellm agrees to respond to all support queries within 24 business hours. Emergency support will be prioritized throughout the contract period. 25 | 3. **Training**: Insurellm will offer a comprehensive training session for the Client’s staff upon beginning the service to ensure effective utilization of the features. 26 | 27 | ## Acceptance 28 | By signing below, the parties agree to the terms of this Agreement. 29 | 30 | **Insurellm, Inc.** 31 | Signature: ______________________ 32 | Name: [Authorized Signatory] 33 | Title: [Title] 34 | Date: ______________________ 35 | 36 | **Belvedere Insurance** 37 | Signature: ______________________ 38 | Name: [Authorized Signatory] 39 | Title: [Title] 40 | Date: ______________________ 41 | 42 | --- 43 | This synthetic contract document outlines a fictional agreement between Insurellm and a fictional insurance client, Belvedere Insurance, which engages with the Markellm platform. 
The contract contains creative yet realistic terms for potential use in training and development in insurance technology scenarios. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with BrightWay Solutions for Markellm.md: -------------------------------------------------------------------------------- 1 | # Contract with BrightWay Solutions for Markellm 2 | 3 | **Contract Date:** October 5, 2023 4 | **Contract ID:** INS-2023-0092 5 | 6 | ### Terms 7 | This contract (“Contract”) is made between Insurellm, a company incorporated in the United States, and BrightWay Solutions, a technology provider specializing in insurance services. 8 | 9 | 1. **Scope of Services:** 10 | Insurellm shall provide BrightWay Solutions access to the Markellm platform under the agreed pricing structure for a duration of one year from the effective date. 11 | 12 | 2. **Payment Terms:** 13 | BrightWay Solutions agrees to pay an initial setup fee of $1,000 for integration services, followed by the Basic Listing Fee of $199 per month for featured listing on Markellm. Payment shall be made within 30 days of invoice. 14 | 15 | 3. **Service Level Agreement (SLA):** 16 | Insurellm commits to a 99.9% uptime for the platform with dedicated support response times not exceeding 4 business hours. 17 | 18 | ### Renewal 19 | 1. **Automatic Renewal:** 20 | This Contract will automatically renew for additional one-year terms unless either party provides a written notice of intent to terminate at least 30 days prior to the renewal date. 21 | 22 | 2. **Review Period:** 23 | Both parties will enter a review period each year, during which they will discuss potential amendments to the pricing or contract terms based on market conditions and performance metrics. 24 | 25 | ### Features 26 | 1. 
**Access to AI-Powered Matching:** 27 | BrightWay Solutions will benefit from the AI algorithms for optimal customer matches, helping them connect with consumers looking for their specific insurance offerings. 28 | 29 | 2. **Real-Time Quote Availability:** 30 | Consumers sourced via BrightWay Solutions will receive real-time quotes, allowing for a seamless customer experience. 31 | 32 | 3. **Analytics Dashboard:** 33 | Access to Markellm’s analytics dashboard will provide BrightWay Solutions with insights into consumer behavior and market trends, assisting them in refining their insurance offerings. 34 | 35 | 4. **Customization Options:** 36 | BrightWay Solutions may request customizations to their listing page on Markellm, within the capabilities of the platform. 37 | 38 | ### Support 39 | 1. **Dedicated Customer Support:** 40 | BrightWay Solutions will have access to a dedicated support team from Insurellm during standard business hours (9 AM - 7 PM EST). 41 | 42 | 2. **Additional Support Services:** 43 | Technical support for integration and maintenance will be available. An optional premium support package can be purchased for $49.99/month, which includes 24/7 support and advanced troubleshooting. 44 | 45 | 3. **Training and Onboarding:** 46 | Insurellm agrees to provide one free training session on how to utilize the Markellm platform effectively for BrightWay Solutions’ team upon contract signing. 47 | 48 | ### Signatures 49 | By signing below, both parties agree to the terms and conditions outlined in this Contract. 
50 | 51 | __________________________ 52 | **[Name], [Title]** 53 | **Insurellm** 54 | Date: ______________________ 55 | 56 | __________________________ 57 | **[Name], [Title]** 58 | **BrightWay Solutions** 59 | Date: ______________________ 60 | 61 | --- 62 | 63 | This document serves as a formal agreement between Insurellm and BrightWay Solutions, ensuring a successful partnership focused on enhancing the insurance shopping experience for consumers. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with EverGuard Insurance for Rellm - AI-Powered Enterprise Reinsurance Solution.md: -------------------------------------------------------------------------------- 1 | # Contract with EverGuard Insurance for Rellm: AI-Powered Enterprise Reinsurance Solution 2 | 3 | **Contract Number:** IG-2023-EG 4 | **Effective Date:** January 1, 2024 5 | **Expiration Date:** December 31, 2026 6 | 7 | ## Terms 8 | 9 | 1. **Parties**: This agreement is made between Insurellm, located at 123 Innovation Drive, Tech City, USA, and EverGuard Insurance, located at 456 Safety Lane, Protectville, USA. 10 | 11 | 2. **Product Description**: This contract pertains to the use of the Rellm platform, an AI-powered enterprise reinsurance solution provided by Insurellm. EverGuard Insurance will implement Rellm to enhance its reinsurance operations. 12 | 13 | 3. **Payment Terms**: EverGuard Insurance agrees to pay Insurellm a monthly fee of $10,000 for the duration of this contract, covering the Professional Plan features of Rellm, which includes all advanced integrations and priority customer support. 14 | 15 | 4. **Usage Rights**: EverGuard Insurance is granted a non-exclusive, non-transferable license to access and use Rellm for the duration of this contract. Unauthorized sharing or distribution is strictly prohibited. 16 | 17 | ## Renewal 18 | 19 | 1. 
**Automatic Renewal**: This contract will automatically renew for successive one-year terms unless either party provides written notice of termination at least 60 days prior to the expiration date. 20 | 21 | 2. **Price Adjustment**: In the event of a renewal, Insurellm reserves the right to adjust the monthly fee based on market conditions and the value of services offered, with a minimum notice of 30 days. 22 | 23 | ## Features 24 | 25 | 1. **Core Functionality**: Rellm provides EverGuard Insurance with advanced AI-driven analytics, seamless integrations, and a comprehensive risk assessment module designed to optimize risk management. 26 | 27 | 2. **Customizable Dashboard**: Users at EverGuard Insurance will have access to a customizable dashboard that allows them to tailor their experience based on their specific operational metrics. 28 | 29 | 3. **Compliance Tools**: The built-in regulatory compliance tools will ensure that EverGuard Insurance meets industry standards while managing its reinsurance practices. 30 | 31 | 4. **Client Portal Access**: EverGuard Insurance will have access to both client and broker portals, enhancing communication and collaboration with its partners. 32 | 33 | ## Support 34 | 35 | 1. **Customer Support**: Insurellm will provide EverGuard Insurance with 24/7 customer support, including live chat, email, and phone assistance for any technical issues or inquiries regarding Rellm. 36 | 37 | 2. **Training Services**: Insurellm will provide initial training for EverGuard Insurance staff to ensure proper utilization of Rellm features. Additional training sessions can be scheduled upon request at an agreed fee. 38 | 39 | 3. **Updates and Upgrades**: EverGuard Insurance will receive all platform updates and upgrades at no additional cost during the contract term, including enhancements outlined in Insurellm’s 2025-2026 roadmap. 40 | 41 | 4. 
**Feedback Mechanisms**: EverGuard Insurance is encouraged to provide feedback regarding Rellm’s functionalities and any desired features, which will be considered for future updates. 42 | 43 | --- 44 | 45 | **Signatures** 46 | **For Insurellm**: __________________________ 47 | **Name**: John Smith 48 | **Title**: Chief Operating Officer 49 | **Date**: _________________ 50 | 51 | **For EverGuard Insurance**: __________________________ 52 | **Name**: Sarah Johnson 53 | **Title**: Chief Executive Officer 54 | **Date**: _________________ 55 | 56 | --- 57 | 58 | This contract seeks to foster a strong partnership between Insurellm and EverGuard Insurance, leveraging Rellm to innovate and enhance reinsurance capabilities while ensuring mutual growth and compliance in the ever-evolving insurance landscape. 59 | -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with GreenField Holdings for Markellm.md: -------------------------------------------------------------------------------- 1 | # Contract with GreenField Holdings for Markellm 2 | 3 | **Effective Date:** November 15, 2023 4 | **Contract Duration:** 12 months 5 | 6 | ## Terms 7 | 1. **Parties to the Agreement**: This contract is entered into between Insurellm, hereafter referred to as "Provider," and GreenField Holdings, hereafter referred to as "Client." 8 | 2. **Scope of Services**: Provider agrees to grant the Client access to the Markellm platform, enabling GreenField Holdings to connect with potential insurance customers through the AI-powered marketplace. 9 | 3. **Compliance**: Both parties agree to adhere to applicable laws and regulations that govern information security and consumer data protection. 10 | 11 | ## Renewal 12 | 1. 
**Automatic Renewal**: This contract will automatically renew for sequential one-year terms unless either party provides a written notice of non-renewal at least 30 days prior to the expiration of the current term. 13 | 2. **Annual Review**: Upon renewal, both parties may review and negotiate the terms, including any modifications to pricing based on performance metrics outlined in Section 4. 14 | 15 | ## Features 16 | 1. **AI-Powered Matching**: Access to advanced algorithms that connect GreenField Holdings with tailored insurance leads. 17 | 2. **Real-Time Quotes**: Ability to provide customers with instant quotes from multiple insurance providers, facilitating faster decision-making processes. 18 | 3. **Customized Recommendations**: Utilization of customizable consumer profiles to enhance marketing strategies and optimize customer engagement. 19 | 4. **Data Insights**: Access to analytics dashboards for real-time insights into market trends and consumer behavior, helping GreenField Holdings refine their product offerings. 20 | 21 | ## Support 22 | 1. **Customer Support Access**: The Client will have access to dedicated support through phone and email during normal business hours to address any inquiries or technical issues. 23 | 2. **Training and Resources**: Provider will offer onboarding training resources to ensure GreenField Holdings can effectively utilize the Markellm platform. 24 | 3. **Performance Reviews**: Quarterly performance reviews will be conducted to analyze platform effectiveness, customer acquisition rates, and marketing strategies, ensuring both parties are aligned on objectives. 25 | 26 | ## Pricing 27 | - **Basic Listing Fee**: GreenField Holdings agrees to pay a monthly fee of $199 for a featured listing on the Markellm platform. 28 | - **Performance-Based Pricing**: An additional fee of $25 per acquired customer lead will be charged, reflecting successful connections made through the Markellm platform. 
29 | 30 | **Signatures:** 31 | _________________________ _________________________ 32 | **[Name], Title** **[Name], Title** 33 | Insurellm GreenField Holdings 34 | **Date:** ____________ **Date:** ____________ -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with GreenValley Insurance for Homellm.md: -------------------------------------------------------------------------------- 1 | # Contract with GreenValley Insurance for Homellm 2 | 3 | **Contract Date:** October 6, 2023 4 | **Contract Number:** HV-2023-0458 5 | **Parties:** 6 | - Insurellm, Inc. 7 | - GreenValley Insurance, LLC 8 | 9 | --- 10 | 11 | ## Terms 12 | 13 | 1. **Coverage:** Insurellm agrees to provide GreenValley Insurance with access to the Homellm product, allowing for personalized home insurance offerings tailored to customers. 14 | 15 | 2. **Duration:** This agreement is effective for a period of 12 months from the contract date, after which it will automatically renew unless terminated by either party with a written 30-day notice. 16 | 17 | 3. **Payment:** GreenValley Insurance shall pay a monthly fee of $10,000, due by the 5th of every month for the Standard Tier package. 18 | 19 | 4. **Confidentiality:** Both parties agree to maintain the confidentiality of proprietary information disclosed during the execution of this contract. 20 | 21 | 5. **Liability:** Insurellm's liability under this agreement shall be limited to direct damages and shall not exceed the total fees paid by GreenValley Insurance in the last 12 months prior to the date of the claim. 22 | 23 | --- 24 | 25 | ## Renewal 26 | 27 | Unless either party provides a written notice of termination at least 30 days prior to the expiration of the contract term, this agreement will automatically renew for an additional one-year term under the same terms and conditions. 
28 | 29 | --- 30 | 31 | ## Features 32 | 33 | GreenValley Insurance will receive the following features with Homellm: 34 | 35 | 1. **AI-Powered Risk Assessment:** Access to advanced AI algorithms for real-time risk evaluations. 36 | 37 | 2. **Dynamic Pricing Model:** Flexible premium adjustments based on ongoing risk analysis. 38 | 39 | 3. **Instant Claim Processing:** Automated claim management to accelerate processing times significantly. 40 | 41 | 4. **Predictive Maintenance Alerts:** Alerts for potential maintenance needs to mitigate risks. 42 | 43 | 5. **Multi-Channel Integration:** Capability to integrate seamlessly with existing systems for unified customer management. 44 | 45 | 6. **Customer Portal:** A user-friendly portal for their customers for policy and claims management. 46 | 47 | --- 48 | 49 | ## Support 50 | 51 | Insurellm commits to providing comprehensive support to GreenValley Insurance, which includes: 52 | 53 | 1. **Onboarding:** An extensive training program for the GreenValley staff to ensure effective use of Homellm. 54 | 55 | 2. **Dedicated Support Team:** A dedicated support team available 24/7 to address any technical issues or inquiries. 56 | 57 | 3. **Regular Updates:** Insurellm will offer ongoing updates and enhancements to the Homellm platform, including new features and security improvements. 58 | 59 | 4. **Feedback Implementation:** Insurellm will actively solicit feedback from GreenValley Insurance to ensure Homellm continues to meet their evolving needs. 60 | 61 | --- 62 | 63 | **Signatures:** 64 | 65 | _________________________________ 66 | **[Name]** 67 | **Title**: CEO 68 | **Insurellm, Inc.** 69 | 70 | _________________________________ 71 | **[Name]** 72 | **Title**: COO 73 | **GreenValley Insurance, LLC** 74 | 75 | --- 76 | 77 | This agreement represents the complete understanding of both parties regarding the use of the Homellm product and supersedes any prior agreements or communications. 
-------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with Greenstone Insurance for Homellm.md: -------------------------------------------------------------------------------- 1 | # Contract with Greenstone Insurance for Homellm 2 | 3 | --- 4 | 5 | ## Terms 6 | 7 | 1. **Parties**: This Contract ("Agreement") is entered into on this day, [Insert Date], between Insurellm ("Provider"), located at [Provider Address], and Greenstone Insurance ("Customer"), located at [Customer Address]. 8 | 9 | 2. **Services Provided**: Provider agrees to deliver the Homellm product, which includes AI-powered risk assessment, dynamic pricing model, instant claim processing, predictive maintenance alerts, multi-channel integration, and access to a customer portal as specified in the provided Product Summary. 10 | 11 | 3. **Contract Duration**: This Agreement shall commence on [Insert Start Date] and continue for a period of [Insert Duration, e.g., 12 months] unless terminated earlier as per the provisions herein. 12 | 13 | 4. **Payment Terms**: 14 | - The Customer shall pay an amount of $10,000 per month for the Standard Tier of the Homellm service. 15 | - Payments are due within 30 days of invoicing. 16 | 17 | 5. **Customization**: Any additional customization requests outside the standard offerings will require a separate agreement and associated costs. 18 | 19 | --- 20 | 21 | ## Renewal 22 | 23 | 1. **Automatic Renewal**: This Agreement will automatically renew for additional one-year terms unless either party provides written notice of termination at least 60 days prior to the end of the current term. 24 | 25 | 2. **Renewal Terms Review**: Prior to each renewal, the Provider and Customer will review the terms and pricing. Adjustments may be made based on the current features and market conditions. 
26 | 27 | --- 28 | 29 | ## Features 30 | 31 | - **AI-Powered Risk Assessment**: Customer will have access to enhanced risk evaluation tools, allowing for personalized underwriting based on real-time data analysis. 32 | 33 | - **Dynamic Pricing Model**: The Customer can leverage flexible premiums adjusted according to customer risk profiles. 34 | 35 | - **Instant Claim Processing**: Claims submitted by the Customer's clients will be processed through an automated system, with most claims resolved within hours. 36 | 37 | - **Predictive Maintenance Alerts**: The Customer will receive alerts regarding potential maintenance needs for insured properties, enhancing client satisfaction and reducing claims. 38 | 39 | - **Multi-Channel Integration**: Homellm will integrate with the Customer's existing platforms to create seamless service delivery. 40 | 41 | - **Customer Portal**: A dedicated portal will be provided, allowing the Customer's clients to manage their accounts 24/7. 42 | 43 | --- 44 | 45 | ## Support 46 | 47 | 1. **Training**: Provider will offer a comprehensive training program at the start of the term to ensure the Customer's staff can effectively use the Homellm product. 48 | 49 | 2. **Ongoing Support**: The Provider will supply ongoing technical support via email and phone during business hours (9 am - 5 pm EST) throughout the contract duration. 50 | 51 | 3. **Updates and Improvements**: Customer will receive all software updates and feature enhancements as they become available, without additional charge. 52 | 53 | --- 54 | 55 | **AGREEMENT SIGNATURES** 56 | 57 | By signing below, the parties acknowledge their acceptance of the terms of this Agreement. 
58 | 59 | **For Insurellm:** 60 | 61 | ______________________________ 62 | [Name], [Title] 63 | Date: ______________________ 64 | 65 | **For Greenstone Insurance:** 66 | 67 | ______________________________ 68 | [Name], [Title] 69 | Date: ______________________ 70 | 71 | --- 72 | 73 | *This contract is intended for illustrative purposes only and does not constitute a real legal document.* -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with Pinnacle Insurance Co. for Homellm.md: -------------------------------------------------------------------------------- 1 | # Contract with Pinnacle Insurance Co. for Homellm 2 | 3 | ## Terms 4 | This contract ("Contract") is entered into as of this 1st day of January 2024 ("Effective Date") by and between Insurellm ("Provider"), a Delaware corporation with its principal place of business at 1234 Innovation Drive, San Francisco, CA 94105, and Pinnacle Insurance Co. ("Client"), a Texas corporation with its principal place of business at 4567 Protection Plaza, Houston, TX 77001. 5 | 6 | 1. **License Grant**: Insurellm hereby grants the Client a non-exclusive, non-transferable license to use Homellm in accordance with the terms of this Contract. 7 | 2. **Payment Terms**: The Client agrees to pay an initial setup fee of $15,000 and a monthly subscription fee of $10,000 for the duration of the Contract. 8 | 3. **Term**: The initial term of this Contract shall last for a period of two (2) years from the Effective Date. 9 | 10 | ## Renewal 11 | 1. **Renewal Terms**: At the end of the initial term, this Contract shall automatically renew for additional one-year terms unless either party provides written notice of termination at least thirty (30) days prior to the expiration of the current term. 12 | 2. **Adjustment of Fees**: Subscription fees may be adjusted annually based on consumer price index changes, not to exceed 5% per year. 
13 | 14 | ## Features 15 | 1. **AI-Powered Risk Assessment**: Utilized for tailored underwriting decisions specific to individual homeowner policies. 16 | 2. **Dynamic Pricing Model**: Monthly premiums adjusted based on real-time risk evaluations, ensuring fair pricing for Pinnacle’s customers. 17 | 3. **Instant Claim Processing**: Claims resolved in hours rather than weeks, significantly improving customer satisfaction and operational efficiency. 18 | 4. **Predictive Maintenance Alerts**: Alerts sent to customers advising them of potential risks unique to their property, supporting proactive maintenance. 19 | 5. **Multi-Channel Integration**: Seamless access to customer data through existing systems in Pinnacle Insurance's infrastructure. 20 | 6. **Customer Portal**: A user-friendly interface allowing policy management, claims submission, and coverage updates at any time. 21 | 22 | ## Support 23 | 1. **Technical Support**: Insurellm shall provide 24/7 technical support via email and phone assistance for the duration of this Contract. 24 | 2. **Training**: Insurellm will conduct an onsite training session for Client employees upon implementation, and quarterly training webinars will be made available thereafter. 25 | 3. **Updates and Maintenance**: Insurellm will provide regular system updates and maintenance, ensuring that the software is operating at peak efficiency. 26 | 27 | By signing below, both parties agree to the terms set forth in this Contract for the use of the Homellm product. 28 | 29 | ____ 30 | **Insurellm Authorized Signature** 31 | Name: Sarah Johnson 32 | Title: VP of Sales 33 | Date: ____________ 34 | 35 | ____ 36 | **Pinnacle Insurance Co. Authorized Signature** 37 | Name: Tom Anderson 38 | Title: Chief Operating Officer 39 | Date: ____________ -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with Roadway Insurance Inc. 
for Carllm.md: -------------------------------------------------------------------------------- 1 | # Contract with Roadway Insurance Inc. for Carllm 2 | 3 | --- 4 | 5 | ## Terms 6 | 7 | 1. **Agreement Effective Date**: This contract is effective as of January 1, 2025. 8 | 2. **Duration**: This agreement will remain in effect for a term of 12 months, concluding on December 31, 2025. 9 | 3. **Subscription Type**: Roadway Insurance Inc. agrees to subscribe to the **Professional Tier** of Carllm, at a cost of $2,500/month, totaling $30,000 for the duration of this contract. 10 | 4. **Payment Terms**: Payments are due on the first of each month. Late payments will incur a penalty of 1.5% per month. 11 | 5. **Termination Clause**: Either party may terminate this agreement with 30 days' written notice prior to the end of the term. If terminated early, fees will be calculated on a pro-rata basis. 12 | 13 | --- 14 | 15 | ## Renewal 16 | 17 | 1. **Automatic Renewal**: This agreement will automatically renew for an additional 12-month term unless either party provides written notice of non-renewal at least 30 days before the expiration date. 18 | 2. **Price Adjustments**: Subscription fees may be adjusted for the renewal term in accordance with market conditions and the company's pricing policies, with 60 days' prior notice provided to Roadway Insurance Inc. 19 | 20 | --- 21 | 22 | ## Features 23 | 24 | 1. **Access to Core Features**: Roadway Insurance Inc. will have access to all Professional Tier features, including: 25 | - AI-Powered Risk Assessment 26 | - Advanced Analytics & Fraud Detection 27 | - Instant Quoting System 28 | - Customizable Coverage Plans 29 | - Customer Insights Dashboard 30 | 31 | 2. **Mobile Integration**: All features will be accessible through a mobile application that Insurellm will provide. 32 | 3. 
**Customer Support**: Includes 24/7 automated customer support via AI chatbots and access to dedicated account management support during business hours. 33 | 34 | --- 35 | 36 | ## Support 37 | 38 | 1. **Technical Support**: Roadway Insurance Inc. will receive priority technical support from Insurellm for any issues arising from the Carllm product. 39 | 2. **Training**: Insurellm will provide up to 5 training sessions for Roadway Insurance Inc. staff on the effective use of the Carllm platform, scheduled at mutual convenience. 40 | 3. **Updates and Maintenance**: Regular updates to the Carllm platform will be conducted quarterly, and any maintenance outages will be communicated at least 48 hours in advance. 41 | 42 | --- 43 | 44 | *This contract outlines the terms of the relationship between Insurellm and Roadway Insurance Inc. for the Carllm product, emphasizing the collaborative spirit aimed at transforming the auto insurance landscape.* -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with Stellar Insurance Co. for Rellm.md: -------------------------------------------------------------------------------- 1 | # Contract with Stellar Insurance Co. for Rellm 2 | 3 | ## Terms 4 | This contract is made between **Insurellm**, located at 123 Innovation Lane, San Francisco, CA, and **Stellar Insurance Co.**, located at 456 Galaxy Road, Chicago, IL. The effective date of this agreement is **January 1, 2024**. 5 | 6 | ### Duration 7 | The initial term of this agreement shall be for **12 months**, commencing from the effective date. The contract will automatically renew for successive **12-month periods** unless either party provides written notice of non-renewal at least **30 days** prior to the expiration of the current term. 8 | 9 | ### Payment Terms 10 | Stellar Insurance Co. 
agrees to pay Insurellm a monthly subscription fee of **$10,000** for the **Professional Plan** of the Rellm product. Payments are due on the **1st of each month**. 11 | 12 | ### Termination 13 | Either party may terminate this agreement with a **30-day written notice**. In the event of a material breach, the non-breaching party may terminate immediately, provided a written notice is given. 14 | 15 | ## Renewal 16 | This contract will renew automatically for additional 12-month terms unless written notice is provided by either party 30 days prior to the renewal date. Upon renewal, pricing may be adjusted based on agreed-upon inflation adjustments or additional services requested by Stellar Insurance Co. 17 | 18 | ## Features 19 | Stellar Insurance Co. will receive access to the following features of the Rellm product: 20 | 21 | - **AI-Driven Analytics**: Predictive insights into risk exposures tailored for the reinsurance industry. 22 | - **Seamless Integrations**: Compatibility with existing systems for policy management and claims processing. 23 | - **Risk Assessment Module**: Comprehensive evaluation of risk profiles using advanced modeling techniques. 24 | - **Customizable Dashboard**: Tailored user interface presenting relevant metrics and performance indicators. 25 | - **Regulatory Compliance Tools**: Features to ensure adherence to local and international regulations. 26 | - **Client and Broker Portals**: Dedicated portals for enhanced communication and document sharing. 27 | 28 | ## Support 29 | Insurellm provides Stellar Insurance Co. with the following support services: 30 | 31 | - **24/7 Technical Support**: Access to dedicated support representatives via phone and online chat. 32 | - **Quarterly Account Review**: Meetings to discuss performance metrics and uncover additional needs. 33 | - **Training Sessions**: Initial orientation and ongoing training opportunities to maximize the effectiveness of Rellm usage. 
34 | - **Updates and Upgrades**: Regular software updates and enhancements are included as part of the subscription. 35 | 36 | Stellar Insurance Co. acknowledges receipt of the Rellm product summary and agrees to the terms set forth above. By signing below, both parties confirm their acceptance of this contract. 37 | 38 | **For Insurellm** 39 | ______________________________ 40 | [Signature] 41 | [Name, Title] 42 | [Date] 43 | 44 | **For Stellar Insurance Co.** 45 | ______________________________ 46 | [Signature] 47 | [Name, Title] 48 | [Date] -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with TechDrive Insurance for Carllm.md: -------------------------------------------------------------------------------- 1 | # Contract with TechDrive Insurance for Carllm 2 | 3 | **Contract Date:** October 1, 2024 4 | **Contract Duration:** 12 months 5 | 6 | --- 7 | 8 | ## Terms 9 | 10 | 1. **Parties Involved**: This contract is entered into between Insurellm (the "Provider") and TechDrive Insurance (the "Customer"). 11 | 12 | 2. **License Grant**: Insurellm grants TechDrive Insurance a non-exclusive, non-transferable license to use the Carllm product as per the selected pricing tier (Professional Tier at $2,500/month). 13 | 14 | 3. **Payment Terms**: TechDrive Insurance agrees to make monthly payments of $2,500 for the duration of this contract, due on the 5th of each month. 15 | 16 | 4. **Confidentiality**: Both parties shall maintain confidentiality regarding each other’s proprietary information throughout the duration of this contract and for three years following its termination. 17 | 18 | ## Renewal 19 | 20 | 1. **Automatic Renewal**: This contract shall automatically renew for additional one-year terms unless either party provides written notice of non-renewal at least 30 days prior to the contract expiration. 21 | 22 | 2. 
**Pricing Review**: The pricing for any renewal period shall be discussed 60 days prior to the end of the term and agreed upon in writing. 23 | 24 | ## Features 25 | 26 | 1. **Included Features**: Under the Professional Tier, TechDrive Insurance will have access to the following features of Carllm: 27 | - AI-Powered Risk Assessment 28 | - Instant Quoting 29 | - Customizable Coverage Plans 30 | - Fraud Detection 31 | - Customer Insights Dashboard 32 | - Mobile Integration 33 | - Automated Customer Support 34 | 35 | 2. **System Requirements**: TechDrive Insurance must ensure that their existing systems meet the technical requirements to integrate with Carllm, as outlined in the onboarding documentation provided by Insurellm. 36 | 37 | ## Support 38 | 39 | 1. **Customer Support**: Insurellm will provide 24/7 customer support to TechDrive Insurance via AI-driven chatbots, ensuring timely resolution of inquiries and issues. 40 | 41 | 2. **Training**: TechDrive Insurance staff will receive onboarding training sessions to ensure effective utilization of the Carllm platform, scheduled within the first two weeks of contract commencement. 42 | 43 | 3. **System Updates**: The Provider will push regular updates to improve system performance and add new features. TechDrive Insurance will receive prior notification of any significant upgrades that may affect current operations. 44 | 45 | --- 46 | 47 | **Signatures:** 48 | 49 | **Insurellm Representative:** 50 | Name: John Smith 51 | Title: Account Manager 52 | Date: ____________ 53 | 54 | **TechDrive Insurance Representative:** 55 | Name: Sarah Johnson 56 | Title: Operations Director 57 | Date: ____________ 58 | 59 | This contract will serve as the foundational agreement for the ongoing collaboration between Insurellm and TechDrive Insurance in optimizing their auto insurance offerings through the Carllm product. 
-------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/contracts/Contract with Velocity Auto Solutions for Carllm.md: -------------------------------------------------------------------------------- 1 | # Contract with Velocity Auto Solutions for Carllm 2 | 3 | **Contract Date:** October 1, 2023 4 | **Contract Number:** C-12345-2023 5 | **Client:** Velocity Auto Solutions 6 | **Product:** Carllm Auto Insurance Solution 7 | 8 | --- 9 | 10 | ## Terms 11 | 12 | 1. **Duration**: This contract is effective for a period of 12 months from the contract date. 13 | 2. **Payment Schedule**: Velocity Auto Solutions agrees to pay Insurellm the total fee associated with the selected subscription tier on a monthly basis, beginning on the contract date. 14 | 3. **Confidentiality**: Both parties agree to keep all proprietary information confidential and not to disclose it to any third parties without written consent. 15 | 4. **Intellectual Property**: All components of Carllm and any related technology are the property of Insurellm, and license is granted to Velocity Auto Solutions for internal use only. 16 | 17 | ## Renewal 18 | 19 | 1. **Automatic Renewal**: This contract will automatically renew for successive 12-month periods unless either party provides written notice at least 30 days prior to the end of the initial term or any renewal term. 20 | 2. **Rate Adjustment**: Subscription pricing may be subject to adjustment, with Insurellm providing a 60-day advance notice of any changes prior to renewal. 21 | 22 | ## Features 23 | 24 | 1. **Included Features**: 25 | - AI-Powered Risk Assessment 26 | - Instant Quoting and Customizable Coverage Plans 27 | - Fraud Detection Systems 28 | - Customer Insights Dashboard 29 | - Automated Customer Support 30 | 31 | 2. 
**Feature Enhancements**: Velocity Auto Solutions will receive updates to the Carllm product as outlined in the Insurellm 2025-2026 Roadmap, including mobile integration and telematics-based pricing enhancements. 32 | 33 | ## Support 34 | 35 | 1. **Customer Support**: Velocity Auto Solutions will have access to Insurellm’s customer support team via email or chatbot, available 24/7. 36 | 2. **Technical Maintenance**: Regular maintenance and updates to the Carllm platform will be conducted by Insurellm, with any downtime communicated in advance. 37 | 3. **Training & Resources**: Initial training sessions will be provided for Velocity Auto Solutions’ staff to ensure effective use of the Carllm suite. Regular resources and documentation will be made available online. 38 | 39 | --- 40 | 41 | **Accepted and Agreed:** 42 | **For Velocity Auto Solutions** 43 | Signature: _____________________ 44 | Name: John Doe 45 | Title: CEO 46 | Date: _____________________ 47 | 48 | **For Insurellm** 49 | Signature: _____________________ 50 | Name: Jane Smith 51 | Title: VP of Sales 52 | Date: _____________________ -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Alex Chen.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Alex Chen 4 | 5 | ## Summary 6 | - **Date of Birth:** March 15, 1990 7 | - **Job Title:** Backend Software Engineer 8 | - **Location:** San Francisco, California 9 | 10 | ## Insurellm Career Progression 11 | - **April 2020:** Joined Insurellm as a Junior Backend Developer. Focused on building APIs to enhance customer data security. 12 | - **October 2021:** Promoted to Backend Software Engineer. Took on leadership for a key project developing a microservices architecture to support the company's growing platform. 
13 | - **March 2023:** Awarded the title of Senior Backend Software Engineer due to exemplary performance in scaling backend services, reducing downtime by 30% over six months. 14 | 15 | ## Annual Performance History 16 | - **2020:** 17 | - Completed onboarding successfully. 18 | - Met expectations in delivering project milestones. 19 | - Received positive feedback from the team leads. 20 | 21 | - **2021:** 22 | - Achieved a 95% success rate in project delivery timelines. 23 | - Awarded "Rising Star" at the annual company gala for outstanding contributions. 24 | 25 | - **2022:** 26 | - Exceeded goals by optimizing existing backend code, improving system performance by 25%. 27 | - Conducted training sessions for junior developers, fostering knowledge sharing. 28 | 29 | - **2023:** 30 | - Led a major overhaul of the API internal architecture, enhancing security protocols. 31 | - Contributed to the company’s transition to a cloud-based infrastructure. 32 | - Received an overall performance rating of 4.8/5. 33 | 34 | ## Compensation History 35 | - **2020:** Base Salary: $80,000 36 | - **2021:** Base Salary Increase to $90,000; Received a performance bonus of $5,000. 37 | - **2022:** Base Salary Increase to $100,000; Performance bonus of $7,500 due to exceptional project outcomes. 38 | - **2023:** Base Salary Increase to $115,000; Performance bonus of $10,000 for leading pivotal projects. 39 | 40 | ## Other HR Notes 41 | - Participates regularly in Insurellm's Diversity & Inclusion initiatives, championing tech accessibility for underrepresented communities. 42 | - Completed several certifications in cloud architecture and DevOps, contributing to professional growth. 43 | - Plans for a professional development course in AI and machine learning to further enhance backend capabilities in Insurellm's offerings. 44 | - Acknowledged for volunteer efforts in local tech meetups, bringing seasoned engineers to mentor aspiring coders. 
45 | 46 | Alex Chen continues to be a vital asset at Insurellm, contributing significantly to innovative backend solutions that help shape the future of insurance technology. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Alex Harper.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Alex Harper 4 | 5 | ## Summary 6 | - **Date of Birth**: March 15, 1993 7 | - **Job Title**: Sales Development Representative (SDR) 8 | - **Location**: Denver, Colorado 9 | 10 | ## Insurellm Career Progression 11 | - **July 2021**: Joined Insurellm as a Sales Development Representative, focusing on lead generation and nurturing B2B relationships. 12 | - **January 2022**: Promoted to Senior Sales Development Representative due to exceptional performance in converting leads into clients. 13 | - **October 2022**: Completed an Internal Leadership Training Program, enhancing skills in team collaboration and strategic selling. Currently mentoring junior SDRs. 14 | - **April 2023**: Became involved in a cross-departmental project to streamline the customer onboarding process, showcasing initiative and leadership. 15 | 16 | ## Annual Performance History 17 | - **2021**: 18 | - **Performance Rating**: 4.5/5 19 | - **Key Achievements**: Exceeded lead generation targets by 30%. Introduced a new CRM analytics tool resulting in improved tracking of customer interactions. 20 | 21 | - **2022**: 22 | - **Performance Rating**: 4.8/5 23 | - **Key Achievements**: Awarded "SDR of the Year" for outstanding contributions. Instrumental in securing 15 new B2B contracts, surpassing targets by 40%. 24 | 25 | - **2023**: 26 | - **Performance Rating**: 4.7/5 27 | - **Key Achievements**: Played a key role in the launch of a new product line with a 25% increase in lead-to-conversion rates. Completed advanced sales negotiation training with high marks. 
28 | 29 | ## Compensation History 30 | - **2021**: 31 | - **Base Salary**: $55,000 32 | - **Bonus**: $5,500 (10% of base due to performance) 33 | 34 | - **2022**: 35 | - **Base Salary**: $65,000 (Promotion to Senior SDR) 36 | - **Bonus**: $13,000 (20% of base due to performance) 37 | 38 | - **2023**: 39 | - **Base Salary**: $75,000 40 | - **Bonus**: $15,000 (20% of base) 41 | 42 | ## Other HR Notes 43 | - **Training Completed**: 44 | - CRM Analytics & Data Management Workshop (2021) 45 | - Leadership Training Program (2022) 46 | - Advanced Sales Negotiation Course (2023) 47 | 48 | - **Awards**: 49 | - Insurellm "SDR of the Year" Award (2022) 50 | - Monthly MVP Recognition (3 times in 2023) 51 | 52 | - **Interests**: 53 | - In Alex's spare time, they enjoy participating in community volunteer programs, particularly those focused on financial literacy. 54 | - Alex is also an avid runner and has participated in several charity marathons. 55 | 56 | - **Feedback from HR**: 57 | - Alex Harper is noted for their work ethic, positive attitude, and willingness to go above and beyond for both clients and colleagues. Recognized for fostering a team spirit within the SDR team. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Alex Thomson.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Alex Thomson 4 | 5 | ## Summary 6 | - **Date of Birth:** March 15, 1995 7 | - **Job Title:** Sales Development Representative (SDR) 8 | - **Location:** Austin, Texas 9 | 10 | ## Insurellm Career Progression 11 | - **November 2022** - Joined Insurellm as a Sales Development Representative. Alex Thomson quickly adapted to the team, demonstrating exceptional communication and rapport-building skills. 12 | - **January 2023** - Promoted to Team Lead for special projects due to Alex's initiative in driving B2B customer outreach programs. 
13 | - **August 2023** - Developed a training module for new SDRs at Insurellm, enhancing onboarding processes based on feedback and strategies that Alex Thomson pioneered. 14 | - **Current** - Continues to excel in the role, leading a small team of 5 SDRs while collaborating closely with the marketing department to identify new lead-generation strategies. 15 | 16 | ## Annual Performance History 17 | - **2022** - Rated as "Exceeds Expectations." Alex Thomson achieved 150% of the sales target within the first three months. 18 | - **2023** - Rated "Outstanding." Recognized for innovative lead-generation tactics which contributed to a 30% increase in qualified leads for the sales team. 19 | 20 | ### Highlights: 21 | - Consistently maintained a 30-minute response time to inbound leads. 22 | - Successfully coordinated webinars for product launches, which attracted over 2,000 potential customers. 23 | 24 | ## Compensation History 25 | - **2022**: Base Salary - $55,000 | Bonus - $5,000 26 | - **2023**: Base Salary - $65,000 | Bonus - $10,000 (for exceeding sales targets and exceptional teamwork) 27 | - **Projected for 2024**: Anticipated salary increase due to Alex Thomson's significant contributions and successful completion of leadership training. 28 | 29 | ## Other HR Notes 30 | - Alex Thomson is an active member of the Diversity and Inclusion committee at Insurellm and has participated in various community outreach programs. 31 | - Alex has received external training on advanced CRM usage, which has subsequently improved team efficiency and productivity. 32 | - Continuous professional development through attending sales conventions and workshops, with plans to pursue certification in Sales Enablement in 2024. 33 | - Recognized by peers for promoting a supportive and high-energy team environment, often organizing team-building activities to enhance camaraderie within the SDR department. 
34 | 35 | --- 36 | **Comment:** Alex Thomson is considered a cornerstone of Insurellm’s sales team and has a bright future within the organization. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Avery Lancaster.md: -------------------------------------------------------------------------------- 1 | # Avery Lancaster 2 | 3 | ## Summary 4 | - **Date of Birth**: March 15, 1985 5 | - **Job Title**: Co-Founder & Chief Executive Officer (CEO) 6 | - **Location**: San Francisco, California 7 | 8 | ## Insurellm Career Progression 9 | - **2015 - Present**: Co-Founder & CEO 10 | Avery Lancaster co-founded Insurellm in 2015 and has since guided the company to its current position as a leading Insurance Tech provider. Avery is known for her innovative leadership strategies and risk management expertise that have catapulted the company into the mainstream insurance market. 11 | 12 | - **2013 - 2015**: Senior Product Manager at Innovate Insurance Solutions 13 | Before launching Insurellm, Avery was a leading Senior Product Manager at Innovate Insurance Solutions, where she developed groundbreaking insurance products aimed at the tech sector. 14 | 15 | - **2010 - 2013**: Business Analyst at Edge Analytics 16 | Prior to joining Innovate, Avery worked as a Business Analyst, focusing on market trends and consumer preferences in the insurance space. This position laid the groundwork for Avery’s future entrepreneurial endeavors. 17 | 18 | ## Annual Performance History 19 | - **2015**: **Exceeds Expectations** 20 | Avery’s leadership during Insurellm's foundational year led to successful product launches and securing initial funding. 21 | 22 | - **2016**: **Meets Expectations** 23 | Growth continued, though challenges arose in operational efficiency that required Avery's attention. 
24 | 25 | - **2017**: **Developing** 26 | Market competition intensified, and monthly sales metrics were below targets. Avery implemented new strategies which required a steep learning curve. 27 | 28 | - **2018**: **Exceeds Expectations** 29 | Under Avery’s pivoted vision, Insurellm launched two new successful products that significantly increased market share. 30 | 31 | - **2019**: **Meets Expectations** 32 | Steady growth, however, some team tensions led to a minor drop in employee morale. Avery recognized the need to enhance company culture. 33 | 34 | - **2020**: **Below Expectations** 35 | The COVID-19 pandemic posed unforeseen operational difficulties. Avery faced criticism for delayed strategy shifts, although efforts were eventually made to stabilize the company. 36 | 37 | - **2021**: **Exceptional** 38 | Avery's decisive transition to remote work and rapid adoption of digital tools led to record-high customer satisfaction levels and increased sales. 39 | 40 | - **2022**: **Satisfactory** 41 | Avery focused on rebuilding team dynamics and addressing employee concerns, leading to overall improvement despite a saturated market. 42 | 43 | - **2023**: **Exceeds Expectations** 44 | Market leadership was regained with innovative approaches to personalized insurance solutions. Avery is now recognized in industry publications as a leading voice in Insurance Tech innovation. 
45 | 46 | ## Compensation History 47 | - **2015**: $150,000 base salary + Significant equity stake 48 | - **2016**: $160,000 base salary + Equity increase 49 | - **2017**: $150,000 base salary + Decrease in bonus due to performance 50 | - **2018**: $180,000 base salary + performance bonus of $30,000 51 | - **2019**: $185,000 base salary + market adjustment + $5,000 bonus 52 | - **2020**: $170,000 base salary (temporary reduction due to COVID-19) 53 | - **2021**: $200,000 base salary + performance bonus of $50,000 54 | - **2022**: $210,000 base salary + retention bonus 55 | - **2023**: $225,000 base salary + $75,000 performance bonus 56 | 57 | ## Other HR Notes 58 | - **Professional Development**: Avery has actively participated in leadership training programs and industry conferences, representing Insurellm and fostering partnerships. 59 | - **Diversity & Inclusion Initiatives**: Avery has championed a commitment to diversity in hiring practices, seeing visible improvements in team representation since 2021. 60 | - **Work-Life Balance**: Feedback revealed concerns regarding work-life balance, which Avery has approached by implementing flexible working conditions and ensuring regular check-ins with the team. 61 | - **Community Engagement**: Avery led community outreach efforts, focusing on financial literacy programs, particularly aimed at underserved populations, improving Insurellm's corporate social responsibility image. 62 | 63 | Avery Lancaster has demonstrated resilience and adaptability throughout her career at Insurellm, positioning the company as a key player in the insurance technology landscape. 
-------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Emily Carter.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Emily Carter 4 | 5 | ## Summary 6 | - **Date of Birth:** August 12, 1990 7 | - **Job Title:** Account Executive 8 | - **Location:** Austin, Texas 9 | 10 | ## Insurellm Career Progression 11 | - **2021-Present:** Account Executive 12 | - Responsibilities include managing a portfolio of B2B clients, conducting sales presentations, and ensuring customer satisfaction. 13 | - Achievements: 14 | - Exceeded annual sales target by 30% in 2022. 15 | - Instrumental in acquiring 15 new corporate clients in half a year. 16 | 17 | - **2019-2021:** Sales Coordinator 18 | - Supported the sales team with administrative tasks, lead generation, and customer follow-ups. 19 | - Achievements: 20 | - Implemented a new lead tracking system that improved workflow efficiency by 25%. 21 | - Received "Employee of the Month" award twice for outstanding contribution to team goals. 22 | 23 | - **2017-2019:** Marketing Intern 24 | - Assisted with market research and campaign development for social media outreach. 25 | - Achievements: 26 | - Contributed ideas for a social media campaign that increased brand awareness by 40% within 6 months. 27 | 28 | ## Annual Performance History 29 | | Year | Performance Rating | Key Highlights | 30 | |------|--------------------|----------------| 31 | | 2023 | 4.8/5 | Recognized for exceptional client feedback and teamwork during product launches. | 32 | | 2022 | 4.5/5 | Led a successful cross-selling initiative that boosted revenue in existing accounts. | 33 | | 2021 | 4.2/5 | Successfully onboarded new clients and established strong relationships that resulted in renewals. 
| 34 | 35 | ## Compensation History 36 | | Year | Base Salary | Bonus | Total Compensation | 37 | |------|-------------|---------------|--------------------| 38 | | 2023 | $70,000 | $10,000 | $80,000 | 39 | | 2022 | $65,000 | $8,000 | $73,000 | 40 | | 2021 | $60,000 | $5,000 | $65,000 | 41 | 42 | ## Other HR Notes 43 | - **Professional Development:** Emily is currently enrolled in a leadership training program to enhance her management skills and aims to move into a senior account role within the next 2 years. 44 | - **Volunteer Work:** Actively participates in community outreach programs, representing Insurellm in charity events to promote corporate social responsibility. 45 | - **Interests:** In her spare time, Emily enjoys hiking, photography, and volunteering at local animal shelters. 46 | - **Team Feedback:** Colleagues describe Emily as a highly motivated team player who consistently uplifts everyone around her. 47 | 48 | Emily Carter exemplifies the kind of talent that drives Insurellm's success and is an invaluable asset to the company. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Emily Tran.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Emily Tran 4 | 5 | ## Summary 6 | - **Date of Birth:** March 18, 1991 7 | - **Job Title:** Digital Marketing Specialist 8 | - **Location:** San Francisco, CA 9 | 10 | --- 11 | 12 | ## Insurellm Career Progression 13 | - **February 2020 - Present**: Digital Marketing Specialist 14 | - Emily Tran has been pivotal in enhancing Insurellm's online presence through targeted social media campaigns and SEO strategies. 15 | - Successfully managed a team of interns for the 'Spring Into Safety' initiative, increasing customer engagement by 35%. 
16 | 17 | - **June 2018 - January 2020**: Marketing Coordinator 18 | - Assisted in the development and execution of marketing campaigns to promote Insurellm's products. 19 | - Collected and analyzed data on customer demographics to inform Insurellm’s marketing strategies. 20 | 21 | - **January 2017 - May 2018**: Marketing Intern 22 | - Supported the Marketing team by collaborating on content creation and digital advertising projects. 23 | - Gained hands-on experience with marketing automation tools, enriching her skillset for her role in Insurellm. 24 | 25 | --- 26 | 27 | ## Annual Performance History 28 | - **2023**: 29 | - Performance Rating: Exceeds Expectations 30 | - Key Achievements: Led the "Tech the Halls" campaign that resulted in a 50% increase in leads during the holiday season. 31 | - Emily Tran's innovative strategies and attention to detail have made her stand out among her peers. 32 | 33 | - **2022**: 34 | - Performance Rating: Meets Expectations 35 | - Key Achievements: Enhanced Insurellm's email marketing strategy, achieving a 25% open rate increase. 36 | 37 | - **2021**: 38 | - Performance Rating: Meets Expectations 39 | - Key Achievements: Contributed to the launch of a customer referral program that resulted in a 15% growth in B2C customers. 40 | 41 | --- 42 | 43 | ## Compensation History 44 | - **2023**: 45 | - Base Salary: $75,000 46 | - Bonus: $10,000 for exceeding annual targets. 47 | 48 | - **2022**: 49 | - Base Salary: $70,000 50 | - Bonus: $5,000 for achieving marketing milestones. 51 | 52 | - **2021**: 53 | - Base Salary: $67,500 54 | - No bonus due to reallocation of marketing funds during the pandemic. 
55 | 56 | --- 57 | 58 | ## Other HR Notes 59 | - **Training Completed**: 60 | - Advanced Digital Marketing Workshop (2021) 61 | - Analytics and Reporting in Digital Advertising (2022) 62 | 63 | - **Professional Development Goals**: 64 | - Emily Tran aims to become a Marketing Manager within the next two years, focusing on leading larger campaigns and developing junior team members. 65 | 66 | - **Hobbies**: 67 | - Emily enjoys photography and regularly contributes to Insurellm's social media content with her own high-quality images. 68 | - She is also passionate about sustainability and organizes monthly team volunteer events for environmental awareness. 69 | 70 | --- 71 | 72 | Emily Tran continues to be a valuable asset to Insurellm, driving innovative marketing strategies that resonate with a diverse customer base. Her contributions have significantly enhanced the company's branding and customer outreach efforts. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Jordan Blake.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Jordan Blake 4 | 5 | ## Summary 6 | - **Date of Birth:** March 15, 1993 7 | - **Job Title:** Sales Development Representative (SDR) 8 | - **Location:** Austin, Texas 9 | 10 | ## Insurellm Career Progression 11 | - **2021-06:** Joined Insurellm as an Entry-Level SDR 12 | - **2022-02:** Promoted to Junior SDR after exceeding quarterly targets by 25% 13 | - **2022-12:** Recognized as SDR of the Month for three consecutive months 14 | - **2023-05:** Participated in the Insurellm Leadership Training Program 15 | 16 | ## Annual Performance History 17 | - **2021:** First year at Insurellm; achieved 90% of monthly targets. 18 | - **Feedback:** Strong potential shown in lead generation; needs improvement in follow-up techniques. 
19 | - **2022:** Achieved 120% of targets; pioneered outreach strategies that increased customer engagement. 20 | - **Feedback:** Jordan's innovative approach contributed significantly to team success; recommended for leadership training. 21 | - **2023:** Set to exceed annual targets by 30% in Q3; initiated successful partnerships that broadened market reach. 22 | - **Feedback:** Exceptional communicator; exemplifies the values of Insurellm and promotes team collaboration. 23 | 24 | ## Compensation History 25 | - **2021-06:** Starting Salary: $50,000 26 | - **2022-04:** Merit-based increase: $55,000 (based on performance review) 27 | - **2023-06:** Performance bonus awarded: $5,000 (for exceeding goals as recognized in annual review) 28 | - **2023-09:** Salary adjustment due to promotion to Senior SDR: $65,000 29 | 30 | ## Other HR Notes 31 | - Jordan has shown an interest in continuing education, actively participating in company-sponsored sales webinars. 32 | - Notable for involvement in the Insurellm volunteer program, assisting local charity events related to financial literacy. 33 | - Employee wellness advocate, consistently promotes team bonding activities and stress-relief workshops. 34 | - Plans to enroll in a course for advanced sales strategies in Q4 2023, aiming to further enhance his skills at Insurellm. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Jordan K. Bishop.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Jordan K. Bishop 4 | 5 | ## Summary 6 | - **Date of Birth:** March 15, 1990 7 | - **Job Title:** Frontend Software Engineer 8 | - **Location:** Austin, Texas 9 | 10 | ## Insurellm Career Progression 11 | - **June 2018:** Hired as a Frontend Software Engineer. 
12 | - **August 2019:** Promoted to Senior Frontend Software Engineer due to outstanding contributions to the Insurellm web application redesign project. 13 | - **March 2021:** Led a cross-functional team for the launch of Insurellm's customer portal, enhancing user experience and engagement. 14 | - **January 2022:** Transitioned to a mentorship role, where Jordan K. Bishop began training junior engineers, which affected the focus on personal projects. 15 | - **August 2023:** Returned to core development tasks but faced challenges adapting to new frameworks, leading to performance reviews reflecting a need for improvement. 16 | 17 | ## Annual Performance History 18 | - **2019:** Exceeds Expectations - Continuously delivered high-quality code and participated actively in team meetings. 19 | - **2020:** Meets Expectations - Jordan K. Bishop maintained steady performance but faced challenges due to a higher workload from multiple projects. 20 | - **2021:** Exceeds Expectations - Recognized for leadership during the customer portal project; received the “Innovation Award” for creative problem-solving. 21 | - **2022:** Meets Expectations - While mentoring others, the shift in focus led to fewer contributions to new features, marking a decrease in performance. 22 | - **2023:** Needs Improvement - Transitioning back to development has resulted in difficulties with recent technologies, prompting a performance improvement plan. 23 | 24 | ## Compensation History 25 | - **June 2018:** Starting Salary - $85,000 26 | - **June 2019:** Salary Increase - $95,000 (Promotion to Senior Engineer) 27 | - **June 2021:** Salary Increase - $105,000 with bonus for project leadership. 28 | - **June 2022:** Salary Freeze due to company budget adjustments. 29 | - **June 2023:** Salary Adjustment - $92,000 after performance review; adjustments made in consideration of recent struggles with adaptation. 30 | 31 | ## Other HR Notes 32 | - Jordan K. 
Bishop has been an integral part of club initiatives, including the Insurellm Code Reviews and Feedback Group, providing peer support. 33 | - Active participant in the company's Diversity and Inclusion committee, promoting a positive work culture. 34 | - Jordan has expressed interest in professional development courses, particularly those focused on modern web technologies, which are being considered for sponsorship by Insurellm. 35 | - Engaged in a 6-month performance improvement plan as of August 2023, focusing on skill development and consistent performance monitoring. 36 | 37 | Jordan K. Bishop is a valued member of the Insurellm family, exhibiting a commitment to growth and development despite recent challenges. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Maxine Thompson.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Maxine Thompson 4 | 5 | ## Summary 6 | - **Date of Birth:** January 15, 1991 7 | - **Job Title:** Data Engineer 8 | - **Location:** Austin, Texas 9 | 10 | ## Insurellm Career Progression 11 | - **January 2017 - October 2018**: **Junior Data Engineer** 12 | * Maxine joined Insurellm as a Junior Data Engineer, focusing primarily on ETL processes and data integration tasks. She quickly learned Insurellm's data architecture, collaborating with other team members to streamline data workflows. 13 | - **November 2018 - December 2020**: **Data Engineer** 14 | * In her new role, Maxine expanded her responsibilities to include designing comprehensive data models and improving data quality measures. Though she excelled in technical skills, communication issues with non-technical teams led to some project delays. 
15 | - **January 2021 - Present**: **Senior Data Engineer** 16 | * Maxine was promoted to Senior Data Engineer after successfully leading a pivotal project that improved data retrieval times by 30%. She now mentors junior engineers and is involved in strategic data initiatives, solidifying her position as a valued asset at Insurellm. She was recognized as Insurellm Innovator of the year in 2023, receiving the prestigious IIOTY 2023 award. 17 | 18 | ## Annual Performance History 19 | - **2017**: *Meets Expectations* 20 | Maxine showed potential in her role but struggled with initial project deadlines. Her adaptability and willingness to learn made positive impacts on her team. 21 | 22 | - **2018**: *Exceeds Expectations* 23 | Maxine improved significantly, becoming a reliable team member with strong problem-solving skills. She took on leadership in a project that automated data entry processes. 24 | 25 | - **2019**: *Needs Improvement* 26 | During this year, difficult personal circumstances affected Maxine's performance. She missed key deadlines and had several communication issues with stakeholders. 27 | 28 | - **2020**: *Meets Expectations* 29 | Maxine focused on regaining her footing and excelling with technical skills. She was stable, though not standout, in her contributions. Feedback indicated a need for more proactivity. 30 | 31 | - **2021**: *Exceeds Expectations* 32 | Maxine spearheaded the transition to a new data warehousing solution, significantly enhancing Insurellm’s data analytics capabilities. This major achievement bolstered her reputation within the company. 33 | 34 | - **2022**: *Outstanding* 35 | Maxine continued her upward trajectory, successfully implementing machine learning algorithms to predict customer behavior, which was well-received by the leadership team and improved client satisfaction. 
36 | 37 | - **2023**: *Exceeds Expectations* 38 | Maxine has taken on mentoring responsibilities and is leading a cross-functional team for data governance initiatives, showcasing her leadership and solidifying her role at Insurellm. 39 | 40 | ## Compensation History 41 | - **2017**: $70,000 (Junior Data Engineer) 42 | - **2018**: $75,000 (Junior Data Engineer) 43 | - **2019**: $80,000 (Data Engineer) 44 | - **2020**: $84,000 (Data Engineer) 45 | - **2021**: $95,000 (Senior Data Engineer) 46 | - **2022**: $110,000 (Senior Data Engineer) 47 | - **2023**: $120,000 (Senior Data Engineer) 48 | 49 | ## Other HR Notes 50 | - Maxine participated in various company-sponsored trainings related to big data technologies and cloud infrastructure. 51 | - She was recognized for her contributions with the prestigious Insurellm IIOTY Innovator Award in 2023. 52 | - Maxine is currently involved in the women-in-tech initiative and participates in mentorship programs to guide junior employees. 53 | - Future development areas include improving her stakeholder communication skills to ensure smoother project transitions and collaboration. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Oliver Spencer.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Oliver Spencer 4 | 5 | ## Summary 6 | - **Date of Birth**: May 14, 1990 7 | - **Job Title**: Backend Software Engineer 8 | - **Location**: Austin, Texas 9 | 10 | ## Insurellm Career Progression 11 | - **March 2018**: Joined Insurellm as a Backend Developer I, focusing on API development for customer management systems. 12 | - **July 2019**: Promoted to Backend Developer II after successfully leading a team project to revamp the claims processing system, reducing response time by 30%. 
13 | - **June 2021**: Transitioned to Backend Software Engineer with a broader role in architecture and system design, collaborating closely with the DevOps team. 14 | - **September 2022**: Assigned as the lead engineer for the new "Innovate" initiative, aimed at integrating AI-driven solutions into existing products. 15 | - **January 2023**: Awarded a mentorship role to guide new hires in backend technology and best practices within Insurellm. 16 | 17 | ## Annual Performance History 18 | - **2018**: **3/5** - Adaptable team player but still learning to take initiative. 19 | - **2019**: **4/5** - Demonstrated strong problem-solving skills, outstanding contribution on the claims project. 20 | - **2020**: **2/5** - Struggled with time management; fell behind on deadlines during a high-traffic release period. 21 | - **2021**: **4/5** - Made a significant turnaround with organized work habits and successful project management. 22 | - **2022**: **5/5** - Exceptional performance during the "Innovate" initiative, showcasing leadership and creativity. 23 | - **2023**: **3/5** - Maintaining steady work; expectations for innovation not fully met, leading to discussions about goals. 24 | 25 | ## Compensation History 26 | - **March 2018**: Initial salary of $80,000. 27 | - **July 2019**: Salary increased to $90,000 post-promotion. 28 | - **June 2021**: Salary raised to $105,000 after role transition. 29 | - **September 2022**: Salary adjustment to $120,000 due to increased responsibilities and performance. 30 | - **January 2023**: Revised salary of $125,000 in recognition of mentorship role. 31 | 32 | ## Other HR Notes 33 | - Oliver enjoys a strong rapport with team members and is known for organizing regular team-building activities. 34 | - Participated in Insurellm’s Hackathon in 2022, where he led a project that won “Best Overall Solution.” 35 | - Pursuing AWS Certified Solutions Architect certification to enhance cloud skillset. 
36 | - Has expressed interest in further leadership opportunities within Insurellm and may consider project management roles in the future. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Samantha Greene.md: -------------------------------------------------------------------------------- 1 | # Samantha Greene 2 | 3 | ## Summary 4 | - **Date of Birth:** October 14, 1990 5 | - **Job Title:** HR Generalist 6 | - **Location:** Denver, Colorado 7 | 8 | ## Insurellm Career Progression 9 | - **2020** - Joined Insurellm as an HR Coordinator 10 | - Responsibilities included assisting with recruitment processes and managing employee onboarding. 11 | - **2021** - Promoted to HR Generalist 12 | - Transitioned to a role with expanded responsibilities, including handling employee relations and benefits administration. 13 | - **2022** - Completed the HR Leadership Development Program 14 | - Enhanced skills in conflict resolution and strategic planning. 15 | - **2023** - Actively involved in initiating the company’s Diversity and Inclusion programs. 16 | - Samantha Greene played a key role in launching mentorship initiatives and employee resource groups. 17 | 18 | ## Annual Performance History 19 | - **2020:** Exceeds Expectations 20 | Samantha Greene demonstrated exceptional organizational skills and contributed to a streamlined onboarding process, earning commendations from senior leadership. 21 | 22 | - **2021:** Meets Expectations 23 | While proficient in her new role, Samantha Greene struggled with time management during peak recruitment seasons, resulting in occasional missed deadlines. 24 | 25 | - **2022:** Below Expectations 26 | Samantha Greene faced challenges in balancing employee relations issues, thereby impacting her performance. Gaps in communication and follow-up led to a push for additional training. 
27 | 28 | - **2023:** Meets Expectations 29 | After attending workshops focused on conflict resolution, Samantha Greene successfully improved her handling of employee grievances, though minor issues still arose in managing multitasking within projects. 30 | 31 | ## Compensation History 32 | - **2020:** Base Salary - $55,000 33 | The entry-level salary matched industry standards for HR Coordinators with limited experience. 34 | 35 | - **2021:** Base Salary - $65,000 36 | Following her promotion, Samantha Greene received a raise commensurate with her new responsibilities. 37 | 38 | - **2022:** Base Salary - $65,000 39 | No increase as a result of performance concerns; however, Samantha Greene continued to receive positive feedback for her participation in diversity initiatives. 40 | 41 | - **2023:** Base Salary - $70,000 42 | Recognized for substantial improvement in employee relations management and contributions to company culture, leading to a well-deserved increase. 43 | 44 | ## Other HR Notes 45 | - Samantha Greene has expressed interest in pursuing an HR certification (SHRM-CP) to further her career growth within Insurellm. 46 | - Participated in Insurellm's employee wellness program, promoting mental health resources among staff. 47 | - Actively volunteers with local nonprofits and encourages staff involvement in community outreach programs, enhancing Insurellm's corporate social responsibility initiatives. 48 | 49 | Samantha Greene is a valuable asset to Insurellm, continuously working on professional development and contributing to a supportive workplace culture. 
-------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/employees/Samuel Trenton.md: -------------------------------------------------------------------------------- 1 | # HR Record 2 | 3 | # Samuel Trenton 4 | 5 | ## Summary 6 | - **Date of Birth:** April 12, 1989 7 | - **Job Title:** Senior Data Scientist 8 | - **Location:** Austin, Texas 9 | 10 | ## Insurellm Career Progression 11 | - **January 2020 - Present:** Senior Data Scientist 12 | *Promoted for demonstrating exceptional analytical skills and leadership potential. Led several projects that improved customer segmentation strategies, resulting in a 15% increase in customer retention.* 13 | 14 | - **June 2018 - December 2019:** Data Scientist 15 | *Joined the Insurellm team and worked on developing predictive modeling techniques to assess risk for both B2B and B2C customers. Received recognition for the success of the "Risk Assessment Model" project.* 16 | 17 | - **August 2016 - May 2018:** Junior Data Analyst 18 | *Started at Insurellm as a Junior Data Analyst, focusing on data cleaning and preliminary analysis of customer data. Received training in various data visualization techniques, which aided in the transition to a Data Scientist role.* 19 | 20 | ## Annual Performance History 21 | - **2023:** Rating: 4.5/5 22 | *Samuel exceeded expectations, successfully leading a cross-departmental project on AI-driven underwriting processes.* 23 | 24 | - **2022:** Rating: 3.0/5 25 | *Some challenges in meeting deadlines and collaboration with the engineering team. Received constructive feedback and participated in a team communication workshop.* 26 | 27 | - **2021:** Rating: 4.0/5 28 | *There was notable improvement in performance. 
Worked to enhance model accuracy, leading to improved risk assessment outcomes for B2C customers.* 29 | 30 | - **2020:** Rating: 3.5/5 31 | *Exhibited a solid performance during the initial year as a Senior Data Scientist but had struggles adapting to new leadership expectations.* 32 | 33 | ## Compensation History 34 | - **2023:** Base Salary: $115,000 + Bonus: $15,000 35 | *Annual bonus based on successful project completions and performance metrics.* 36 | 37 | - **2022:** Base Salary: $110,000 + Bonus: $10,000 38 | *Slight decrease in bonus due to performance challenges during the year.* 39 | 40 | - **2021:** Base Salary: $105,000 + Bonus: $12,000 41 | *Merit-based increase, reflecting consistent contributions to the data science team.* 42 | 43 | - **2020:** Base Salary: $100,000 + Bonus: $8,000 44 | *Initial compensation as Senior Data Scientist, with a focus on building rapport with cross-functional teams.* 45 | 46 | ## Other HR Notes 47 | - **Professional Development:** Completed several workshops on machine learning and AI applications in insurance. Currently pursuing an online certification in deep learning. 48 | 49 | - **Engagement in Company Culture:** Regularly participates in team-building events and contributes to the internal newsletter, sharing insights on data science trends. 50 | 51 | - **Areas for Improvement:** Collaboration with engineering teams has been noted as an area needing focus. Samuel has expressed a desire to work closely with tech teams to align data initiatives better. 52 | 53 | - **Personal Interests:** Has a keen interest in hiking and photography, often sharing his photography from weekend hikes with colleagues, fostering positive team relationships. 
-------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/products/Carllm.md: -------------------------------------------------------------------------------- 1 | # Product Summary 2 | 3 | # Carllm 4 | 5 | ## Summary 6 | 7 | Carllm is an innovative auto insurance product developed by Insurellm, designed to streamline the way insurance companies offer coverage to their customers. Powered by cutting-edge artificial intelligence, Carllm utilizes advanced algorithms to deliver personalized auto insurance solutions, ensuring optimal coverage while minimizing costs. With a robust infrastructure that supports both B2B and B2C customers, Carllm redefines the auto insurance landscape and empowers insurance providers to enhance customer satisfaction and retention. 8 | 9 | ## Features 10 | 11 | - **AI-Powered Risk Assessment**: Carllm leverages artificial intelligence to analyze driver behavior, vehicle conditions, and historical claims data. This enables insurers to make informed decisions and set competitive premiums that reflect true risk profiles. 12 | 13 | - **Instant Quoting**: With Carllm, insurance companies can offer near-instant quotes to customers, enhancing the customer experience. The AI engine processes data in real-time, drastically reducing the time it takes to generate quotes. 14 | 15 | - **Customizable Coverage Plans**: Carllm allows insurers to create flexible and tailored insurance packages based on individual customer needs. This customization improves customer engagement and retention. 16 | 17 | - **Fraud Detection**: The product incorporates advanced analytics to identify potentially fraudulent claims, significantly reducing the risk of losses for insurance providers. 
18 | 19 | - **Customer Insights Dashboard**: Carllm provides insurers with a powerful dashboard that offers deep insights into customer behavior, claims patterns, and market trends, enabling informed decision-making and strategic planning. 20 | 21 | - **Mobile Integration**: Carllm is designed to work seamlessly with mobile applications, providing both insurers and end-users access to policy management and claims reporting on the go. 22 | 23 | - **Automated Customer Support**: Leveraging AI chatbots, Carllm offers 24/7 customer support, helping to resolve inquiries quickly and efficiently, thus improving customer satisfaction. 24 | 25 | ## Pricing 26 | 27 | Carllm is offered under a subscription-based pricing model tailored to meet the needs of insurance companies of all sizes. Our pricing tiers are designed to provide maximum flexibility and value: 28 | 29 | - **Basic Tier**: $1,000/month 30 | - Ideal for small insurance firms. 31 | - Access to core features and standard reporting. 32 | 33 | - **Professional Tier**: $2,500/month 34 | - For medium-sized companies. 35 | - All Basic Tier features plus advanced analytics and fraud detection. 36 | 37 | - **Enterprise Tier**: $5,000/month 38 | - Customized solutions for large insurance firms. 39 | - Comprehensive support, full feature access, and integration with existing systems. 40 | 41 | Contact our sales team for a personalized quote and discover how Carllm can transform your auto insurance offerings! 42 | 43 | ## 2025-2026 Roadmap 44 | 45 | In our commitment to continuous improvement and innovation, Insurellm has outlined the following roadmap for Carllm: 46 | 47 | ### Q1 2025: Launch Feature Enhancements 48 | - **Expanded data integrations** for better risk assessment. 49 | - **Enhanced fraud detection algorithms** to reduce losses. 50 | 51 | ### Q2 2025: Customer Experience Improvements 52 | - Launch of a new **mobile app** for end-users. 
53 | - Introduction of **telematics-based pricing** to provide even more tailored coverage options. 54 | 55 | ### Q3 2025: Global Expansion 56 | - Begin pilot programs for international insurance markets. 57 | - Collaborate with local insurers to offer compliant, localized versions of Carllm. 58 | 59 | ### Q4 2025: AI and Machine Learning Upgrades 60 | - Implement next-gen machine learning models for predictive analysis. 61 | - Roll out customer insights dashboard updates based on user feedback. 62 | 63 | ### 2026: Scaling and Partnerships 64 | - Increase partnerships with automakers for integrated insurance solutions. 65 | - Enhance the **AI customer support system** to include multi-language support. 66 | 67 | Carllm is not just an auto insurance product; it is a transformative tool for the insurance industry. Join us on this exciting journey as we redefine the future of auto insurance with technology and customer-centric solutions. -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/products/Homellm.md: -------------------------------------------------------------------------------- 1 | # Product Summary 2 | 3 | # Homellm 4 | 5 | ## Summary 6 | Homellm is an innovative home insurance product developed by Insurellm that leverages advanced AI technology to revolutionize the way insurance providers offer coverage to homeowners. Designed for both B2B and B2C segments, Homellm empowers insurers to provide personalized, data-driven policies, enhancing customer experience while minimizing risk and operational costs. By integrating seamlessly with existing systems, Homellm helps insurance companies streamline their processes and stay competitive in the ever-evolving insurance industry. 7 | 8 | ## Features 9 | ### 1. AI-Powered Risk Assessment 10 | Homellm utilizes sophisticated AI algorithms to analyze vast datasets, allowing insurance companies to assess risks accurately. 
This feature provides real-time insights for underwriting decisions, enabling insurers to tailor policies to individual customer needs. 11 | 12 | ### 2. Dynamic Pricing Model 13 | With Homellm's innovative dynamic pricing model, insurance providers can offer flexible premiums based on real-time risk evaluations and historical data. This adaptability ensures that customers pay a fair price that accurately reflects their unique risk profile. 14 | 15 | ### 3. Instant Claim Processing 16 | The AI-driven claims management system in Homellm automates the entire claims process, reducing processing time from weeks to hours. Insurers can resolve claims quickly and efficiently, leading to enhanced customer satisfaction. 17 | 18 | ### 4. Predictive Maintenance Alerts 19 | Homellm incorporates predictive analytics to advise homeowners on potential risks and maintenance needs. By preventing issues before they arise, this feature helps customers minimize hazards, lowering the likelihood of claims. 20 | 21 | ### 5. Multi-Channel Integration 22 | Homellm seamlessly integrates into existing insurance platforms, providing a centralized hub for managing customer policies and claims. Insurance providers can easily access customer data, allowing for improved service delivery across various channels. 23 | 24 | ### 6. Customer Portal 25 | A user-friendly online portal and mobile application enables customers to manage their policies, submit claims, and view coverage details 24/7. Homellm prioritizes transparency and ease of use, helping insurers foster trust and long-term relationships with their customers. 26 | 27 | ## Pricing 28 | At Insurellm, we believe in providing value without compromising quality. The pricing for Homellm is structured based on the size of the insurance provider and the level of customization required. 29 | 30 | - **Basic Tier:** Starting at $5,000/month for small insurers with basic integration features. 
31 | - **Standard Tier:** Starting at $10,000/month for medium-sized insurers including advanced analytics and reporting tools. 32 | - **Enterprise Tier:** Custom pricing for large insurance companies that require full customization, dedicated support, and additional features, such as enterprise-grade security and compliance. 33 | 34 | All tiers include a comprehensive training program and ongoing updates to ensure optimal performance. 35 | 36 | ## Roadmap 37 | The development roadmap for Homellm includes the following key milestones: 38 | 39 | - **Q1 2024:** Launch of Homellm version 1.0, featuring core functionalities and integrations. 40 | - **Q3 2024:** Introduction of enhanced analytics capabilities, including visualization tools and advanced reporting features. 41 | - **Q1 2025:** Release of Homellm version 2.0, with expanded predictive maintenance alerts and automated underwriting processes. 42 | - **Q3 2025:** Establish partnerships with IoT device manufacturers to provide integrated solutions for proactive risk management. 43 | - **Q1 2026:** Ongoing improvements based on user feedback and industry trends, ensuring that Homellm remains at the forefront of home insurance technology. 44 | 45 | With Homellm, Insurellm is committed to transforming the landscape of home insurance, ensuring both innovation and reliability for all insurance providers and their customers. Explore the future of home insurance today with Homellm! -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/products/Markellm.md: -------------------------------------------------------------------------------- 1 | # Product Summary 2 | 3 | # Markellm 4 | 5 | ## Summary 6 | 7 | Markellm is an innovative two-sided marketplace designed to seamlessly connect consumers with insurance companies. 
Powered by advanced matching AI, Markellm transforms the insurance shopping experience, making it more efficient, personalized, and accessible. Whether you're a homeowner searching for the best rates on home insurance or an insurer looking to reach new customers, Markellm acts as the ultimate bridge, delivering tailored solutions for all parties involved. With a user-friendly interface and powerful algorithms, Markellm not only saves time but also enhances decision-making in the often-complex insurance landscape. 8 | 9 | ## Features 10 | 11 | - **AI-Powered Matching**: Markellm utilizes sophisticated AI algorithms to match consumers with the most suitable insurance products based on their individual needs and preferences. This ensures that both parties get the best possible options. 12 | 13 | - **User-Friendly Interface**: Designed with user experience in mind, Markellm features an intuitive interface that allows consumers to easily browse and compare various insurance offerings from multiple providers. 14 | 15 | - **Real-Time Quotes**: Consumers can receive real-time quotes from different insurance companies, empowering them to make informed decisions quickly without endless back-and-forth communication. 16 | 17 | - **Customized Recommendations**: Based on user profiles and preferences, Markellm provides personalized insurance recommendations, ensuring consumers find the right coverage at competitive rates. 18 | 19 | - **Secure Transactions**: Markellm prioritizes security, employing robust encryption methods to ensure that all transactions and data exchanges are safe and secure. 20 | 21 | - **Customer Support**: Our dedicated support team is always available to assist both consumers and insurers throughout the process, providing guidance and answering any questions that may arise. 
22 | 23 | - **Data Insights**: Insurers gain access to valuable data insights through Markellm's analytics dashboard, helping them understand market trends and consumer behavior to refine their offerings. 24 | 25 | ## Pricing 26 | 27 | At Markellm, we believe in transparency and flexibility. Our pricing structure is designed to accommodate different types of users—whether you're a consumer seeking insurance or an insurance provider seeking customers. 28 | 29 | ### For Consumers: 30 | - **Free Membership**: Access to the marketplace at no cost, allowing unlimited browsing and comparisons. 31 | - **Premium Features**: Optional subscription at $9.99/month for advanced analytics on choices, priority customer support, and enhanced customization options. 32 | 33 | ### For Insurance Companies: 34 | - **Basic Listing Fee**: $199/month for a featured listing on the platform, providing exposure to thousands of potential customers. 35 | - **Performance-Based Pricing**: Option for variable pricing based on successful customer acquisitions— pay $25 per lead generated through Markellm. 36 | 37 | ## 2025-2026 Roadmap 38 | 39 | ### Q1 2025 40 | - Launch a mobile app version of Markellm, making it even easier for consumers and insurers to connect on-the-go. 41 | - Introduce a referral program that rewards users for promoting Markellm to their network. 42 | 43 | ### Q2 2025 44 | - Expand the marketplace to include additional insurance products, such as life and health insurance. 45 | - Partner with third-party data aggregators to enhance the accuracy of our AI matching capabilities. 46 | 47 | ### Q3 2025 48 | - Initiate a comprehensive marketing campaign targeting both consumers and insurers to increase user acquisition and brand awareness. 49 | - Release user testimonials and case studies showcasing successful matches made through Markellm. 
50 | 51 | ### Q4 2026 52 | - Implement machine learning enhancements to our AI algorithm, further increasing the precision and personalization of matches. 53 | - Explore international expansion opportunities, launching in select markets outside the US. 54 | 55 | Markellm is committed to improving the insurance experience for both consumers and providers. By leveraging technology and user insights, we aim to become the leading platform in the insurance marketplace ecosystem. Join us on this exciting journey towards smarter, more efficient insurance solutions! -------------------------------------------------------------------------------- /project2 - Expert knowledge worker/knowledge-base/products/Rellm.md: -------------------------------------------------------------------------------- 1 | # Product Summary 2 | 3 | # Rellm: AI-Powered Enterprise Reinsurance Solution 4 | 5 | ## Summary 6 | 7 | Rellm is an innovative enterprise reinsurance product developed by Insurellm, designed to transform the way reinsurance companies operate. Harnessing the power of artificial intelligence, Rellm offers an advanced platform that redefines risk management, enhances decision-making processes, and optimizes operational efficiencies within the reinsurance industry. With seamless integrations and robust analytics, Rellm enables insurers to proactively manage their portfolios and respond to market dynamics with agility. 8 | 9 | ## Features 10 | 11 | ### AI-Driven Analytics 12 | Rellm utilizes cutting-edge AI algorithms to provide predictive insights into risk exposures, enabling users to forecast trends and make informed decisions. Its real-time data analysis empowers reinsurance professionals with actionable intelligence. 13 | 14 | ### Seamless Integrations 15 | Rellm's architecture is designed for effortless integration with existing systems. 
Whether it's policy management, claims processing, or financial reporting, Rellm connects seamlessly with diverse data sources to create a unified ecosystem. 16 | 17 | ### Risk Assessment Module 18 | The comprehensive risk assessment module within Rellm allows insurers to evaluate risk profiles accurately. By leveraging historical data and advanced modeling techniques, Rellm provides a clear picture of potential liabilities and expected outcomes. 19 | 20 | ### Customizable Dashboard 21 | Rellm features a customizable dashboard that presents key metrics and performance indicators in an intuitive interface. Users can tailor their view to focus on what matters most to their business, enhancing user experience and productivity. 22 | 23 | ### Regulatory Compliance Tools 24 | Rellm includes built-in compliance tracking features to help organizations meet local and international regulatory standards. This ensures that reinsurance practices remain transparent and accountable. 25 | 26 | ### Client and Broker Portals 27 | Rellm offers dedicated portals for both clients and brokers, facilitating real-time communication and documentation sharing. This strengthens partnerships and drives operational excellence across the board. 28 | 29 | ## Pricing 30 | 31 | Insurellm offers flexible pricing plans for Rellm to cater to various business needs: 32 | 33 | - **Basic Plan**: $5,000/month 34 | - Includes access to core features and standard integrations. 35 | 36 | - **Professional Plan**: $10,000/month 37 | - Includes all features, advanced integrations, and priority customer support. 38 | 39 | - **Enterprise Plan**: Custom pricing 40 | - Tailored solutions with personalized features, extensive integrations, and dedicated account management. 41 | 42 | Join the growing number of organizations leveraging Rellm to enhance their reinsurance processes while driving profitability and compliance. 
43 | 44 | ## 2025-2026 Roadmap 45 | 46 | At Insurellm, we are committed to the continuous improvement of Rellm. Our roadmap for 2025-2026 includes: 47 | 48 | - **Q3 2025**: 49 | - Launch of the Rellm Mobile App for on-the-go insights and management. 50 | - Introduction of augmented reality (AR) features for interactive risk assessments. 51 | 52 | - **Q1 2026**: 53 | - Deployment of advanced machine learning models for even more accurate risk predictions. 54 | - Expansion of integration capabilities to support emerging technologies in the insurance sector. 55 | 56 | - **Q3 2026**: 57 | - Release of a community platform for Rellm users to exchange insights, tips, and best practices. 58 | - Launch of Rellm 2.0, featuring enhanced user interface and premium features based on user feedback. 59 | 60 | Experience the future of reinsurance with Rellm, where innovation meets reliability. Let Insurellm help you navigate the complexities of the reinsurance market smarter and faster. -------------------------------------------------------------------------------- /project3 - Price intelligence/data.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "46d90d45-2d19-49c7-b853-6809dc417ea7", 6 | "metadata": {}, 7 | "source": [ 8 | "## Dataset Curator Amazon Appliance project\n", 9 | "\n", 10 | "Please now head over to Google Colab:\n", 11 | "\n", 12 | "Data curation: https://colab.research.google.com/drive/1cYLqi3_XlXzbzYMKd8j0VDycIKmWABAS \n", 13 | "\n", 14 | "Training: https://colab.research.google.com/drive/1TA_GwdrpWwRZfUw8I9y2fqqwFv9CBU1O \n", 15 | "\n", 16 | "Inference: https://colab.research.google.com/drive/1V6_F3r6Tge3EASyffdcWWMrA7vOzHNL8" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "id": "8dd16f72-3ba9-4c0c-9eac-4f6773c4f1a6", 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [] 26 | } 27 | ], 28 | "metadata": { 29 | 
"kernelspec": { 30 | "display_name": "Python 3 (ipykernel)", 31 | "language": "python", 32 | "name": "python3" 33 | }, 34 | "language_info": { 35 | "codemirror_mode": { 36 | "name": "ipython", 37 | "version": 3 38 | }, 39 | "file_extension": ".py", 40 | "mimetype": "text/x-python", 41 | "name": "python", 42 | "nbconvert_exporter": "python", 43 | "pygments_lexer": "ipython3", 44 | "version": "3.11.11" 45 | } 46 | }, 47 | "nbformat": 4, 48 | "nbformat_minor": 5 49 | } 50 | -------------------------------------------------------------------------------- /project4 - Resume parser/my_resume.txt: -------------------------------------------------------------------------------- 1 | Edward Donner 2 | 10 W 14th Street, 3 | New York NY 10001 4 | Phone: 212-123-7878 5 | Email: ed@edwarddonner.com 6 | US Citizen 7 | UK Citizen 8 | 9 | Summary: Currently Co-Founder and CTO of AI startup Nebula.io; previously founder and CEO of AI startup untapt, acquired in 2020. 20+ year career in technology and data science. 10 | 11 | Skills: 12 | 13 | Entrepreneurship, Technical leadership, Python, Data Science, LLMs, Generative AI 14 | 15 | Work Experience: 16 | 17 | June 2021 - Present 18 | Co-Founder and CTO 19 | Nebula.io 20 | 21 | I’m the co-founder and CTO of Nebula.io. We help recruiters source, understand, engage and manage talent, using Generative AI and other forms of machine learning. Our patented model matches people with roles with greater accuracy and speed than previously imaginable — no keywords required. Take a look for yourself at https://nebula.io; it’s completely free to try. 22 | 23 | Our long term goal is to help people discover their potential and pursue their reason for being, motivated by a concept called Ikigai. We help people find roles where they will be most fulfilled and successful; as a result, we will raise the level of human prosperity. 
It sounds grandiose, but since 77% of people don’t consider themselves inspired or engaged at work, it’s completely within our reach. 24 | 25 | Oct 2013 - June 2021 26 | Founder, CEO, CTO 27 | untapt.com 28 | I founded untapt in October 2013; emerged from stealth in 2014 and went into production with first product in 2015. 29 | 30 | July 1997 - Oct 2013 31 | Managing Director 32 | JPMorgan Chase 33 | 34 | Aug 1995 - June 1997 35 | Software Developer 36 | IBM 37 | 38 | Education: 39 | 40 | 1992-1995 41 | Physics 42 | University of Oxford 43 | 44 | Hobbies: 45 | 46 | Coding 47 | Amateur DJ -------------------------------------------------------------------------------- /project5 - Code generator/curator.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "46d90d45-2d19-49c7-b853-6809dc417ea7", 6 | "metadata": {}, 7 | "source": [ 8 | "## Dataset Curator for Project 3\n", 9 | "### Example code for simple equity trading decisions\n", 10 | "\n", 11 | "There are 3 sample python files generated (via multiple queries) by GPT-4o, Claude 3 Opus and Gemini 1.5 Pro.\n", 12 | "This notebook creates training data from these files, then converts to the HuggingFace format and uploads to the hub.\n", 13 | "\n", 14 | "It goes without saying: this trading code was generated by LLMs, is over-simplified and untrusted - do not make actual trading decisions based on this!" 
15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "id": "16cf3aa2-f407-4b95-8b9e-c3c586f67835", 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "import os\n", 25 | "import glob\n", 26 | "import matplotlib.pyplot as plt\n", 27 | "import random\n", 28 | "from datasets import Dataset\n", 29 | "from dotenv import load_dotenv\n", 30 | "from huggingface_hub import login\n", 31 | "import transformers\n", 32 | "from transformers import AutoTokenizer" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "id": "375302b6-b6a7-46ea-a74c-c2400dbd8bbe", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "# Load environment variables in a file called .env\n", 43 | "from datasets import load_dataset, Dataset\n", 44 | "load_dotenv()\n", 45 | "os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-hf-token-if-not-using-env')" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "id": "8a0c9fff-9eff-42fd-971b-403c99d9b726", 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "# Constants\n", 56 | "\n", 57 | "DATASET_NAME = \"trade_code_dataset\"\n", 58 | "BASE_MODEL = \"bigcode/starcoder2-3b\"" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "id": "586b07ba-5396-4c34-a696-01c8bc3597a0", 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "# A utility method to convert the text contents of a file into a list of methods\n", 69 | "\n", 70 | "def extract_method_bodies(text):\n", 71 | " chunks = text.split('def trade')[1:]\n", 72 | " results = []\n", 73 | " for chunk in chunks:\n", 74 | " lines = chunk.split('\\n')[1:]\n", 75 | " body = '\\n'.join(line for line in lines if line!='\\n')\n", 76 | " results.append(body)\n", 77 | " return results " 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "id": "953422d0-2e75-4d01-862e-6383df54d9e5", 84 | "metadata": {}, 85 | "outputs": [], 86 | 
"source": [ 87 | "# Read all .py files and convert into training data\n", 88 | "\n", 89 | "bodies = []\n", 90 | "for filename in glob.glob(\"*.py\"):\n", 91 | " with open(filename, 'r') as file:\n", 92 | " content = file.read()\n", 93 | " extracted = extract_method_bodies(content)\n", 94 | " bodies += extracted\n", 95 | "\n", 96 | "print(f\"Extracted {len(bodies)} trade method bodies\")" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "id": "836480e9-ba23-4aa3-a7e2-2666884e9a06", 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "# Let's look at one\n", 107 | "\n", 108 | "print(random.choice(bodies))" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "id": "47b10e7e-a562-4968-af3f-254a9b424ac8", 115 | "metadata": {}, 116 | "outputs": [], 117 | "source": [ 118 | "# To visualize the lines of code in each \n", 119 | "\n", 120 | "%matplotlib inline\n", 121 | "fig, ax = plt.subplots(1, 1)\n", 122 | "lengths = [len(body.split('\\n')) for body in bodies]\n", 123 | "ax.set_xlabel('Lines of code')\n", 124 | "ax.set_ylabel('Count of training samples');\n", 125 | "_ = ax.hist(lengths, rwidth=0.7, color=\"green\", bins=range(0, max(lengths)))" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": null, 131 | "id": "03b37f62-679e-4c3d-9e5b-5878a82696e6", 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [ 135 | "# Add the prompt to the start of every training example\n", 136 | "\n", 137 | "prompt = \"\"\"\n", 138 | "# tickers is a list of stock tickers\n", 139 | "import tickers\n", 140 | "\n", 141 | "# prices is a dict; the key is a ticker and the value is a list of historic prices, today first\n", 142 | "import prices\n", 143 | "\n", 144 | "# Trade represents a decision to buy or sell a quantity of a ticker\n", 145 | "import Trade\n", 146 | "\n", 147 | "import random\n", 148 | "import numpy as np\n", 149 | "\n", 150 | "def trade():\n", 151 | "\"\"\"\n", 
152 | "\n", 153 | "data = [prompt + body for body in bodies]\n", 154 | "print(random.choice(data))" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": null, 160 | "id": "28fdb82f-3864-4023-8263-547d17571a5c", 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "# Distribution of tokens in our dataset\n", 165 | "\n", 166 | "tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)\n", 167 | "tokenized_data = [tokenizer.encode(each) for each in data]\n", 168 | "token_counts = [len(tokens) for tokens in tokenized_data]\n", 169 | "\n", 170 | "%matplotlib inline\n", 171 | "fig, ax = plt.subplots(1, 1)\n", 172 | "ax.set_xlabel('Number of tokens')\n", 173 | "ax.set_ylabel('Count of training samples');\n", 174 | "_ = ax.hist(token_counts, rwidth=0.7, color=\"purple\", bins=range(0, max(token_counts), 20))" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": null, 180 | "id": "ffb0d55c-5602-4518-b811-fa385c0959a7", 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "CUTOFF = 320\n", 185 | "truncated = len([tokens for tokens in tokenized_data if len(tokens) > CUTOFF])\n", 186 | "percentage = truncated/len(tokenized_data)*100\n", 187 | "print(f\"With cutoff at {CUTOFF}, we truncate {truncated} datapoints which is {percentage:.1f}% of the dataset\")" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "id": "fb2bb067-2bd3-498b-9fc8-5e8186afbe27", 194 | "metadata": {}, 195 | "outputs": [], 196 | "source": [ 197 | "random.seed(42)\n", 198 | "random.shuffle(data)" 199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": null, 204 | "id": "26713fb9-765f-4524-b9db-447e97686d1a", 205 | "metadata": {}, 206 | "outputs": [], 207 | "source": [ 208 | "# I don't make a Training / Test split - if we had more training data, we would!\n", 209 | "\n", 210 | "dataset = Dataset.from_dict({'text':data})" 211 | ] 212 | }, 213 | { 214 | 
"cell_type": "code", 215 | "execution_count": null, 216 | "id": "bfabba27-ef47-46a8-a26b-4d650ae3b193", 217 | "metadata": {}, 218 | "outputs": [], 219 | "source": [ 220 | "login(token=os.environ['HF_TOKEN'])" 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": null, 226 | "id": "55b595cd-2df7-4be4-aec1-0667b17d36f1", 227 | "metadata": {}, 228 | "outputs": [], 229 | "source": [ 230 | "dataset.push_to_hub(DATASET_NAME, private=True)" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "id": "4691a025-9800-4e97-a20f-a65f102401f1", 236 | "metadata": {}, 237 | "source": [ 238 | "## And now to head over to a Google Colab for fine-tuning in the cloud\n", 239 | "\n", 240 | "Follow this link for the Colab: https://colab.research.google.com/drive/19E9hoAzWKvn9c9SHqM4Xan_Ph4wNewHS?usp=sharing\n" 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": null, 246 | "id": "04a6c3e0-a2e6-4115-a01a-45e79dfdb730", 247 | "metadata": {}, 248 | "outputs": [], 249 | "source": [] 250 | } 251 | ], 252 | "metadata": { 253 | "kernelspec": { 254 | "display_name": "Python 3 (ipykernel)", 255 | "language": "python", 256 | "name": "python3" 257 | }, 258 | "language_info": { 259 | "codemirror_mode": { 260 | "name": "ipython", 261 | "version": 3 262 | }, 263 | "file_extension": ".py", 264 | "mimetype": "text/x-python", 265 | "name": "python", 266 | "nbconvert_exporter": "python", 267 | "pygments_lexer": "ipython3", 268 | "version": "3.11.10" 269 | } 270 | }, 271 | "nbformat": 4, 272 | "nbformat_minor": 5 273 | } 274 | -------------------------------------------------------------------------------- /project6 - simple RAG example with FAISS/expert.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "dfe37963-1af6-44fc-a841-8e462443f5e6", 6 | "metadata": {}, 7 | "source": [ 8 | "## Project 2: Expert\n", 9 | "\n", 10 | "### A question answering 
agent that is an expert in a new product being launched\n", 11 | "### The agent needs to be accurate and the solution should be low cost.\n", 12 | "\n", 13 | "This project will use RAG (Retrieval Augmented Generation) to ensure our question/answering assistant has high accuracy.\n", 14 | "\n", 15 | "We will be using the LangChain framework which does most of the heavy lifting for us! We'll also be using Gradio's chat interface." 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": null, 21 | "id": "ba2779af-84ef-4227-9e9e-6eaf0df87e77", 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "# imports\n", 26 | "\n", 27 | "import os\n", 28 | "from dotenv import load_dotenv\n", 29 | "import gradio as gr" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "id": "58c85082-e417-4708-9efe-81a5d55d1424", 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# price is a factor for our company, so we're going to use a low cost model\n", 40 | "\n", 41 | "MODEL = \"gpt-4o-mini\"" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "id": "ee78efcb-60fe-449e-a944-40bab26261af", 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "# Load environment variables in a file called .env\n", 52 | "\n", 53 | "load_dotenv()\n", 54 | "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": null, 60 | "id": "730711a9-6ffe-4eee-8f48-d6cfb7314905", 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "# Read in the document using LangChain's loaders\n", 65 | "\n", 66 | "from langchain.document_loaders import TextLoader\n", 67 | "text_loader = TextLoader('product.md')\n", 68 | "loaded_data = text_loader.load()\n", 69 | "\n", 70 | "# Split the document into chunks of 1000 characters, aiming to preserve paragraphs, and with some overlap between chunks\n", 71 | "\n", 72 | 
"from langchain.text_splitter import CharacterTextSplitter\n", 73 | "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", 74 | "chunks = text_splitter.split_documents(loaded_data)\n", 75 | "\n", 76 | "print(f\"The document was divided into {len(chunks)} chunks\")" 77 | ] 78 | }, 79 | { 80 | "cell_type": "markdown", 81 | "id": "77f7d2a6-ccfa-425b-a1c3-5e55b23bd013", 82 | "metadata": {}, 83 | "source": [ 84 | "## A sidenote on Embeddings, and \"Auto-Encoding LLMs\"\n", 85 | "\n", 86 | "We will be mapping each chunk of text into a Vector that represents the meaning of the text, known as an embedding.\n", 87 | "\n", 88 | "OpenAI offers a model to do this, which we will use by calling their API with some LangChain code.\n", 89 | "\n", 90 | "This model is an example of an \"Auto-Encoding LLM\" which generates an output given a complete input.\n", 91 | "It's different to all the other LLMs we've discussed today, which are known as \"Auto-Regressive LLMs\", and generate future tokens based only on past context.\n", 92 | "\n", 93 | "Another example of an Auto-Encoding LLMs is BERT from Google. In addition to embedding, Auto-encoding LLMs are often used for classification.\n", 94 | "\n", 95 | "More details in the resources." 
96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "id": "78998399-ac17-4e28-b15f-0b5f51e6ee23", 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "# Put the chunks of data into a Vector Store that associates a Vector Embedding with each chunk\n", 106 | "# FAISS (\"Facebook AI Similarity Search\") is a library from Meta for quickly finding similar documents using Vector Embeddings\n", 107 | "\n", 108 | "from langchain_openai import OpenAIEmbeddings\n", 109 | "from langchain.vectorstores import FAISS\n", 110 | "\n", 111 | "# A sidenote\n", 112 | "# The OpenAIEmbeddings class in LangChain uses the OpenAI API.\n", 113 | "# OpenAI provides an Embeddings model to turn text into embeddings\n", 114 | "# This is an example of an \"Auto-Encoding LLM\" like BERT\n", 115 | "# All other models in this class are \"Auto-Regressive\" and generate future tokens based on past context\n", 116 | "# We'll ask LangChain to use FAISS to create our VectorStore, using the OpenAIEmbeddings to generate embeddings\n", 117 | "\n", 118 | "embeddings = OpenAIEmbeddings()\n", 119 | "vectorstore = FAISS.from_documents(chunks, embedding=embeddings)" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "id": "ff2e7687-60d4-4920-a1d7-a34b9f70a250", 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "# Let's look at the vectors themselves for our Chunks\n", 130 | "\n", 131 | "vectors = vectorstore.index.reconstruct_n(0, vectorstore.index.ntotal)\n", 132 | "dimensions = vectorstore.index.d\n", 133 | "\n", 134 | "print(f\"There are {len(vectors)} vectors with {dimensions:,} dimensions in the vector store\")" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "id": "b0d45462-a818-441c-b010-b85b32bcf618", 140 | "metadata": {}, 141 | "source": [ 142 | "## Visualizing the Vector Store\n", 143 | "\n", 144 | "Let's take a minute to look at the documents and their embedding vectors to see what's
going on." 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "id": "427149d5-e5d8-4abd-bb6f-7ef0333cca21", 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "import matplotlib.pyplot as plt\n", 155 | "from sklearn.manifold import TSNE\n", 156 | "import numpy as np\n", 157 | "\n", 158 | "# Reduce the dimensionality of the vectors using t-SNE (\"t-distributed Stochastic Neighbor Embedding\")\n", 159 | "perplexity_value = min(30, len(vectors) - 1)\n", 160 | "tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity_value)\n", 161 | "reduced_vectors = tsne.fit_transform(vectors)\n", 162 | "\n", 163 | "# Plot the reduced vectors using Matplotlib\n", 164 | "plt.figure(figsize=(10, 10))\n", 165 | "plt.scatter(reduced_vectors[:, 0], reduced_vectors[:, 1], s=10)\n", 166 | "\n", 167 | "# Uncomment the next lines to see the text by each point\n", 168 | "for i, txt in enumerate(chunks):\n", 169 | " plt.annotate(txt.page_content[:30], (reduced_vectors[i, 0], reduced_vectors[i, 1]))\n", 170 | "\n", 171 | "plt.title(\"FAISS Vector Store Visualization with t-SNE\")\n", 172 | "plt.xlabel(\"Dimension 1\")\n", 173 | "plt.ylabel(\"Dimension 2\")\n", 174 | "plt.show()" 175 | ] 176 | }, 177 | { 178 | "cell_type": "markdown", 179 | "id": "9468860b-86a2-41df-af01-b2400cc985be", 180 | "metadata": {}, 181 | "source": [ 182 | "## Time to use LangChain to bring it all together" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": null, 188 | "id": "129c7d1e-0094-4479-9459-f9360b95f244", 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [ 192 | "from langchain_openai import ChatOpenAI\n", 193 | "from langchain.memory import ConversationBufferMemory\n", 194 | "from langchain.chains import ConversationalRetrievalChain\n", 195 | "\n", 196 | "# create a new Chat with OpenAI\n", 197 | "llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", 198 | "\n", 199 | "# set up the conversation memory for the 
chat\n", 200 | "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", 201 | "\n", 202 | "# putting it together: set up the conversation chain with the GPT 3.5 LLM, the vector store and memory\n", 203 | "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": null, 209 | "id": "968e7bf2-e862-4679-a11f-6c1efb6ec8ca", 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "# Let's try a simple question\n", 214 | "\n", 215 | "query = \"Please explain what WealthAI is in a couple of sentences\"\n", 216 | "result = conversation_chain.invoke({\"question\": query})\n", 217 | "answer = result[\"answer\"]\n", 218 | "print(answer)" 219 | ] 220 | }, 221 | { 222 | "cell_type": "markdown", 223 | "id": "bbbcb659-13ce-47ab-8a5e-01b930494964", 224 | "metadata": {}, 225 | "source": [ 226 | "## Now we will bring this up in Gradio using the Chat interface -\n", 227 | "\n", 228 | "A quick and easy way to prototype a chat with an LLM" 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "id": "c3536590-85c7-4155-bd87-ae78a1467670", 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "# Wrapping that in a function\n", 239 | "\n", 240 | "def expert(question, history):\n", 241 | " result = conversation_chain.invoke({\"question\": question})\n", 242 | " return result[\"answer\"]" 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "id": "b252d8c1-61a8-406d-b57a-8f708a62b014", 249 | "metadata": {}, 250 | "outputs": [], 251 | "source": [ 252 | "# And in Gradio:\n", 253 | "\n", 254 | "view = gr.ChatInterface(expert).launch()" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": null, 260 | "id": "b55e9abb-e1da-46c5-acba-911868aee329", 261 | "metadata": {}, 262 | "outputs": [], 263 | "source": [ 264 
| "from langchain_core.callbacks import StdOutCallbackHandler\n", 265 | "\n", 266 | "# Create the conversation\n", 267 | "llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", 268 | "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", 269 | "conversation_chain = ConversationalRetrievalChain.from_llm(\n", 270 | " llm=llm, \n", 271 | " retriever=vectorstore.as_retriever(), \n", 272 | " memory=memory,\n", 273 | " callbacks=[StdOutCallbackHandler()]\n", 274 | ")\n", 275 | "\n", 276 | "# Try it out\n", 277 | "query = \"Does WealthAI offer tax advice\"\n", 278 | "result = conversation_chain.invoke({\"question\": query})\n", 279 | "answer = result[\"answer\"]\n", 280 | "print(\"\\nAnswer:\", answer)" 281 | ] 282 | }, 283 | { 284 | "cell_type": "markdown", 285 | "id": "644753e7-17f3-4999-a37a-b6aebf1e4579", 286 | "metadata": {}, 287 | "source": [ 288 | "# Exercises\n", 289 | "\n", 290 | "Break this example by adding information at the bottom of the product documentation with Tax information so that the wrong chunk is provided to the model and it answers the question wrongly.\n", 291 | "\n", 292 | "Then find a way to fix the break." 
293 | ] 294 | } 295 | ], 296 | "metadata": { 297 | "kernelspec": { 298 | "display_name": "Python 3 (ipykernel)", 299 | "language": "python", 300 | "name": "python3" 301 | }, 302 | "language_info": { 303 | "codemirror_mode": { 304 | "name": "ipython", 305 | "version": 3 306 | }, 307 | "file_extension": ".py", 308 | "mimetype": "text/x-python", 309 | "name": "python", 310 | "nbconvert_exporter": "python", 311 | "pygments_lexer": "ipython3", 312 | "version": "3.11.10" 313 | } 314 | }, 315 | "nbformat": 4, 316 | "nbformat_minor": 5 317 | } 318 | -------------------------------------------------------------------------------- /project6 - simple RAG example with FAISS/product.md: -------------------------------------------------------------------------------- 1 | # WealthAI 2 | 3 | The world's first AI holistic financial manager - the trusted advisor and CoPilot for all your wealth planning. 4 | 5 | ### Introduction 6 | 7 | WealthAI is a new platform for managing your entire personal wealth, in one place. 8 | It's like having your own dedicated personal wealth planner, looking after your finances, monitoring your portfolio and tracking your savings and retirement. 9 | It's backed by a groundbreaking, proprietary Generative AI model that acts as your personal wealth CoPilot. 10 | 11 | ### Key features: 12 | 13 | 1. Single holistic view of your entire wealth, including liquid and illiquid assets 14 | 2. Aggregated bank account statements across your financial institutions 15 | 3. Tracking of 401k and pension plans 16 | 4. Incorporates your full financial picture, including brokerage accounts, and including loans and credit card debt 17 | 5. AI CoPilot to give you up to the minute advice on balancing your portfolio 18 | 19 | WealthAI connects to your financial institutions and provides you with a single, one-stop-shop for managing your wealth. 
20 | For property and other illiquid assets, you will provide the most recent valuations, and the platform will send you periodic reminders for reappraisals. 21 | 22 | ### What makes WealthAI unique? 23 | 24 | - WealthAI is integrated with over 300 financial institutions, ensuring a streamlined ability to bring in your account details and aggregate them in real time 25 | - WealthAI can provide holistic advice that takes into account your complete financial picture, including assessing concentration risks across your retirement funds and other portfolios 26 | - WealthAI includes an intelligent AI agent that acts as your personal financial planner, always working to optimize your wealth 27 | 28 | ### Add-ons 29 | 30 | For an additional cost of $39 per month, the following features can be included: 31 | 32 | - WealthAITax can provide tax planning advice to optimize the tax efficiency across your portfolio 33 | - WealthAICredit identifies ways to combine your credit cards and other forms of debt 34 | 35 | ## User Guide 36 | 37 | This section will help you get started and make the most of your personal financial planning CoPilot. Follow these step-by-step instructions to seamlessly manage your wealth and achieve your financial goals. 38 | 39 | ### Step 1: Sign Up and Set Up Your Account: 40 | 41 | Visit the WealthAI Website: Go to Wealth.AI and click on the "Sign Up" button. 42 | Create an Account: Enter your personal details, including your name, email address, and create a secure password. 43 | Verify Your Email: Check your email for a verification link from WealthAI and click on it to verify your account. 44 | Complete Your Profile: Provide additional information such as your age, income, financial goals, and risk tolerance. 45 | 46 | ### Step 2: Connect Your Financial Institutions 47 | 48 | Navigate to the Connections Page: Once logged in, go to the "Connections" tab on the dashboard.
49 | Link Your Accounts: Select your financial institutions from the list of over 300 supported providers. Enter your login credentials for each account you want to link. 50 | Authorize Access: Follow the prompts to authorize WealthAI to securely access your financial data. 51 | 52 | ### Step 3: Add Your Assets and Liabilities 53 | 54 | Enter Liquid Assets: Input details of your liquid assets, such as bank accounts, brokerage accounts, and retirement funds. 55 | Enter Illiquid Assets: Add information about your property, real estate, and other illiquid assets, including recent valuations. 56 | Record Liabilities: Include your loans, mortgages, and credit card debts to get a complete financial picture. 57 | 58 | ### Step 4: Customize Your Dashboard 59 | 60 | Personalize Your View: Customize your dashboard to display the information most relevant to you, such as asset allocation, net worth, and spending patterns. 61 | Set Alerts and Notifications: Configure alerts for important financial events, such as low balances, upcoming bills, or reappraisals of illiquid assets. 62 | 63 | ### Step 5: Use the AI CoPilot 64 | 65 | Access AI CoPilot: Click on the "AI CoPilot" tab on your dashboard to start interacting with your personal financial planner. 66 | Ask Questions: Type in any financial question, such as "How should I rebalance my portfolio?" or "What is the best way to save for retirement?" 67 | Get Advice: The AI CoPilot will provide tailored advice based on your entire financial picture, helping you make informed decisions. 68 | 69 | ### Step 6: Monitor and Review 70 | 71 | Regular Check-Ins: Log in regularly to review your financial status and track your progress towards your goals. 72 | Update Information: Keep your profile and account information up-to-date, especially when there are significant changes in your financial situation. 73 | Utilize Reports: Generate and review detailed financial reports to gain deeper insights into your wealth management. 
74 | 75 | ### Step 7: Seek Help and Support 76 | Help Center: Visit the WealthAI Help Center for FAQs, guides, and troubleshooting tips. 77 | Customer Support: Contact WealthAI’s customer support via email or live chat for personalized assistance; or simply ask your CoPilot. 78 | 79 | ## Safety and Security of Your Information is Paramount 80 | 81 | At WealthAI, we understand the importance of keeping your financial data safe and secure. We prioritize the protection of your personal and financial information by implementing the highest standards of security measures. Here’s how we ensure your data remains confidential and secure: 82 | 83 | 1. Bank-Grade Encryption: All data transmitted between your devices and WealthAI is encrypted using industry-standard SSL/TLS protocols. This ensures that your information is safe from interception by unauthorized parties. 84 | 85 | 2. Secure Data Storage: Your financial data is stored in secure, encrypted databases that are protected by advanced security protocols. Access to these databases is strictly controlled and monitored to prevent unauthorized access. 86 | 87 | 3. Multi-Factor Authentication (MFA): To further safeguard your account, WealthAI employs multi-factor authentication. This adds an extra layer of security by requiring additional verification steps beyond just your password. 88 | 89 | 4. No Third-Party Sharing: WealthAI values your privacy. We do not share your banking or financial information with any third party without your explicit consent. Your data is used solely for providing you with personalized financial advice and management. 90 | 91 | 5. User Consent for Trading Decisions: WealthAI acts as your financial CoPilot, providing you with advice and recommendations. However, no trading or investment decisions are executed without your express consent. You remain in full control of your financial decisions at all times. 92 | 93 | 6. 
Regular Security Audits: Our platform undergoes regular security audits and assessments by independent security experts to ensure that our systems are robust and up-to-date with the latest security practices. 94 | 95 | By prioritizing the safety and security of your information, WealthAI ensures that you can manage your wealth with confidence, knowing that your data is protected by state-of-the-art security measures. 96 | 97 | ## Your CoPilot Team 98 | 99 | At the heart of WealthAI is the CoPilot functionality, a revolutionary approach to personal financial planning. This isn't just one AI agent — it's a team of specialized Generative AI agents, each an expert in their domain, working together to achieve your financial goals. Coordinated by the WealthAI CoPilot Manager, these agents provide a seamless, comprehensive service akin to having a team of leading financial experts dedicated to your personal wealth 24/7. 100 | 101 | *Portfolio Analyst* 102 | 103 | The Portfolio Analyst agent continuously monitors your investment portfolio, assessing performance, risk, and diversification. It provides real-time insights and recommendations to ensure your investments align with your financial goals and risk tolerance. 104 | 105 | *Market Specialist* 106 | 107 | The Market Specialist agent keeps a pulse on the financial markets, analyzing trends, news, and economic indicators. It helps you stay informed about market movements and potential opportunities or risks that could impact your wealth. 108 | 109 | *Retirement Planner* 110 | 111 | The Retirement Planner agent focuses on your long-term financial security, analyzing your retirement accounts, pension plans, and projected future needs. It ensures your retirement strategy is on track and suggests adjustments to optimize your retirement outlook. 112 | 113 | *Savings Monitor* 114 | 115 | The Savings Monitor agent tracks your savings goals, ensuring you are on target to meet your short-term and long-term savings objectives. 
It offers tips and adjustments to keep you on track. 116 | 117 | Together, these agents form your WealthAI CoPilot Team, each bringing their specialized knowledge to ensure every aspect of your financial life is optimized. The *WealthAI CoPilot Manager* coordinates the efforts of these agents, providing a holistic and integrated approach to managing your wealth. This multi-agent system ensures you receive the most comprehensive and personalized financial advice possible, making WealthAI not just a tool, but your dedicated partner in achieving financial success. -------------------------------------------------------------------------------- /project7 - llamacpp local inference/llamacpp.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "1ef4de33-ff7a-4d59-bc0f-dee72df6c6d1", 6 | "metadata": {}, 7 | "source": [ 8 | "# Another way to call LLMs - directly on your box using Llama.cpp\n", 9 | "\n", 10 | "This notebook runs inference for models directly on your local box, using the open source C++ library llama.cpp." 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "5fcd7d28-148c-47b2-ad1b-35ce28557bd4", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "# First, install the llama-cpp library. 
Do some googling if you have problems with this install, or contact me\n", 21 | "\n", 22 | "!pip install llama-cpp-python" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "3b293860-cf1e-4c16-9bd3-852e62e9a402", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# If any problems with this import, some investigating may be required..\n", 33 | "\n", 34 | "from llama_cpp import Llama" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "id": "49024a57-824c-40b4-8592-129dcb8ff116", 40 | "metadata": {}, 41 | "source": [ 42 | "## First, download a model locally\n", 43 | "\n", 44 | "The llama.cpp library uses its own model format called GGUF.\n", 45 | "\n", 46 | "Here are all the HuggingFace models that can be downloaded as a GGUF file:\n", 47 | "https://huggingface.co/models?library=gguf\n", 48 | "\n", 49 | "For this notebook, I downloaded 3 models to try. For each of these models, click download, and move the file from your downloads folder into the `model_cache` folder in this directory (which is .gitignored).\n", 50 | "\n", 51 | "First, this medium sized version of Microsoft's Phi-3:\n", 52 | "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf/blob/main/Phi-3-mini-4k-instruct-q4.gguf\n", 53 | "\n", 54 | "Then, this version of Qwen2:\n", 55 | "https://huggingface.co/Qwen/Qwen2-7B-Instruct-GGUF/blob/main/qwen2-7b-instruct-q4_k_m.gguf\n", 56 | "\n", 57 | "Finally, I chose the medium sized version of StarCoder2 for some coding inference.\n", 58 | "https://huggingface.co/second-state/StarCoder2-3B-GGUF/blob/main/starcoder2-3b-Q4_K_M.gguf" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "id": "bf2f7d54-ea6c-4572-8bd5-3b5b022c93e4", 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "# Here is where my GGUF files are located\n", 69 | "\n", 70 | "phi3_model_path = \"model_cache/Phi-3-mini-4k-instruct-q4.gguf\"\n", 71 | "qwen2_model_path = 
\"model_cache/qwen2-7b-instruct-q4_k_m.gguf\"\n", 72 | "starcoder2_model_path = \"model_cache/starcoder2-3b-Q4_K_M.gguf\"" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": null, 78 | "id": "05d981d8-42dd-4e1d-afa0-b598b2d24185", 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "# Now use llama.cpp to create the models for Phi 3, Qwen2 and Starcoder2\n", 83 | "\n", 84 | "phi3 = Llama(model_path=phi3_model_path, n_ctx=300)\n", 85 | "qwen2 = Llama(model_path=qwen2_model_path, n_ctx=300)\n", 86 | "starcoder2 = Llama(model_path=starcoder2_model_path, n_ctx=300)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "id": "599ecb28-804d-4807-b0a9-5d15e1e1a430", 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "# phi3 tell us a joke! Remember this is a tiny model!\n", 97 | "# The prompt has some special characters in it - we'll cover this shortly\n", 98 | "\n", 99 | "prompt = \"\"\"<|user|>\n", 100 | "Tell a light joke for a room full of data scientists<|end|>\n", 101 | "<|assistant|>\"\"\"\n", 102 | "\n", 103 | "response = phi3(prompt, max_tokens=200, temperature=1, echo=True, stream=True)\n", 104 | "for chunk in response:\n", 105 | " print(chunk[\"choices\"][0][\"text\"], end='')" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "id": "128e5cc7-76a1-4056-a3d0-7d5ba1b5077d", 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "# qwen2 tell us a joke!\n", 116 | "\n", 117 | "prompt = \"\"\"<|im_start|>system\n", 118 | "You are a helpful assistant.<|im_end|>\n", 119 | "<|im_start|>user\n", 120 | "Tell a light joke for a room full of data scientists<|im_end|>\n", 121 | "<|im_start|>assistant\"\"\"\n", 122 | "\n", 123 | "response = qwen2(prompt, max_tokens=200, temperature=1, echo=True, stream=True)\n", 124 | "for chunk in response:\n", 125 | " print(chunk[\"choices\"][0][\"text\"], end='')" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 
130 | "execution_count": null, 131 | "id": "adca9c0b-4b4e-4457-9f48-03ea43a1755e", 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [ 135 | "# OK enough with the jokes - starcoder2 please write a function for us\n", 136 | "\n", 137 | "prompt = \"def hello_world():\\n\"\n", 138 | "response = starcoder2(prompt, max_tokens=100, temperature=1, echo=True, stream=True)\n", 139 | "for chunk in response:\n", 140 | " print(chunk[\"choices\"][0][\"text\"], end='')" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "id": "fb62a609-e1d0-4395-a89a-f3ab5ce89761", 146 | "metadata": {}, 147 | "source": [ 148 | "## Finally: to try the other approach for direct inference: on a cloud box with a GPU\n", 149 | "### Using Hugging Face Hub and Transformers library\n", 150 | "\n", 151 | "Visit this Google Colab notebook:\n", 152 | "https://colab.research.google.com/drive/1CRgX6RVqnWZDexXLACbq91pX2I7O7Swu?usp=sharing\n" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "id": "b37bdf1c-7314-4bbb-9939-155d9cb85bb6", 159 | "metadata": {}, 160 | "outputs": [], 161 | "source": [] 162 | } 163 | ], 164 | "metadata": { 165 | "kernelspec": { 166 | "display_name": "Python 3 (ipykernel)", 167 | "language": "python", 168 | "name": "python3" 169 | }, 170 | "language_info": { 171 | "codemirror_mode": { 172 | "name": "ipython", 173 | "version": 3 174 | }, 175 | "file_extension": ".py", 176 | "mimetype": "text/x-python", 177 | "name": "python", 178 | "nbconvert_exporter": "python", 179 | "pygments_lexer": "ipython3", 180 | "version": "3.11.10" 181 | } 182 | }, 183 | "nbformat": 4, 184 | "nbformat_minor": 5 185 | } 186 | -------------------------------------------------------------------------------- /project8 - connect4/board.py: -------------------------------------------------------------------------------- 1 | from board_view import to_svg 2 | 3 | RED = 1 4 | YELLOW = -1 5 | EMPTY = 0 6 | show = {EMPTY:"⚪️", RED: "🔴", YELLOW: "🟡"} 7 | pieces 
# Board state and rules for Connect 4 (7 columns x 6 rows).

RED = 1
YELLOW = -1
EMPTY = 0

# Display mappings for the three cell states.
show = {EMPTY: "⚪️", RED: "🔴", YELLOW: "🟡"}
pieces = {EMPTY: "", RED: "red", YELLOW: "yellow"}
simple = {EMPTY: ".", RED: "R", YELLOW: "Y"}
cols = "ABCDEFG"

class Board:
    """A Connect 4 board: 6 rows x 7 columns plus game-state flags.

    cells[y][x] holds EMPTY, RED or YELLOW, with y=0 as the BOTTOM row.
    `player` is the side to move; `winner`, `draw` and `forfeit` record
    how the game ended, if it has.
    """

    def __init__(self):
        # 6 rows of 7 cells, all empty; RED always moves first.
        self.cells = [[0 for _ in range(7)] for _ in range(6)]
        self.player = RED
        self.winner = EMPTY
        self.draw = False
        self.forfeit = False
        # Coordinates of the most recent move (-1, -1 before any move).
        self.latest_x, self.latest_y = -1, -1

    def __repr__(self):
        # Render the top row first, so flip the y axis (cells store bottom-up).
        result = ""
        for y in range(6):
            for x in range(7):
                result += show[self.cells[5-y][x]]
            result += "\n"
        result += "\n" + self.message()
        return result

    def message(self):
        """Return a one-line status: forfeit win, win, draw, or side to move."""
        if self.winner and self.forfeit:
            return f"{show[self.winner]} wins after an illegal move by {show[-1*self.winner]}\n"
        elif self.winner:
            return f"{show[self.winner]} wins\n"
        elif self.draw:
            return "The game is a draw\n"
        else:
            return f"{show[self.player]} to play\n"

    def html(self):
        """Return the board rendered as a small HTML fragment."""
        # NOTE(review): the original HTML string literals were lost in
        # extraction (the tags were stripped); reconstructed as a plain
        # <div> with <br> line breaks — confirm exact markup/styling
        # against the original repository.
        result = '<div>'
        result += self.__repr__().replace("\n", "<br>")
        result += '</div>'
        return result

    def svg(self):
        """Convert the board state to an SVG representation"""
        return to_svg(self)

    def json(self):
        """Return a JSON-style string describing the board row by row (top first)."""
        result = "{\n"
        result += ' "Column names": ["A", "B", "C", "D", "E", "F", "G"],\n'
        for y in range(6):
            result += f' "Row {6-y}": ['
            for x in range(7):
                result += f'"{pieces[self.cells[5-y][x]]}", '
            # Drop the trailing ", " and close the row.
            result = result[:-2] + '],\n'
        result = result[:-2]+'\n}'
        return result

    def alternative(self):
        """Return a compact text rendering: column header then an R/Y/. grid."""
        result = "ABCDEFG\n"
        for y in range(6):
            for x in range(7):
                result += simple[self.cells[5-y][x]]
            result += "\n"
        return result

    def height(self, x):
        """Number of pieces stacked in column x (0..6)."""
        height = 0
        while height<6 and self.cells[height][x] != EMPTY:
            height += 1
        return height

    def legal_moves(self):
        """Column letters that still have room for a piece."""
        return [cols[x] for x in range(7) if self.height(x)<6]

    def illegal_moves(self):
        """Column letters whose column is full."""
        return [cols[x] for x in range(7) if self.height(x)==6]

    def winning_line(self, x, y, dx, dy):
        """Return the color at (x, y) if it starts a 4-in-a-row in
        direction (dx, dy), else EMPTY (also EMPTY if the cell is empty,
        since EMPTY itself is falsy and matches nothing useful)."""
        color = self.cells[y][x]
        for pointer in range(1, 4):
            xp = x + dx * pointer
            yp = y + dy * pointer
            if not (0 <= xp <= 6 and 0 <= yp <= 5) or self.cells[yp][xp] != color:
                return EMPTY
        return color

    def winning_cell(self, x, y):
        """Check the four canonical directions starting at (x, y); scanning
        every cell with these four (up, up-right, right, down-right) covers
        all eight orientations."""
        for dx, dy in ((0, 1), (1, 1), (1, 0), (1, -1)):
            if winner := self.winning_line(x, y, dx, dy):
                return winner
        return EMPTY

    def wins(self):
        """Return RED or YELLOW if either side has four in a row, else EMPTY."""
        for y in range(6):
            for x in range(7):
                if winner := self.winning_cell(x, y):
                    return winner
        return EMPTY

    def move(self, x):
        """Drop the current player's piece into column x (0-6) and update state.

        Assumes the column has room; callers should validate via legal_moves().
        Returns self for chaining.
        """
        y = self.height(x)
        self.cells[y][x] = self.player
        self.latest_x, self.latest_y = x, y
        if winner := self.wins():
            self.winner = winner
        elif not self.legal_moves():
            # BUG FIX: was `not self.legal_moves` — a bound method object,
            # always truthy — so a full board was never detected as a draw.
            self.draw = True
        else:
            self.player = -1 * self.player
        return self

    def is_active(self):
        """True while the game is still in progress (no winner, no draw)."""
        return not self.winner and not self.draw
-------------------------------------------------------------------------------- /project8 - connect4/board_view.py: -------------------------------------------------------------------------------- 1 | RED = 1 2 | YELLOW = -1 3 | EMPTY = 0 4 | 5 | def to_svg(board): 6 | """Convert the board state to an SVG representation""" 7 | svg = ''' 8 |
9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | ''' 29 | # Add the holes to the mask 30 | svg += ''.join(f''' 31 | 37 | ''' 38 | for y in range(6) 39 | for x, cell in enumerate(board.cells[5-y]) 40 | ) 41 | 42 | svg += ''' 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | ''' 51 | 52 | # Add pieces 53 | svg += ''.join(f''' 54 | 71 | 83 | ''' 84 | for y in range(6) 85 | for x, cell in enumerate(board.cells[5-y]) 86 | if cell != EMPTY 87 | ) 88 | 89 | svg += ''' 90 | 91 | 92 | 93 | 94 | 95 | ''' 96 | 97 | # Add hole borders on top 98 | svg += ''.join(f''' 99 | 107 | ''' 108 | for y in range(6) 109 | for x, cell in enumerate(board.cells[5-y]) 110 | ) 111 | 112 | svg += ''' 113 | 114 |
115 | 131 | ''' 132 | return svg -------------------------------------------------------------------------------- /project8 - connect4/c4.py: -------------------------------------------------------------------------------- 1 | from game import Game 2 | from board import RED, YELLOW 3 | from llm import LLM 4 | import gradio as gr 5 | 6 | all_model_names = LLM.all_model_names() 7 | 8 | css = "footer{display:none !important}" 9 | 10 | js = """ 11 | function refresh() { 12 | const url = new URL(window.location); 13 | 14 | if (url.searchParams.get('__theme') !== 'dark') { 15 | url.searchParams.set('__theme', 'dark'); 16 | window.location.href = url.href; 17 | } 18 | } 19 | """ 20 | 21 | def message_html(game): 22 | return f'
{game.board.message()}
' 23 | 24 | def load_callback(red_llm, yellow_llm): 25 | game = Game(red_llm, yellow_llm) 26 | enabled = gr.Button(interactive=True) 27 | message = message_html(game) 28 | return game, game.board.svg(), message, "", "", enabled, enabled, enabled 29 | 30 | def move_callback(game): 31 | game.move() 32 | message = message_html(game) 33 | if_active = gr.Button(interactive=game.board.is_active()) 34 | return game, game.board.svg(), message, game.thoughts(RED), game.thoughts(YELLOW), if_active, if_active 35 | 36 | def run_callback(game): 37 | enabled = gr.Button(interactive=True) 38 | disabled = gr.Button(interactive=False) 39 | message = message_html(game) 40 | yield game, game.board.svg(), message, game.thoughts(RED), game.thoughts(YELLOW), disabled, disabled, disabled 41 | while game.board.is_active(): 42 | game.move() 43 | message = message_html(game) 44 | yield game, game.board.svg(), message, game.thoughts(RED), game.thoughts(YELLOW), disabled, disabled, disabled 45 | yield game, game.board.svg(), message, game.thoughts(RED), game.thoughts(YELLOW), disabled, disabled, enabled 46 | 47 | def model_callback(player_name, game, new_model_name): 48 | player = game.players[player_name] 49 | player.switch_model(new_model_name) 50 | return game 51 | 52 | def red_model_callback(game, new_model_name): 53 | return model_callback(RED, game, new_model_name) 54 | 55 | def yellow_model_callback(game, new_model_name): 56 | return model_callback(YELLOW, game, new_model_name) 57 | 58 | def player_section(name, default): 59 | with gr.Row(): 60 | gr.Markdown(f'
{name} Player
') 61 | with gr.Row(): 62 | dropdown = gr.Dropdown(all_model_names, value=default, label="LLM", interactive=True) 63 | with gr.Row(): 64 | gr.Markdown(f'
Inner thoughts
') 65 | with gr.Row(): 66 | thoughts = gr.Markdown(label="Thoughts") 67 | return thoughts, dropdown 68 | 69 | with gr.Blocks(title="C4 Battle", css=css, js=js, theme=gr.themes.Default(primary_hue="sky")) as blocks: 70 | 71 | game = gr.State() 72 | 73 | with gr.Row(): 74 | gr.Markdown('
Four-in-a-row LLM Showdown
') 75 | with gr.Row(): 76 | with gr.Column(scale=1): 77 | red_thoughts, red_dropdown = player_section("Red", "gpt-4o") 78 | with gr.Column(scale=2): 79 | with gr.Row(): 80 | message = gr.Markdown('
The Board
') 81 | with gr.Row(): 82 | board_display = gr.HTML() 83 | with gr.Row(): 84 | with gr.Column(scale=1): 85 | move_button = gr.Button("Next move") 86 | with gr.Column(scale=1): 87 | run_button = gr.Button("Run game", variant="primary") 88 | with gr.Column(scale=1): 89 | reset_button = gr.Button("Start Over", variant="stop") 90 | with gr.Column(scale=1): 91 | yellow_thoughts, yellow_dropdown = player_section("Yellow", "claude-3-5-sonnet-latest") 92 | 93 | 94 | blocks.load(load_callback, inputs=[red_dropdown, yellow_dropdown], outputs=[game, board_display, message, red_thoughts, yellow_thoughts, move_button, run_button, reset_button]) 95 | move_button.click(move_callback, inputs=[game], outputs=[game, board_display, message, red_thoughts, yellow_thoughts, move_button, run_button]) 96 | red_dropdown.change(red_model_callback, inputs=[game, red_dropdown], outputs=[game]) 97 | yellow_dropdown.change(yellow_model_callback, inputs=[game, yellow_dropdown], outputs=[game]) 98 | run_button.click(run_callback, inputs=[game], outputs=[game, board_display, message, red_thoughts, yellow_thoughts, move_button, run_button, reset_button]) 99 | reset_button.click(load_callback, inputs=[red_dropdown, yellow_dropdown], outputs=[game, board_display, message, red_thoughts, yellow_thoughts, move_button, run_button, reset_button]) 100 | 101 | 102 | blocks.launch(share=False, inbrowser=True) 103 | 104 | -------------------------------------------------------------------------------- /project8 - connect4/game.py: -------------------------------------------------------------------------------- 1 | from board import Board, RED, YELLOW, EMPTY, pieces 2 | from player import Player 3 | from dotenv import load_dotenv 4 | 5 | class Game: 6 | 7 | def __init__(self, model_red, model_yellow): 8 | load_dotenv(override=True) 9 | self.board = Board() 10 | self.players = {RED: Player(model_red, RED), YELLOW: Player(model_yellow, YELLOW)} 11 | 12 | def move(self): 13 | 
self.players[self.board.player].move(self.board) 14 | 15 | def is_active(self): 16 | return self.board.is_active() 17 | 18 | def thoughts(self, player): 19 | return self.players[player].thoughts() 20 | 21 | def run(self): 22 | while self.is_active(): 23 | self.move() 24 | print(self.board) -------------------------------------------------------------------------------- /project8 - connect4/player.py: -------------------------------------------------------------------------------- 1 | from llm import LLM 2 | from board import pieces, cols 3 | import json 4 | import random 5 | 6 | class Player: 7 | 8 | def __init__(self, model, color): 9 | self.color = color 10 | self.model = model 11 | self.llm = LLM.create(self.model) 12 | self.evaluation = "" 13 | self.threats = "" 14 | self.opportunities = "" 15 | self.strategy = "" 16 | 17 | def system(self, board, legal_moves, illegal_moves): 18 | return f"""You are playing the board game Connect 4. 19 | Players take turns to drop counters into one of 7 columns A, B, C, D, E, F, G. 20 | The winner is the first player to get 4 counters in a row in any direction. 21 | You are {pieces[self.color]} and your opponent is {pieces[self.color * -1]}. 22 | You must pick a column for your move. You must pick one of the following legal moves: {legal_moves}. 23 | You should respond in JSON according to this spec: 24 | 25 | {{ 26 | "evaluation": "my assessment of the board", 27 | "threats": "any threats from my opponent that I should block", 28 | "opportunities": "my best chances to win", 29 | "strategy": "my thought process", 30 | "move_column": "one letter from this list of legal moves: {legal_moves}" 31 | }} 32 | 33 | You must pick one of these letters for your move_column: {legal_moves}{illegal_moves}""" 34 | 35 | def user(self, board, legal_moves, illegal_moves): 36 | return f"""It is your turn to make a move as {pieces[self.color]}. 
37 | Here is the current board, with row 1 at the bottom of the board: 38 | 39 | {board.json()} 40 | 41 | Here's another way of looking at the board visually, where R represents a red counter and Y for a yellow counter. 42 | 43 | {board.alternative()} 44 | 45 | Your final response should be only in JSON strictly according to this spec: 46 | 47 | {{ 48 | "evaluation": "my assessment of the board", 49 | "threats": "any threats from my opponent that I should block", 50 | "opportunities": "my best chances to win", 51 | "strategy": "my thought process", 52 | "move_column": "one of {legal_moves} which are the legal moves" 53 | }} 54 | 55 | For example, the following could be a response: 56 | 57 | {{ 58 | "evaluation": "the board is equally balanced but I have a slight advantage", 59 | "threats": "my opponent has a threat but I can block it", 60 | "opportunities": "I've developed several promising 3 in a row opportunities", 61 | "strategy": "I must first block my opponent, then I can continue to develop", 62 | "move_column": "{random.choice(board.legal_moves())}" 63 | }} 64 | 65 | And this is another example of a well formed response: 66 | 67 | {{ 68 | "evaluation": "although my opponent has more threats, I can win immediately", 69 | "threats": "my opponent has several threats", 70 | "opportunities": "I can immediately win the game by making a diagonal 4", 71 | "strategy": "I will take the winning move", 72 | "move_column": "{random.choice(board.legal_moves())}" 73 | }} 74 | 75 | 76 | Now make your decision. 
77 | You must pick one of these letters for your move_column: {legal_moves}{illegal_moves} 78 | """ 79 | 80 | def process_move(self, reply, board): 81 | print(reply) 82 | try: 83 | if len(reply)==3 and reply[0]=="{" and reply[2]=="}": 84 | reply = f'{{"move_column": "{reply[1]}"}}' 85 | result = json.loads(reply) 86 | move = result.get("move_column") or "missing" 87 | move = move.upper() 88 | col = cols.find(move) 89 | if not (0 <= col <= 6) or board.height(col)==6: 90 | raise ValueError("Illegal move") 91 | board.move(col) 92 | self.evaluation = result.get("evaluation") or "" 93 | self.threats = result.get("threats") or "" 94 | self.opportunities = result.get("opportunities") or "" 95 | self.strategy = result.get("strategy") or "" 96 | except Exception as e: 97 | print(f"Exception {e}") 98 | board.forfeit = True 99 | board.winner = -1 * board.player 100 | 101 | def move(self, board): 102 | legal_moves = ", ".join(board.legal_moves()) 103 | if illegal := board.illegal_moves(): 104 | illegal_moves = "\nYou must NOT make any of these moves which are ILLEGAL: " + ", ".join(illegal) 105 | else: 106 | illegal_moves = "" 107 | system = self.system(board, legal_moves, illegal_moves) 108 | user = self.user(board, legal_moves, illegal_moves) 109 | reply = self.llm.send(system, user) 110 | self.process_move(reply, board) 111 | 112 | def thoughts(self): 113 | result = '

' 114 | result += f'Evaluation:
{self.evaluation}

' 115 | result += f'Threats:
{self.threats}

' 116 | result += f'Opportunities:
{self.opportunities}

' 117 | result += f'Strategy:
{self.strategy}' 118 | result += '
' 119 | return result 120 | 121 | def switch_model(self, new_model_name): 122 | self.llm = LLM.create(new_model_name) -------------------------------------------------------------------------------- /project8 - connect4/prototype.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e6da6609-8887-416f-ba9a-d08892fb5cee", 6 | "metadata": {}, 7 | "source": [ 8 | "# Prototype Connect Four battle\n", 9 | "\n", 10 | "Pit LLMs against each other in a game of Connect Four" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "7c154063-8f36-426f-ae54-346acc5ba58d", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from openai import OpenAI\n", 21 | "from dotenv import load_dotenv\n", 22 | "import json" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "7446303a-d699-4816-b8dd-52da09add974", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "load_dotenv(override=True)" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "id": "28740858-38f3-4aaf-9185-ec636a45ba75", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "RED = 1\n", 43 | "YELLOW = -1\n", 44 | "EMPTY = 0\n", 45 | "show = {EMPTY:\"⚪️\", RED: \"🔴\", YELLOW: \"🟡\"}\n", 46 | "pieces = {EMPTY: \"empty\", RED: \"red\", YELLOW: \"yellow\"}\n", 47 | "cols = \"ABCDEFG\"" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "668559eb-3542-4320-8029-eb20fda90fde", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "class Board:\n", 58 | "\n", 59 | " def __init__(self):\n", 60 | " self.cells = [[0 for _ in range(7)] for _ in range(6)]\n", 61 | " self.player = RED\n", 62 | " self.winner = EMPTY\n", 63 | "\n", 64 | " def __repr__(self):\n", 65 | " result = \"\"\n", 66 | " for y in range(6):\n", 67 | " for x in range(7):\n", 68 | " result 
+= show[self.cells[5-y][x]]\n", 69 | " result += \"\\n\"\n", 70 | " if self.winner:\n", 71 | " result += f\"\\n{show[self.winner]} wins\\n\"\n", 72 | " else:\n", 73 | " result += f\"\\n{show[self.player]} to play\\n\"\n", 74 | " return result\n", 75 | "\n", 76 | " def json(self):\n", 77 | " result = \"{\\n\"\n", 78 | " result += ' \"Column names\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"],\\n'\n", 79 | " for y in range(6):\n", 80 | " result += f' \"Row {6-y}\": [' \n", 81 | " for x in range(7):\n", 82 | " result += f'\"{pieces[self.cells[5-y][x]]}\", '\n", 83 | " result = result[:-2] + '],\\n'\n", 84 | " result = result[:-2]+'\\n}'\n", 85 | " return result " 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "ec586c38-f396-4cb2-95ef-c8e6d067d19b", 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "Board()" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "id": "2a17c172-f711-4ebd-9aae-3f1f35665433", 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "print(Board().json())" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "id": "c5ad4649-0a48-47b1-bb04-ee28fd6808b2", 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "def height(self, x):\n", 116 | " height = 0\n", 117 | " while height<6 and self.cells[height][x] != EMPTY:\n", 118 | " height += 1\n", 119 | " return height\n", 120 | "\n", 121 | "def legal_moves(self):\n", 122 | " return [cols[x] for x in range(7) if self.height(x)<6]\n", 123 | "\n", 124 | "def move(self, x):\n", 125 | " self.cells[self.height(x)][x] = self.player\n", 126 | " self.player = -1 * self.player\n", 127 | "\n", 128 | "Board.height = height\n", 129 | "Board.legal_moves = legal_moves\n", 130 | "Board.move = move" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "id": "3da4a6df-0217-492b-a050-9488849ff2a3", 137 | "metadata": {}, 138 | 
"outputs": [], 139 | "source": [ 140 | "b = Board()\n", 141 | "b.move(3)\n", 142 | "b.move(3)\n", 143 | "b.move(2)\n", 144 | "b" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "id": "7f5d0b85-aaf8-4d55-bf18-8602dade4cf3", 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "b.legal_moves()" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": null, 160 | "id": "9dffa507-e865-4379-a765-cd27032ff657", 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "def winning_line(self, x, y, dx, dy):\n", 165 | " color = self.cells[y][x]\n", 166 | " for pointer in range(1, 4):\n", 167 | " xp = x + dx * pointer\n", 168 | " yp = y + dy * pointer\n", 169 | " if not (0 <= xp <= 6 and 0 <= yp <= 5) or self.cells[yp][xp] != color:\n", 170 | " return EMPTY\n", 171 | " return color\n", 172 | "\n", 173 | "def winning_cell(self, x, y):\n", 174 | " for dx, dy in ((0, 1), (1, 1), (1, 0), (1, -1)):\n", 175 | " if winner := self.winning_line(x, y, dx, dy):\n", 176 | " return winner\n", 177 | " return EMPTY\n", 178 | "\n", 179 | "def wins(self):\n", 180 | " for y in range(6):\n", 181 | " for x in range(7):\n", 182 | " if winner := self.winning_cell(x, y):\n", 183 | " return winner\n", 184 | " return EMPTY\n", 185 | "\n", 186 | "def move(self, x):\n", 187 | " self.cells[self.height(x)][x] = self.player\n", 188 | " if winner := self.wins():\n", 189 | " self.winner = winner\n", 190 | " else:\n", 191 | " self.player = -1 * self.player\n", 192 | " return self\n", 193 | "\n", 194 | "Board.winning_line = winning_line\n", 195 | "Board.winning_cell = winning_cell\n", 196 | "Board.wins = wins\n", 197 | "Board.move = move" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": null, 203 | "id": "65dc68e5-54a3-4e9f-8cee-008bbe313b20", 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [ 207 | "b = Board()\n", 208 | "b.move(2).move(3).move(2).move(3).move(2).move(3).move(2)" 209 | 
] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": null, 214 | "id": "29c68043-cd96-49aa-9a30-ad1ac7f9c00b", 215 | "metadata": {}, 216 | "outputs": [], 217 | "source": [ 218 | "class Player:\n", 219 | "\n", 220 | " def __init__(self, model, color):\n", 221 | " self.color = color\n", 222 | " self.model = model\n", 223 | " self.llm = OpenAI()\n", 224 | "\n", 225 | " def system(self, board):\n", 226 | " legal_moves = \", \".join(board.legal_moves())\n", 227 | " return f\"\"\"You are an expert player of the board game Connect 4.\n", 228 | "Players take turns to drop counters into one of 6 columns labelled A, B, C, D, E, F.\n", 229 | "The winner is the first player to get 4 coins in a row in a straight or diagonal line.\n", 230 | "You are playing with the {pieces[self.color]} coins.\n", 231 | "And your opponent is playing with the {pieces[self.color * -1]} coins.\n", 232 | "You will be presented with the board and asked to pick a column to drop your piece.\n", 233 | "You must pick one of the following legal moves: {legal_moves}. You must pick one of those letters.\n", 234 | "You should respond in JSON, and only in JSON, according to this spec:\n", 235 | "\n", 236 | "{{\n", 237 | " \"evaluation\": \"brief assessment of the board\",\n", 238 | " \"threats\": \"any threats from your opponent or weaknesses in your position\",\n", 239 | " \"opportunities\": \"any opportunities to gain the upper hand or strengths in your position\",\n", 240 | " \"strategy\": \"the thought process behind your next move\",\n", 241 | " \"move_column\": \"one letter from this list of legal moves: {legal_moves}\"\n", 242 | "}}\"\"\"\n", 243 | "\n", 244 | " def user(self, board):\n", 245 | " legal_moves = \", \".join(board.legal_moves())\n", 246 | " return f\"\"\"It is your turn to make a move as {pieces[self.color]}.\n", 247 | "The current board position is:\n", 248 | "\n", 249 | "{board.json()}\n", 250 | "\n", 251 | "Now with this in mind, make your decision. 
Respond only in JSON strictly according to this spec:\n", 252 | "\n", 253 | "{{\n", 254 | " \"evaluation\": \"brief assessment of the board\",\n", 255 | " \"threats\": \"any threats from your opponent or weaknesses in your position\",\n", 256 | " \"opportunities\": \"any opportunities to gain the upper hand or strengths in your position\",\n", 257 | " \"strategy\": \"the thought process behind your next move\",\n", 258 | " \"move_column\": \"one of {legal_moves} which are the legal moves\"\n", 259 | "}}\n", 260 | "\n", 261 | "You must pick one of these letters for your move_column: {legal_moves}\n", 262 | "\n", 263 | "\"\"\"\n", 264 | "\n", 265 | " def process_move(self, reply):\n", 266 | " print(reply)\n", 267 | " try:\n", 268 | " result = json.loads(reply)\n", 269 | " move = result.get(\"move_column\") or \"\"\n", 270 | " move = move.upper()\n", 271 | " col = cols.find(move)\n", 272 | " if not (0 <= col <= 6) or board.height(col)==6:\n", 273 | " raise ValueError(\"Illegal move\")\n", 274 | " board.move(col)\n", 275 | " except Exception as e:\n", 276 | " print(f\"Exception {e}\")\n", 277 | " board.winner = -1 * board.player\n", 278 | " \n", 279 | " \n", 280 | " def move(self, board):\n", 281 | " system = self.system(board)\n", 282 | " user = self.user(board)\n", 283 | " reply = self.llm.chat.completions.create(\n", 284 | " model=self.model,\n", 285 | " messages=[\n", 286 | " {\"role\": \"system\", \"content\": system},\n", 287 | " {\"role\": \"user\", \"content\": user}\n", 288 | " ]\n", 289 | " )\n", 290 | " self.process_move(reply.choices[0].message.content)" 291 | ] 292 | }, 293 | { 294 | "cell_type": "code", 295 | "execution_count": null, 296 | "id": "163e2af5-83c1-444d-8194-92e8b495c0af", 297 | "metadata": {}, 298 | "outputs": [], 299 | "source": [ 300 | "board = Board()\n", 301 | "red = Player(\"gpt-4o-mini\", RED)\n", 302 | "yellow = Player(\"gpt-4o-mini\", YELLOW)\n", 303 | "while not board.winner:\n", 304 | " red.move(board)\n", 305 | " print(board)\n", 
306 | " if not board.winner:\n", 307 | " yellow.move(board)\n", 308 | " print(board)" 309 | ] 310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "id": "9a3f6030-d6b9-4cc1-acb0-447c56873c19", 314 | "metadata": {}, 315 | "source": [ 316 | "## A fancier version\n", 317 | "\n", 318 | "Basically the same code, but a bit more organized.. and with a Gradio UI" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": null, 324 | "id": "71c72f43-24a5-4b83-a396-466a79e50d90", 325 | "metadata": {}, 326 | "outputs": [], 327 | "source": [ 328 | "!python c4.py" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": null, 334 | "id": "98d10ea7-aaf9-4264-bc91-258d5bef3029", 335 | "metadata": {}, 336 | "outputs": [], 337 | "source": [] 338 | } 339 | ], 340 | "metadata": { 341 | "kernelspec": { 342 | "display_name": "Python 3 (ipykernel)", 343 | "language": "python", 344 | "name": "python3" 345 | }, 346 | "language_info": { 347 | "codemirror_mode": { 348 | "name": "ipython", 349 | "version": 3 350 | }, 351 | "file_extension": ".py", 352 | "mimetype": "text/x-python", 353 | "name": "python", 354 | "nbconvert_exporter": "python", 355 | "pygments_lexer": "ipython3", 356 | "version": "3.11.11" 357 | } 358 | }, 359 | "nbformat": 4, 360 | "nbformat_minor": 5 361 | } 362 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | jupyterlab 3 | ipywidgets 4 | urllib3 5 | torch 6 | transformers 7 | accelerate 8 | sentencepiece 9 | bitsandbytes 10 | tqdm 11 | openai 12 | gradio 13 | langchain 14 | tiktoken 15 | faiss-cpu 16 | langchain-openai 17 | langchain_experimental 18 | langchain_chroma 19 | langchain[docarray] 20 | datasets 21 | matplotlib 22 | google.generativeai 23 | anthropic 24 | scikit-learn 25 | unstructured 26 | chromadb 27 | plotly 28 | jupyter-dash 29 | groq 
-------------------------------------------------------------------------------- /resources.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ed-donner/choose_llm/398f96625f3cae34600dda9d4ca32dfa3896cfd3/resources.jpg --------------------------------------------------------------------------------