├── .DS_Store ├── .gitignore ├── 1.llmops-project-1-chatgpt-docker ├── Dockerfile ├── Project_1_Open_AI_Assistant.ipynb ├── README.md ├── __pycache__ │ ├── config.cpython-311.pyc │ └── main.cpython-311.pyc ├── config.py ├── main.py ├── requirements.txt └── story.txt ├── 2.llmops-project-1-chatgpt-kubernetes ├── Dockerfile ├── Project_1_Open_AI_Assistant.ipynb ├── README.md ├── __pycache__ │ ├── config.cpython-311.pyc │ └── main.cpython-311.pyc ├── config.py ├── deploy.yaml ├── main.py ├── requirements.txt └── story.txt ├── 3.llmops-project-1-chatgpt-kubernetes-gke ├── Dockerfile ├── Project_1_Open_AI_Assistant.ipynb ├── README.md ├── __pycache__ │ ├── config.cpython-311.pyc │ └── main.cpython-311.pyc ├── config.py ├── deploy.yaml ├── main.py ├── requirements.txt └── story.txt ├── 4.ci-cd-automation-chatgpt ├── .github │ └── workflows │ │ └── actions.yaml ├── Dockerfile ├── Project_1_Open_AI_Assistant.ipynb ├── README.md ├── config.py ├── deploy.yaml ├── kustomization.yaml ├── main.py ├── requirements.txt └── story.txt ├── 5.llmops-project-2-huggingface ├── .DS_Store ├── .github │ └── workflows │ │ └── actions.yaml ├── Dockerfile ├── Project_2_GPT_Generator.ipynb ├── README.md ├── deploy.yaml ├── kustomization.yaml ├── main.py └── requirements.txt ├── 6.monitor-llm-production └── Monitoring_LLM_Models_in_Production_Langkit.ipynb ├── 7.ci-cd-aws-ec2 ├── .DS_Store ├── .github │ └── workflows │ │ └── actions.py ├── Dockerfile ├── README.md ├── app.py ├── requirements.txt └── tests.py ├── README.md ├── Working-with-GKE └── README.md ├── docker-quickstart ├── Dockerfile ├── README.md └── index.html ├── github-actions ├── README.md ├── first-github.yaml └── github-actions-demo.yaml ├── introducing-huggingface ├── 1-introduction-to-transformers.ipynb └── 2-text-classification.ipynb └── kubernetes-quickstart ├── .DS_Store ├── Dockerfile ├── README.md ├── cm.yaml ├── deployment.yaml ├── index.html ├── pod.yaml ├── rolling-update.yaml ├── secret.yaml └── svc-local.yaml /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/* 2 | */__pycache__/* 3 | .ipynb_checkpoints/* 4 | */.ipynb_checkpoints 5 | */.ipynb_checkpoints/* 6 | -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10 2 | COPY . . 
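# Note: `COPY . .` copies the whole build context (including __pycache__/ and the
# notebook) into the image. A .dockerignore with those entries would keep the image
# smaller, and copying requirements.txt before the rest of the code would let Docker
# cache the pip-install layer across code changes, e.g. (a sketch, not the build used here):
# COPY requirements.txt .
# RUN pip install -r requirements.txt
# COPY . .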
3 | RUN pip install -r requirements.txt 4 | EXPOSE 80 5 | ENTRYPOINT [ "python" ] 6 | CMD [ "main.py" ] -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/Project_1_Open_AI_Assistant.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "id": "yqyLUYg5sUhK" 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "!pip install openai" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": { 18 | "id": "cK0YPnzVsbhO" 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "from openai import OpenAI\n", 23 | "client = OpenAI(api_key = \"api-key\")\n", 24 | "client" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": { 31 | "id": "3k6yFCausbeG" 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "uploaded_file = client.files.create(\n", 36 | " file=open(\"story.txt\",'rb'),\n", 37 | " purpose='assistants'\n", 38 | ")" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": { 45 | "id": "e4vEodClsbbS" 46 | }, 47 | "outputs": [], 48 | "source": [ 49 | "uploaded_file" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "source": [ 55 | "# Create a vector store caled Story Database\n", 56 | "vector_store = client.beta.vector_stores.create(name=\"Story Data\")\n", 57 | "file_batch = client.beta.vector_stores.file_batches.upload_and_poll(\n", 58 | " vector_store_id=vector_store.id, files=[open(\"story.txt\",'rb')]\n", 59 | ")" 60 | ], 61 | "metadata": { 62 | "id": "Csnb6owgaiyg" 63 | }, 64 | "execution_count": null, 65 | "outputs": [] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "source": [ 70 | "print(file_batch.status)\n", 71 | "print(file_batch.file_counts)" 72 | ], 73 | "metadata": { 74 | "id": "DMOEFflDbFTd" 75 | }, 76 | "execution_count": null, 77 | "outputs": [] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "source": [ 82 | "assistant = client.beta.assistants.create(\n", 83 | " name=\"Story helper\",\n", 84 | " instructions=\"You are a motivator who answers the question based on the story file\",\n", 85 | " model=\"gpt-4o\",\n", 86 | " tools=[{\"type\": \"file_search\"}],\n", 87 | " tool_resources={\"file_search\": {\"vector_store_ids\": [vector_store.id]}}\n", 88 | ")" 89 | ], 90 | "metadata": { 91 | "id": "WO7WcWjfbN-c" 92 | }, 93 | "execution_count": null, 94 | "outputs": [] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "metadata": { 100 | "id": "v6BgrO7PsbVQ" 101 | }, 102 | "outputs": [], 103 | "source": [ 104 | "thread = client.beta.threads.create()\n", 105 | "thread" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "metadata": { 112 | "id": "CmCvLAZ5sbSI" 113 | }, 114 | "outputs": [], 115 | "source": [ 116 | "message = client.beta.threads.messages.create(\n", 117 | " thread_id=thread.id,\n", 118 | " role=\"user\",\n", 119 | " content=\"Who is the hero of the story?\"\n", 120 | ")" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "metadata": { 127 | "id": "BhWJG0VQsbPV" 128 | }, 129 | "outputs": [], 130 | "source": [ 131 | "message" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": null, 137 | "metadata": { 138 | "id": "HWceqG1qwRor" 139 | }, 140 | "outputs": [], 141 | "source": [ 142 | "assistant" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 
147 | "execution_count": null, 148 | "metadata": { 149 | "id": "n167t7_bvzWc" 150 | }, 151 | "outputs": [], 152 | "source": [ 153 | "run = client.beta.threads.runs.create(\n", 154 | " thread_id=thread.id,\n", 155 | " assistant_id=assistant.id\n", 156 | ")" 157 | ] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": null, 162 | "metadata": { 163 | "id": "xDFA3GpAwVs1" 164 | }, 165 | "outputs": [], 166 | "source": [ 167 | "run = client.beta.threads.runs.retrieve(\n", 168 | " thread_id=thread.id,\n", 169 | " run_id=run.id\n", 170 | ")" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": null, 176 | "metadata": { 177 | "id": "LTxbm44dwa9G" 178 | }, 179 | "outputs": [], 180 | "source": [ 181 | "run.status" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": null, 187 | "metadata": { 188 | "id": "kI8ita5dwcPA" 189 | }, 190 | "outputs": [], 191 | "source": [ 192 | "message" 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": null, 198 | "metadata": { 199 | "id": "k2gaGxZ8whye" 200 | }, 201 | "outputs": [], 202 | "source": [ 203 | "messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 204 | "messages" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": { 211 | "id": "bhIPBLQ4wvc2" 212 | }, 213 | "outputs": [], 214 | "source": [ 215 | "while True:\n", 216 | " run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)\n", 217 | " if run.status==\"completed\":\n", 218 | " messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 219 | " latest_message = messages.data[0]\n", 220 | " text = latest_message.content[0].text.value\n", 221 | " print(text)\n", 222 | " break;" 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": null, 228 | "metadata": { 229 | "id": "Y13Gor4ZxLe3" 230 | }, 231 | "outputs": [], 232 | "source": [] 233 | } 234 | ], 235 | "metadata": { 236 | "colab": { 237 | "provenance": [] 238 | }, 239 | "kernelspec": { 240 | "display_name": "Python 3", 241 | "name": "python3" 242 | }, 243 | "language_info": { 244 | "name": "python" 245 | } 246 | }, 247 | "nbformat": 4, 248 | "nbformat_minor": 0 249 | } -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/README.md: -------------------------------------------------------------------------------- 1 | # About the Repo 2 | This repo is part of the LLMOps course by Manifold AI Learning, 3 | Course link - https://www.manifoldailearning.in/courses/LLMOps-with-ChatGPT-Deploy-on-Production-65cb265ae4b086660d2836ae 4 | 5 | Reach the Instructor at - https://www.linkedin.com/in/nachiketh-murthy/ 6 | 7 | For any support reach out to : support@manifoldailearning.in 8 | 9 | # Help on FAST API 10 | 11 | ``` 12 | pip install "fastapi[all]" 13 | 14 | uvicorn main:app --reload 15 | ``` 16 | 17 | # Test with Postman 18 | 19 | URL - http://127.0.0.1:80/response 20 | (POST) 21 | 22 | ```json 23 | { 24 | "text": "Who is the hero of the story" 25 | } 26 | 27 | ``` 28 | 29 | # Docker Commands 30 | 31 | ``` 32 | docker build -t chatgpt-project1 . 
33 | docker run -d -p 8080:80 chatgpt-project1 34 | docker tag chatgpt-project1 yourusername/chatgpt-project1 35 | docker push yourusername/chatgpt-project1 36 | ``` -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/__pycache__/config.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/1.llmops-project-1-chatgpt-docker/__pycache__/config.cpython-311.pyc -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/__pycache__/main.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/1.llmops-project-1-chatgpt-docker/__pycache__/main.cpython-311.pyc -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/config.py: -------------------------------------------------------------------------------- 1 | api_key = "api-id" 2 | assistant_id = "asst_VaRmvfp30jPvLuVJgYzxGmXf" -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from openai import OpenAI 3 | import config 4 | from pydantic import BaseModel 5 | import uvicorn 6 | 7 | assistant_id = config.assistant_id 8 | api_key = config.api_key 9 | 10 | client = OpenAI(api_key=api_key) 11 | 12 | app = FastAPI() 13 | 14 | class Body(BaseModel): 15 | text: str 16 | 17 | # get, post, put, and delete 18 | 19 | @app.get("/") 20 | def welcome(): 21 | return {"message": "Welcome to ChatGPT AI Application"} 22 | 23 | @app.get("/home") 24 | def welcome(): 25 | return {"message": "welcome home"} 26 | 27 | @app.post("/dummy") 28 | def demo_function(data): 29 | return {"message": data} 30 | 31 | @app.post("/response") 32 | def generate(body: Body): 33 | prompt = body.text # user input 34 | thread = client.beta.threads.create() 35 | message = client.beta.threads.messages.create( 36 | thread_id=thread.id, 37 | role="user", 38 | content=prompt 39 | ) 40 | 41 | run = client.beta.threads.runs.create( 42 | thread_id = thread.id, 43 | assistant_id=assistant_id 44 | ) 45 | 46 | while True: 47 | run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id) 48 | if run.status == "completed": 49 | messages = client.beta.threads.messages.list(thread_id=thread.id) 50 | latest_message = messages.data[0] 51 | text = latest_message.content[0].text.value 52 | break; 53 | return text 54 | 55 | if __name__ == "__main__": 56 | uvicorn.run(app,host="0.0.0.0",port=80) -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/requirements.txt: -------------------------------------------------------------------------------- 1 | openai 2 | fastapi==0.109.0 3 | uvicorn[standard] -------------------------------------------------------------------------------- /1.llmops-project-1-chatgpt-docker/story.txt: -------------------------------------------------------------------------------- 1 | Title: The Odyssey of Lumina: Illuminating Lives 2 | 3 | Once upon a time, in a world not too different from our own, there was a brilliant 
inventor named Dr. Michael Greene. Dr. Greene was a visionary, constantly seeking ways to improve the lives of people around him. One day, while tinkering in his laboratory, he stumbled upon an extraordinary discovery—a revolutionary product he called "Lumina." 4 | 5 | Lumina was not just any product; it was a breakthrough in technology, designed to enhance human potential and productivity. At its core, Lumina was a wearable device that emitted a gentle light, scientifically proven to boost cognitive function, mood, and overall well-being. Dr. Greene was convinced that Lumina could transform society, empowering individuals to reach their full potential and make the world a better place. 6 | 7 | Excited by his discovery, Dr. Greene poured his heart and soul into perfecting Lumina. After years of research and development, he finally unveiled his creation to the world. The response was overwhelming—people from all walks of life clamored to get their hands on Lumina, eager to experience its transformative effects. 8 | 9 | At first, Lumina was hailed as a game-changer. Students wore it while studying, professionals wore it during work hours, and even athletes used it to sharpen their focus and performance. The world seemed brighter, more vibrant, and full of possibilities with Lumina by their side. 10 | 11 | As demand for Lumina soared, Dr. Greene found himself catapulted into the spotlight. He became a symbol of innovation and progress, admired and revered by millions around the globe. But amidst the acclaim and adulation, Dr. Greene remained grounded, always mindful of the responsibility that came with his creation. 12 | 13 | However, as with any great invention, challenges soon arose. Some skeptics questioned the long-term effects of prolonged Lumina usage, while others raised concerns about its potential to disrupt natural sleep patterns. Despite Dr. Greene's assurances and rigorous testing, doubts lingered in the minds of many. 14 | 15 | Then came the unforeseen consequences. People became dependent on Lumina, relying on its artificial glow to navigate through life's challenges. Some began to neglect their own innate abilities, believing that Lumina held the key to their success. Others experienced withdrawal symptoms when separated from their beloved device, highlighting a troubling addiction that had taken root. 16 | 17 | Caught in the midst of these complexities, Dr. Greene faced a moral dilemma. Was Lumina truly a force for good, or had it inadvertently become a crutch that hindered human progress? As the debate raged on, Dr. Greene found himself grappling with doubts and uncertainties he had never anticipated. 18 | 19 | Amidst the growing scrutiny and criticism, Lumina's sales began to decline. The once-heralded product now faced a reckoning—a moment of truth that would determine its fate. Dr. Greene knew that he had to make a choice: either adapt and evolve Lumina to address the concerns, or let it fade into obscurity. 20 | 21 | In a bold move, Dr. Greene decided to take action. He assembled a team of experts from various fields—scientists, psychologists, and ethicists—to reevaluate Lumina's design and functionality. Together, they conducted extensive research and analysis, seeking to understand both the benefits and pitfalls of Lumina. 22 | 23 | After months of painstaking work, the team unveiled Lumina 2.0—a revamped version that prioritized balance and moderation. 
The new Lumina incorporated customizable settings, allowing users to adjust the intensity and duration of light exposure based on their individual needs. It also featured built-in reminders to encourage breaks and promote healthy habits. 24 | 25 | The response to Lumina 2.0 was overwhelmingly positive. People welcomed the changes, embracing Lumina not as a crutch, but as a tool to supplement their natural abilities. Dr. Greene breathed a sigh of relief, knowing that he had made the right decision for both Lumina and humanity. 26 | 27 | As time passed, Lumina continued to thrive, evolving alongside society's changing needs. It became more than just a product—it became a symbol of resilience, adaptability, and the enduring human spirit. Dr. Greene's journey had taught him valuable lessons about the power of innovation, the importance of responsibility, and the true meaning of success. 28 | 29 | In the end, Lumina's impact transcended mere profit or fame. It had illuminated the lives of millions, inspiring them to embrace their potential and forge their own paths forward. And as Dr. Greene looked back on his remarkable journey, he knew that the greatest achievement of all was not the invention itself, but the profound impact it had on the world—and the hearts of those it touched. 30 | 31 | **Motivational Message:** 32 | 33 | In the pursuit of innovation and progress, we must always remain mindful of the impact our creations have on humanity. Success is not measured solely by accolades or profits, but by the positive change we bring to the world. Let us strive to create with purpose, integrity, and a steadfast commitment to improving the lives of others. For in the end, it is not the brilliance of our inventions that defines us, but the depth of our compassion and the legacy of our contributions to the greater good. -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/Dockerfile: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | FROM python:3.10 3 | COPY . . 
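# The `#!/bin/bash` shebang above is unnecessary in a Dockerfile: Docker treats
# leading `#` lines as comments (or parser directives), so it is ignored, not executed.
# As in project 1, ENTRYPOINT ["python"] plus CMD ["main.py"] below combine to run
# `python main.py`, which starts uvicorn on 0.0.0.0:80 inside the container.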
4 | RUN pip install -r requirements.txt 5 | EXPOSE 80 6 | ENTRYPOINT [ "python" ] 7 | CMD [ "main.py" ] -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/Project_1_Open_AI_Assistant.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "colab": { 8 | "base_uri": "https://localhost:8080/" 9 | }, 10 | "id": "yqyLUYg5sUhK", 11 | "outputId": "6d80b0aa-eeb2-4959-d650-ce5e420fc988" 12 | }, 13 | "outputs": [ 14 | { 15 | "name": "stdout", 16 | "output_type": "stream", 17 | "text": [ 18 | "Collecting openai\n", 19 | " Downloading openai-1.13.3-py3-none-any.whl (227 kB)\n", 20 | "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/227.4 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m225.3/227.4 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.4/227.4 kB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 21 | "\u001b[?25hRequirement already satisfied: anyio<5,>=3.5.0 in /usr/local/lib/python3.10/dist-packages (from openai) (3.7.1)\n", 22 | "Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai) (1.7.0)\n", 23 | "Collecting httpx<1,>=0.23.0 (from openai)\n", 24 | " Downloading httpx-0.27.0-py3-none-any.whl (75 kB)\n", 25 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 26 | "\u001b[?25hRequirement already satisfied: pydantic<3,>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from openai) (2.6.1)\n", 27 | "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from openai) (1.3.0)\n", 28 | "Requirement already satisfied: tqdm>4 in /usr/local/lib/python3.10/dist-packages (from openai) (4.66.2)\n", 29 | "Requirement already satisfied: typing-extensions<5,>=4.7 in /usr/local/lib/python3.10/dist-packages (from openai) (4.9.0)\n", 30 | "Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->openai) (3.6)\n", 31 | "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->openai) (1.2.0)\n", 32 | "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->openai) (2024.2.2)\n", 33 | "Collecting httpcore==1.* (from httpx<1,>=0.23.0->openai)\n", 34 | " Downloading httpcore-1.0.4-py3-none-any.whl (77 kB)\n", 35 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.8/77.8 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 36 | "\u001b[?25hCollecting h11<0.15,>=0.13 (from httpcore==1.*->httpx<1,>=0.23.0->openai)\n", 37 | " Downloading h11-0.14.0-py3-none-any.whl (58 kB)\n", 38 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 39 | "\u001b[?25hRequirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai) (0.6.0)\n", 40 | "Requirement already satisfied: 
pydantic-core==2.16.2 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai) (2.16.2)\n", 41 | "Installing collected packages: h11, httpcore, httpx, openai\n", 42 | "Successfully installed h11-0.14.0 httpcore-1.0.4 httpx-0.27.0 openai-1.13.3\n" 43 | ] 44 | } 45 | ], 46 | "source": [ 47 | "!pip install openai" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 4, 53 | "metadata": { 54 | "colab": { 55 | "base_uri": "https://localhost:8080/" 56 | }, 57 | "id": "cK0YPnzVsbhO", 58 | "outputId": "006eed8b-0e52-43b1-b29c-86e90d10040d" 59 | }, 60 | "outputs": [ 61 | { 62 | "data": { 63 | "text/plain": [ 64 | "" 65 | ] 66 | }, 67 | "execution_count": 4, 68 | "metadata": {}, 69 | "output_type": "execute_result" 70 | } 71 | ], 72 | "source": [ 73 | "from openai import OpenAI\n", 74 | "client = OpenAI(api_key=api_key = \"api-id\")\n", 75 | "client" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 15, 81 | "metadata": { 82 | "id": "3k6yFCausbeG" 83 | }, 84 | "outputs": [], 85 | "source": [ 86 | "uploaded_file = client.files.create(\n", 87 | " file=open(\"story.txt\",'rb'),\n", 88 | " purpose='assistants'\n", 89 | ")" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 16, 95 | "metadata": { 96 | "colab": { 97 | "base_uri": "https://localhost:8080/" 98 | }, 99 | "id": "e4vEodClsbbS", 100 | "outputId": "586fc24e-521e-4773-9aa8-2fba94c615d8" 101 | }, 102 | "outputs": [ 103 | { 104 | "data": { 105 | "text/plain": [ 106 | "FileObject(id='file-qEkiCPGFrAS8OLc22fc8dsJN', bytes=5299, created_at=1709224785, filename='story.txt', object='file', purpose='assistants', status='processed', status_details=None)" 107 | ] 108 | }, 109 | "execution_count": 16, 110 | "metadata": {}, 111 | "output_type": "execute_result" 112 | } 113 | ], 114 | "source": [ 115 | "uploaded_file" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 17, 121 | "metadata": { 122 | "id": "UJ_VjFJ4sbYY" 123 | }, 124 | "outputs": [], 125 | "source": [ 126 | "assistant = client.beta.assistants.create(\n", 127 | " name=\"Story helper\",\n", 128 | " instructions=\"You are a motivator who answers the question based on the story file\",\n", 129 | " tools=[{\"type\": \"retrieval\"}],\n", 130 | " model=\"gpt-4-turbo-preview\",\n", 131 | " file_ids=[uploaded_file.id]\n", 132 | ")" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 18, 138 | "metadata": { 139 | "colab": { 140 | "base_uri": "https://localhost:8080/" 141 | }, 142 | "id": "v6BgrO7PsbVQ", 143 | "outputId": "47d1b31d-6b1b-4b29-d47c-4447ed9488e6" 144 | }, 145 | "outputs": [ 146 | { 147 | "data": { 148 | "text/plain": [ 149 | "Thread(id='thread_CMaQ6801HYvVyxdhVLBum6Ot', created_at=1709224874, metadata={}, object='thread')" 150 | ] 151 | }, 152 | "execution_count": 18, 153 | "metadata": {}, 154 | "output_type": "execute_result" 155 | } 156 | ], 157 | "source": [ 158 | "thread = client.beta.threads.create()\n", 159 | "thread" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 19, 165 | "metadata": { 166 | "id": "CmCvLAZ5sbSI" 167 | }, 168 | "outputs": [], 169 | "source": [ 170 | "message = client.beta.threads.messages.create(\n", 171 | " thread_id=thread.id,\n", 172 | " role=\"user\",\n", 173 | " content=\"Who is the hero of the story?\"\n", 174 | ")" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 20, 180 | "metadata": { 181 | "colab": { 182 | "base_uri": "https://localhost:8080/" 183 | }, 184 | "id": 
"BhWJG0VQsbPV", 185 | "outputId": "59f47f35-9340-4384-ff61-fbc34b41cfe9" 186 | }, 187 | "outputs": [ 188 | { 189 | "data": { 190 | "text/plain": [ 191 | "ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')" 192 | ] 193 | }, 194 | "execution_count": 20, 195 | "metadata": {}, 196 | "output_type": "execute_result" 197 | } 198 | ], 199 | "source": [ 200 | "message" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 21, 206 | "metadata": { 207 | "colab": { 208 | "base_uri": "https://localhost:8080/" 209 | }, 210 | "id": "HWceqG1qwRor", 211 | "outputId": "0b57bcd5-c30b-4ba8-dd76-f3d4b090b1fe" 212 | }, 213 | "outputs": [ 214 | { 215 | "data": { 216 | "text/plain": [ 217 | "Assistant(id='asst_VaRmvfp30jPvLuVJgYzxGmXf', created_at=1709224786, description=None, file_ids=['file-qEkiCPGFrAS8OLc22fc8dsJN'], instructions='You are a motivator who answers the question based on the story file', metadata={}, model='gpt-4-turbo-preview', name='Story helper', object='assistant', tools=[ToolRetrieval(type='retrieval')])" 218 | ] 219 | }, 220 | "execution_count": 21, 221 | "metadata": {}, 222 | "output_type": "execute_result" 223 | } 224 | ], 225 | "source": [ 226 | "assistant" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": 22, 232 | "metadata": { 233 | "id": "n167t7_bvzWc" 234 | }, 235 | "outputs": [], 236 | "source": [ 237 | "run = client.beta.threads.runs.create(\n", 238 | " thread_id=thread.id,\n", 239 | " assistant_id=assistant.id\n", 240 | ")" 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": 23, 246 | "metadata": { 247 | "id": "xDFA3GpAwVs1" 248 | }, 249 | "outputs": [], 250 | "source": [ 251 | "run = client.beta.threads.runs.retrieve(\n", 252 | " thread_id=thread.id,\n", 253 | " run_id=run.id\n", 254 | ")" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": 25, 260 | "metadata": { 261 | "colab": { 262 | "base_uri": "https://localhost:8080/", 263 | "height": 35 264 | }, 265 | "id": "LTxbm44dwa9G", 266 | "outputId": "e7d4933d-485b-4b86-b246-e0d75a5e89db" 267 | }, 268 | "outputs": [ 269 | { 270 | "data": { 271 | "application/vnd.google.colaboratory.intrinsic+json": { 272 | "type": "string" 273 | }, 274 | "text/plain": [ 275 | "'completed'" 276 | ] 277 | }, 278 | "execution_count": 25, 279 | "metadata": {}, 280 | "output_type": "execute_result" 281 | } 282 | ], 283 | "source": [ 284 | "run.status" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": 27, 290 | "metadata": { 291 | "colab": { 292 | "base_uri": "https://localhost:8080/" 293 | }, 294 | "id": "kI8ita5dwcPA", 295 | "outputId": "dc47a81b-8f7a-43b4-dfad-7a0dc44c762c" 296 | }, 297 | "outputs": [ 298 | { 299 | "data": { 300 | "text/plain": [ 301 | "ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')" 302 | ] 303 | }, 304 | "execution_count": 27, 305 | "metadata": {}, 306 | "output_type": "execute_result" 307 | } 308 | ], 309 | "source": [ 310 | "message" 311 | ] 312 | }, 313 | { 314 | 
"cell_type": "code", 315 | "execution_count": 28, 316 | "metadata": { 317 | "colab": { 318 | "base_uri": "https://localhost:8080/" 319 | }, 320 | "id": "k2gaGxZ8whye", 321 | "outputId": "a3f2fbc9-ba11-40e0-ca8e-19e2cd258739" 322 | }, 323 | "outputs": [ 324 | { 325 | "data": { 326 | "text/plain": [ 327 | "SyncCursorPage[ThreadMessage](data=[ThreadMessage(id='msg_3oc7BbjhcaqWuMhs83XReWz3', assistant_id='asst_VaRmvfp30jPvLuVJgYzxGmXf', content=[MessageContentText(text=Text(annotations=[], value='The hero of the story \"The Odyssey of Lumina: Illuminating Lives\" is Dr. Michael Greene. He is portrayed as a brilliant inventor who creates Lumina, a revolutionary product designed to enhance human potential and productivity. Despite facing challenges, criticism, and the unforeseen consequences of his invention, Dr. Greene remains committed to improving it for the betterment of society. His journey showcases resilience, adaptability, and an enduring commitment to the greater good, making him the unequivocal hero of the story.'), type='text')], created_at=1709225111, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_eoHeWhFTc1JJ1xVB0VjnGhMO', thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot'), ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')], object='list', first_id='msg_3oc7BbjhcaqWuMhs83XReWz3', last_id='msg_LDlW9vX8791DfLAYv30YDvzQ', has_more=False)" 328 | ] 329 | }, 330 | "execution_count": 28, 331 | "metadata": {}, 332 | "output_type": "execute_result" 333 | } 334 | ], 335 | "source": [ 336 | "messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 337 | "messages" 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 29, 343 | "metadata": { 344 | "colab": { 345 | "base_uri": "https://localhost:8080/" 346 | }, 347 | "id": "bhIPBLQ4wvc2", 348 | "outputId": "ef589a59-ec78-4ea5-c466-654fb1a1e0a2" 349 | }, 350 | "outputs": [ 351 | { 352 | "name": "stdout", 353 | "output_type": "stream", 354 | "text": [ 355 | "The hero of the story \"The Odyssey of Lumina: Illuminating Lives\" is Dr. Michael Greene. He is portrayed as a brilliant inventor who creates Lumina, a revolutionary product designed to enhance human potential and productivity. Despite facing challenges, criticism, and the unforeseen consequences of his invention, Dr. Greene remains committed to improving it for the betterment of society. 
His journey showcases resilience, adaptability, and an enduring commitment to the greater good, making him the unequivocal hero of the story.\n" 356 | ] 357 | } 358 | ], 359 | "source": [ 360 | "while True:\n", 361 | " run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)\n", 362 | " if run.status==\"completed\":\n", 363 | " messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 364 | " latest_message = messages.data[0]\n", 365 | " text = latest_message.content[0].text.value\n", 366 | " print(text)\n", 367 | " break;" 368 | ] 369 | }, 370 | { 371 | "cell_type": "code", 372 | "execution_count": null, 373 | "metadata": { 374 | "id": "Y13Gor4ZxLe3" 375 | }, 376 | "outputs": [], 377 | "source": [] 378 | } 379 | ], 380 | "metadata": { 381 | "colab": { 382 | "provenance": [] 383 | }, 384 | "kernelspec": { 385 | "display_name": "Python 3", 386 | "name": "python3" 387 | }, 388 | "language_info": { 389 | "name": "python" 390 | } 391 | }, 392 | "nbformat": 4, 393 | "nbformat_minor": 0 394 | } 395 | -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # About the Repo 2 | This repo is part of the LLMOps course by Manifold AI Learning, 3 | Course link - https://www.manifoldailearning.in/courses/LLMOps-with-ChatGPT-Deploy-on-Production-65cb265ae4b086660d2836ae 4 | 5 | Reach the Instructor at - https://www.linkedin.com/in/nachiketh-murthy/ 6 | 7 | For any support reach out to : support@manifoldailearning.in 8 | 9 | # Help on FAST API 10 | 11 | ``` 12 | pip install "fastapi[all]" 13 | 14 | uvicorn main:app --reload 15 | ``` 16 | 17 | # Test with Postman 18 | 19 | URL - http://127.0.0.1:80/response 20 | (POST) 21 | 22 | ```json 23 | { 24 | "text": "Who is the hero of the story" 25 | } 26 | 27 | ``` 28 | 29 | # Docker Commands 30 | 31 | ``` 32 | docker build -t chatgpt-project1 . 
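# For Kubernetes the image must live in a registry the cluster can pull from (see the
# tag/push commands below); deploy.yaml pins manifoldailearning/chatgpt-project:v1.
# Assuming the manifests are applied with `kubectl apply -f deploy.yaml`, the NodePort
# service exposes port 30002, so a hedged test (the node IP varies by cluster, e.g. `minikube ip`):
# curl -X POST http://<node-ip>:30002/response \
#   -H "Content-Type: application/json" -d '{"text": "Who is the hero of the story"}'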
33 | docker run -d -p 8080:80 chatgpt-project1 34 | docker tag chatgpt-project1 yourusername/chatgpt-project1 35 | docker push yourusername/chatgpt-project1 36 | ``` 37 | 38 | # Kubernetes Code 39 | 40 | ``` 41 | kubectl create secret generic openai-secret --from-literal=API_KEY= 42 | ``` -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/__pycache__/config.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/2.llmops-project-1-chatgpt-kubernetes/__pycache__/config.cpython-311.pyc -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/__pycache__/main.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/2.llmops-project-1-chatgpt-kubernetes/__pycache__/main.cpython-311.pyc -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/config.py: -------------------------------------------------------------------------------- 1 | #api_key = "api-id" 2 | assistant_id = "asst_VaRmvfp30jPvLuVJgYzxGmXf" -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: chatgpt-deploy 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | app: chatgpt-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: chatgpt-pod 14 | spec: 15 | containers: 16 | - name: mychatgptcontainer 17 | image: manifoldailearning/chatgpt-project:v1 18 | resources: 19 | limits: 20 | memory: "128Mi" 21 | cpu: "500m" 22 | ports: 23 | - containerPort: 80 24 | env: 25 | - name: OPENAI_API 26 | valueFrom: 27 | secretKeyRef: 28 | name: openai-secret 29 | key: API_KEY 30 | 31 | 32 | --- 33 | apiVersion: v1 34 | kind: Service 35 | metadata: 36 | name: mysvc-v2 37 | spec: 38 | type: NodePort 39 | selector: 40 | app: chatgpt-pod 41 | ports: 42 | - port: 5000 # Service IP Port 43 | targetPort: 80 # Container Port 44 | nodePort: 30002 # Node Port/Host Port 45 | protocol: TCP 46 | -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/main.py: -------------------------------------------------------------------------------- 1 | #!/bin/python3 2 | from fastapi import FastAPI 3 | from openai import OpenAI 4 | import config 5 | from pydantic import BaseModel 6 | import uvicorn 7 | import os 8 | 9 | assistant_id = config.assistant_id 10 | api_key = os.environ['OPENAI_API'] 11 | 12 | client = OpenAI(api_key=api_key) 13 | 14 | app = FastAPI() 15 | 16 | class Body(BaseModel): 17 | text: str 18 | 19 | # get, post, put, and delete 20 | 21 | @app.get("/") 22 | def welcome(): 23 | return {"message": "Welcome to ChatGPT AI Application"} 24 | 25 | @app.get("/home") 26 | def welcome(): 27 | return {"message": "welcome home"} 28 | 29 | @app.post("/dummy") 30 | def demo_function(data): 31 | return {"message": data} 32 | 33 | @app.post("/response") 34 | def generate(body: Body): 35 | prompt = body.text # user input 36 | thread = 
client.beta.threads.create() 37 | message = client.beta.threads.messages.create( 38 | thread_id=thread.id, 39 | role="user", 40 | content=prompt 41 | ) 42 | 43 | run = client.beta.threads.runs.create( 44 | thread_id = thread.id, 45 | assistant_id=assistant_id 46 | ) 47 | 48 | while True: 49 | run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id) 50 | if run.status == "completed": 51 | messages = client.beta.threads.messages.list(thread_id=thread.id) 52 | latest_message = messages.data[0] 53 | text = latest_message.content[0].text.value 54 | break; 55 | return text 56 | 57 | if __name__ == "__main__": 58 | uvicorn.run(app,host="0.0.0.0",port=80) -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/requirements.txt: -------------------------------------------------------------------------------- 1 | openai 2 | fastapi==0.109.0 3 | uvicorn[standard] -------------------------------------------------------------------------------- /2.llmops-project-1-chatgpt-kubernetes/story.txt: -------------------------------------------------------------------------------- 1 | Title: The Odyssey of Lumina: Illuminating Lives 2 | 3 | Once upon a time, in a world not too different from our own, there was a brilliant inventor named Dr. Michael Greene. Dr. Greene was a visionary, constantly seeking ways to improve the lives of people around him. One day, while tinkering in his laboratory, he stumbled upon an extraordinary discovery—a revolutionary product he called "Lumina." 4 | 5 | Lumina was not just any product; it was a breakthrough in technology, designed to enhance human potential and productivity. At its core, Lumina was a wearable device that emitted a gentle light, scientifically proven to boost cognitive function, mood, and overall well-being. Dr. Greene was convinced that Lumina could transform society, empowering individuals to reach their full potential and make the world a better place. 6 | 7 | Excited by his discovery, Dr. Greene poured his heart and soul into perfecting Lumina. After years of research and development, he finally unveiled his creation to the world. The response was overwhelming—people from all walks of life clamored to get their hands on Lumina, eager to experience its transformative effects. 8 | 9 | At first, Lumina was hailed as a game-changer. Students wore it while studying, professionals wore it during work hours, and even athletes used it to sharpen their focus and performance. The world seemed brighter, more vibrant, and full of possibilities with Lumina by their side. 10 | 11 | As demand for Lumina soared, Dr. Greene found himself catapulted into the spotlight. He became a symbol of innovation and progress, admired and revered by millions around the globe. But amidst the acclaim and adulation, Dr. Greene remained grounded, always mindful of the responsibility that came with his creation. 12 | 13 | However, as with any great invention, challenges soon arose. Some skeptics questioned the long-term effects of prolonged Lumina usage, while others raised concerns about its potential to disrupt natural sleep patterns. Despite Dr. Greene's assurances and rigorous testing, doubts lingered in the minds of many. 14 | 15 | Then came the unforeseen consequences. People became dependent on Lumina, relying on its artificial glow to navigate through life's challenges. Some began to neglect their own innate abilities, believing that Lumina held the key to their success. 
Others experienced withdrawal symptoms when separated from their beloved device, highlighting a troubling addiction that had taken root. 16 | 17 | Caught in the midst of these complexities, Dr. Greene faced a moral dilemma. Was Lumina truly a force for good, or had it inadvertently become a crutch that hindered human progress? As the debate raged on, Dr. Greene found himself grappling with doubts and uncertainties he had never anticipated. 18 | 19 | Amidst the growing scrutiny and criticism, Lumina's sales began to decline. The once-heralded product now faced a reckoning—a moment of truth that would determine its fate. Dr. Greene knew that he had to make a choice: either adapt and evolve Lumina to address the concerns, or let it fade into obscurity. 20 | 21 | In a bold move, Dr. Greene decided to take action. He assembled a team of experts from various fields—scientists, psychologists, and ethicists—to reevaluate Lumina's design and functionality. Together, they conducted extensive research and analysis, seeking to understand both the benefits and pitfalls of Lumina. 22 | 23 | After months of painstaking work, the team unveiled Lumina 2.0—a revamped version that prioritized balance and moderation. The new Lumina incorporated customizable settings, allowing users to adjust the intensity and duration of light exposure based on their individual needs. It also featured built-in reminders to encourage breaks and promote healthy habits. 24 | 25 | The response to Lumina 2.0 was overwhelmingly positive. People welcomed the changes, embracing Lumina not as a crutch, but as a tool to supplement their natural abilities. Dr. Greene breathed a sigh of relief, knowing that he had made the right decision for both Lumina and humanity. 26 | 27 | As time passed, Lumina continued to thrive, evolving alongside society's changing needs. It became more than just a product—it became a symbol of resilience, adaptability, and the enduring human spirit. Dr. Greene's journey had taught him valuable lessons about the power of innovation, the importance of responsibility, and the true meaning of success. 28 | 29 | In the end, Lumina's impact transcended mere profit or fame. It had illuminated the lives of millions, inspiring them to embrace their potential and forge their own paths forward. And as Dr. Greene looked back on his remarkable journey, he knew that the greatest achievement of all was not the invention itself, but the profound impact it had on the world—and the hearts of those it touched. 30 | 31 | **Motivational Message:** 32 | 33 | In the pursuit of innovation and progress, we must always remain mindful of the impact our creations have on humanity. Success is not measured solely by accolades or profits, but by the positive change we bring to the world. Let us strive to create with purpose, integrity, and a steadfast commitment to improving the lives of others. For in the end, it is not the brilliance of our inventions that defines us, but the depth of our compassion and the legacy of our contributions to the greater good. -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/Dockerfile: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | FROM --platform=linux/amd64 python:3.10 3 | COPY . . 
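# `--platform=linux/amd64` pins the base image architecture so that images built on an
# arm64 machine (e.g. Apple Silicon) still run on GKE's default amd64 node pools; the
# README's `docker buildx build --platform=linux/amd64 ...` command pairs with this line.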
4 | RUN pip install -r requirements.txt 5 | EXPOSE 80 6 | ENTRYPOINT [ "python" ] 7 | CMD [ "main.py" ] -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/Project_1_Open_AI_Assistant.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "colab": { 8 | "base_uri": "https://localhost:8080/" 9 | }, 10 | "id": "yqyLUYg5sUhK", 11 | "outputId": "6d80b0aa-eeb2-4959-d650-ce5e420fc988" 12 | }, 13 | "outputs": [ 14 | { 15 | "name": "stdout", 16 | "output_type": "stream", 17 | "text": [ 18 | "Collecting openai\n", 19 | " Downloading openai-1.13.3-py3-none-any.whl (227 kB)\n", 20 | "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/227.4 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m225.3/227.4 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.4/227.4 kB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 21 | "\u001b[?25hRequirement already satisfied: anyio<5,>=3.5.0 in /usr/local/lib/python3.10/dist-packages (from openai) (3.7.1)\n", 22 | "Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai) (1.7.0)\n", 23 | "Collecting httpx<1,>=0.23.0 (from openai)\n", 24 | " Downloading httpx-0.27.0-py3-none-any.whl (75 kB)\n", 25 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 26 | "\u001b[?25hRequirement already satisfied: pydantic<3,>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from openai) (2.6.1)\n", 27 | "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from openai) (1.3.0)\n", 28 | "Requirement already satisfied: tqdm>4 in /usr/local/lib/python3.10/dist-packages (from openai) (4.66.2)\n", 29 | "Requirement already satisfied: typing-extensions<5,>=4.7 in /usr/local/lib/python3.10/dist-packages (from openai) (4.9.0)\n", 30 | "Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->openai) (3.6)\n", 31 | "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->openai) (1.2.0)\n", 32 | "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->openai) (2024.2.2)\n", 33 | "Collecting httpcore==1.* (from httpx<1,>=0.23.0->openai)\n", 34 | " Downloading httpcore-1.0.4-py3-none-any.whl (77 kB)\n", 35 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.8/77.8 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 36 | "\u001b[?25hCollecting h11<0.15,>=0.13 (from httpcore==1.*->httpx<1,>=0.23.0->openai)\n", 37 | " Downloading h11-0.14.0-py3-none-any.whl (58 kB)\n", 38 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 39 | "\u001b[?25hRequirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai) (0.6.0)\n", 40 | "Requirement already satisfied: 
pydantic-core==2.16.2 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai) (2.16.2)\n", 41 | "Installing collected packages: h11, httpcore, httpx, openai\n", 42 | "Successfully installed h11-0.14.0 httpcore-1.0.4 httpx-0.27.0 openai-1.13.3\n" 43 | ] 44 | } 45 | ], 46 | "source": [ 47 | "!pip install openai" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 4, 53 | "metadata": { 54 | "colab": { 55 | "base_uri": "https://localhost:8080/" 56 | }, 57 | "id": "cK0YPnzVsbhO", 58 | "outputId": "006eed8b-0e52-43b1-b29c-86e90d10040d" 59 | }, 60 | "outputs": [ 61 | { 62 | "data": { 63 | "text/plain": [ 64 | "" 65 | ] 66 | }, 67 | "execution_count": 4, 68 | "metadata": {}, 69 | "output_type": "execute_result" 70 | } 71 | ], 72 | "source": [ 73 | "from openai import OpenAI\n", 74 | "client = OpenAI(api_key=api_key = \"api-id\")\n", 75 | "client" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 15, 81 | "metadata": { 82 | "id": "3k6yFCausbeG" 83 | }, 84 | "outputs": [], 85 | "source": [ 86 | "uploaded_file = client.files.create(\n", 87 | " file=open(\"story.txt\",'rb'),\n", 88 | " purpose='assistants'\n", 89 | ")" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 16, 95 | "metadata": { 96 | "colab": { 97 | "base_uri": "https://localhost:8080/" 98 | }, 99 | "id": "e4vEodClsbbS", 100 | "outputId": "586fc24e-521e-4773-9aa8-2fba94c615d8" 101 | }, 102 | "outputs": [ 103 | { 104 | "data": { 105 | "text/plain": [ 106 | "FileObject(id='file-qEkiCPGFrAS8OLc22fc8dsJN', bytes=5299, created_at=1709224785, filename='story.txt', object='file', purpose='assistants', status='processed', status_details=None)" 107 | ] 108 | }, 109 | "execution_count": 16, 110 | "metadata": {}, 111 | "output_type": "execute_result" 112 | } 113 | ], 114 | "source": [ 115 | "uploaded_file" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 17, 121 | "metadata": { 122 | "id": "UJ_VjFJ4sbYY" 123 | }, 124 | "outputs": [], 125 | "source": [ 126 | "assistant = client.beta.assistants.create(\n", 127 | " name=\"Story helper\",\n", 128 | " instructions=\"You are a motivator who answers the question based on the story file\",\n", 129 | " tools=[{\"type\": \"retrieval\"}],\n", 130 | " model=\"gpt-4-turbo-preview\",\n", 131 | " file_ids=[uploaded_file.id]\n", 132 | ")" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 18, 138 | "metadata": { 139 | "colab": { 140 | "base_uri": "https://localhost:8080/" 141 | }, 142 | "id": "v6BgrO7PsbVQ", 143 | "outputId": "47d1b31d-6b1b-4b29-d47c-4447ed9488e6" 144 | }, 145 | "outputs": [ 146 | { 147 | "data": { 148 | "text/plain": [ 149 | "Thread(id='thread_CMaQ6801HYvVyxdhVLBum6Ot', created_at=1709224874, metadata={}, object='thread')" 150 | ] 151 | }, 152 | "execution_count": 18, 153 | "metadata": {}, 154 | "output_type": "execute_result" 155 | } 156 | ], 157 | "source": [ 158 | "thread = client.beta.threads.create()\n", 159 | "thread" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 19, 165 | "metadata": { 166 | "id": "CmCvLAZ5sbSI" 167 | }, 168 | "outputs": [], 169 | "source": [ 170 | "message = client.beta.threads.messages.create(\n", 171 | " thread_id=thread.id,\n", 172 | " role=\"user\",\n", 173 | " content=\"Who is the hero of the story?\"\n", 174 | ")" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 20, 180 | "metadata": { 181 | "colab": { 182 | "base_uri": "https://localhost:8080/" 183 | }, 184 | "id": 
"BhWJG0VQsbPV", 185 | "outputId": "59f47f35-9340-4384-ff61-fbc34b41cfe9" 186 | }, 187 | "outputs": [ 188 | { 189 | "data": { 190 | "text/plain": [ 191 | "ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')" 192 | ] 193 | }, 194 | "execution_count": 20, 195 | "metadata": {}, 196 | "output_type": "execute_result" 197 | } 198 | ], 199 | "source": [ 200 | "message" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 21, 206 | "metadata": { 207 | "colab": { 208 | "base_uri": "https://localhost:8080/" 209 | }, 210 | "id": "HWceqG1qwRor", 211 | "outputId": "0b57bcd5-c30b-4ba8-dd76-f3d4b090b1fe" 212 | }, 213 | "outputs": [ 214 | { 215 | "data": { 216 | "text/plain": [ 217 | "Assistant(id='asst_VaRmvfp30jPvLuVJgYzxGmXf', created_at=1709224786, description=None, file_ids=['file-qEkiCPGFrAS8OLc22fc8dsJN'], instructions='You are a motivator who answers the question based on the story file', metadata={}, model='gpt-4-turbo-preview', name='Story helper', object='assistant', tools=[ToolRetrieval(type='retrieval')])" 218 | ] 219 | }, 220 | "execution_count": 21, 221 | "metadata": {}, 222 | "output_type": "execute_result" 223 | } 224 | ], 225 | "source": [ 226 | "assistant" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": 22, 232 | "metadata": { 233 | "id": "n167t7_bvzWc" 234 | }, 235 | "outputs": [], 236 | "source": [ 237 | "run = client.beta.threads.runs.create(\n", 238 | " thread_id=thread.id,\n", 239 | " assistant_id=assistant.id\n", 240 | ")" 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": 23, 246 | "metadata": { 247 | "id": "xDFA3GpAwVs1" 248 | }, 249 | "outputs": [], 250 | "source": [ 251 | "run = client.beta.threads.runs.retrieve(\n", 252 | " thread_id=thread.id,\n", 253 | " run_id=run.id\n", 254 | ")" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": 25, 260 | "metadata": { 261 | "colab": { 262 | "base_uri": "https://localhost:8080/", 263 | "height": 35 264 | }, 265 | "id": "LTxbm44dwa9G", 266 | "outputId": "e7d4933d-485b-4b86-b246-e0d75a5e89db" 267 | }, 268 | "outputs": [ 269 | { 270 | "data": { 271 | "application/vnd.google.colaboratory.intrinsic+json": { 272 | "type": "string" 273 | }, 274 | "text/plain": [ 275 | "'completed'" 276 | ] 277 | }, 278 | "execution_count": 25, 279 | "metadata": {}, 280 | "output_type": "execute_result" 281 | } 282 | ], 283 | "source": [ 284 | "run.status" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": 27, 290 | "metadata": { 291 | "colab": { 292 | "base_uri": "https://localhost:8080/" 293 | }, 294 | "id": "kI8ita5dwcPA", 295 | "outputId": "dc47a81b-8f7a-43b4-dfad-7a0dc44c762c" 296 | }, 297 | "outputs": [ 298 | { 299 | "data": { 300 | "text/plain": [ 301 | "ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')" 302 | ] 303 | }, 304 | "execution_count": 27, 305 | "metadata": {}, 306 | "output_type": "execute_result" 307 | } 308 | ], 309 | "source": [ 310 | "message" 311 | ] 312 | }, 313 | { 314 | 
"cell_type": "code", 315 | "execution_count": 28, 316 | "metadata": { 317 | "colab": { 318 | "base_uri": "https://localhost:8080/" 319 | }, 320 | "id": "k2gaGxZ8whye", 321 | "outputId": "a3f2fbc9-ba11-40e0-ca8e-19e2cd258739" 322 | }, 323 | "outputs": [ 324 | { 325 | "data": { 326 | "text/plain": [ 327 | "SyncCursorPage[ThreadMessage](data=[ThreadMessage(id='msg_3oc7BbjhcaqWuMhs83XReWz3', assistant_id='asst_VaRmvfp30jPvLuVJgYzxGmXf', content=[MessageContentText(text=Text(annotations=[], value='The hero of the story \"The Odyssey of Lumina: Illuminating Lives\" is Dr. Michael Greene. He is portrayed as a brilliant inventor who creates Lumina, a revolutionary product designed to enhance human potential and productivity. Despite facing challenges, criticism, and the unforeseen consequences of his invention, Dr. Greene remains committed to improving it for the betterment of society. His journey showcases resilience, adaptability, and an enduring commitment to the greater good, making him the unequivocal hero of the story.'), type='text')], created_at=1709225111, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_eoHeWhFTc1JJ1xVB0VjnGhMO', thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot'), ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')], object='list', first_id='msg_3oc7BbjhcaqWuMhs83XReWz3', last_id='msg_LDlW9vX8791DfLAYv30YDvzQ', has_more=False)" 328 | ] 329 | }, 330 | "execution_count": 28, 331 | "metadata": {}, 332 | "output_type": "execute_result" 333 | } 334 | ], 335 | "source": [ 336 | "messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 337 | "messages" 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 29, 343 | "metadata": { 344 | "colab": { 345 | "base_uri": "https://localhost:8080/" 346 | }, 347 | "id": "bhIPBLQ4wvc2", 348 | "outputId": "ef589a59-ec78-4ea5-c466-654fb1a1e0a2" 349 | }, 350 | "outputs": [ 351 | { 352 | "name": "stdout", 353 | "output_type": "stream", 354 | "text": [ 355 | "The hero of the story \"The Odyssey of Lumina: Illuminating Lives\" is Dr. Michael Greene. He is portrayed as a brilliant inventor who creates Lumina, a revolutionary product designed to enhance human potential and productivity. Despite facing challenges, criticism, and the unforeseen consequences of his invention, Dr. Greene remains committed to improving it for the betterment of society. 
His journey showcases resilience, adaptability, and an enduring commitment to the greater good, making him the unequivocal hero of the story.\n" 356 | ] 357 | } 358 | ], 359 | "source": [ 360 | "while True:\n", 361 | " run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)\n", 362 | " if run.status==\"completed\":\n", 363 | " messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 364 | " latest_message = messages.data[0]\n", 365 | " text = latest_message.content[0].text.value\n", 366 | " print(text)\n", 367 | " break;" 368 | ] 369 | }, 370 | { 371 | "cell_type": "code", 372 | "execution_count": null, 373 | "metadata": { 374 | "id": "Y13Gor4ZxLe3" 375 | }, 376 | "outputs": [], 377 | "source": [] 378 | } 379 | ], 380 | "metadata": { 381 | "colab": { 382 | "provenance": [] 383 | }, 384 | "kernelspec": { 385 | "display_name": "Python 3", 386 | "name": "python3" 387 | }, 388 | "language_info": { 389 | "name": "python" 390 | } 391 | }, 392 | "nbformat": 4, 393 | "nbformat_minor": 0 394 | } 395 | -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/README.md: -------------------------------------------------------------------------------- 1 | # About the Repo 2 | This repo is part of the LLMOps course by Manifold AI Learning, 3 | Course link - https://www.manifoldailearning.in/courses/LLMOps-with-ChatGPT-Deploy-on-Production-65cb265ae4b086660d2836ae 4 | 5 | Reach the Instructor at - https://www.linkedin.com/in/nachiketh-murthy/ 6 | 7 | For any support reach out to : support@manifoldailearning.in 8 | 9 | # Help on FAST API 10 | 11 | ``` 12 | pip install "fastapi[all]" 13 | 14 | uvicorn main:app --reload 15 | ``` 16 | 17 | # Test with Postman 18 | 19 | URL - http://127.0.0.1:80/response 20 | (POST) 21 | 22 | ```json 23 | { 24 | "text": "Who is the hero of the story" 25 | } 26 | 27 | ``` 28 | 29 | # Docker Commands 30 | 31 | ``` 32 | docker build -t chatgpt-project1 . 33 | docker run -d -p 8080:80 chatgpt-project1 34 | docker tag chatgpt-project1 yourusername/chatgpt-project1 35 | docker push yourusername/chatgpt-project1 36 | ``` 37 | 38 | # Kubernetes Code 39 | 40 | ``` 41 | kubectl create secret generic openai-secret --from-literal=API_KEY= 42 | ``` 43 | 44 | # Important Code for Docker 45 | 46 | ``` 47 | docker buildx build --platform=linux/amd64 -t yourusername/chatgpt-project:v3 . 
48 | docker push yourusername/chatgpt-project:v3 49 | ``` -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/__pycache__/config.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/3.llmops-project-1-chatgpt-kubernetes-gke/__pycache__/config.cpython-311.pyc -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/__pycache__/main.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/3.llmops-project-1-chatgpt-kubernetes-gke/__pycache__/main.cpython-311.pyc -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/config.py: -------------------------------------------------------------------------------- 1 | #api_key = "api-id" 2 | assistant_id = "asst_VaRmvfp30jPvLuVJgYzxGmXf" -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: chatgpt-deploy 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | app: chatgpt-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: chatgpt-pod 14 | spec: 15 | containers: 16 | - name: mychatgptcontainer 17 | image: manifoldailearning/chatgpt-project:v3 18 | ports: 19 | - containerPort: 80 20 | env: 21 | - name: OPENAI_API 22 | valueFrom: 23 | secretKeyRef: 24 | name: openai-secret 25 | key: API_KEY 26 | 27 | 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: mylb 33 | spec: 34 | type: LoadBalancer 35 | selector: 36 | app: chatgpt-pod 37 | ports: 38 | - port: 80 # Service IP Port 39 | targetPort: 80 # Container Port 40 | -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/main.py: -------------------------------------------------------------------------------- 1 | #!/bin/python3 2 | from fastapi import FastAPI 3 | from openai import OpenAI 4 | import config 5 | from pydantic import BaseModel 6 | import uvicorn 7 | import os 8 | 9 | assistant_id = config.assistant_id 10 | api_key = os.environ['OPENAI_API'] 11 | 12 | client = OpenAI(api_key=api_key) 13 | 14 | app = FastAPI() 15 | 16 | class Body(BaseModel): 17 | text: str 18 | 19 | # get, post, put, and delete 20 | 21 | @app.get("/") 22 | def welcome(): 23 | return {"message": "Welcome to ChatGPT AI Application"} 24 | 25 | @app.get("/home") 26 | def welcome(): 27 | return {"message": "welcome home"} 28 | 29 | @app.post("/dummy") 30 | def demo_function(data): 31 | return {"message": data} 32 | 33 | @app.post("/response") 34 | def generate(body: Body): 35 | prompt = body.text # user input 36 | thread = client.beta.threads.create() 37 | message = client.beta.threads.messages.create( 38 | thread_id=thread.id, 39 | role="user", 40 | content=prompt 41 | ) 42 | 43 | run = client.beta.threads.runs.create( 44 | thread_id = thread.id, 45 | assistant_id=assistant_id 46 | ) 47 | 48 | while True: 49 | run = client.beta.threads.runs.retrieve(thread_id=thread.id, 
run_id=run.id) 50 | if run.status == "completed": 51 | messages = client.beta.threads.messages.list(thread_id=thread.id) 52 | latest_message = messages.data[0] 53 | text = latest_message.content[0].text.value 54 | break; 55 | return text 56 | 57 | if __name__ == "__main__": 58 | uvicorn.run(app,host="0.0.0.0",port=80) -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/requirements.txt: -------------------------------------------------------------------------------- 1 | openai 2 | fastapi==0.109.0 3 | uvicorn[standard] -------------------------------------------------------------------------------- /3.llmops-project-1-chatgpt-kubernetes-gke/story.txt: -------------------------------------------------------------------------------- 1 | Title: The Odyssey of Lumina: Illuminating Lives 2 | 3 | Once upon a time, in a world not too different from our own, there was a brilliant inventor named Dr. Michael Greene. Dr. Greene was a visionary, constantly seeking ways to improve the lives of people around him. One day, while tinkering in his laboratory, he stumbled upon an extraordinary discovery—a revolutionary product he called "Lumina." 4 | 5 | Lumina was not just any product; it was a breakthrough in technology, designed to enhance human potential and productivity. At its core, Lumina was a wearable device that emitted a gentle light, scientifically proven to boost cognitive function, mood, and overall well-being. Dr. Greene was convinced that Lumina could transform society, empowering individuals to reach their full potential and make the world a better place. 6 | 7 | Excited by his discovery, Dr. Greene poured his heart and soul into perfecting Lumina. After years of research and development, he finally unveiled his creation to the world. The response was overwhelming—people from all walks of life clamored to get their hands on Lumina, eager to experience its transformative effects. 8 | 9 | At first, Lumina was hailed as a game-changer. Students wore it while studying, professionals wore it during work hours, and even athletes used it to sharpen their focus and performance. The world seemed brighter, more vibrant, and full of possibilities with Lumina by their side. 10 | 11 | As demand for Lumina soared, Dr. Greene found himself catapulted into the spotlight. He became a symbol of innovation and progress, admired and revered by millions around the globe. But amidst the acclaim and adulation, Dr. Greene remained grounded, always mindful of the responsibility that came with his creation. 12 | 13 | However, as with any great invention, challenges soon arose. Some skeptics questioned the long-term effects of prolonged Lumina usage, while others raised concerns about its potential to disrupt natural sleep patterns. Despite Dr. Greene's assurances and rigorous testing, doubts lingered in the minds of many. 14 | 15 | Then came the unforeseen consequences. People became dependent on Lumina, relying on its artificial glow to navigate through life's challenges. Some began to neglect their own innate abilities, believing that Lumina held the key to their success. Others experienced withdrawal symptoms when separated from their beloved device, highlighting a troubling addiction that had taken root. 16 | 17 | Caught in the midst of these complexities, Dr. Greene faced a moral dilemma. Was Lumina truly a force for good, or had it inadvertently become a crutch that hindered human progress? As the debate raged on, Dr. 
Greene found himself grappling with doubts and uncertainties he had never anticipated. 18 | 19 | Amidst the growing scrutiny and criticism, Lumina's sales began to decline. The once-heralded product now faced a reckoning—a moment of truth that would determine its fate. Dr. Greene knew that he had to make a choice: either adapt and evolve Lumina to address the concerns, or let it fade into obscurity. 20 | 21 | In a bold move, Dr. Greene decided to take action. He assembled a team of experts from various fields—scientists, psychologists, and ethicists—to reevaluate Lumina's design and functionality. Together, they conducted extensive research and analysis, seeking to understand both the benefits and pitfalls of Lumina. 22 | 23 | After months of painstaking work, the team unveiled Lumina 2.0—a revamped version that prioritized balance and moderation. The new Lumina incorporated customizable settings, allowing users to adjust the intensity and duration of light exposure based on their individual needs. It also featured built-in reminders to encourage breaks and promote healthy habits. 24 | 25 | The response to Lumina 2.0 was overwhelmingly positive. People welcomed the changes, embracing Lumina not as a crutch, but as a tool to supplement their natural abilities. Dr. Greene breathed a sigh of relief, knowing that he had made the right decision for both Lumina and humanity. 26 | 27 | As time passed, Lumina continued to thrive, evolving alongside society's changing needs. It became more than just a product—it became a symbol of resilience, adaptability, and the enduring human spirit. Dr. Greene's journey had taught him valuable lessons about the power of innovation, the importance of responsibility, and the true meaning of success. 28 | 29 | In the end, Lumina's impact transcended mere profit or fame. It had illuminated the lives of millions, inspiring them to embrace their potential and forge their own paths forward. And as Dr. Greene looked back on his remarkable journey, he knew that the greatest achievement of all was not the invention itself, but the profound impact it had on the world—and the hearts of those it touched. 30 | 31 | **Motivational Message:** 32 | 33 | In the pursuit of innovation and progress, we must always remain mindful of the impact our creations have on humanity. Success is not measured solely by accolades or profits, but by the positive change we bring to the world. Let us strive to create with purpose, integrity, and a steadfast commitment to improving the lives of others. For in the end, it is not the brilliance of our inventions that defines us, but the depth of our compassion and the legacy of our contributions to the greater good. -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/.github/workflows/actions.yaml: -------------------------------------------------------------------------------- 1 | name: Build and Deploy to GKE 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | env: 9 | PROJECT_ID: ${{ secrets.GKE_PROJECT }} 10 | GKE_CLUSTER: my-cluster # Add your cluster name here. 11 | GKE_ZONE: us-central1-a # Add your cluster zone here. 12 | DEPLOYMENT_NAME: chatgpt-deploy # Add your deployment name here. 
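# Note: DEPLOYMENT_NAME must match metadata.name in deploy.yaml, since the
# final step runs "kubectl rollout status deployment/$DEPLOYMENT_NAME" against it.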
13 |   IMAGE: chatgpt # Image Name 14 | 15 | jobs: 16 |   setup-build-publish-deploy: 17 |     name: Setup, Build, Publish, and Deploy 18 |     runs-on: ubuntu-latest 19 |     environment: production 20 | 21 |     steps: 22 |     - name: Checkout 23 |       uses: actions/checkout@v4 24 | 25 |     # Get the GKE credentials so we can deploy to the cluster 26 |     - id: 'auth' 27 |       uses: 'google-github-actions/auth@v2' 28 |       with: 29 |         credentials_json: '${{ secrets.GKE_SA_KEY }}' 30 | 31 |     - id: 'get-credentials' 32 |       uses: 'google-github-actions/get-gke-credentials@v2' 33 |       with: 34 |         cluster_name: ${{ env.GKE_CLUSTER }} 35 |         location: ${{ env.GKE_ZONE }} 36 | 37 |     # Configure Docker to use the gcloud command-line tool as a credential 38 |     # helper for authentication 39 |     - run: |- 40 |         gcloud --quiet auth configure-docker 41 | 42 | 43 | 44 |     # Build the Docker image 45 |     - name: Build 46 |       run: |- 47 |         docker build \ 48 |           --tag "gcr.io/$PROJECT_ID/$IMAGE:$GITHUB_SHA" \ 49 |           --build-arg GITHUB_SHA="$GITHUB_SHA" \ 50 |           --build-arg GITHUB_REF="$GITHUB_REF" \ 51 |           . 52 | 53 |     # Push the Docker image to Google Container Registry 54 |     - name: Publish 55 |       run: |- 56 |         docker push "gcr.io/$PROJECT_ID/$IMAGE:$GITHUB_SHA" 57 | 58 |     # Set up kustomize 59 |     - name: Set up Kustomize 60 |       run: |- 61 |         curl -sfLo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v3.1.0/kustomize_3.1.0_linux_amd64 62 |         chmod u+x ./kustomize 63 |         kubectl create secret generic openai-secret --from-literal=API_KEY=${{secrets.OPENAI_API}} || true 64 | 65 |     # Deploy the Docker image to the GKE cluster 66 |     - name: Deploy 67 |       run: |- 68 |         ./kustomize edit set image gcr.io/PROJECT_ID/IMAGE:TAG=gcr.io/$PROJECT_ID/$IMAGE:$GITHUB_SHA 69 |         ./kustomize build . | kubectl apply -f - 70 |         kubectl rollout status deployment/$DEPLOYMENT_NAME 71 |         kubectl get services -o wide 72 | -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for the ChatGPT assistant service 2 | FROM --platform=linux/amd64 python:3.10 3 | COPY . . 
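# Note: the whole build context is copied before "pip install", so any code change
# invalidates the dependency layer; copying requirements.txt first (as in
# 7.ci-cd-aws-ec2/Dockerfile) would keep that layer cached across rebuilds.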
4 | RUN pip install -r requirements.txt 5 | EXPOSE 80 6 | ENTRYPOINT [ "python" ] 7 | CMD [ "main.py" ] -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/Project_1_Open_AI_Assistant.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "colab": { 8 | "base_uri": "https://localhost:8080/" 9 | }, 10 | "id": "yqyLUYg5sUhK", 11 | "outputId": "6d80b0aa-eeb2-4959-d650-ce5e420fc988" 12 | }, 13 | "outputs": [ 14 | { 15 | "name": "stdout", 16 | "output_type": "stream", 17 | "text": [ 18 | "Collecting openai\n", 19 | " Downloading openai-1.13.3-py3-none-any.whl (227 kB)\n", 20 | "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/227.4 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m225.3/227.4 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.4/227.4 kB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 21 | "\u001b[?25hRequirement already satisfied: anyio<5,>=3.5.0 in /usr/local/lib/python3.10/dist-packages (from openai) (3.7.1)\n", 22 | "Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai) (1.7.0)\n", 23 | "Collecting httpx<1,>=0.23.0 (from openai)\n", 24 | " Downloading httpx-0.27.0-py3-none-any.whl (75 kB)\n", 25 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 26 | "\u001b[?25hRequirement already satisfied: pydantic<3,>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from openai) (2.6.1)\n", 27 | "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from openai) (1.3.0)\n", 28 | "Requirement already satisfied: tqdm>4 in /usr/local/lib/python3.10/dist-packages (from openai) (4.66.2)\n", 29 | "Requirement already satisfied: typing-extensions<5,>=4.7 in /usr/local/lib/python3.10/dist-packages (from openai) (4.9.0)\n", 30 | "Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->openai) (3.6)\n", 31 | "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<5,>=3.5.0->openai) (1.2.0)\n", 32 | "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->openai) (2024.2.2)\n", 33 | "Collecting httpcore==1.* (from httpx<1,>=0.23.0->openai)\n", 34 | " Downloading httpcore-1.0.4-py3-none-any.whl (77 kB)\n", 35 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.8/77.8 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 36 | "\u001b[?25hCollecting h11<0.15,>=0.13 (from httpcore==1.*->httpx<1,>=0.23.0->openai)\n", 37 | " Downloading h11-0.14.0-py3-none-any.whl (58 kB)\n", 38 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 39 | "\u001b[?25hRequirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai) (0.6.0)\n", 40 | "Requirement already satisfied: pydantic-core==2.16.2 
in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai) (2.16.2)\n", 41 | "Installing collected packages: h11, httpcore, httpx, openai\n", 42 | "Successfully installed h11-0.14.0 httpcore-1.0.4 httpx-0.27.0 openai-1.13.3\n" 43 | ] 44 | } 45 | ], 46 | "source": [ 47 | "!pip install openai" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 4, 53 | "metadata": { 54 | "colab": { 55 | "base_uri": "https://localhost:8080/" 56 | }, 57 | "id": "cK0YPnzVsbhO", 58 | "outputId": "006eed8b-0e52-43b1-b29c-86e90d10040d" 59 | }, 60 | "outputs": [ 61 | { 62 | "data": { 63 | "text/plain": [ 64 | "" 65 | ] 66 | }, 67 | "execution_count": 4, 68 | "metadata": {}, 69 | "output_type": "execute_result" 70 | } 71 | ], 72 | "source": [ 73 | "from openai import OpenAI\n", 74 | "client = OpenAI(api_key = \"api-id\")\n", 75 | "client" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 15, 81 | "metadata": { 82 | "id": "3k6yFCausbeG" 83 | }, 84 | "outputs": [], 85 | "source": [ 86 | "uploaded_file = client.files.create(\n", 87 | " file=open(\"story.txt\",'rb'),\n", 88 | " purpose='assistants'\n", 89 | ")" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 16, 95 | "metadata": { 96 | "colab": { 97 | "base_uri": "https://localhost:8080/" 98 | }, 99 | "id": "e4vEodClsbbS", 100 | "outputId": "586fc24e-521e-4773-9aa8-2fba94c615d8" 101 | }, 102 | "outputs": [ 103 | { 104 | "data": { 105 | "text/plain": [ 106 | "FileObject(id='file-qEkiCPGFrAS8OLc22fc8dsJN', bytes=5299, created_at=1709224785, filename='story.txt', object='file', purpose='assistants', status='processed', status_details=None)" 107 | ] 108 | }, 109 | "execution_count": 16, 110 | "metadata": {}, 111 | "output_type": "execute_result" 112 | } 113 | ], 114 | "source": [ 115 | "uploaded_file" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 17, 121 | "metadata": { 122 | "id": "UJ_VjFJ4sbYY" 123 | }, 124 | "outputs": [], 125 | "source": [ 126 | "assistant = client.beta.assistants.create(\n", 127 | " name=\"Story helper\",\n", 128 | " instructions=\"You are a motivator who answers the question based on the story file\",\n", 129 | " tools=[{\"type\": \"retrieval\"}],\n", 130 | " model=\"gpt-4-turbo-preview\",\n", 131 | " file_ids=[uploaded_file.id]\n", 132 | ")" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 18, 138 | "metadata": { 139 | "colab": { 140 | "base_uri": "https://localhost:8080/" 141 | }, 142 | "id": "v6BgrO7PsbVQ", 143 | "outputId": "47d1b31d-6b1b-4b29-d47c-4447ed9488e6" 144 | }, 145 | "outputs": [ 146 | { 147 | "data": { 148 | "text/plain": [ 149 | "Thread(id='thread_CMaQ6801HYvVyxdhVLBum6Ot', created_at=1709224874, metadata={}, object='thread')" 150 | ] 151 | }, 152 | "execution_count": 18, 153 | "metadata": {}, 154 | "output_type": "execute_result" 155 | } 156 | ], 157 | "source": [ 158 | "thread = client.beta.threads.create()\n", 159 | "thread" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 19, 165 | "metadata": { 166 | "id": "CmCvLAZ5sbSI" 167 | }, 168 | "outputs": [], 169 | "source": [ 170 | "message = client.beta.threads.messages.create(\n", 171 | " thread_id=thread.id,\n", 172 | " role=\"user\",\n", 173 | " content=\"Who is the hero of the story?\"\n", 174 | ")" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 20, 180 | "metadata": { 181 | "colab": { 182 | "base_uri": "https://localhost:8080/" 183 | }, 184 | "id": "BhWJG0VQsbPV", 185 | 
"outputId": "59f47f35-9340-4384-ff61-fbc34b41cfe9" 186 | }, 187 | "outputs": [ 188 | { 189 | "data": { 190 | "text/plain": [ 191 | "ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')" 192 | ] 193 | }, 194 | "execution_count": 20, 195 | "metadata": {}, 196 | "output_type": "execute_result" 197 | } 198 | ], 199 | "source": [ 200 | "message" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 21, 206 | "metadata": { 207 | "colab": { 208 | "base_uri": "https://localhost:8080/" 209 | }, 210 | "id": "HWceqG1qwRor", 211 | "outputId": "0b57bcd5-c30b-4ba8-dd76-f3d4b090b1fe" 212 | }, 213 | "outputs": [ 214 | { 215 | "data": { 216 | "text/plain": [ 217 | "Assistant(id='asst_VaRmvfp30jPvLuVJgYzxGmXf', created_at=1709224786, description=None, file_ids=['file-qEkiCPGFrAS8OLc22fc8dsJN'], instructions='You are a motivator who answers the question based on the story file', metadata={}, model='gpt-4-turbo-preview', name='Story helper', object='assistant', tools=[ToolRetrieval(type='retrieval')])" 218 | ] 219 | }, 220 | "execution_count": 21, 221 | "metadata": {}, 222 | "output_type": "execute_result" 223 | } 224 | ], 225 | "source": [ 226 | "assistant" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": 22, 232 | "metadata": { 233 | "id": "n167t7_bvzWc" 234 | }, 235 | "outputs": [], 236 | "source": [ 237 | "run = client.beta.threads.runs.create(\n", 238 | " thread_id=thread.id,\n", 239 | " assistant_id=assistant.id\n", 240 | ")" 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": 23, 246 | "metadata": { 247 | "id": "xDFA3GpAwVs1" 248 | }, 249 | "outputs": [], 250 | "source": [ 251 | "run = client.beta.threads.runs.retrieve(\n", 252 | " thread_id=thread.id,\n", 253 | " run_id=run.id\n", 254 | ")" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": 25, 260 | "metadata": { 261 | "colab": { 262 | "base_uri": "https://localhost:8080/", 263 | "height": 35 264 | }, 265 | "id": "LTxbm44dwa9G", 266 | "outputId": "e7d4933d-485b-4b86-b246-e0d75a5e89db" 267 | }, 268 | "outputs": [ 269 | { 270 | "data": { 271 | "application/vnd.google.colaboratory.intrinsic+json": { 272 | "type": "string" 273 | }, 274 | "text/plain": [ 275 | "'completed'" 276 | ] 277 | }, 278 | "execution_count": 25, 279 | "metadata": {}, 280 | "output_type": "execute_result" 281 | } 282 | ], 283 | "source": [ 284 | "run.status" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": 27, 290 | "metadata": { 291 | "colab": { 292 | "base_uri": "https://localhost:8080/" 293 | }, 294 | "id": "kI8ita5dwcPA", 295 | "outputId": "dc47a81b-8f7a-43b4-dfad-7a0dc44c762c" 296 | }, 297 | "outputs": [ 298 | { 299 | "data": { 300 | "text/plain": [ 301 | "ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')" 302 | ] 303 | }, 304 | "execution_count": 27, 305 | "metadata": {}, 306 | "output_type": "execute_result" 307 | } 308 | ], 309 | "source": [ 310 | "message" 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | 
"execution_count": 28, 316 | "metadata": { 317 | "colab": { 318 | "base_uri": "https://localhost:8080/" 319 | }, 320 | "id": "k2gaGxZ8whye", 321 | "outputId": "a3f2fbc9-ba11-40e0-ca8e-19e2cd258739" 322 | }, 323 | "outputs": [ 324 | { 325 | "data": { 326 | "text/plain": [ 327 | "SyncCursorPage[ThreadMessage](data=[ThreadMessage(id='msg_3oc7BbjhcaqWuMhs83XReWz3', assistant_id='asst_VaRmvfp30jPvLuVJgYzxGmXf', content=[MessageContentText(text=Text(annotations=[], value='The hero of the story \"The Odyssey of Lumina: Illuminating Lives\" is Dr. Michael Greene. He is portrayed as a brilliant inventor who creates Lumina, a revolutionary product designed to enhance human potential and productivity. Despite facing challenges, criticism, and the unforeseen consequences of his invention, Dr. Greene remains committed to improving it for the betterment of society. His journey showcases resilience, adaptability, and an enduring commitment to the greater good, making him the unequivocal hero of the story.'), type='text')], created_at=1709225111, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_eoHeWhFTc1JJ1xVB0VjnGhMO', thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot'), ThreadMessage(id='msg_LDlW9vX8791DfLAYv30YDvzQ', assistant_id=None, content=[MessageContentText(text=Text(annotations=[], value='Who is the hero of the story?'), type='text')], created_at=1709224950, file_ids=[], metadata={}, object='thread.message', role='user', run_id=None, thread_id='thread_CMaQ6801HYvVyxdhVLBum6Ot')], object='list', first_id='msg_3oc7BbjhcaqWuMhs83XReWz3', last_id='msg_LDlW9vX8791DfLAYv30YDvzQ', has_more=False)" 328 | ] 329 | }, 330 | "execution_count": 28, 331 | "metadata": {}, 332 | "output_type": "execute_result" 333 | } 334 | ], 335 | "source": [ 336 | "messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 337 | "messages" 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 29, 343 | "metadata": { 344 | "colab": { 345 | "base_uri": "https://localhost:8080/" 346 | }, 347 | "id": "bhIPBLQ4wvc2", 348 | "outputId": "ef589a59-ec78-4ea5-c466-654fb1a1e0a2" 349 | }, 350 | "outputs": [ 351 | { 352 | "name": "stdout", 353 | "output_type": "stream", 354 | "text": [ 355 | "The hero of the story \"The Odyssey of Lumina: Illuminating Lives\" is Dr. Michael Greene. He is portrayed as a brilliant inventor who creates Lumina, a revolutionary product designed to enhance human potential and productivity. Despite facing challenges, criticism, and the unforeseen consequences of his invention, Dr. Greene remains committed to improving it for the betterment of society. 
His journey showcases resilience, adaptability, and an enduring commitment to the greater good, making him the unequivocal hero of the story.\n" 356 | ] 357 | } 358 | ], 359 | "source": [ 360 | "while True:\n", 361 | " run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)\n", 362 | " if run.status==\"completed\":\n", 363 | " messages = client.beta.threads.messages.list(thread_id=thread.id)\n", 364 | " latest_message = messages.data[0]\n", 365 | " text = latest_message.content[0].text.value\n", 366 | " print(text)\n", 367 | " break;" 368 | ] 369 | }, 370 | { 371 | "cell_type": "code", 372 | "execution_count": null, 373 | "metadata": { 374 | "id": "Y13Gor4ZxLe3" 375 | }, 376 | "outputs": [], 377 | "source": [] 378 | } 379 | ], 380 | "metadata": { 381 | "colab": { 382 | "provenance": [] 383 | }, 384 | "kernelspec": { 385 | "display_name": "Python 3", 386 | "name": "python3" 387 | }, 388 | "language_info": { 389 | "name": "python" 390 | } 391 | }, 392 | "nbformat": 4, 393 | "nbformat_minor": 0 394 | } 395 | -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/README.md: -------------------------------------------------------------------------------- 1 | # About the Repo 2 | This repo is part of the LLMOps course by Manifold AI Learning, 3 | Course link - https://www.manifoldailearning.in/courses/LLMOps-with-ChatGPT-Deploy-on-Production-65cb265ae4b086660d2836ae 4 | 5 | Reach the Instructor at - https://www.linkedin.com/in/nachiketh-murthy/ 6 | 7 | For any support reach out to : support@manifoldailearning.in 8 | 9 | # Help on FAST API 10 | 11 | ``` 12 | pip install "fastapi[all]" 13 | 14 | uvicorn main:app --reload 15 | ``` 16 | 17 | # Test with Postman 18 | 19 | URL - http://127.0.0.1:80/response 20 | (POST) 21 | 22 | ```json 23 | { 24 | "text": "Who is the hero of the story" 25 | } 26 | 27 | ``` 28 | 29 | # Docker Commands 30 | 31 | ``` 32 | docker build -t chatgpt-project1 . 33 | docker run -d -p 8080:80 chatgpt-project1 34 | docker tag chatgpt-project1 yourusername/chatgpt-project1 35 | docker push yourusername/chatgpt-project1 36 | ``` 37 | 38 | # Kubernetes Code 39 | 40 | ``` 41 | kubectl create secret generic openai-secret --from-literal=API_KEY= 42 | ``` 43 | 44 | # Important Code for Docker 45 | 46 | ``` 47 | docker buildx build --platform=linux/amd64 -t yourusername/chatgpt-project:v3 . 
48 | docker push yourusername/chatgpt-project:v3 49 | ``` -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/config.py: -------------------------------------------------------------------------------- 1 | #api_key = "api-id" 2 | assistant_id = "asst_VaRmvfp30jPvLuVJgYzxGmXf" -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: chatgpt-deploy 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: chatgpt-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: chatgpt-pod 14 | spec: 15 | containers: 16 | - name: mychatgptcontainer 17 | image: gcr.io/PROJECT_ID/IMAGE:TAG 18 | ports: 19 | - containerPort: 80 20 | env: 21 | - name: OPENAI_API 22 | valueFrom: 23 | secretKeyRef: 24 | name: openai-secret 25 | key: API_KEY 26 | 27 | 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: mylb 33 | spec: 34 | type: LoadBalancer 35 | selector: 36 | app: chatgpt-pod 37 | ports: 38 | - port: 80 # Service IP Port 39 | targetPort: 80 # Container Port 40 | -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - deploy.yaml -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/main.py: -------------------------------------------------------------------------------- 1 | #!/bin/python3 2 | from fastapi import FastAPI 3 | from openai import OpenAI 4 | import config 5 | from pydantic import BaseModel 6 | import uvicorn 7 | import os 8 | 9 | assistant_id = config.assistant_id 10 | api_key = os.environ['OPENAI_API'] 11 | 12 | client = OpenAI(api_key=api_key) 13 | 14 | app = FastAPI() 15 | 16 | class Body(BaseModel): 17 | text: str 18 | 19 | # get, post, put, and delete 20 | 21 | @app.get("/") 22 | def welcome(): 23 | return {"message": "Welcome to ChatGPT AI Application"} 24 | 25 | @app.get("/home") 26 | def welcome(): 27 | return {"message": "welcome home"} 28 | 29 | @app.post("/dummy") 30 | def demo_function(data): 31 | return {"message": data} 32 | 33 | @app.post("/response") 34 | def generate(body: Body): 35 | prompt = body.text # user input 36 | thread = client.beta.threads.create() 37 | message = client.beta.threads.messages.create( 38 | thread_id=thread.id, 39 | role="user", 40 | content=prompt 41 | ) 42 | 43 | run = client.beta.threads.runs.create( 44 | thread_id = thread.id, 45 | assistant_id=assistant_id 46 | ) 47 | 48 | while True: 49 | run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id) 50 | if run.status == "completed": 51 | messages = client.beta.threads.messages.list(thread_id=thread.id) 52 | latest_message = messages.data[0] 53 | text = latest_message.content[0].text.value 54 | break; 55 | return text 56 | 57 | if __name__ == "__main__": 58 | uvicorn.run(app,host="0.0.0.0",port=80) -------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/requirements.txt: -------------------------------------------------------------------------------- 1 | openai 2 | fastapi==0.109.0 3 | uvicorn[standard] 
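# Note: openai is unpinned; the beta Assistants client used in main.py changes between
# releases, so pinning the version the notebook ran with (openai==1.13.3) is safer for reproducible builds.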
-------------------------------------------------------------------------------- /4.ci-cd-automation-chatgpt/story.txt: -------------------------------------------------------------------------------- 1 | Title: The Odyssey of Lumina: Illuminating Lives 2 | 3 | Once upon a time, in a world not too different from our own, there was a brilliant inventor named Dr. Michael Greene. Dr. Greene was a visionary, constantly seeking ways to improve the lives of people around him. One day, while tinkering in his laboratory, he stumbled upon an extraordinary discovery—a revolutionary product he called "Lumina." 4 | 5 | Lumina was not just any product; it was a breakthrough in technology, designed to enhance human potential and productivity. At its core, Lumina was a wearable device that emitted a gentle light, scientifically proven to boost cognitive function, mood, and overall well-being. Dr. Greene was convinced that Lumina could transform society, empowering individuals to reach their full potential and make the world a better place. 6 | 7 | Excited by his discovery, Dr. Greene poured his heart and soul into perfecting Lumina. After years of research and development, he finally unveiled his creation to the world. The response was overwhelming—people from all walks of life clamored to get their hands on Lumina, eager to experience its transformative effects. 8 | 9 | At first, Lumina was hailed as a game-changer. Students wore it while studying, professionals wore it during work hours, and even athletes used it to sharpen their focus and performance. The world seemed brighter, more vibrant, and full of possibilities with Lumina by their side. 10 | 11 | As demand for Lumina soared, Dr. Greene found himself catapulted into the spotlight. He became a symbol of innovation and progress, admired and revered by millions around the globe. But amidst the acclaim and adulation, Dr. Greene remained grounded, always mindful of the responsibility that came with his creation. 12 | 13 | However, as with any great invention, challenges soon arose. Some skeptics questioned the long-term effects of prolonged Lumina usage, while others raised concerns about its potential to disrupt natural sleep patterns. Despite Dr. Greene's assurances and rigorous testing, doubts lingered in the minds of many. 14 | 15 | Then came the unforeseen consequences. People became dependent on Lumina, relying on its artificial glow to navigate through life's challenges. Some began to neglect their own innate abilities, believing that Lumina held the key to their success. Others experienced withdrawal symptoms when separated from their beloved device, highlighting a troubling addiction that had taken root. 16 | 17 | Caught in the midst of these complexities, Dr. Greene faced a moral dilemma. Was Lumina truly a force for good, or had it inadvertently become a crutch that hindered human progress? As the debate raged on, Dr. Greene found himself grappling with doubts and uncertainties he had never anticipated. 18 | 19 | Amidst the growing scrutiny and criticism, Lumina's sales began to decline. The once-heralded product now faced a reckoning—a moment of truth that would determine its fate. Dr. Greene knew that he had to make a choice: either adapt and evolve Lumina to address the concerns, or let it fade into obscurity. 20 | 21 | In a bold move, Dr. Greene decided to take action. He assembled a team of experts from various fields—scientists, psychologists, and ethicists—to reevaluate Lumina's design and functionality. 
Together, they conducted extensive research and analysis, seeking to understand both the benefits and pitfalls of Lumina. 22 | 23 | After months of painstaking work, the team unveiled Lumina 2.0—a revamped version that prioritized balance and moderation. The new Lumina incorporated customizable settings, allowing users to adjust the intensity and duration of light exposure based on their individual needs. It also featured built-in reminders to encourage breaks and promote healthy habits. 24 | 25 | The response to Lumina 2.0 was overwhelmingly positive. People welcomed the changes, embracing Lumina not as a crutch, but as a tool to supplement their natural abilities. Dr. Greene breathed a sigh of relief, knowing that he had made the right decision for both Lumina and humanity. 26 | 27 | As time passed, Lumina continued to thrive, evolving alongside society's changing needs. It became more than just a product—it became a symbol of resilience, adaptability, and the enduring human spirit. Dr. Greene's journey had taught him valuable lessons about the power of innovation, the importance of responsibility, and the true meaning of success. 28 | 29 | In the end, Lumina's impact transcended mere profit or fame. It had illuminated the lives of millions, inspiring them to embrace their potential and forge their own paths forward. And as Dr. Greene looked back on his remarkable journey, he knew that the greatest achievement of all was not the invention itself, but the profound impact it had on the world—and the hearts of those it touched. 30 | 31 | **Motivational Message:** 32 | 33 | In the pursuit of innovation and progress, we must always remain mindful of the impact our creations have on humanity. Success is not measured solely by accolades or profits, but by the positive change we bring to the world. Let us strive to create with purpose, integrity, and a steadfast commitment to improving the lives of others. For in the end, it is not the brilliance of our inventions that defines us, but the depth of our compassion and the legacy of our contributions to the greater good. -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/5.llmops-project-2-huggingface/.DS_Store -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/.github/workflows/actions.yaml: -------------------------------------------------------------------------------- 1 | name: Build and Deploy to GKE 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | env: 9 | PROJECT_ID: ${{ secrets.GKE_PROJECT }} 10 | GKE_CLUSTER: cluster-1 # Add your cluster name here. 11 | GKE_ZONE: us-central1-a # Add your cluster zone here. 12 | DEPLOYMENT_NAME: gpt-deploy # Add your deployment name here. 
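# Note: unlike the project-4 workflow, no openai-secret step is needed here --
# the GPT-2 model is served locally by the container and requires no API key.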
13 |   IMAGE: gpthuggingface # Image Name 14 | 15 | jobs: 16 |   setup-build-publish-deploy: 17 |     name: Setup, Build, Publish, and Deploy 18 |     runs-on: ubuntu-latest 19 |     environment: production 20 | 21 |     steps: 22 |     - name: Checkout 23 |       uses: actions/checkout@v4 24 | 25 |     # Get the GKE credentials so we can deploy to the cluster 26 |     - id: 'auth' 27 |       uses: 'google-github-actions/auth@v2' 28 |       with: 29 |         credentials_json: '${{ secrets.GKE_SA_KEY }}' 30 | 31 |     - id: 'get-credentials' 32 |       uses: 'google-github-actions/get-gke-credentials@v2' 33 |       with: 34 |         cluster_name: ${{ env.GKE_CLUSTER }} 35 |         location: ${{ env.GKE_ZONE }} 36 | 37 |     # Configure Docker to use the gcloud command-line tool as a credential 38 |     # helper for authentication 39 |     - run: |- 40 |         gcloud --quiet auth configure-docker 41 |     # Build the Docker image 42 | 43 |     - name: Build 44 |       run: |- 45 |         docker build \ 46 |           --tag "gcr.io/$PROJECT_ID/$IMAGE:$GITHUB_SHA" \ 47 |           --build-arg GITHUB_SHA="$GITHUB_SHA" \ 48 |           --build-arg GITHUB_REF="$GITHUB_REF" \ 49 |           . 50 | 51 |     # Push the Docker image to Google Container Registry 52 |     - name: Publish 53 |       run: |- 54 |         docker push "gcr.io/$PROJECT_ID/$IMAGE:$GITHUB_SHA" 55 | 56 |     # Set up kustomize 57 |     - name: Set up Kustomize 58 |       run: |- 59 |         curl -sfLo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v3.1.0/kustomize_3.1.0_linux_amd64 60 |         chmod u+x ./kustomize 61 | 62 |     # Deploy the Docker image to the GKE cluster 63 |     - name: Deploy 64 |       run: |- 65 |         ./kustomize edit set image gcr.io/PROJECT_ID/IMAGE:TAG=gcr.io/$PROJECT_ID/$IMAGE:$GITHUB_SHA 66 |         ./kustomize build . | kubectl apply -f - 67 |         kubectl rollout status deployment/$DEPLOYMENT_NAME 68 |         kubectl get services -o wide 69 | -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for the GPT-2 text-generation service 2 | 3 | FROM python:3.10 4 | COPY . . 
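# Note: main.py builds pipeline('text-generation', model='gpt2') at import time, so a
# fresh container downloads GPT-2 on first start; pre-fetching the model in a RUN step
# would shorten cold starts.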
5 | RUN pip install -r requirements.txt 6 | EXPOSE 80 7 | ENTRYPOINT [ "python" ] 8 | CMD [ "main.py" ] -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/Project_2_GPT_Generator.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [] 7 | }, 8 | "kernelspec": { 9 | "name": "python3", 10 | "display_name": "Python 3" 11 | }, 12 | "language_info": { 13 | "name": "python" 14 | } 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": { 21 | "colab": { 22 | "base_uri": "https://localhost:8080/" 23 | }, 24 | "id": "pMVpXiJdZpQ7", 25 | "outputId": "d33aff53-2ed3-40e7-a640-a4029683e662" 26 | }, 27 | "outputs": [ 28 | { 29 | "output_type": "stream", 30 | "name": "stdout", 31 | "text": [ 32 | "Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (4.38.2)\n", 33 | "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers) (3.13.3)\n", 34 | "Requirement already satisfied: huggingface-hub<1.0,>=0.19.3 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.20.3)\n", 35 | "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (1.25.2)\n", 36 | "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers) (24.0)\n", 37 | "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (6.0.1)\n", 38 | "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (2023.12.25)\n", 39 | "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers) (2.31.0)\n", 40 | "Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.15.2)\n", 41 | "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.4.2)\n", 42 | "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers) (4.66.2)\n", 43 | "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.19.3->transformers) (2023.6.0)\n", 44 | "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.19.3->transformers) (4.10.0)\n", 45 | "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.3.2)\n", 46 | "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.6)\n", 47 | "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2.0.7)\n", 48 | "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2024.2.2)\n" 49 | ] 50 | } 51 | ], 52 | "source": [ 53 | "!pip install transformers\n", 54 | "import transformers" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "source": [ 60 | "transformers.__version__" 61 | ], 62 | "metadata": { 63 | "colab": { 64 | "base_uri": "https://localhost:8080/", 65 | "height": 35 66 | }, 67 | "id": 
"8BRTN-WmZuxV", 68 | "outputId": "d9815e32-7afa-444d-912c-f9d3721d5cdf" 69 | }, 70 | "execution_count": 2, 71 | "outputs": [ 72 | { 73 | "output_type": "execute_result", 74 | "data": { 75 | "text/plain": [ 76 | "'4.38.2'" 77 | ], 78 | "application/vnd.google.colaboratory.intrinsic+json": { 79 | "type": "string" 80 | } 81 | }, 82 | "metadata": {}, 83 | "execution_count": 2 84 | } 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "source": [ 90 | "import tensorflow\n", 91 | "tensorflow.__version__" 92 | ], 93 | "metadata": { 94 | "colab": { 95 | "base_uri": "https://localhost:8080/", 96 | "height": 35 97 | }, 98 | "id": "g86kV3CpaCSk", 99 | "outputId": "55bdab06-ec49-4af7-aad8-1dc6ad66be78" 100 | }, 101 | "execution_count": 3, 102 | "outputs": [ 103 | { 104 | "output_type": "execute_result", 105 | "data": { 106 | "text/plain": [ 107 | "'2.15.0'" 108 | ], 109 | "application/vnd.google.colaboratory.intrinsic+json": { 110 | "type": "string" 111 | } 112 | }, 113 | "metadata": {}, 114 | "execution_count": 3 115 | } 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "source": [ 121 | "text = \"\"\"Dear Amazon, last week I ordered an Optimus Prime action figure \\\n", 122 | "from your online store in Germany. Unfortunately, when I opened the package, \\\n", 123 | "I discovered to my horror that I had been sent an action figure of Megatron \\\n", 124 | "instead! As a lifelong enemy of the Decepticons, I hope you can understand my \\\n", 125 | "dilemma. To resolve the issue, I demand an exchange of Megatron for the \\\n", 126 | "Optimus Prime figure I ordered. Enclosed are copies of my records concerning \\\n", 127 | "this purchase. I expect to hear from you soon. Sincerely, Bumblebee.\"\"\"" 128 | ], 129 | "metadata": { 130 | "id": "93vlRx5sZ-_5" 131 | }, 132 | "execution_count": 5, 133 | "outputs": [] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "source": [ 138 | "from transformers import pipeline\n", 139 | "generator = pipeline(\"text-generation\")\n", 140 | "\n", 141 | "response = \"Dear Bumblebee, I am sorry to hear that your order was mixed up.\"\n", 142 | "prompt = text + \"\\n\\nCustomer service response:\\n\" + response\n", 143 | "outputs = generator(prompt, max_length=200)\n", 144 | "print(outputs[0]['generated_text'])" 145 | ], 146 | "metadata": { 147 | "colab": { 148 | "base_uri": "https://localhost:8080/" 149 | }, 150 | "id": "G-j_LhQ_Zptw", 151 | "outputId": "ff65f56e-3085-4252-c127-93fbdf0f98c4" 152 | }, 153 | "execution_count": 6, 154 | "outputs": [ 155 | { 156 | "output_type": "stream", 157 | "name": "stderr", 158 | "text": [ 159 | "No model was supplied, defaulted to openai-community/gpt2 and revision 6c0e608 (https://huggingface.co/openai-community/gpt2).\n", 160 | "Using a pipeline without specifying a model name and revision in production is not recommended.\n", 161 | "Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n", 162 | "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" 163 | ] 164 | }, 165 | { 166 | "output_type": "stream", 167 | "name": "stdout", 168 | "text": [ 169 | "Dear Amazon, last week I ordered an Optimus Prime action figure from your online store in Germany. 
Unfortunately, when I opened the package, I discovered to my horror that I had been sent an action figure of Megatron instead! As a lifelong enemy of the Decepticons, I hope you can understand my dilemma. To resolve the issue, I demand an exchange of Megatron for the Optimus Prime figure I ordered. Enclosed are copies of my records concerning this purchase. I expect to hear from you soon. Sincerely, Bumblebee.\n", 170 | "\n", 171 | "Customer service response:\n", 172 | "Dear Bumblebee, I am sorry to hear that your order was mixed up. I was shocked to receive my request in this way, but this situation occurred long ago, and I still understand your frustration. Sincerely,\n", 173 | "\n", 174 | "Order number: KK-TK4\n", 175 | "\n", 176 | "Product code: TK-K937\n", 177 | "\n", 178 | "Item #: TK-K937\n", 179 | "\n", 180 | "Bundle: 12\n" 181 | ] 182 | } 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "source": [ 188 | "print(outputs[0]['generated_text'])" 189 | ], 190 | "metadata": { 191 | "id": "sAwx81ZvZrNl" 192 | }, 193 | "execution_count": null, 194 | "outputs": [] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "source": [ 199 | "outputs = generator(prompt, max_length=200, num_return_sequences=1)\n", 200 | "print(outputs[0]['generated_text'])" 201 | ], 202 | "metadata": { 203 | "id": "n6zUOylGaOUX" 204 | }, 205 | "execution_count": null, 206 | "outputs": [] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "source": [ 211 | "outputs[0]" 212 | ], 213 | "metadata": { 214 | "id": "f4x9f9RnaYhN" 215 | }, 216 | "execution_count": null, 217 | "outputs": [] 218 | } 219 | ] 220 | } -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/README.md: -------------------------------------------------------------------------------- 1 | # About the Repo 2 | 3 | # Docker Commands 4 | 5 | ``` 6 | docker buildx build --platform=linux/amd64 -t manifoldailearning/gpt-project:v1 . 7 | # test locally 8 | # Test with CI CD 9 | docker push manifoldailearning/gpt-project:v1 10 | 11 | docker run -d -p 8080:80 manifoldailearning/gpt-project:v1 12 | docker run -p 8080:80 manifoldailearning/gpt-project:v1 13 | ``` 14 | 15 | # Kubernetes Code 16 | 17 | ``` 18 | kubectl create secret generic openai-secret --from-literal=API_KEY= 19 | ``` 20 | 21 | # Important Code for Docker 22 | 23 | ``` 24 | docker buildx build --platform=linux/amd64 -t manifoldailearning/gpt-project:v1 . 
25 | docker push manifoldailearning/gpt-project:v1 26 | 27 | docker run -d -p 8001:80 manifoldailearning/gpt-project:v1 28 | ``` -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 |   name: gpt-deploy 5 | spec: 6 |   replicas: 3 7 |   selector: 8 |     matchLabels: 9 |       app: gpt-pod 10 |   template: 11 |     metadata: 12 |       labels: 13 |         app: gpt-pod 14 |     spec: 15 |       containers: 16 |       - name: gptcontainer 17 |         image: gcr.io/PROJECT_ID/IMAGE:TAG 18 |         ports: 19 |         - containerPort: 80 20 | 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 |   name: mylb 26 | spec: 27 |   type: LoadBalancer 28 |   selector: 29 |     app: gpt-pod 30 |   ports: 31 |     - port: 80 # Service IP Port 32 |       targetPort: 80 # Container Port 33 | -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - deploy.yaml -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/main.py: -------------------------------------------------------------------------------- 1 | #!/bin/python3 2 | from transformers import pipeline 3 | from fastapi import FastAPI 4 | from fastapi.responses import HTMLResponse 5 | from pydantic import BaseModel 6 | import uvicorn 7 | 8 | generator = pipeline('text-generation', model='gpt2') 9 | app = FastAPI( 10 |     title="Fast API App for LLM Model", 11 |     description = "A Text Generator App", 12 |     version='1.0' 13 | ) 14 | 15 | class Body(BaseModel): 16 |     text: str 17 | 18 | 19 | @app.get('/') 20 | def index(): 21 |     return HTMLResponse("
<h1>Welcome to LLMOps Course with a GPT2 model V1</h1>
") 22 | 23 | 24 | @app.post('/generate') 25 | def predict(body: Body): 26 | results = generator(body.text, max_length=200, num_return_sequences=1) 27 | return results[0]['generated_text'] 28 | 29 | if __name__== "__main__": 30 | uvicorn.run(app, host="0.0.0.0",port=80) 31 | 32 | # FROM --platform=linux/amd64 python:3.10 -------------------------------------------------------------------------------- /5.llmops-project-2-huggingface/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.37.0 2 | tensorflow==2.15.0 3 | fastapi==0.109.0 4 | uvicorn[standard] -------------------------------------------------------------------------------- /7.ci-cd-aws-ec2/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/7.ci-cd-aws-ec2/.DS_Store -------------------------------------------------------------------------------- /7.ci-cd-aws-ec2/.github/workflows/actions.py: -------------------------------------------------------------------------------- 1 | name: Build, Test, and Deploy to AWS EC2 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | build_test_deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | 14 | - name: Set up Python 15 | uses: actions/setup-python@v3 16 | with: 17 | python-version: '3.10' 18 | 19 | - name: Install Dependencies 20 | run: pip install -r requirements.txt 21 | 22 | - name: Run Tests 23 | run: python tests.py 24 | 25 | - name: Build Docker Image 26 | run: docker build -t my-flask-app:latest . 27 | 28 | - name: Configure AWS credentials 29 | uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83 30 | with: 31 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 32 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 33 | aws-region: ${{ secrets.AWS_REGION }} 34 | 35 | - name: Login to Amazon ECR 36 | id: login-ecr 37 | uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a 38 | 39 | - name: Push Docker Image to ECR 40 | run: | 41 | aws ecr get-login-password --region ${{ secrets.AWS_REGION }} | docker login --username AWS --password-stdin ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com 42 | docker tag my-flask-app:latest ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com/my-flask-app:latest 43 | docker push ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com/my-flask-app:latest 44 | 45 | - name: Deploy to EC2 46 | uses: appleboy/ssh-action@master 47 | with: 48 | host: ${{ secrets.EC2_HOST }} 49 | username: ${{ secrets.EC2_USERNAME }} 50 | key: ${{ secrets.EC2_SSH_KEY }} 51 | script: | 52 | docker pull ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com/my-flask-app:latest 53 | docker stop my-flask-app || true # In case the container doesn't exist yet 54 | docker rm my-flask-app || true 55 | docker run -d -p 80:8080 --name my-flask-app ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com/my-flask-app:latest 56 | -------------------------------------------------------------------------------- /7.ci-cd-aws-ec2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt ./ 6 | RUN pip install -r requirements.txt 7 | 8 | 
COPY . . 9 | 10 | EXPOSE 8080 11 | 12 | CMD ["gunicorn", "-b", "0.0.0.0:8080", "app:app"] 13 | -------------------------------------------------------------------------------- /7.ci-cd-aws-ec2/README.md: -------------------------------------------------------------------------------- 1 | # ci-cd-python - Commands to install Docker on EC2 2 | - Ensure port 80 is available 3 | ``` 4 | sudo yum update -y 5 | sudo amazon-linux-extras install docker 6 | sudo service docker start 7 | sudo systemctl start docker 8 | sudo service docker status 9 | sudo groupadd docker 10 | sudo usermod -a -G docker ec2-user 11 | newgrp docker 12 | docker --version 13 | 14 | # create ECR with name: my-flask-app 15 | aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 866824485776.dkr.ecr.us-east-1.amazonaws.com 16 | ``` -------------------------------------------------------------------------------- /7.ci-cd-aws-ec2/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | 3 | app = Flask(__name__) 4 | 5 | @app.route('/') 6 | def hello_world(): 7 | return 'Hello from my CI/CD powered Flask app! V1' 8 | 9 | if __name__ == '__main__': 10 | app.run(debug=True, host='0.0.0.0') 11 | -------------------------------------------------------------------------------- /7.ci-cd-aws-ec2/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | gunicorn 3 | requests -------------------------------------------------------------------------------- /7.ci-cd-aws-ec2/tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import app 3 | 4 | class BasicTestCase(unittest.TestCase): 5 | def test_home(self): 6 | tester = app.app.test_client(self) 7 | response = tester.get('/', content_type='html/text') 8 | self.assertEqual(response.status_code, 200) 9 | self.assertIn(b'Hello', response.data) 10 | 11 | if __name__ == '__main__': 12 | unittest.main() 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # About Repo 2 | 3 | This is a Repo containing the source code of the Course - 4 | LLMOps with ChatGPT - Hugging Face Models - Deploy on Production 5 | 6 | Author : Nachiketh Murthy 7 | 8 | Reach the Instructor at - https://www.linkedin.com/in/nachiketh-murthy/ 9 | 10 | 11 | Course link : https://www.manifoldailearning.in/courses/LLMOps-with-ChatGPT-Deploy-on-Production-65cb265ae4b086660d2836ae 12 | 13 | support email: support@manifoldailearning.in 14 | 15 | Other Courses: 16 | 17 | # Course links 18 | 19 | Generative AI Mastery: 20 | https://www.manifoldailearning.in/courses/Generative-AI-Mastery---2024-65f54b9b914da35a42a7bbdd 21 | 22 | 23 | LLMOps Course : 24 | https://www.manifoldailearning.in/courses/LLMOps-with-ChatGPT-Deploy-on-Production-65cb265ae4b086660d2836ae 25 | 26 | 27 | 28 | MLOps Course: 29 | https://www.manifoldailearning.in/courses/Complete-MLOps-BootCamp-654ddf11e4b004edc19e2649 30 | 31 | 32 | MLOps with AWS: 33 | https://www.manifoldailearning.in/courses/Master-Practical-MLOps-for-Data-Scientists--DevOps-on-AWS-65351f01e4b08600bc438698 -------------------------------------------------------------------------------- /Working-with-GKE/README.md: -------------------------------------------------------------------------------- 1 | # Setting Up Google Cloud and Creating a Kubernetes
Cluster with GKE 2 | 3 | This guide provides a step-by-step walkthrough to set up a Google Cloud account, install the Google Cloud CLI (Command Line Interface), and create a Kubernetes cluster using Google Kubernetes Engine (GKE). 4 | 5 | ## 1. Create Google Cloud Account 6 | 7 | If you haven't already, you need to create a Google Cloud account: 8 | 9 | 1. Go to the [Google Cloud Platform website](https://cloud.google.com/). 10 | 2. Click on the "Get started for free" button or "Go to Console" if you already have an account. 11 | 3. Follow the instructions to create your account. 12 | 4. Provide the necessary information, including billing details. 13 | 14 | ## 2. Setting Up Google Cloud CLI 15 | 16 | Once you have a Google Cloud account, you need to set up the Google Cloud CLI: 17 | 18 | 1. Install the Google Cloud SDK by following the instructions provided [here](https://cloud.google.com/sdk/docs/install). 19 | 2. After installation, run the following command in your terminal to authenticate the gcloud CLI tool: 20 | 21 | ```bash 22 | gcloud auth login 23 | ``` 24 | 25 | 3. Follow the prompts to authenticate using your Google Cloud account. 26 | 27 | 4. Set the default project for the gcloud CLI by running: 28 | 29 | ```bash 30 | gcloud config set project YOUR_PROJECT_ID 31 | ``` 32 | 33 | Replace `YOUR_PROJECT_ID` with the ID of your Google Cloud project. 34 | 35 | ## 3. Create Kubernetes Cluster with GKE 36 | 37 | Now that you have Google Cloud CLI set up, you can create a Kubernetes cluster using Google Kubernetes Engine (GKE): 38 | 39 | 1. Open your terminal and run the following command to create a new Kubernetes cluster: 40 | 41 | ```bash 42 | gcloud container clusters create YOUR_CLUSTER_NAME --num-nodes=3 --zone=YOUR_ZONE 43 | ``` 44 | 45 | Replace `YOUR_CLUSTER_NAME` with the desired name for your cluster and `YOUR_ZONE` with the desired zone for your cluster. 46 | 47 | For example: 48 | 49 | ```bash 50 | gcloud container clusters create my-cluster --num-nodes=3 --zone=us-central1-a 51 | ``` 52 | 53 | 2. Wait for the cluster creation process to complete. It may take a few minutes. 54 | 55 | 3. Once the cluster is created, configure `kubectl`, the Kubernetes command-line tool, to use the new cluster: 56 | 57 | ```bash 58 | gcloud container clusters get-credentials YOUR_CLUSTER_NAME --zone=YOUR_ZONE 59 | ``` 60 | 61 | Replace `YOUR_CLUSTER_NAME` and `YOUR_ZONE` with the appropriate values. 62 | 63 | For example: 64 | 65 | ```bash 66 | gcloud container clusters get-credentials my-cluster --zone=us-central1-a 67 | ``` 68 | 69 | 4. Verify that `kubectl` is configured correctly by running: 70 | 71 | ```bash 72 | kubectl get nodes 73 | ``` 74 | 75 | You should see the nodes of your Kubernetes cluster listed. 76 | 77 | Congratulations! You have successfully created a Google Cloud account, set up Google Cloud CLI, and created a Kubernetes cluster using Google Kubernetes Engine (GKE). You can now deploy and manage your applications on Kubernetes. 
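As an optional smoke test, here is a minimal sketch of deploying a sample workload to the new cluster (the `hello-nginx` name is illustrative and not part of this repo):

```bash
# Create a Deployment running the stock nginx image
kubectl create deployment hello-nginx --image=nginx

# Expose it through a LoadBalancer Service on port 80
kubectl expose deployment hello-nginx --type=LoadBalancer --port=80

# Wait for an EXTERNAL-IP to appear, then open it in a browser
kubectl get service hello-nginx --watch
```

Remove the test resources with `kubectl delete service,deployment hello-nginx` before running the delete steps below.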
78 | 79 | # Delete Steps 80 | 81 | 82 | gcloud container clusters delete my-cluster --zone=us-central1-a -------------------------------------------------------------------------------- /docker-quickstart/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx 2 | COPY index.html /usr/share/nginx/html/index.html -------------------------------------------------------------------------------- /docker-quickstart/README.md: -------------------------------------------------------------------------------- 1 | # Commands used in the Hands On 2 | ``` 3 | docker --version 4 | docker pull nginx 5 | docker images 6 | docker run -d -p 8080:80 nginx 7 | docker ps 8 | docker stop 9 | 10 | 11 | docker build -t custom-nginx . 12 | docker tag custom-nginx yourusername/custom-nginx 13 | docker push yourusername/custom-nginx 14 | ``` -------------------------------------------------------------------------------- /docker-quickstart/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Generative AI Mastery - 2024 7 | 8 | 9 |

Generative AI Mastery - 2024

Unleash your creativity and master the art of Generative AI to shape the future in 2024! Join us on this cutting-edge journey to transform ideas into reality through AI innovation.

Language: English

Price (Limited Period Offer):

  • ₹29999 86.24% OFF
  • ₹3499 excluding GST

JOIN NOW

Description

Generative AI Mastery - 2024 is a comprehensive course that dives deep into the world of generative artificial intelligence. Participants will learn advanced techniques and strategies to create AI systems that can generate content autonomously.

Key Highlights:

  • In-depth exploration of generative AI concepts
  • Hands-on projects to enhance practical skills
  • Expert guidance from industry professionals

This is a package of 9 courses, worth more than $3,000, bundled exclusively for our learners.

Check out the individual courses by clicking the links below for more information.

What this Package Contains:

  • Practical Python to Data Science & Machine Learning Bootcamp - 15+ Hours Video Content & Hands On Exercise
  • Practical Natural Language Processing - Go from Zero to Hero - 25+ Hours of Video Content & Hands On Exercise
  • Prompt Engineering with LLMOps Course with 2 Hands On Projects - 15+ Hours of Video Content & Hands On Exercise
  • AWS Certified Machine Learning – Specialty (MLS-C01) - 2024 - 28+ Hours Video Content & Hands On Exercise
  • Universal Deep Learning Mastery - 2024 Edition with Updated (Tensorflow) - 15+ Hours of Video Content & Hands On Exercise
  • PyTorch for Deep Learning Computer Vision Bootcamp 2024 - 13+ Hours of Video Content & Hands On Exercise
  • Linear Algebra Math for AI - Artificial Intelligence - 19+ Hours Video Content & Hands On Exercise
  • Statistics & Probability for Data Science - Predictive Analytics - 23+ Hours of Video Content & Hands On Exercise
  • Calculus - Essential Math for AI, Data Science and Deep Learning - 14+ Hours of Video Content & Hands On Exercise
40 | 41 | 42 | -------------------------------------------------------------------------------- /github-actions/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/github-actions/README.md -------------------------------------------------------------------------------- /github-actions/first-github.yaml: -------------------------------------------------------------------------------- 1 | name: My Github workflow 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | env: 8 | PROJECT_NAME: githubactions-demo 9 | COURSE: LLMOps-by-Manifold-AI-Learning 10 | 11 | jobs: 12 | myexamplejob: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - run: echo "Checkout the repo" 16 | - name: Checkout Repo 17 | uses: actions/checkout@v4 18 | - run: printenv 19 | - run: echo "this is the env ${{ env.PROJECT_NAME }} and ${{ env.COURSE }} " 20 | -------------------------------------------------------------------------------- /github-actions/github-actions-demo.yaml: -------------------------------------------------------------------------------- 1 | name: GitHub Actions Demo 2 | run-name: ${{ github.actor }} is testing out GitHub Actions 🚀 3 | on: [push] 4 | jobs: 5 | Explore-GitHub-Actions: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event." 9 | - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!" 10 | - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." 11 | - name: Check out repository code 12 | uses: actions/checkout@v4 13 | - run: echo "💡 The ${{ github.repository }} repository has been cloned to the runner." 14 | - run: echo "🖥️ The workflow is now ready to test your code on the runner." 15 | - name: List files in the repository 16 | run: | 17 | ls ${{ github.workspace }} 18 | - run: echo "🍏 This job's status is ${{ job.status }}." 19 | -------------------------------------------------------------------------------- /kubernetes-quickstart/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manifoldailearning/llmops-chatgpt-huggingface/31b394fce372d122a766e57888a04e0ca24bc07e/kubernetes-quickstart/.DS_Store -------------------------------------------------------------------------------- /kubernetes-quickstart/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx 2 | COPY index.html /usr/share/nginx/html/index.html -------------------------------------------------------------------------------- /kubernetes-quickstart/README.md: -------------------------------------------------------------------------------- 1 | # Commands used in the Hands On 2 | ``` 3 | docker --version 4 | docker pull nginx 5 | docker images 6 | docker run -d -p 8080:80 nginx 7 | docker ps 8 | docker stop 9 | 10 | 11 | docker build -t custom-nginx . 
12 | docker tag custom-nginx yourusername/custom-nginx 13 | docker push yourusername/custom-nginx 14 | ``` 15 | 16 | # Commands for Kubernetes 17 | 18 | ``` 19 | kubectl get pods 20 | kubectl apply -f pod.yaml 21 | kubectl describe pod myapp 22 | 23 | kubectl apply -f svc-local.yaml 24 | kubectl get svc 25 | kubectl describe svc mysvc 26 | 27 | kubectl delete pods --all 28 | kubectl delete svc --all 29 | 30 | kubectl api-resources 31 | kubectl get deployments 32 | kubectl apply -f deployment.yaml 33 | kubectl describe deployments 34 | 35 | kubectl rollout status deployment 36 | 37 | 38 | kubectl create configmap app-config --from-literal=DATABASE_URL="mysql://user:password@mysql-server:3306/db_name" 39 | 40 | kubectl describe cm app-config 41 | 42 | kubectl exec -it app-pod -- /bin/bash 43 | 44 | kubectl create secret generic db-secret --from-literal=DB_PASSWORD=password123 45 | 46 | 47 | kubectl exec -it db-pod -- /bin/bash 48 | ``` 49 | 50 | # Deployment Object 51 | - self healing 52 | - scaling 53 | - rolling updates 54 | 55 | # Deployment Controller 56 | - A process that runs on the Control Plane and monitors the cluster, making sure all Deployment objects are running as per their specification -------------------------------------------------------------------------------- /kubernetes-quickstart/cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: app-pod 5 | spec: 6 | containers: 7 | - name: app-container 8 | image: nginx 9 | envFrom: 10 | - configMapRef: 11 | name: app-config 12 | -------------------------------------------------------------------------------- /kubernetes-quickstart/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: myapp-deployment 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | app: myapp-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: myapp-pod 14 | spec: 15 | containers: 16 | - name: mycontainer 17 | image: manifoldailearning/custom-nginx 18 | resources: 19 | limits: 20 | memory: "128Mi" 21 | cpu: "500m" 22 | ports: 23 | - containerPort: 80 24 | 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: mysvc-v2 30 | spec: 31 | type: NodePort 32 | selector: 33 | app: myapp-pod 34 | ports: 35 | - port: 5000 # Service IP Port 36 | targetPort: 80 # Container Port 37 | nodePort: 30002 # Node Port/Host Port 38 | protocol: TCP 39 | -------------------------------------------------------------------------------- /kubernetes-quickstart/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Generative AI Mastery - 2024 7 | 8 | 9 |

Generative AI Mastery - 2024

Unleash your creativity and master the art of Generative AI to shape the future in 2024! Join us on this cutting-edge journey to transform ideas into reality through AI innovation.

Language: English

Price (Limited Period Offer):

  • ₹29999 86.24% OFF
  • ₹3499 excluding GST

JOIN NOW

Description

Generative AI Mastery - 2024 is a comprehensive course that dives deep into the world of generative artificial intelligence. Participants will learn advanced techniques and strategies to create AI systems that can generate content autonomously.

Key Highlights:

  • In-depth exploration of generative AI concepts
  • Hands-on projects to enhance practical skills
  • Expert guidance from industry professionals

This is a package of 9 courses, worth more than $3,000, bundled exclusively for our learners.

Check out the individual courses by clicking the links below for more information.

What this Package Contains:

  • Practical Python to Data Science & Machine Learning Bootcamp - 15+ Hours Video Content & Hands On Exercise
  • Practical Natural Language Processing - Go from Zero to Hero - 25+ Hours of Video Content & Hands On Exercise
  • Prompt Engineering with LLMOps Course with 2 Hands On Projects - 15+ Hours of Video Content & Hands On Exercise
  • AWS Certified Machine Learning – Specialty (MLS-C01) - 2024 - 28+ Hours Video Content & Hands On Exercise
  • Universal Deep Learning Mastery - 2024 Edition with Updated (Tensorflow) - 15+ Hours of Video Content & Hands On Exercise
  • PyTorch for Deep Learning Computer Vision Bootcamp 2024 - 13+ Hours of Video Content & Hands On Exercise
  • Linear Algebra Math for AI - Artificial Intelligence - 19+ Hours Video Content & Hands On Exercise
  • Statistics & Probability for Data Science - Predictive Analytics - 23+ Hours of Video Content & Hands On Exercise
  • Calculus - Essential Math for AI, Data Science and Deep Learning - 14+ Hours of Video Content & Hands On Exercise
40 | 41 | 42 | -------------------------------------------------------------------------------- /kubernetes-quickstart/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp 5 | labels: 6 | name: myapp 7 | environment: prod 8 | spec: 9 | containers: 10 | - name: myapp 11 | image: manifoldailearning/custom-nginx 12 | ports: 13 | - containerPort: 80 14 | -------------------------------------------------------------------------------- /kubernetes-quickstart/rolling-update.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: myapp-deployment 5 | spec: 6 | replicas: 5 7 | minReadySeconds: 45 # a new pod must stay ready for 45s before it counts as available 8 | strategy: 9 | type: RollingUpdate 10 | rollingUpdate: 11 | maxUnavailable: 1 12 | maxSurge: 2 13 | selector: 14 | matchLabels: 15 | app: myapp-pod 16 | template: 17 | metadata: 18 | labels: 19 | app: myapp-pod 20 | spec: 21 | containers: 22 | - name: mycontainer 23 | image: manifoldailearning/custom-nginx:v2 # change to v2 to test 24 | resources: 25 | limits: 26 | memory: "128Mi" 27 | cpu: "500m" 28 | ports: 29 | - containerPort: 80 30 | 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: mysvc-v2 36 | spec: 37 | type: NodePort 38 | selector: 39 | app: myapp-pod 40 | ports: 41 | - port: 5000 # Service IP Port 42 | targetPort: 80 # Container Port 43 | nodePort: 30002 # Node Port/Host Port 44 | protocol: TCP 45 | -------------------------------------------------------------------------------- /kubernetes-quickstart/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: db-pod 5 | spec: 6 | containers: 7 | - name: db-container 8 | image: mysql 9 | env: 10 | - name: MYSQL_ROOT_PASSWORD 11 | valueFrom: 12 | secretKeyRef: 13 | name: db-secret 14 | key: DB_PASSWORD 15 | -------------------------------------------------------------------------------- /kubernetes-quickstart/svc-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysvc 5 | spec: 6 | type: NodePort 7 | selector: 8 | name: myapp 9 | environment: prod 10 | ports: 11 | - port: 5000 # Service IP Port 12 | targetPort: 80 # Container Port 13 | nodePort: 30001 # Node Port/Host Port 14 | protocol: TCP 15 | --------------------------------------------------------------------------------
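A minimal sketch of exercising the rolling update defined in `rolling-update.yaml` above (this assumes the `manifoldailearning/custom-nginx:v2` image tag has already been pushed):

```bash
# Apply the Deployment and Service, then watch pods being replaced
# within the maxUnavailable=1 / maxSurge=2 budget
kubectl apply -f rolling-update.yaml
kubectl rollout status deployment myapp-deployment

# Roll back to the previous ReplicaSet if the new image misbehaves
kubectl rollout undo deployment myapp-deployment
```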