├── LICENSE
├── README.md
└── alpaca_vicuna_webui.ipynb

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Nathan Fargo

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# TextGenWebUI-Colab

Run the Text Generation WebUI with the Vicuna or Alpaca model in Google Colaboratory.

### ⚠️ Ensure that Google Drive is available for storage. ⚠️

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ntfargo/TextGenWebUI-Colab/blob/main/alpaca_vicuna_webui.ipynb)

## Overview

This project provides a web-based user interface for text generation using either the Alpaca or Vicuna model. It is intended to be run in Google Colaboratory and requires access to Google Drive for storage.

The web UI is based on the [text-generation-webui](https://github.com/oobabooga/text-generation-webui) repository, which provides a simple interface for generating text with a pre-trained language model.
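
After the notebook has cloned the repository, moved it to Google Drive, and downloaded a model, its final cell launches the server with flags matching the 4-bit, group-size-128 quantization of both models. The snippet below is only an illustrative plain-Python sketch of that launch step (the Drive path is the notebook's default, not a requirement):

```python
# Illustrative sketch of the launch step performed by the notebook's last cell.
# Assumes the webui repository already lives on Google Drive and a model has
# been downloaded into its models/ folder.
import subprocess

WEBUI_DIR = "/content/drive/MyDrive/text-generation-webui"  # notebook's default Drive location

subprocess.run(
    [
        "python", "server.py",
        "--share",             # expose a public Gradio link from the Colab VM
        "--chat",              # start the UI in chat mode
        "--wbits", "4",        # weights are 4-bit GPTQ
        "--groupsize", "128",  # group size the models were quantized with
    ],
    cwd=WEBUI_DIR,
    check=True,
)
```

When authentication is enabled in the notebook, it additionally passes `--gradio-auth-path` pointing at a `user:password` file stored on Drive.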

## Models

The notebook downloads one of two pre-trained, GPTQ-quantized language models:

- Alpaca 13B (GPT4-x-Alpaca, 4-bit, group size 128) - https://huggingface.co/anon8231489123/gpt4-x-alpaca-13b-native-4bit-128g
- Vicuna 13B (4-bit, group size 128) - https://huggingface.co/anon8231489123/vicuna-13b-GPTQ-4bit-128g
--------------------------------------------------------------------------------
/alpaca_vicuna_webui.ipynb:
--------------------------------------------------------------------------------
{
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "from google.colab import drive\n",
        "\n",
        "# Mount Google Drive\n",
        "drive.mount('/content/drive')"
      ],
      "metadata": {
        "id": "p0kP1FMpVS64"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "!git clone -b v1.2 https://github.com/camenduru/text-generation-webui.git\n",
        "%cd /content/text-generation-webui\n",
        "!pip install -r requirements.txt"
      ],
      "metadata": {
        "id": "MXLqqaXO6FwH"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "oWIEIapOAstd"
      },
      "outputs": [],
      "source": [
        "!mkdir /content/text-generation-webui/repositories\n",
        "%cd /content/text-generation-webui/repositories\n",
        "!git clone -b v1.2 https://github.com/camenduru/GPTQ-for-LLaMa.git\n",
        "%cd GPTQ-for-LLaMa\n",
        "!python setup_cuda.py install\n",
        "\n",
        "# Move the whole webui folder to Google Drive so models and settings persist\n",
        "drive_folder = '/content/drive/MyDrive/text-generation-webui'\n",
        "if not os.path.exists(drive_folder):\n",
        "    os.makedirs(drive_folder)\n",
        "!mv /content/text-generation-webui/* \"$drive_folder/\""
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "eEGdDI1SED4j"
      },
      "outputs": [],
      "source": [
        "!apt-get -y install -qq aria2"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "mht9fpviBonB"
      },
      "outputs": [],
      "source": [
        "import subprocess\n",
        "\n",
        "def download_file(url, path, filename):\n",
        "    command = f\"aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {url} -d {path} -o {filename}\"\n",
        "    print(f\"Downloading {filename}...\")\n",
        "    subprocess.run(command, shell=True)\n",
        "    print(f\"{filename} downloaded successfully.\")\n",
        "\n",
        "model = \"vicuna\"  # Set this to \"alpaca\" to select the alpaca model\n",
        "if model == \"alpaca\":\n",
        "    base_url = \"https://huggingface.co/anon8231489123/gpt4-x-alpaca-13b-native-4bit-128g\"\n",
        "    safetensors = \"4bit-128g.safetensors\"\n",
        "elif model == \"vicuna\":\n",
        "    base_url = \"https://huggingface.co/anon8231489123/vicuna-13b-GPTQ-4bit-128g\"\n",
        "    safetensors = \"vicuna-13b-4bit-128g.safetensors\"\n",
        "else:\n",
        "    raise ValueError(\"Invalid model selected\")\n",
        "\n",
        "# Destination folder under the webui's models/ directory\n",
        "base_path = \"/content/drive/MyDrive/text-generation-webui/models/gpt4-x-\" + model + \"-13b-native-4bit-128g\"\n",
        "\n",
        "download_file(f\"{base_url}/raw/main/config.json\", base_path, \"config.json\")\n",
        "download_file(f\"{base_url}/raw/main/generation_config.json\", base_path, \"generation_config.json\")\n",
        "download_file(f\"{base_url}/raw/main/special_tokens_map.json\", base_path, \"special_tokens_map.json\")\n",
        "# The small JSON files above come from the raw/ endpoint; tokenizer.model and\n",
        "# the weights below use resolve/ so Hugging Face returns the actual LFS file\n",
        "# contents instead of pointer stubs.\n",
        "download_file(f\"{base_url}/resolve/main/tokenizer.model\", base_path, \"tokenizer.model\")\n",
        "download_file(f\"{base_url}/raw/main/tokenizer_config.json\", base_path, \"tokenizer_config.json\")\n",
        "download_file(f\"{base_url}/resolve/main/{safetensors}\", base_path, safetensors)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Q1157a1yBpq4"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "\n",
        "%cd /content/drive/MyDrive/text-generation-webui\n",
        "\n",
        "auth_token_path = \"/content/drive/MyDrive/text-generation-webui/auth_token.txt\"\n",
        "\n",
        "# Set your username and password here (or use the default)\n",
        "user = \"username\"\n",
        "password = \"password\"\n",
        "\n",
        "auth = input(\"Do you want to enable authentication? (yes/no): \")\n",
        "\n",
        "if auth.lower() == \"yes\":\n",
        "    with open(auth_token_path, \"w\") as f:\n",
        "        f.write(f\"{user}:{password}\")\n",
        "    print(\"auth_token.txt created successfully.\")\n",
        "elif os.path.exists(auth_token_path):\n",
        "    # Answered 'no': remove any token left on Drive by a previous run,\n",
        "    # otherwise authentication would silently stay enabled.\n",
        "    os.remove(auth_token_path)\n",
        "\n",
        "if os.path.exists(auth_token_path):\n",
        "    !python server.py --share --chat --wbits 4 --groupsize 128 --gradio-auth-path {auth_token_path}\n",
        "else:\n",
        "    !python server.py --share --chat --wbits 4 --groupsize 128"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "provenance": []
    },
    "gpuClass": "standard",
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
--------------------------------------------------------------------------------