├── README.md
├── Dockerfile.llamacpp
└── Sakura-13B-Galgame-Colab.ipynb


/README.md:
--------------------------------------------------------------------------------
# SakuraLLM-Notebooks
## **:warning: Warning :warning:**
- **Kaggle has taken measures to ban all SakuraLLM models ([see here](https://github.com/Isotr0py/SakuraLLM-Notebooks/issues/14)); using SakuraLLM on Kaggle will result in a permanent account ban. Please switch to rented GPUs or to the compute-sharing tools of machine-translation sites (to prevent abuse, please search for them yourself).**
- **To prevent abuse, the notebooks in this repository no longer provide tunneling or Sakura API deployment; they only provide EPUB/TXT translation. If you need the Sakura API, please deploy it yourself.**

## Introduction
- Run the [Sakura-13B-Galgame](https://github.com/SakuraLLM/Sakura-13B-Galgame) model on Colab
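
## Running the translation outside Colab (sketch)
A rough, untested sketch of the same workflow the notebook automates, for anyone using a rented GPU box instead of Colab. The flags mirror the notebook's `translate_epub.py` cell; the llama-cpp-python wheel index, model file name and input paths are taken from the notebook and may need adjusting for your CUDA version and setup.

```bash
git clone https://github.com/SakuraLLM/Sakura-13B-Galgame.git
cd Sakura-13B-Galgame
pip install -r requirements.txt "diskcache>=5.6.1"
pip install llama-cpp-python -i https://abetlen.github.io/llama-cpp-python/whl/cu122

# Download a GGUF quantization of the model (several GB)
huggingface-cli download SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF \
    sakura-14b-qwen2beta-v0.9.2-q4km.gguf --local-dir models/

# novel.epub and dict.txt are your input EPUB and GPT dictionary
python translate_epub.py \
    --model_name_or_path models/sakura-14b-qwen2beta-v0.9.2-q4km.gguf \
    --llama_cpp \
    --use_gpu \
    --model_version 0.9 \
    --trust_remote_code \
    --data_path novel.epub \
    --gpt_dict_path dict.txt \
    --output_folder output/
```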

## Acknowledgements
- [SakuraLLM/Sakura-13B-Galgame](https://github.com/SakuraLLM/Sakura-13B-Galgame)

--------------------------------------------------------------------------------
/Dockerfile.llamacpp:
--------------------------------------------------------------------------------
# adapted from https://github.com/vllm-project/vllm/blob/main/Dockerfile
# Docker image to build llama-cpp-python for kaggle

ARG CUDA_VERSION=12.4.1
#################### BASE BUILD IMAGE ####################
# prepare basic build environment
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 AS base

ARG CUDA_VERSION=12.4.1
ARG PYTHON_VERSION=3.10

ENV DEBIAN_FRONTEND=noninteractive

RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
    && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
    && apt-get update -y \
    && apt-get install -y ccache software-properties-common \
    && add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update -y \
    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && if [ "${PYTHON_VERSION}" != "3" ]; then update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1; fi \
    && python3 --version

RUN apt-get update -y \
    && apt-get install -y git curl sudo

# Install pip s.t. it will be compatible with our PYTHON_VERSION
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}
RUN python3 -m pip --version

# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/

WORKDIR /workspace

#################### BASE BUILD IMAGE ####################

#################### WHEEL BUILD IMAGE ####################
FROM base AS build

ARG PYTHON_VERSION=3.10

# install compiler cache to speed up compilation leveraging local or remote caching
RUN apt-get update -y && apt-get install -y ccache

ENV CMAKE_ARGS="-DGGML_CUDA=on"
RUN pip wheel --no-binary llama-cpp-python llama-cpp-python -i https://pypi.tuna.tsinghua.edu.cn/simple
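
# Example usage (untested sketch): build the wheel stage and copy the built
# wheel out of /workspace, where `pip wheel` writes it. The image tag and the
# wheel filename pattern below are placeholders; adjust them to your setup.
#   docker build -f Dockerfile.llamacpp --target build -t llamacpp-wheel .
#   docker run --rm -v "$PWD":/out llamacpp-wheel \
#       sh -c 'cp /workspace/llama_cpp_python-*.whl /out/'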

--------------------------------------------------------------------------------
/Sakura-13B-Galgame-Colab.ipynb:
--------------------------------------------------------------------------------
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Tu2Qu_P9sK-G"
      },
      "source": [
        "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Isotr0py/SakuraLLM-Notebooks/blob/main/Sakura-13B-Galgame-Colab.ipynb)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "tvkI52m5DRsL",
        "outputId": "1d23e002-65ef-47dc-94fd-7436dcf7e671"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Fri Sep  6 02:46:52 2024\n",
            "+---------------------------------------------------------------------------------------+\n",
            "| NVIDIA-SMI 535.104.05             Driver Version: 535.104.05   CUDA Version: 12.2     |\n",
            "|-----------------------------------------+----------------------+----------------------+\n",
            "| GPU  Name                 Persistence-M | Bus-Id        Disp.A | Volatile Uncorr. ECC |\n",
            "| Fan  Temp   Perf          Pwr:Usage/Cap |         Memory-Usage | GPU-Util  Compute M. |\n",
            "|                                         |                      |               MIG M. |\n",
            "|=========================================+======================+======================|\n",
            "|   0  Tesla T4                       Off | 00000000:00:04.0 Off |                    0 |\n",
            "| N/A   45C    P8              11W /  70W |      0MiB / 15360MiB |      0%      Default |\n",
            "|                                         |                      |                  N/A |\n",
            "+-----------------------------------------+----------------------+----------------------+\n",
            "\n",
            "+---------------------------------------------------------------------------------------+\n",
            "| Processes:                                                                             |\n",
            "|  GPU   GI   CI        PID   Type   Process name                            GPU Memory |\n",
            "|        ID   ID                                                             Usage      |\n",
            "|=========================================================================================|\n",
            "|  No running processes found                                                            |\n",
            "+---------------------------------------------------------------------------------------+\n"
          ]
        }
      ],
      "source": [
        "#@title Initialize environment\n",
        "#@markdown Mount Google Drive\n",
        "Mount_GDrive = False # @param {type:\"boolean\"}\n",
        "if Mount_GDrive:\n",
        "    from google.colab import drive\n",
        "\n",
        "    drive.mount('/content/gdrive')\n",
        "    ROOT_PATH = \"/content/gdrive/MyDrive\"\n",
        "else:\n",
        "    ROOT_PATH = \"/content\"\n",
        "!nvidia-smi"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "gelzXVWEGxZw",
        "outputId": "782185e7-1ded-4c4d-a075-4d90114ffaa7"
      },
      "outputs": [],
      "source": [
        "#@title Install dependencies\n",
        "%cd $ROOT_PATH\n",
        "!git clone https://github.com/SakuraLLM/Sakura-13B-Galgame.git\n",
        "\n",
        "%cd Sakura-13B-Galgame\n",
        "!git pull\n",
        "\n",
        "LLAMA_CPP = True # @param {type:\"boolean\"}\n",
        "VLLM = True # @param {type:\"boolean\"}\n",
        "if LLAMA_CPP:\n",
        "    !pip install \"diskcache>=5.6.1\"\n",
        "    !pip install llama-cpp-python -i https://abetlen.github.io/llama-cpp-python/whl/cu122\n",
        "if VLLM:\n",
        "    !pip install -U transformers tokenizers\n",
        "    !pip install vllm\n",
        "!pip install -q -r requirements.txt\n",
        "!pip install -q pyngrok"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "DUQnJQ96Jau8"
      },
      "outputs": [],
      "source": [
        "#@title Translate EPUB\n",
        "from huggingface_hub import hf_hub_download\n",
        "\n",
        "repo_id = \"SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF\"\n",
        "MODEL = \"sakura-14b-qwen2beta-v0.9.2-q4km.gguf\" # @param [\"sakura-14b-qwen2beta-v0.9.2-iq4xs.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q2k.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q3km.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q4km.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q6k.gguf\"]\n",
        "hf_hub_download(repo_id=repo_id, filename=MODEL, local_dir=\"models/\")\n",
        "MODEL_PATH = f\"./models/{MODEL}\"\n",
        "# Paths below are relative to the Sakura-13B-Galgame checkout; upload your files there or use absolute paths (e.g. on Drive)\n",
        "EPUB_PATH = \"novel.epub\" # @param {type:\"string\"}\n",
        "GPT_DICT_PATH = \"dict.txt\" # @param {type:\"string\"}\n",
        "OUTPUT_FOLDER = \"output/\" # @param {type:\"string\"}\n",
        "\n",
        "%cd $ROOT_PATH/Sakura-13B-Galgame\n",
        "!python translate_epub.py \\\n",
        "    --model_name_or_path $MODEL_PATH \\\n",
        "    --llama_cpp \\\n",
        "    --use_gpu \\\n",
        "    --model_version 0.9 \\\n",
        "    --trust_remote_code \\\n",
        "    --data_path $EPUB_PATH \\\n",
        "    --gpt_dict_path $GPT_DICT_PATH \\\n",
        "    --output_folder $OUTPUT_FOLDER"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "S9gYbA1yMVND"
      },
      "outputs": [],
      "source": [
        "#@title Translate text file\n",
        "from huggingface_hub import hf_hub_download\n",
        "\n",
        "repo_id = \"SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF\"\n",
        "MODEL = \"sakura-14b-qwen2beta-v0.9.2-q4km.gguf\" # @param [\"sakura-14b-qwen2beta-v0.9.2-iq4xs.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q2k.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q3km.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q4km.gguf\", \"sakura-14b-qwen2beta-v0.9.2-q6k.gguf\"]\n",
        "hf_hub_download(repo_id=repo_id, filename=MODEL, local_dir=\"models/\")\n",
        "MODEL_PATH = f\"./models/{MODEL}\"\n",
        "# Paths below are relative to the Sakura-13B-Galgame checkout; upload your files there or use absolute paths (e.g. on Drive)\n",
        "DATA_PATH = \"novel.txt\" # @param {type:\"string\"}\n",
        "OUTPUT_PATH = \"novel_translated.txt\" # @param {type:\"string\"}\n",
        "\n",
        "%cd $ROOT_PATH/Sakura-13B-Galgame\n",
        "!python translate_novel.py \\\n",
        "    --model_name_or_path $MODEL_PATH \\\n",
        "    --llama_cpp \\\n",
        "    --use_gpu \\\n",
        "    --model_version 0.9 \\\n",
        "    --trust_remote_code \\\n",
        "    --data_path $DATA_PATH \\\n",
        "    --output_path $OUTPUT_PATH"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}

--------------------------------------------------------------------------------