├── .gitattributes
├── .github
└── FUNDING.yml
├── LICENSE
├── README.md
├── README_Runpod_LocalLLMsUI.md
├── README_Runpod_LocalLLMsUIandAPI.md
├── build_docker.py
├── build_oneclick.py
├── conf-files
├── bashrc
├── iterm2_shell_integration.bash
├── iterm2_shell_integration.zsh
├── passwd
├── thebloke.zsh-theme
├── tmux.conf
├── vimrc
└── zshrc
├── cuda11.8.0-ubuntu22.04-oneclick-chat
├── Dockerfile
├── scripts
│ └── rp_handler.py
└── start-with-ui.sh
├── cuda11.8.0-ubuntu22.04-oneclick-rp
├── Dockerfile
├── scripts
│ ├── rp_handler.old2
│ ├── rp_handler.orig.py
│ └── rp_handler.py
└── start-with-ui.sh
├── cuda11.8.0-ubuntu22.04-oneclick
├── Dockerfile
├── conf-files
│ ├── iterm2_shell_integration.zsh
│ ├── passwd
│ ├── thebloke.zsh-theme
│ ├── tmux.conf
│ ├── vimrc
│ └── zshrc
├── scripts
│ ├── build-llama-cpp-python.sh
│ ├── check_avx2.py
│ ├── download_model.py
│ ├── fetch-model.py
│ ├── restart-text-generation-webui.sh
│ ├── run-text-generation-webui.sh
│ └── textgen-on-workspace.sh
└── start-with-ui.sh
├── cuda11.8.0-ubuntu22.04-pytorch-conda
├── Dockerfile
├── conf-files
│ ├── .zsh
│ │ ├── conda.zsh
│ │ └── iterm2_shell_integration.zsh
│ ├── passwd
│ ├── thebloke.zsh-theme
│ ├── tmux.conf
│ ├── vimrc
│ └── zshrc
└── start-ssh-only.sh
├── cuda11.8.0-ubuntu22.04-pytorch
├── Dockerfile
└── start-ssh-only.sh
├── cuda12.1.1-ubuntu22.04-pytorch
├── Dockerfile
└── start-ssh-only.sh
├── cuda12.1.1-ubuntu22.04-textgen
└── Dockerfile
├── imgs
├── RunpodTemplateAPI-TCPPortMapping.png
└── TheBlokeAI.header.800.jpg
├── scripts
├── build-llama-cpp-python.sh
├── check_avx2.py
├── download_model.py
├── fetch-model.py
├── restart-text-generation-webui.sh
├── run-text-generation-webui.sh
├── start-ssh-only.sh
└── textgen-on-workspace.sh
└── wheels
└── torch-2.0.0a0+gite9ebda2-cp310-cp310-linux_x86_64.whl
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.whl filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | patreon: TheBlokeAI
4 | ko_fi: theblokeai
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 TheBlokeAI
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TheBloke's Docker templates
2 |
3 | ### Update: 16 December 2023 - Rebuild to add Mixtral support
4 | * Should now support Mixtral, with updated AutoGPTQ 0.6 and llama-cpp-python 0.2.23
5 | * Updated PyTorch to 2.1.1
6 |
7 | ### Update: 11 October 2023 - Update API command line option
8 | * Container will now launch text-generation-webui with arg `--extensions openai`
9 | * Logs from text-generation-webui will now appear in the Runpod log viewer, as well as `/workspace/logs/text-generation-webui.log`
10 |
11 | ### Update: 8th October 2023 - CUDA 12.1.1, fixed ExLlamav2 issues
12 | * The instances now use CUDA 12.1.1, which fixes issues with EXL2
13 | * Note that for now the main container is still called cuda11.8.0-ubuntu22.04-oneclick
14 | * This is because I need to get in touch with Runpod to update the name of the container used in their instances
15 | * This is just a naming issue; the container does now use CUDA 12.1.1 and EXL2 is confirmed to work again.
16 |
17 | ### Update: 23rd July 2023 - Llama 2 support, including Llama 2 70B in ExLlama
18 | * Llama 2 models, including Llama 2 70B, are now fully supported
19 | * Updated to latest text-generation-webui `requirements.txt`
20 | * Removed the exllama pip package installed by text-generation-webui
21 | * Therefore the ExLlama kernel will build automatically on first use
22 | * This ensures that ExLlama is always up-to-date with any new ExLlama commits (which are pulled automatically on each boot)
23 | * Added simple build script for building the Docker containers
24 |
25 | ### Update: 28th June 2023 - SuperHOT fixed
26 | * Updated to latest ExLlama code, fixing issue with SuperHOT GPTQs
27 | * ExLlama now automatically updates on boot, like text-generation-webui already did
28 | * This should result in the template automatically supporting new ExLlama features in future
29 |
30 | ### Update: 19th June 2023
31 | * Major update to the template
32 | * text-generation-webui is now integrated with:
33 | * AutoGPTQ with support for all Runpod GPU types
34 | * ExLlama, turbo-charged Llama GPTQ engine - performs 2x faster than AutoGPTQ (Llama 4bit GPTQs only)
35 | * CUDA-accelerated GGML support, with support for all Runpod systems and GPUs.
36 | * All text-generation-webui extensions are included and supported (Chat, SuperBooga, Whisper, etc).
37 | * text-generation-webui is always up-to-date with the latest code and features.
38 | * Automatic model download and loading via environment variable `MODEL`.
39 | * Pass text-generation-webui parameters via environment variable `UI_ARGS`.
40 |
41 | ## Runpod: TheBloke's Local LLMs UI
42 |
43 | [Runpod template link](https://runpod.io/gsc?template=qk29nkmbfr&ref=eexqfacd)
44 |
45 | [Full documentation is available here](https://github.com/TheBlokeAI/dockerLLM/blob/main/README_Runpod_LocalLLMsUI.md)
46 |
47 | ## Runpod: TheBloke's Local LLMs UI & API
48 |
49 | [Runpod template link](https://runpod.io/gsc?template=f1pf20op0z&ref=eexqfacd)
50 |
51 | [Full documentation is available here](https://github.com/TheBlokeAI/dockerLLM/blob/main/README_Runpod_LocalLLMsUIandAPI.md)
52 |
--------------------------------------------------------------------------------
/README_Runpod_LocalLLMsUI.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |

4 |
5 |
13 |
14 |
15 | ## TheBloke's Local LLMs: One-Click UI
16 |
17 | ### Update: 16 December 2023 - Rebuild to add Mixtral support
18 | * Should now support Mixtral, with updated AutoGPTQ 0.6 and llama-cpp-python 0.2.23
19 | * Updated PyTorch to 2.1.1
20 |
21 | ### Update: 11 October 2023 - Update API command line option
22 | * Container will now launch text-generation-webui with arg `--extensions openai`
23 | * Logs from text-generation-webui will now appear in the Runpod log viewer, as well as `/workspace/logs/text-generation-webui.log`
24 |
25 | ### Update: 8th October 2023 - CUDA 12.1.1, fixed ExLlamav2 issues
26 | * The instances now use CUDA 12.1.1, which fixes issues with EXL2
27 | * Note that for now the main container is still called cuda11.8.0-ubuntu22.04-oneclick
28 | * This is because I need to get in touch with Runpod to update the name of the container used in their instances
29 | * This is just a naming issue; the container does now use CUDA 12.1.1 and EXL2 is confirmed to work again.
30 |
31 | ### Update: 23rd July 2023 - Llama 2 support, including Llama 2 70B in ExLlama
32 | * Llama 2 models, including Llama 2 70B, are now fully supported
33 | * Updated to latest text-generation-webui `requirements.txt`
34 | * Removed the exllama pip package installed by text-generation-webui
35 | * Therefore the ExLlama kernel will build automatically on first use
36 | * This ensures that ExLlama is always up-to-date with any new ExLlama commits (which are pulled automatically on each boot)
37 |
38 | ### Update: 28th June 2023 - SuperHOT fixed
39 | * Updated to latest ExLlama code, fixing issue with SuperHOT GPTQs
40 | * ExLlama now automatically updates on boot, like text-generation-webui already did
41 | * This should result in the template automatically supporting new ExLlama features in future
42 |
43 | ### Update: 19th June 2023
44 | * Major update to the template
45 | * text-generation-webui is now integrated with:
46 | * AutoGPTQ with support for all Runpod GPU types
47 | * ExLlama, turbo-charged Llama GPTQ engine - performs 2x faster than AutoGPTQ (Llama 4bit GPTQs only)
48 | * CUDA-accelerated GGML support, with support for all Runpod systems and GPUs.
49 | * All text-generation-webui extensions are included and supported (Chat, SuperBooga, Whisper, etc).
50 | * text-generation-webui is always up-to-date with the latest code and features.
51 | * Automatic model download and loading via environment variable `MODEL`.
52 | * Pass text-generation-webui parameters via environment variable `UI_ARGS`.
53 |
54 | This template will automatically start [oobabooga's text-generation-webui](https://github.com/oobabooga/text-generation-webui) on port 7860.
55 |
56 | It can load quantised GPTQ models (3-bit, 4-bit and 8-bit), quantised GGML models (2, 3, 4, 5, 6 and 8-bit) with full GPU acceleration, as well as pytorch format models in 16-bit, 8-bit and 4-bit.
57 |
58 | It provides:
59 | * [text-generation-webui](https://github.com/oobabooga/text-generation-webui) with all extensions.
60 | * GPTQ support via [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) - 2, 3, 4 and 8-bit, all model types.
61 | * GPTQ support via [ExLlama](https://github.com/turboderp/exllama) and ExLlamav2 - 4-bit Llama models only.
62 | * GGUF with GPU acceleration via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python).
63 | * AWQ support via AutoAWQ.
64 |
65 | ## Volume support
66 |
67 | This template supports volumes mounted under `/workspace`.
68 |
69 | On boot, text-generation-webui will be moved to `/workspace/text-generation-webui`. Therefore all downloaded models, and any saved settings/characters/etc, will be persisted on your volume, including Network Volumes.
70 |
71 | With default settings, it will create a 100GB volume on `/workspace`. This storage is persistent after shutting down and then restarting the pod. But its contents will be lost if you delete the pod. Runpod supports Network Volumes in some datacentres. If you create a Network Volume and use it with this template, your models will be stored on the Network Volume and will be persistent permanently, including working between different kinds of pods (within that same datacentre).
72 |
73 | ## How to access the UI
74 |
75 | Once the pod is started, click **Connect** and then **HTTP [Port 7860]**.
76 |
77 | ## How to download and use a GPTQ model, using AutoGPTQ
78 |
79 | Once you're in the UI:
80 |
81 | 1. Click the **Model tab**.
82 | 2. Under **Download custom model or LoRA**, enter an HF repo to download, for example: `TheBloke/vicuna-13b-v1.3-GPTQ`.
83 | 3. Click **Download**.
84 | 4. Wait until it says it's finished downloading.
85 | 5. Click the **Refresh** icon next to **Model** in the top left.
86 | 6. In the **Model drop-down**: choose the model you just downloaded, eg `vicuna-13b-v1.3-GPTQ`.
87 | 7. Once it says it's loaded, click the **Text Generation tab** and enter a prompt!
88 |
89 | ## How to use a model with the ExLlama turbo-charged GPTQ engine
90 |
91 | For Llama 4-bit GPTQs, you have the option of using ExLlama instead of AutoGPTQ.
92 |
93 | 1. Download the model as described above.
94 | 2. On the **Models** tab, change the **Loader** dropdown to **ExLlama**
95 | 3. Click **Reload** to load the model with ExLlama.
96 | 3. For most systems, you're done! You can now run inference as normal, and expect to see better performance.
97 | 4. If you're using a dual-GPU system, you can configure ExLlama to use both GPUs:
98 | - In the **gpu-split** text box, enter a comma-separated list of the VRAM to use on each GPU
99 | - For example, if using a system with 2 x 24GB GPUs, you could enter `23,23`
100 | - **Note**: Multiple GPUs should only be used for loading larger models than can load on one GPU. Multi-GPU inference is not faster than single GPU in cases where one GPU has enough VRAM to load the model.
101 | 5. To optionally save ExLlama as the loader for this model, click **Save Settings**.
102 |
103 | If you want to use ExLlama permanently, for all models, you can add the `--loader exllama` parameter to text-generation-webui.
104 |
105 | This can be done either by editing `/workspace/run-text-generation-webui.sh`, or by passing the `UI_ARGS` environment variable via Template Overrides. Both methods are described below.
106 |
107 | ## How to download and use a GGML model
108 |
109 | The UI doesn't currently support downloading a single GGML model via the UI. Therefore it's recommended to either use the `MODEL` environment variable described below, or else to SSH in.
110 |
111 | To download a model via SSH:
112 | * SSH to the server, then:
113 |
114 | ```
115 | cd /workspace/text-generation-webui/models
116 | wget https://huggingface.co/<repo_id>/resolve/main/<filename>
117 | ```
118 |
119 | For example, to download `vicuna-13b-v1.3.ggmlv3.q4_K_M.bin` from `TheBloke/vicuna-13b-v1.3-GGML`:
120 | ```
121 | wget https://huggingface.co/TheBloke/vicuna-13b-v1.3-GGML/resolve/main/vicuna-13b-v1.3.ggmlv3.q4_K_M.bin
122 | ```
123 |
124 | Once the download is finished, you can access the UI and:
125 | * Click the **Models** tab;
126 | * Untick **Autoload the model**;
127 | * Click the **Refresh** icon next to **Model** in the top left;
128 | * Choose the GGML file you just downloaded;
129 | * In the **Loader** dropdown, choose **llama.cpp**;
130 | * For full GPU acceleration, set **Threads** to **1** and **n-gpu-layers** to 100;
131 | * Note that whether you can do full acceleration will depend on the GPU you've chosen, the size of the model, and the quantisation size. If using one of my models, refer to the README for the list of quant sizes and pay attention to the "Max RAM" column. For full acceleration, pick a quant size which has "Max RAM" 2-3GB lower than the total VRAM of your GPU.
132 | * If you can't do full GPU acceleration, set **Threads** to 8 to start with and **n-gpu-layers** to as many layers as the GPU has VRAM for.
133 | * You can experiment with higher **Threads** value, which depending on the system in question may result in slightly higher performance.
134 | * Click **Save settings** and then **Reload**
135 | * The model will now load.
136 | * Once it says it's loaded, click the **Text generation tab** and enter a prompt!
137 |
138 | ## Template environment variables: automatic model download and UI parameters
139 |
140 | This template supports two environment variables which you can specify via **Template Overrides**.
141 |
142 | * `MODEL`
143 | * Pass in the ID of a Hugging Face repo, or an `https://` link to a single GGML model file
144 | * Examples of valid values for `MODEL`:
145 | * `TheBloke/vicuna-13b-v1.3-GPTQ`
146 | * `https://huggingface.co/TheBloke/vicuna-13b-v1.3-GGML/resolve/main/vicuna-13b-v1.3.ggmlv3.q4_K_M.bin`
147 | * When a `MODEL` value is passed, the following will happen:
148 | * On Docker launch, the passed model will be automatically downloaded to `/workspace/text-generation-webui/models`
149 | * **Note: this may take some time and the UI will not be available until the model has finished downloading.**
150 | * Once the model is downloaded, text-generation-webui will load this model automatically
151 | * To monitor the progress of the download, you can SSH in and run:
152 | * `tail -100f /workspace/logs/fetch-model.log`
153 | * `UI_ARGS`
154 | * Pass in any text-generation-webui launch parameters you want to use
155 | * For a guide to valid parameters, please see: https://github.com/oobabooga/text-generation-webui/tree/main#basic-settings
156 | * Example value: `--n-gpu-layers 100 --threads 1` to ensure a GGML model is fully loaded onto GPU, with optimal performance parameters.
157 | * Note: no checking for valid parameters is currently done. So if invalid params are entered, it can block text-generation-webui from launching.
158 | * If the UI does not launch, SSH in and run:
159 | * `tail -100f /workspace/logs/text-generation-webui.log` to see what the UI is doing.
160 |
161 | ## How to control the UI
162 |
163 | To change into Chat mode, click the `Interface Settings` tab then change Mode to `chat` and click `Apply and restart the interface`. To go back, change Mode back to `Default`.
164 |
165 | You can also try changing other settings in the `Interface Settings` tab, including enabling extensions.
166 |
167 | ### Permanently changing UI settings, and controlling the server via SSH
168 |
169 | To have the UI always launch with certain settings, you can SSH in and edit the script `/workspace/run-text-generation-webui.sh`
170 |
171 | Add or change any command line arguments you want in that script, then save it. Your settings will persist on the volume between pod restarts, and will persist permanently if a Network Volume is being used.
172 |
173 | Once you've changed settings you can then restart the server by running:
174 | ```
175 | /root/scripts/restart-text-generation-webui.sh
176 | ```
177 |
178 | For an alternative method of specifying parameters, refer to the Environment Variables section above for details on how to use `UI_ARGS` in a Template Override.
179 |
180 | ## Server logs
181 |
182 | The logs from launching text-generation-webui are stored at `/workspace/logs/text-generation-webui.log`
183 |
184 | You can read them by SSHing in and typing:
185 | ```
186 | cat /workspace/logs/text-generation-webui.log
187 | ```
188 |
189 | Or to watch them live:
190 | ```
191 | tail -100f /workspace/logs/text-generation-webui.log
192 | ```
193 |
194 | ## Looking for models?
195 |
196 | I have over 250 repos at HuggingFace, see them here: https://huggingface.co/TheBloke
197 |
198 | ## About this template
199 |
200 | This template uses Docker `thebloke/cuda11.8.0-ubuntu22.04-oneclick:latest`
201 |
202 | The source files for this Docker can be found at: https://github.com/TheBlokeAI/dockerLLM
203 |
204 | ## Support
205 |
206 | To get support, or to chat about AI/LLM in general, join us at my fast-growing Discord: https://discord.gg/Jq4vkcDakD
207 |
208 | ## Want to contribute?
209 |
210 | I accept donations towards my time and efforts in the open source LLM community:
211 |
212 | * Patreon: https://www.patreon.com/TheBlokeAI
213 | * Ko-Fi: https://ko-fi.com/TheBlokeAI
214 |
--------------------------------------------------------------------------------
/README_Runpod_LocalLLMsUIandAPI.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |

4 |
5 |
13 |
14 |
15 | ## TheBloke's Local LLMs: One-Click UI & API
16 |
17 | ### Update: 16 December 2023 - Rebuild to add Mixtral support
18 | * Should now support Mixtral, with updated AutoGPTQ 0.6 and llama-cpp-python 0.2.23
19 | * Updated PyTorch to 2.1.1
20 |
21 | ### Update: 11 October 2023 - Update API command line option
22 | * Container will now launch text-generation-webui with arg `--extensions openai`
23 | * Update API documentation link
24 | * Logs from text-generation-webui will now appear in the Runpod log viewer, as well as `/workspace/logs/text-generation-webui.log`
25 |
26 | ### Update: 8th October 2023 - CUDA 12.1.1, fixed ExLlamav2 issues
27 | * The instances now use CUDA 12.1.1, which fixes issues with EXL2
28 | * Note that for now the main container is still called cuda11.8.0-ubuntu22.04-oneclick
29 | * This is because I need to get in touch with Runpod to update the name of the container used in their instances
30 | * This is just a naming issue; the container does now use CUDA 12.1.1 and EXL2 is confirmed to work again.
31 |
32 | ### Update: 23rd July 2023 - Llama 2 support, including Llama 2 70B in ExLlama
33 | * Llama 2 models, including Llama 2 70B, are now fully supported
34 | * Updated to latest text-generation-webui `requirements.txt`
35 | * Removed the exllama pip package installed by text-generation-webui
36 | * Therefore the ExLlama kernel will build automatically on first use
37 | * This ensures that ExLlama is always up-to-date with any new ExLlama commits (which are pulled automatically on each boot)
38 |
39 | ### Update: 28th June 2023 - SuperHOT fixed
40 | * Updated to latest ExLlama code, fixing issue with SuperHOT GPTQs
41 | * ExLlama now automatically updates on boot, like text-generation-webui already did
42 | * This should result in the template automatically supporting new ExLlama features in future
43 |
44 | ### Update: 19th June 2023
45 | * Major update to the template
46 | * text-generation-webui is now integrated with:
47 | * AutoGPTQ with support for all Runpod GPU types
48 | * ExLlama, turbo-charged Llama GPTQ engine - performs 2x faster than AutoGPTQ (Llama 4bit GPTQs only)
49 | * CUDA-accelerated GGML support, with support for all Runpod systems and GPUs.
50 | * All text-generation-webui extensions are included and supported (Chat, SuperBooga, Whisper, etc).
51 | * text-generation-webui is always up-to-date with the latest code and features.
52 | * Automatic model download and loading via environment variable `MODEL`.
53 | * Pass text-generation-webui parameters via environment variable `UI_ARGS`.
54 |
55 | This template will automatically start [oobabooga's text-generation-webui](https://github.com/oobabooga/text-generation-webui) on port 7860.
56 |
57 | It can load quantised GPTQ models (3-bit, 4-bit and 8-bit), quantised GGML models (2, 3, 4, 5, 6 and 8-bit) with full GPU acceleration, as well as pytorch format models in 16-bit, 8-bit and 4-bit.
58 |
59 | It provides:
60 | * [text-generation-webui](https://github.com/oobabooga/text-generation-webui) with all extensions.
61 | * GPTQ support via [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) - 2, 3, 4 and 8-bit, all model types.
62 | * GPTQ support via [ExLlama](https://github.com/turboderp/exllama) and ExLlamav2 - 4-bit Llama models only.
63 | * GGUF with GPU acceleration via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python).
64 | * AWQ support via AutoAWQ.
65 |
66 | ## Volume support
67 |
68 | This template supports volumes mounted under `/workspace`.
69 |
70 | On boot, text-generation-webui will be moved to `/workspace/text-generation-webui`. Therefore all downloaded models, and any saved settings/characters/etc, will be persisted on your volume, including Network Volumes.
71 |
72 | With default settings, it will create a 100GB volume on `/workspace`. This storage is persistent after shutting down and then restarting the pod. But its contents will be lost if you delete the pod. Runpod supports Network Volumes in some datacentres. If you create a Network Volume and use it with this template, your models will be stored on the Network Volume and will be persistent permanently, including working between different kinds of pods (within that same datacentre).
73 |
74 | ## How to access the UI
75 |
76 | Once the pod is started, click **Connect** and then **HTTP [Port 7860]**.
77 |
78 | ## How to access the API
79 |
80 | ### HTTP API - non-streaming
81 | The HTTP API on port 5000 can be accessed directly from the **Connect** tab:
82 |
83 | Right-click on **HTTP [Port 5000]** and choose to "Copy Link Address" (or similar wording).
84 |
85 | This will copy your HTTP API URL to your clipboard.
86 |
87 | ### WebSockets API - streaming
88 |
89 | A TCP port will be opened on the public IP of the pod.
90 |
91 | You can see the public IP and the external port under:
92 | * **My Pods** -> **Connect** -> **TCP port forwarding**
93 | * Then connect to the **Public IP** on the **External port** that's listed against `Internal: 5005`
94 |
95 | 
96 |
97 | ## API documentation
98 |
99 | [text-generation-webui API documentation](https://github.com/oobabooga/text-generation-webui/blob/main/docs/12%20-%20OpenAI%20API.md)
100 |
101 | ## How to download and use a GPTQ model, using AutoGPTQ
102 |
103 | Once you're in the UI:
104 |
105 | 1. Click the **Model tab**.
106 | 2. Under **Download custom model or LoRA**, enter an HF repo to download, for example: `TheBloke/vicuna-13b-v1.3-GPTQ`.
107 | 3. Click **Download**.
108 | 4. Wait until it says it's finished downloading.
109 | 5. Click the **Refresh** icon next to **Model** in the top left.
110 | 6. In the **Model drop-down**: choose the model you just downloaded, eg `vicuna-13b-v1.3-GPTQ`.
111 | 7. Once it says it's loaded, click the **Text Generation tab** and enter a prompt!
112 |
113 | ## How to use a model with the ExLlama turbo-charged GPTQ engine
114 |
115 | For Llama 4-bit GPTQs, you have the option of using ExLlama instead of AutoGPTQ.
116 |
117 | 1. Download the model as described above.
118 | 2. On the **Models** tab, change the **Loader** dropdown to **ExLlama**
119 | 3. Click **Reload** to load the model with ExLlama.
120 | 3. For most systems, you're done! You can now run inference as normal, and expect to see better performance.
121 | 4. If you're using a dual-GPU system, you can configure ExLlama to use both GPUs:
122 | - In the **gpu-split** text box, enter a comma-separated list of the VRAM to use on each GPU
123 | - For example, if using a system with 2 x 24GB GPUs, you could enter `23,23`
124 | - **Note**: Multiple GPUs should only be used for loading larger models than can load on one GPU. Multi-GPU inference is not faster than single GPU in cases where one GPU has enough VRAM to load the model.
125 | 5. To optionally save ExLlama as the loader for this model, click **Save Settings**.
126 |
127 | If you want to use ExLlama permanently, for all models, you can add the `--loader exllama` parameter to text-generation-webui.
128 |
129 | This can be done either by editing `/workspace/run-text-generation-webui.sh`, or by passing the `UI_ARGS` environment variable via Template Overrides. Both methods are described below.
130 |
131 | ## How to download and use a GGML model
132 |
133 | The UI doesn't currently support downloading a single GGML model via the UI. Therefore it's recommended to either use the `MODEL` environment variable described below, or else to SSH in.
134 |
135 | To download a model via SSH:
136 | * SSH to the server, then:
137 |
138 | ```
139 | cd /workspace/text-generation-webui/models
140 | wget https://huggingface.co/<repo_id>/resolve/main/<filename>
141 | ```
142 |
143 | For example, to download `vicuna-13b-v1.3.ggmlv3.q4_K_M.bin` from `TheBloke/vicuna-13b-v1.3-GGML`:
144 | ```
145 | wget https://huggingface.co/TheBloke/vicuna-13b-v1.3-GGML/resolve/main/vicuna-13b-v1.3.ggmlv3.q4_K_M.bin
146 | ```
147 |
148 | Once the download is finished, you can access the UI and:
149 | * Click the **Models** tab;
150 | * Untick **Autoload the model**;
151 | * Click the **Refresh** icon next to **Model** in the top left;
152 | * Choose the GGML file you just downloaded;
153 | * In the **Loader** dropdown, choose **llama.cpp**;
154 | * For full GPU acceleration, set **Threads** to **1** and **n-gpu-layers** to 100;
155 | * Note that whether you can do full acceleration will depend on the GPU you've chosen, the size of the model, and the quantisation size. If using one of my models, refer to the README for the list of quant sizes and pay attention to the "Max RAM" column. For full acceleration, pick a quant size which has "Max RAM" 2-3GB lower than the total VRAM of your GPU.
156 | * If you can't do full GPU acceleration, set **Threads** to 8 to start with and **n-gpu-layers** to as many layers as the GPU has VRAM for.
157 | * You can experiment with higher **Threads** value, which depending on the system in question may result in slightly higher performance.
158 | * Click **Save settings** and then **Reload**
159 | * The model will now load.
160 | * Once it says it's loaded, click the **Text generation tab** and enter a prompt!
161 |
162 | ## Template environment variables: automatic model download and UI parameters
163 |
164 | This template supports two environment variables which you can specify via **Template Overrides**.
165 |
166 | * `MODEL`
167 | * Pass in the ID of a Hugging Face repo, or an `https://` link to a single GGML model file
168 | * Examples of valid values for `MODEL`:
169 | * `TheBloke/vicuna-13b-v1.3-GPTQ`
170 | * `https://huggingface.co/TheBloke/vicuna-13b-v1.3-GGML/resolve/main/vicuna-13b-v1.3.ggmlv3.q4_K_M.bin`
171 | * When a `MODEL` value is passed, the following will happen:
172 | * On Docker launch, the passed model will be automatically downloaded to `/workspace/text-generation-webui/models`
173 | * **Note: this may take some time and the UI will not be available until the model has finished downloading.**
174 | * Once the model is downloaded, text-generation-webui will load this model automatically
175 | * To monitor the progress of the download, you can SSH in and run:
176 | * `tail -100f /workspace/logs/fetch-model.log`
177 | * `UI_ARGS`
178 | * Pass in any text-generation-webui launch parameters you want to use
179 | * For a guide to valid parameters, please see: https://github.com/oobabooga/text-generation-webui/tree/main#basic-settings
180 | * Example value: `--n-gpu-layers 100 --threads 1` to ensure a GGML model is fully loaded onto GPU, with optimal performance parameters.
181 | * Note: no checking for valid parameters is currently done. So if invalid params are entered, it can block text-generation-webui from launching.
182 | * If the UI does not launch, SSH in and run:
183 | * `tail -100f /workspace/logs/text-generation-webui.log` to see what the UI is doing.
184 |
185 | ## How to control the UI
186 |
187 | To change into Chat mode, click the `Interface Settings` tab then change Mode to `chat` and click `Apply and restart the interface`. To go back, change Mode back to `Default`.
188 |
189 | You can also try changing other settings in the `Interface Settings` tab, including enabling extensions.
190 |
191 | ### Permanently changing UI settings, and controlling the server via SSH
192 |
193 | To have the UI always launch with certain settings, you can SSH in and edit the script `/workspace/run-text-generation-webui.sh`
194 |
195 | Add or change any command line arguments you want in that script, then save it. Your settings will persist on the volume between pod restarts, and will persist permanently if a Network Volume is being used.
196 |
197 | Once you've changed settings you can then restart the server by running:
198 | ```
199 | /root/scripts/restart-text-generation-webui.sh
200 | ```
201 |
202 | For an alternative method of specifying parameters, refer to the Environment Variables section above for details on how to use `UI_ARGS` in a Template Override.
203 |
204 | ## Server logs
205 |
206 | The logs from launching text-generation-webui are stored at `/workspace/logs/text-generation-webui.log`
207 |
208 | You can read them by SSHing in and typing:
209 | ```
210 | cat /workspace/logs/text-generation-webui.log
211 | ```
212 |
213 | Or to watch them live:
214 | ```
215 | tail -100f /workspace/logs/text-generation-webui.log
216 | ```
217 |
218 | ## Looking for models?
219 |
220 | I have over 250 repos at HuggingFace, see them here: https://huggingface.co/TheBloke
221 |
222 | ## About this template
223 |
224 | This template uses Docker `thebloke/cuda11.8.0-ubuntu22.04-oneclick:latest`
225 |
226 | The source files for this Docker can be found at: https://github.com/TheBlokeAI/dockerLLM
227 |
228 | ## Support
229 |
230 | To get support, or to chat about AI/LLM in general, join us at my fast-growing Discord: https://discord.gg/Jq4vkcDakD
231 |
232 | ## Want to contribute?
233 |
234 | I accept donations towards my time and efforts in the open source LLM community:
235 |
236 | * Patreon: https://www.patreon.com/TheBlokeAI
237 | * Ko-Fi: https://ko-fi.com/TheBlokeAI
238 |
--------------------------------------------------------------------------------
/build_docker.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# Build and push a single Docker image from one of the Dockerfile folders in
# this repository.
import datetime
import os
import subprocess
import logging
import argparse

# Default tag: today's date formatted as DDMMYYYY.
today_tag = datetime.datetime.now().strftime("%d%m%Y")

# Command-line interface.
parser = argparse.ArgumentParser(description="Build Dockerfile")
parser.add_argument('docker', type=str, help='Name of the Dockerfile to build - should match a folder name in this repo')
parser.add_argument('--username', type=str, default="thebloke", help="Docker Hub username to push to. Defaults to: thebloke")
parser.add_argument('--tag', type=str, default=today_tag, help=f"Tag to use. Defaults to today's date: {today_tag}")
parser.add_argument('--latest', action="store_true", help='If specified, we will also tag and push :latest')
args = parser.parse_args()

# Root logger, configured for timestamped INFO-level output.
logger = logging.getLogger()
logging.basicConfig(
    format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
)

# Directory containing this script; each image's build context is a
# subdirectory of it, named after the image.
dockerLLM_dir = os.path.dirname(os.path.realpath(__file__))
username = args.username
25 |
def docker_command(command):
    """Run a Docker CLI command through the shell, logging it first.

    Args:
        command: the full shell command line to execute.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero
            (logged before being re-raised so callers can abort cleanly).
    """
    logger.info(f"Running docker command: {command}")
    try:
        subprocess.check_call(command, shell=True)
    except subprocess.CalledProcessError as e:
        logger.error(f"Got error while executing docker command: {e}")
        # Re-raise unchanged; the top-level handler decides how to abort.
        raise
35 |
def build(docker_repo, tag, from_docker=None):
    """Build the Dockerfile under <repo>/<docker_repo> and push the result.

    Args:
        docker_repo: folder name of the Dockerfile, also used as the repo name.
        tag: tag to apply to the built image.
        from_docker: optional base container, passed as the DOCKER_FROM build arg.

    Returns:
        The full container reference that was built and pushed.
    """
    container = f"{username}/{docker_repo}:{tag}"
    logger.info(f"Building and pushing {container}")

    build_args = ["--progress=plain", "-t", container]
    if from_docker is not None:
        build_args.append(f"--build-arg DOCKER_FROM={from_docker}")

    docker_command(f"docker build {' '.join(build_args)} {dockerLLM_dir}/{docker_repo}")
    docker_command(f"docker push {container}")

    return container
51 |
def tag(source_container, target_container):
    """Apply target_container as an extra tag on source_container, then push it."""
    docker_command(f"docker tag {source_container} {target_container}")
    docker_command(f"docker push {target_container}")
56 |
57 |
try:
    # Build and push the requested image, then optionally re-tag it :latest.
    container = build(args.docker, args.tag)
    logger.info(f"Successfully built and pushed the container to {container}")

    if args.latest:
        latest = f"{username}/{args.docker}:latest"
        tag(container, latest)
        logger.info(f"Successfully tagged and pushed to {latest}")

except subprocess.CalledProcessError:
    # The failing command was already logged by docker_command().
    logger.error("Process aborted due to error running Docker commands")
    # Exit non-zero so shells and CI can detect the failure.
    raise SystemExit(1)
71 |
72 |
--------------------------------------------------------------------------------
/build_oneclick.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# Build and push the chain of "oneclick" Docker images, each layered on top
# of the previous one via the DOCKER_FROM build argument.
import datetime
import os
import subprocess
import logging

# Root logger, configured to emit timestamped INFO-level messages.
logger = logging.getLogger()
logging.basicConfig(
    format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
)

# Directory containing this script; each image's build context is a
# subdirectory of it, named after the image.
dockerLLM_dir = os.path.dirname(os.path.realpath(__file__))
# Docker Hub account the images are pushed to.
username = "thebloke"
14 |
def build(docker_repo, tag, from_docker=None):
    """Build the Dockerfile under <repo>/<docker_repo> (with --no-cache) and push it.

    Args:
        docker_repo: folder name of the Dockerfile, also used as the repo name.
        tag: tag to apply to the built image.
        from_docker: optional base container, passed as the DOCKER_FROM build
            arg so images can be chained on top of each other.

    Returns:
        The full container reference that was built and pushed.

    Raises:
        subprocess.CalledProcessError: if either the build or push fails.
    """
    docker_container = f"{username}/{docker_repo}:{tag}"
    logger.info(f"Building and pushing {docker_container}")

    docker_build_arg = f"--no-cache --progress=plain -t {docker_container}"
    if from_docker is not None:
        docker_build_arg += f" --build-arg DOCKER_FROM={from_docker}"

    build_command = f"docker build {docker_build_arg} {dockerLLM_dir}/{docker_repo}"
    push_command = f"docker push {docker_container}"

    try:
        logger.info(f"Building {docker_repo} using command: {build_command}")
        subprocess.check_call(build_command, shell=True)

        logger.info(f"Pushing {docker_repo} using command: {push_command}")
        subprocess.check_call(push_command, shell=True)
    except subprocess.CalledProcessError as e:
        logger.error(f"Got error while executing docker command: {e}")
        # Re-raise unchanged; the top-level handler decides how to abort.
        raise

    return docker_container
39 |
# Tag for today's builds: date formatted as DDMMYYYY.
today_tag = datetime.datetime.now().strftime("%d%m%Y")

try:
    # Each image is built FROM the previous one via the DOCKER_FROM build arg.
    pytorch_container = build("cuda12.1.1-ubuntu22.04-pytorch", "1")
    textgen_container = build("cuda12.1.1-ubuntu22.04-textgen", today_tag, pytorch_container)
    oneclick_container = build("cuda11.8.0-ubuntu22.04-oneclick", today_tag, textgen_container)

    logger.info(f"Successfully built and pushed {oneclick_container}")
except subprocess.CalledProcessError:
    # The failing command was already logged inside build().
    logger.error("Process aborted due to error running Docker commands")
    # Exit non-zero so shells and CI can detect the failure.
    raise SystemExit(1)
52 |
--------------------------------------------------------------------------------
/conf-files/bashrc:
--------------------------------------------------------------------------------
1 | # ~/.bashrc: executed by bash(1) for non-login shells.
2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
3 | # for examples
4 |
5 | # If not running interactively, don't do anything
6 | [ -z "$PS1" ] && return
7 |
8 | # don't put duplicate lines in the history. See bash(1) for more options
9 | # ... or force ignoredups and ignorespace
10 | HISTCONTROL=ignoredups:ignorespace
11 |
12 | # append to the history file, don't overwrite it
13 | shopt -s histappend
14 | shopt -s no_empty_cmd_completion
15 |
16 | # for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
17 | PROMPT_COMMAND='history -a ; echo -ne "\033]0;${LOGNAME}@${HOSTNAME} : ${PWD}\007"'
18 | unset HISTFILESIZE
19 | HISTSIZE=20000
20 | export HISTTIMEFORMAT='%d/%m/%y-%H:%M:%S '
21 |
22 | EDITOR=vim
23 | alias vi=vim
24 | alias more=less
25 |
26 |
# Persist bash history on the /workspace volume so it survives pod restarts.
# If a real (non-symlink) history file exists in /root, merge it into the
# workspace copy, then replace /root/.bash_history with a symlink.
if [[ -f /root/.bash_history && ! -h /root/.bash_history ]]
then
    mv /root/.bash_history /tmp/bash_history.mv
    # Ensure the workspace history file exists so cat does not error on the
    # very first run in a fresh workspace.
    touch /workspace/.bash_history
    cat /workspace/.bash_history /tmp/bash_history.mv > /tmp/bash.cat
    mv /tmp/bash.cat /workspace/.bash_history
fi

if [[ ! -h /root/.bash_history ]]
then
    ln -s /workspace/.bash_history /root/.bash_history
fi
38 |
39 | # check the window size after each command and, if necessary,
40 | # update the values of LINES and COLUMNS.
41 | shopt -s checkwinsize
42 |
43 | # make less more friendly for non-text input files, see lesspipe(1)
44 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
45 |
46 | # set variable identifying the chroot you work in (used in the prompt below)
47 | if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
48 | debian_chroot=$(cat /etc/debian_chroot)
49 | fi
50 |
51 | # set a fancy prompt (non-color, unless we know we "want" color)
52 | case "$TERM" in
53 | xterm-color) color_prompt=yes;;
54 | esac
55 |
56 | # uncomment for a colored prompt, if the terminal has the capability; turned
57 | # off by default to not distract the user: the focus in a terminal window
58 | # should be on the output of commands, not on the prompt
59 | #force_color_prompt=yes
60 |
61 | if [ -n "$force_color_prompt" ]; then
62 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
63 | # We have color support; assume it's compliant with Ecma-48
64 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
65 | # a case would tend to support setf rather than setaf.)
66 | color_prompt=yes
67 | else
68 | color_prompt=
69 | fi
70 | fi
71 |
72 | if [ "$color_prompt" = yes ]; then
73 | PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
74 | else
75 | PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
76 | fi
77 | unset color_prompt force_color_prompt
78 |
79 | # If this is an xterm set the title to user@host:dir
80 | case "$TERM" in
81 | xterm*|rxvt*)
82 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
83 | ;;
84 | *)
85 | ;;
86 | esac
87 |
88 | # enable color support of ls and also add handy aliases
89 | LS_COLORS="*~=0;38;2;102;102;102:bd=0;38;2;154;237;254;48;2;51;51;51:ca=0:cd=0;38;2;255;106;193;48;2;51;51;51:di=0;38;2;87;199;255:do=0;38;2;0;0;0;48;2;255;106;193:ex=1;38;2;255;92;87:fi=0:ln=0;38;2;255;106;193:mh=0:mi=0;38;2;0;0;0;48;2;255;92;87:no=0:or=0;38;2;0;0;0;48;2;255;92;87:ow=0:pi=0;38;2;0;0;0;48;2;87;199;255:rs=0:sg=0:so=0;38;2;0;0;0;48;2;255;106;193:st=0:su=0:tw=0:*.a=1;38;2;255;92;87:*.c=0;38;2;90;247;142:*.d=0;38;2;90;247;142:*.h=0;38;2;90;247;142:*.m=0;38;2;90;247;142:*.o=0;38;2;102;102;102:*.p=0;38;2;90;247;142:*.r=0;38;2;90;247;142:*.t=0;38;2;90;247;142:*.z=4;38;2;154;237;254:*.7z=4;38;2;154;237;254:*.as=0;38;2;90;247;142:*.bc=0;38;2;102;102;102:*.bz=4;38;2;154;237;254:*.cc=0;38;2;90;247;142:*.cp=0;38;2;90;247;142:*.cr=0;38;2;90;247;142:*.cs=0;38;2;90;247;142:*.di=0;38;2;90;247;142:*.el=0;38;2;90;247;142:*.ex=0;38;2;90;247;142:*.fs=0;38;2;90;247;142:*.go=0;38;2;90;247;142:*.gv=0;38;2;90;247;142:*.gz=4;38;2;154;237;254:*.hh=0;38;2;90;247;142:*.hi=0;38;2;102;102;102:*.hs=0;38;2;90;247;142:*.jl=0;38;2;90;247;142:*.js=0;38;2;90;247;142:*.ko=1;38;2;255;92;87:*.kt=0;38;2;90;247;142:*.la=0;38;2;102;102;102:*.ll=0;38;2;90;247;142:*.lo=0;38;2;102;102;102:*.md=0;38;2;243;249;157:*.ml=0;38;2;90;247;142:*.mn=0;38;2;90;247;142:*.nb=0;38;2;90;247;142:*.pl=0;38;2;90;247;142:*.pm=0;38;2;90;247;142:*.pp=0;38;2;90;247;142:*.ps=0;38;2;255;92;87:*.py=0;38;2;90;247;142:*.rb=0;38;2;90;247;142:*.rm=0;38;2;255;180;223:*.rs=0;38;2;90;247;142:*.sh=0;38;2;90;247;142:*.so=1;38;2;255;92;87:*.td=0;38;2;90;247;142:*.ts=0;38;2;90;247;142:*.ui=0;38;2;243;249;157:*.vb=0;38;2;90;247;142:*.wv=0;38;2;255;180;223:*.xz=4;38;2;154;237;254:*.aif=0;38;2;255;180;223:*.ape=0;38;2;255;180;223:*.apk=4;38;2;154;237;254:*.arj=4;38;2;154;237;254:*.asa=0;38;2;90;247;142:*.aux=0;38;2;102;102;102:*.avi=0;38;2;255;180;223:*.awk=0;38;2;90;247;142:*.bag=4;38;2;154;237;254:*.bak=0;38;2;102;102;102:*.bat=1;38;2;255;92;87:*.bbl=0;38;2;102;102;102:*.bcf=0;38;2;102;102;102:*.bib=0;38;2;243;249;157:*.bin=
4;38;2;154;237;254:*.blg=0;38;2;102;102;102:*.bmp=0;38;2;255;180;223:*.bsh=0;38;2;90;247;142:*.bst=0;38;2;243;249;157:*.bz2=4;38;2;154;237;254:*.c++=0;38;2;90;247;142:*.cfg=0;38;2;243;249;157:*.cgi=0;38;2;90;247;142:*.clj=0;38;2;90;247;142:*.com=1;38;2;255;92;87:*.cpp=0;38;2;90;247;142:*.css=0;38;2;90;247;142:*.csv=0;38;2;243;249;157:*.csx=0;38;2;90;247;142:*.cxx=0;38;2;90;247;142:*.deb=4;38;2;154;237;254:*.def=0;38;2;90;247;142:*.dll=1;38;2;255;92;87:*.dmg=4;38;2;154;237;254:*.doc=0;38;2;255;92;87:*.dot=0;38;2;90;247;142:*.dox=0;38;2;165;255;195:*.dpr=0;38;2;90;247;142:*.elc=0;38;2;90;247;142:*.elm=0;38;2;90;247;142:*.epp=0;38;2;90;247;142:*.eps=0;38;2;255;180;223:*.erl=0;38;2;90;247;142:*.exe=1;38;2;255;92;87:*.exs=0;38;2;90;247;142:*.fls=0;38;2;102;102;102:*.flv=0;38;2;255;180;223:*.fnt=0;38;2;255;180;223:*.fon=0;38;2;255;180;223:*.fsi=0;38;2;90;247;142:*.fsx=0;38;2;90;247;142:*.gif=0;38;2;255;180;223:*.git=0;38;2;102;102;102:*.gvy=0;38;2;90;247;142:*.h++=0;38;2;90;247;142:*.hpp=0;38;2;90;247;142:*.htc=0;38;2;90;247;142:*.htm=0;38;2;243;249;157:*.hxx=0;38;2;90;247;142:*.ico=0;38;2;255;180;223:*.ics=0;38;2;255;92;87:*.idx=0;38;2;102;102;102:*.ilg=0;38;2;102;102;102:*.img=4;38;2;154;237;254:*.inc=0;38;2;90;247;142:*.ind=0;38;2;102;102;102:*.ini=0;38;2;243;249;157:*.inl=0;38;2;90;247;142:*.ipp=0;38;2;90;247;142:*.iso=4;38;2;154;237;254:*.jar=4;38;2;154;237;254:*.jpg=0;38;2;255;180;223:*.kex=0;38;2;255;92;87:*.kts=0;38;2;90;247;142:*.log=0;38;2;102;102;102:*.ltx=0;38;2;90;247;142:*.lua=0;38;2;90;247;142:*.m3u=0;38;2;255;180;223:*.m4a=0;38;2;255;180;223:*.m4v=0;38;2;255;180;223:*.mid=0;38;2;255;180;223:*.mir=0;38;2;90;247;142:*.mkv=0;38;2;255;180;223:*.mli=0;38;2;90;247;142:*.mov=0;38;2;255;180;223:*.mp3=0;38;2;255;180;223:*.mp4=0;38;2;255;180;223:*.mpg=0;38;2;255;180;223:*.nix=0;38;2;243;249;157:*.odp=0;38;2;255;92;87:*.ods=0;38;2;255;92;87:*.odt=0;38;2;255;92;87:*.ogg=0;38;2;255;180;223:*.org=0;38;2;243;249;157:*.otf=0;38;2;255;180;223:*.out=0;38;2;102;102;102:*.pas
=0;38;2;90;247;142:*.pbm=0;38;2;255;180;223:*.pdf=0;38;2;255;92;87:*.pgm=0;38;2;255;180;223:*.php=0;38;2;90;247;142:*.pid=0;38;2;102;102;102:*.pkg=4;38;2;154;237;254:*.png=0;38;2;255;180;223:*.pod=0;38;2;90;247;142:*.ppm=0;38;2;255;180;223:*.pps=0;38;2;255;92;87:*.ppt=0;38;2;255;92;87:*.pro=0;38;2;165;255;195:*.ps1=0;38;2;90;247;142:*.psd=0;38;2;255;180;223:*.pyc=0;38;2;102;102;102:*.pyd=0;38;2;102;102;102:*.pyo=0;38;2;102;102;102:*.rar=4;38;2;154;237;254:*.rpm=4;38;2;154;237;254:*.rst=0;38;2;243;249;157:*.rtf=0;38;2;255;92;87:*.sbt=0;38;2;90;247;142:*.sql=0;38;2;90;247;142:*.sty=0;38;2;102;102;102:*.svg=0;38;2;255;180;223:*.swf=0;38;2;255;180;223:*.swp=0;38;2;102;102;102:*.sxi=0;38;2;255;92;87:*.sxw=0;38;2;255;92;87:*.tar=4;38;2;154;237;254:*.tbz=4;38;2;154;237;254:*.tcl=0;38;2;90;247;142:*.tex=0;38;2;90;247;142:*.tgz=4;38;2;154;237;254:*.tif=0;38;2;255;180;223:*.tml=0;38;2;243;249;157:*.tmp=0;38;2;102;102;102:*.toc=0;38;2;102;102;102:*.tsx=0;38;2;90;247;142:*.ttf=0;38;2;255;180;223:*.txt=0;38;2;243;249;157:*.vcd=4;38;2;154;237;254:*.vim=0;38;2;90;247;142:*.vob=0;38;2;255;180;223:*.wav=0;38;2;255;180;223:*.wma=0;38;2;255;180;223:*.wmv=0;38;2;255;180;223:*.xcf=0;38;2;255;180;223:*.xlr=0;38;2;255;92;87:*.xls=0;38;2;255;92;87:*.xml=0;38;2;243;249;157:*.xmp=0;38;2;243;249;157:*.yml=0;38;2;243;249;157:*.zip=4;38;2;154;237;254:*.zsh=0;38;2;90;247;142:*.zst=4;38;2;154;237;254:*TODO=1:*hgrc=0;38;2;165;255;195:*.bash=0;38;2;90;247;142:*.conf=0;38;2;243;249;157:*.dart=0;38;2;90;247;142:*.diff=0;38;2;90;247;142:*.docx=0;38;2;255;92;87:*.epub=0;38;2;255;92;87:*.fish=0;38;2;90;247;142:*.flac=0;38;2;255;180;223:*.h264=0;38;2;255;180;223:*.hgrc=0;38;2;165;255;195:*.html=0;38;2;243;249;157:*.java=0;38;2;90;247;142:*.jpeg=0;38;2;255;180;223:*.json=0;38;2;243;249;157:*.less=0;38;2;90;247;142:*.lisp=0;38;2;90;247;142:*.lock=0;38;2;102;102;102:*.make=0;38;2;165;255;195:*.mpeg=0;38;2;255;180;223:*.opus=0;38;2;255;180;223:*.orig=0;38;2;102;102;102:*.pptx=0;38;2;255;92;87:*.psd1=0;38;2;9
0;247;142:*.psm1=0;38;2;90;247;142:*.purs=0;38;2;90;247;142:*.rlib=0;38;2;102;102;102:*.sass=0;38;2;90;247;142:*.scss=0;38;2;90;247;142:*.tbz2=4;38;2;154;237;254:*.tiff=0;38;2;255;180;223:*.toml=0;38;2;243;249;157:*.webm=0;38;2;255;180;223:*.webp=0;38;2;255;180;223:*.woff=0;38;2;255;180;223:*.xbps=4;38;2;154;237;254:*.xlsx=0;38;2;255;92;87:*.yaml=0;38;2;243;249;157:*.cabal=0;38;2;90;247;142:*.cache=0;38;2;102;102;102:*.class=0;38;2;102;102;102:*.cmake=0;38;2;165;255;195:*.dyn_o=0;38;2;102;102;102:*.ipynb=0;38;2;90;247;142:*.mdown=0;38;2;243;249;157:*.patch=0;38;2;90;247;142:*.scala=0;38;2;90;247;142:*.shtml=0;38;2;243;249;157:*.swift=0;38;2;90;247;142:*.toast=4;38;2;154;237;254:*.xhtml=0;38;2;243;249;157:*README=0;38;2;40;42;54;48;2;243;249;157:*passwd=0;38;2;243;249;157:*shadow=0;38;2;243;249;157:*.config=0;38;2;243;249;157:*.dyn_hi=0;38;2;102;102;102:*.flake8=0;38;2;165;255;195:*.gradle=0;38;2;90;247;142:*.groovy=0;38;2;90;247;142:*.ignore=0;38;2;165;255;195:*.matlab=0;38;2;90;247;142:*COPYING=0;38;2;153;153;153:*INSTALL=0;38;2;40;42;54;48;2;243;249;157:*LICENSE=0;38;2;153;153;153:*TODO.md=1:*.desktop=0;38;2;243;249;157:*.gemspec=0;38;2;165;255;195:*Doxyfile=0;38;2;165;255;195:*Makefile=0;38;2;165;255;195:*TODO.txt=1:*setup.py=0;38;2;165;255;195:*.DS_Store=0;38;2;102;102;102:*.cmake.in=0;38;2;165;255;195:*.fdignore=0;38;2;165;255;195:*.kdevelop=0;38;2;165;255;195:*.markdown=0;38;2;243;249;157:*.rgignore=0;38;2;165;255;195:*COPYRIGHT=0;38;2;153;153;153:*README.md=0;38;2;40;42;54;48;2;243;249;157:*configure=0;38;2;165;255;195:*.gitconfig=0;38;2;165;255;195:*.gitignore=0;38;2;165;255;195:*.localized=0;38;2;102;102;102:*.scons_opt=0;38;2;102;102;102:*CODEOWNERS=0;38;2;165;255;195:*Dockerfile=0;38;2;243;249;157:*INSTALL.md=0;38;2;40;42;54;48;2;243;249;157:*README.txt=0;38;2;40;42;54;48;2;243;249;157:*SConscript=0;38;2;165;255;195:*SConstruct=0;38;2;165;255;195:*.gitmodules=0;38;2;165;255;195:*.synctex.gz=0;38;2;102;102;102:*.travis.yml=0;38;2;90;247;142:*INSTALL.txt=0;
38;2;40;42;54;48;2;243;249;157:*LICENSE-MIT=0;38;2;153;153;153:*MANIFEST.in=0;38;2;165;255;195:*Makefile.am=0;38;2;165;255;195:*Makefile.in=0;38;2;102;102;102:*.applescript=0;38;2;90;247;142:*.fdb_latexmk=0;38;2;102;102;102:*CONTRIBUTORS=0;38;2;40;42;54;48;2;243;249;157:*appveyor.yml=0;38;2;90;247;142:*configure.ac=0;38;2;165;255;195:*.clang-format=0;38;2;165;255;195:*.gitattributes=0;38;2;165;255;195:*.gitlab-ci.yml=0;38;2;90;247;142:*CMakeCache.txt=0;38;2;102;102;102:*CMakeLists.txt=0;38;2;165;255;195:*LICENSE-APACHE=0;38;2;153;153;153:*CONTRIBUTORS.md=0;38;2;40;42;54;48;2;243;249;157:*.sconsign.dblite=0;38;2;102;102;102:*CONTRIBUTORS.txt=0;38;2;40;42;54;48;2;243;249;157:*requirements.txt=0;38;2;165;255;195:*package-lock.json=0;38;2;102;102;102:*.CFUserTextEncoding=0;38;2;102;102;102"
90 | export LS_COLORS
91 | alias ls='ls --color=auto'
92 | #alias dir='dir --color=auto'
93 | #alias vdir='vdir --color=auto'
94 |
95 | alias grep='grep --color=auto'
96 | alias fgrep='fgrep --color=auto'
97 | alias egrep='egrep --color=auto'
98 |
99 | # some more ls aliases
100 | alias ll='ls -alF'
101 | alias la='ls -A'
102 | alias l='ls -CF'
103 |
104 | # Alias definitions.
105 | # You may want to put all your additions into a separate file like
106 | # ~/.bash_aliases, instead of adding them here directly.
107 | # See /usr/share/doc/bash-doc/examples in the bash-doc package.
108 |
109 | if [ -f ~/.bash_aliases ]; then
110 | . ~/.bash_aliases
111 | fi
112 |
113 | alias python=python3
114 | alias pip=pip3
115 |
116 | export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:"$PATH"
117 |
118 |
119 | if [[ -d /workspace/venv/pytorch2 ]]
120 | then
121 | source /workspace/venv/pytorch2/bin/activate
122 | fi
123 |
124 | source ~/.iterm2_shell_integration.bash
125 |
126 | # enable programmable completion features (you don't need to enable
127 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile
128 | # sources /etc/bash.bashrc).
129 | #if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
130 | # . /etc/bash_completion
131 | #fi
132 |
--------------------------------------------------------------------------------
/conf-files/iterm2_shell_integration.zsh:
--------------------------------------------------------------------------------
1 | # This program is free software; you can redistribute it and/or
2 | # modify it under the terms of the GNU General Public License
3 | # as published by the Free Software Foundation; either version 2
4 | # of the License, or (at your option) any later version.
5 | #
6 | # This program is distributed in the hope that it will be useful,
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 | # GNU General Public License for more details.
10 | #
11 | # You should have received a copy of the GNU General Public License
12 | # along with this program; if not, write to the Free Software
13 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 |
15 | if [[ -o interactive ]]; then
16 | if [ "${ITERM_ENABLE_SHELL_INTEGRATION_WITH_TMUX-}""$TERM" != "tmux-256color" -a "${ITERM_ENABLE_SHELL_INTEGRATION_WITH_TMUX-}""$TERM" != "screen" -a "${ITERM_SHELL_INTEGRATION_INSTALLED-}" = "" -a "$TERM" != linux -a "$TERM" != dumb ]; then
17 | ITERM_SHELL_INTEGRATION_INSTALLED=Yes
18 | ITERM2_SHOULD_DECORATE_PROMPT="1"
19 | # Indicates start of command output. Runs just before command executes.
20 | iterm2_before_cmd_executes() {
21 | if [ "$TERM_PROGRAM" = "iTerm.app" ]; then
22 | printf "\033]133;C;\r\007"
23 | else
24 | printf "\033]133;C;\007"
25 | fi
26 | }
27 |
28 | iterm2_set_user_var() {
29 | printf "\033]1337;SetUserVar=%s=%s\007" "$1" $(printf "%s" "$2" | base64 | tr -d '\n')
30 | }
31 |
32 | # Users can write their own version of this method. It should call
33 | # iterm2_set_user_var but not produce any other output.
34 | # e.g., iterm2_set_user_var currentDirectory $PWD
35 | # Accessible in iTerm2 (in a badge now, elsewhere in the future) as
36 | # \(user.currentDirectory).
37 | whence -v iterm2_print_user_vars > /dev/null 2>&1
38 | if [ $? -ne 0 ]; then
39 | iterm2_print_user_vars() {
40 | true
41 | }
42 | fi
43 |
44 | iterm2_print_state_data() {
45 | local _iterm2_hostname="${iterm2_hostname-}"
46 | if [ -z "${iterm2_hostname:-}" ]; then
47 | _iterm2_hostname=$(hostname -f 2>/dev/null)
48 | fi
49 | printf "\033]1337;RemoteHost=%s@%s\007" "$USER" "${_iterm2_hostname-}"
50 | printf "\033]1337;CurrentDir=%s\007" "$PWD"
51 | iterm2_print_user_vars
52 | }
53 |
54 | # Report return code of command; runs after command finishes but before prompt
55 | iterm2_after_cmd_executes() {
56 | printf "\033]133;D;%s\007" "$STATUS"
57 | iterm2_print_state_data
58 | }
59 |
60 | # Mark start of prompt
61 | iterm2_prompt_mark() {
62 | printf "\033]133;A\007"
63 | }
64 |
65 | # Mark end of prompt
66 | iterm2_prompt_end() {
67 | printf "\033]133;B\007"
68 | }
69 |
70 | # There are three possible paths in life.
71 | #
72 | # 1) A command is entered at the prompt and you press return.
73 | # The following steps happen:
74 | # * iterm2_preexec is invoked
75 | # * PS1 is set to ITERM2_PRECMD_PS1
76 | # * ITERM2_SHOULD_DECORATE_PROMPT is set to 1
77 | # * The command executes (possibly reading or modifying PS1)
78 | # * iterm2_precmd is invoked
79 | # * ITERM2_PRECMD_PS1 is set to PS1 (as modified by command execution)
80 | # * PS1 gets our escape sequences added to it
81 | # * zsh displays your prompt
82 | # * You start entering a command
83 | #
84 | # 2) You press ^C while entering a command at the prompt.
85 | # The following steps happen:
86 | # * (iterm2_preexec is NOT invoked)
87 | # * iterm2_precmd is invoked
88 | # * iterm2_before_cmd_executes is called since we detected that iterm2_preexec was not run
89 | # * (ITERM2_PRECMD_PS1 and PS1 are not messed with, since PS1 already has our escape
90 | # sequences and ITERM2_PRECMD_PS1 already has PS1's original value)
91 | # * zsh displays your prompt
92 | # * You start entering a command
93 | #
94 | # 3) A new shell is born.
95 | # * PS1 has some initial value, either zsh's default or a value set before this script is sourced.
96 | # * iterm2_precmd is invoked
97 | # * ITERM2_SHOULD_DECORATE_PROMPT is initialized to 1
98 | # * ITERM2_PRECMD_PS1 is set to the initial value of PS1
99 | # * PS1 gets our escape sequences added to it
100 | # * Your prompt is shown and you may begin entering a command.
101 | #
102 | # Invariants:
103 | # * ITERM2_SHOULD_DECORATE_PROMPT is 1 during and just after command execution, and "" while the prompt is
104 | # shown and until you enter a command and press return.
105 | # * PS1 does not have our escape sequences during command execution
106 | # * After the command executes but before a new one begins, PS1 has escape sequences and
107 | # ITERM2_PRECMD_PS1 has PS1's original value.
108 | iterm2_decorate_prompt() {
109 | # This should be a raw PS1 without iTerm2's stuff. It could be changed during command
110 | # execution.
111 | ITERM2_PRECMD_PS1="$PS1"
112 | ITERM2_SHOULD_DECORATE_PROMPT=""
113 |
114 | # Add our escape sequences just before the prompt is shown.
115 | # Use ITERM2_SQUELCH_MARK for people who can't mdoify PS1 directly, like powerlevel9k users.
116 | # This is gross but I had a heck of a time writing a correct if statetment for zsh 5.0.2.
117 | local PREFIX=""
118 | if [[ $PS1 == *"$(iterm2_prompt_mark)"* ]]; then
119 | PREFIX=""
120 | elif [[ "${ITERM2_SQUELCH_MARK-}" != "" ]]; then
121 | PREFIX=""
122 | else
123 | PREFIX="%{$(iterm2_prompt_mark)%}"
124 | fi
125 | PS1="$PREFIX$PS1%{$(iterm2_prompt_end)%}"
126 | ITERM2_DECORATED_PS1="$PS1"
127 | }
128 |
129 | iterm2_precmd() {
130 | local STATUS="$?"
131 | if [ -z "${ITERM2_SHOULD_DECORATE_PROMPT-}" ]; then
132 | # You pressed ^C while entering a command (iterm2_preexec did not run)
133 | iterm2_before_cmd_executes
134 | if [ "$PS1" != "${ITERM2_DECORATED_PS1-}" ]; then
135 | # PS1 changed, perhaps in another precmd. See issue 9938.
136 | ITERM2_SHOULD_DECORATE_PROMPT="1"
137 | fi
138 | fi
139 |
140 | iterm2_after_cmd_executes "$STATUS"
141 |
142 | if [ -n "$ITERM2_SHOULD_DECORATE_PROMPT" ]; then
143 | iterm2_decorate_prompt
144 | fi
145 | }
146 |
147 | # This is not run if you press ^C while entering a command.
148 | iterm2_preexec() {
149 | # Set PS1 back to its raw value prior to executing the command.
150 | PS1="$ITERM2_PRECMD_PS1"
151 | ITERM2_SHOULD_DECORATE_PROMPT="1"
152 | iterm2_before_cmd_executes
153 | }
154 |
155 | # If hostname -f is slow on your system set iterm2_hostname prior to
156 | # sourcing this script. We know it is fast on macOS so we don't cache
157 | # it. That lets us handle the hostname changing like when you attach
158 | # to a VPN.
159 | if [ -z "${iterm2_hostname-}" ]; then
160 | if [ "$(uname)" != "Darwin" ]; then
161 | iterm2_hostname=`hostname -f 2>/dev/null`
162 | # Some flavors of BSD (i.e. NetBSD and OpenBSD) don't have the -f option.
163 | if [ $? -ne 0 ]; then
164 | iterm2_hostname=`hostname`
165 | fi
166 | fi
167 | fi
168 |
169 | [[ -z ${precmd_functions-} ]] && precmd_functions=()
170 | precmd_functions=($precmd_functions iterm2_precmd)
171 |
172 | [[ -z ${preexec_functions-} ]] && preexec_functions=()
173 | preexec_functions=($preexec_functions iterm2_preexec)
174 |
175 | iterm2_print_state_data
176 | printf "\033]1337;ShellIntegrationVersion=14;shell=zsh\007"
177 | fi
178 | fi
179 |
--------------------------------------------------------------------------------
/conf-files/passwd:
--------------------------------------------------------------------------------
1 | root:x:0:0:root:/root:/usr/bin/zsh
2 | daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
3 | bin:x:2:2:bin:/bin:/usr/sbin/nologin
4 | sys:x:3:3:sys:/dev:/usr/sbin/nologin
5 | sync:x:4:65534:sync:/bin:/bin/sync
6 | games:x:5:60:games:/usr/games:/usr/sbin/nologin
7 | man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
8 | lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
9 | mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
10 | news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
11 | uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
12 | proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
13 | www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
14 | backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
15 | list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
16 | irc:x:39:39:ircd:/run/ircd:/usr/sbin/nologin
17 | gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
18 | nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
19 | _apt:x:100:65534::/nonexistent:/usr/sbin/nologin
20 | sshd:x:101:65534::/run/sshd:/usr/sbin/nologin
21 |
--------------------------------------------------------------------------------
/conf-files/thebloke.zsh-theme:
--------------------------------------------------------------------------------
1 | PROMPT="\$(virtualenv_prompt_info)${FG[133]} ${USER}@${HOST}:${FG[117]}%~%{$reset_color%}\$(git_prompt_info)\$(git_prompt_status) ${FG[077]}ᐅ%{$reset_color%} "
2 |
3 | ZSH_THEME_GIT_PROMPT_PREFIX=" ${FG[012]}("
4 | ZSH_THEME_GIT_PROMPT_SUFFIX="${FG[012]})%{$reset_color%}"
5 | ZSH_THEME_GIT_PROMPT_DIRTY=" ${FG[133]}✘"
6 | ZSH_THEME_GIT_PROMPT_CLEAN=" ${FG[118]}✔"
7 |
8 | ZSH_THEME_GIT_PROMPT_ADDED="${FG[082]}✚%{$reset_color%}"
9 | ZSH_THEME_GIT_PROMPT_MODIFIED="${FG[166]}✹%{$reset_color%}"
10 | ZSH_THEME_GIT_PROMPT_DELETED="${FG[160]}✖%{$reset_color%}"
11 | ZSH_THEME_GIT_PROMPT_RENAMED="${FG[220]}➜%{$reset_color%}"
12 | ZSH_THEME_GIT_PROMPT_UNMERGED="${FG[082]}═%{$reset_color%}"
13 | ZSH_THEME_GIT_PROMPT_UNTRACKED="${FG[190]}✭%{$reset_color%}"
14 |
15 | ZSH_THEME_VIRTUALENV_PREFIX=" ["
16 | ZSH_THEME_VIRTUALENV_SUFFIX="]"
17 |
--------------------------------------------------------------------------------
/conf-files/tmux.conf:
--------------------------------------------------------------------------------
1 | # Ring the bell if any background window rang a bell
2 | set -g bell-action any
3 |
4 | # Default termtype. If the rcfile sets $TERM, that overrides this value.
5 | set -g default-terminal screen-256color
6 |
7 | # Keep your finger on ctrl, or don't
8 | #bind-key ^D detach-client
9 |
10 | # Create splits and vertical splits
11 | bind-key v split-window -h -p 50 -c "#{pane_current_path}"
12 | bind-key ^V split-window -h -p 50 -c "#{pane_current_path}"
13 | bind-key s split-window -p 50 -c "#{pane_current_path}"
14 | bind-key ^S split-window -p 50 -c "#{pane_current_path}"
15 |
16 | # Pane resize in all four directions using vi bindings.
17 | # Can use these raw but I map them to shift-ctrl- in iTerm.
18 | bind -r H resize-pane -L 5
19 | bind -r J resize-pane -D 5
20 | bind -r K resize-pane -U 5
21 | bind -r L resize-pane -R 5
22 |
23 | # Smart pane switching with awareness of Vim splits.
24 | # See: https://github.com/christoomey/vim-tmux-navigator
25 | is_vim="ps -o state= -o comm= -t '#{pane_tty}' \
26 | | grep -iqE '^[^TXZ ]+ +(\\S+\\/)?g?(view|n?vim?x?)(diff)?$'"
27 | #bind-key -n 'C-h' if-shell "$is_vim" 'send-keys C-h' 'select-pane -L'
28 | #bind-key -n 'C-j' if-shell "$is_vim" 'send-keys C-j' 'select-pane -D'
29 | #bind-key -n 'C-k' if-shell "$is_vim" 'send-keys C-k' 'select-pane -U'
30 | #bind-key -n 'C-l' if-shell "$is_vim" 'send-keys C-l' 'select-pane -R'
31 |
32 | tmux_version='$(tmux -V | sed -En "s/^tmux ([0-9]+(.[0-9]+)?).*/\1/p")'
33 | if-shell -b '[ "$(echo "$tmux_version < 3.0" | bc)" = 1 ]' \
34 | "bind-key -n 'C-\\' if-shell \"$is_vim\" 'send-keys C-\\' 'select-pane -l'"
35 | if-shell -b '[ "$(echo "$tmux_version >= 3.0" | bc)" = 1 ]' \
36 | "bind-key -n 'C-\\' if-shell \"$is_vim\" 'send-keys C-\\\\' 'select-pane -l'"
37 |
38 | bind-key -T copy-mode-vi 'C-h' select-pane -L
39 | bind-key -T copy-mode-vi 'C-j' select-pane -D
40 | bind-key -T copy-mode-vi 'C-k' select-pane -U
41 | bind-key -T copy-mode-vi 'C-l' select-pane -R
42 | bind-key -T copy-mode-vi 'C-\' select-pane -l
43 |
44 | # Use vi keybindings for tmux commandline input.
45 | # Note that to get command mode you need to hit ESC twice...
46 | #set -g status-keys vi
47 |
48 | # Use vi keybindings in copy and choice modes
49 | #setw -g mode-keys vi
50 |
51 | # easily toggle synchronization (mnemonic: e is for echo)
52 | # sends input to all panes in a given window.
53 | bind e setw synchronize-panes on
54 | bind E setw synchronize-panes off
55 |
56 | # set first window to index 1 (not 0) to map more to the keyboard layout...
57 | set-option -g base-index 1
58 | set-window-option -g pane-base-index 1
59 | set-window-option -g mouse on
60 |
61 | # color scheme (styled as vim-powerline)
62 | set -g status-left-length 52
63 | set -g status-right-length 451
64 | set -g status-style fg=white,bg=colour234
65 | set -g pane-border-style fg=colour245
66 | set -g pane-active-border-style fg=colour39
67 | set -g message-style fg=colour16,bg=colour221,bold
68 | set -g status-left '#[fg=colour235,bg=colour252,bold] ❐ #S #[fg=colour252,bg=colour238,nobold]⮀#[fg=colour245,bg=colour238,bold] #(whoami) #[fg=colour238,bg=colour234,nobold]⮀'
69 | set -g window-status-format '#[fg=colour235,bg=colour252,bold] #I #(pwd="#{pane_current_path}"; echo ${pwd####*/}) #W '
70 | set -g window-status-current-format '#[fg=colour234,bg=colour39]⮀#[fg=black,bg=colour39,noreverse,bold] #{?window_zoomed_flag,#[fg=colour228],} #I #(pwd="#{pane_current_path}"; echo ${pwd####*/}) #W #[fg=colour39,bg=colour234,nobold]⮀'
71 | set-option -g status-interval 2
72 |
73 | # Patch for OS X pbpaste and pbcopy under tmux.
74 | #set-option -g default-command "which reattach-to-user-namespace > /dev/null && reattach-to-user-namespace -l $SHELL || $SHELL"
75 |
76 | # No escape time for vi mode
77 | set -sg escape-time 0
78 |
79 | # Screen like binding for last window
80 | unbind l
81 | bind C-a last-window
82 |
83 | # Bigger history
84 | set -g history-limit 50000
85 |
86 | # New windows/pane in $PWD
87 | bind c new-window -c "#{pane_current_path}"
88 |
89 | # Fix key bindings broken in tmux 2.1
90 | set -g assume-paste-time 0
91 |
92 | # force a reload of the config file
93 | unbind r
94 | bind r source-file ~/.tmux.conf \; display "Reloaded!"
95 |
96 | # Local config
97 | if-shell "[ -f ~/.tmux.conf.user ]" 'source ~/.tmux.conf.user'
98 |
--------------------------------------------------------------------------------
/conf-files/vimrc:
--------------------------------------------------------------------------------
1 | " Use Vim settings, rather then Vi settings (much better!).
2 | " This must be first, because it changes other options as a side effect.
3 | set nocompatible
4 |
5 | " TODO: this may not be in the correct place. It is intended to allow overriding .
6 | " source ~/.vimrc.before if it exists.
7 | if filereadable(expand("~/.vimrc.before"))
8 | source ~/.vimrc.before
9 | endif
10 |
11 | " ================ General Config ====================
12 |
13 | set number "Line numbers are good
14 | set backspace=indent,eol,start "Allow backspace in insert mode
15 | set history=1000 "Store lots of :cmdline history
16 | set showcmd "Show incomplete cmds down the bottom
17 | set showmode "Show current mode down the bottom
18 | set gcr=a:blinkon0 "Disable cursor blink
19 | set visualbell "No sounds
20 | set autoread "Reload files changed outside vim
21 |
22 | " This makes vim act like all other editors, buffers can
23 | " exist in the background without being in a window.
24 | " http://items.sjbach.com/319/configuring-vim-right
25 | set hidden
26 |
27 | "turn on syntax highlighting
28 | syntax on
29 |
30 | " Change leader to a comma because the backslash is too far away
31 | " That means all \x commands turn into ,x
32 | " The mapleader has to be set before vundle starts loading all
33 | " the plugins.
34 | let mapleader=","
35 |
36 | " =============== Vundle Initialization ===============
37 | " This loads all the plugins specified in ~/.vim/vundles.vim
38 | " Use Vundle plugin to manage all other plugins
39 | if filereadable(expand("~/.vim/vundles.vim"))
40 | source ~/.vim/vundles.vim
41 | endif
42 | au BufNewFile,BufRead *.vundle set filetype=vim
43 |
44 | " ================ Turn Off Swap Files ==============
45 |
46 | set noswapfile
47 | set nobackup
48 | set nowb
49 |
50 | " ================ Persistent Undo ==================
51 | " Keep undo history across sessions, by storing in file.
52 | " Only works all the time.
53 | if has('persistent_undo') && isdirectory(expand('~').'/.vim/backups')
54 | silent !mkdir ~/.vim/backups > /dev/null 2>&1
55 | set undodir=~/.vim/backups
56 | set undofile
57 | endif
58 |
59 | " ================ Indentation ======================
60 |
61 | set autoindent
62 | set smartindent
63 | set smarttab
64 | set shiftwidth=2
65 | set softtabstop=2
66 | set tabstop=2
67 | set expandtab
68 |
69 | " Auto indent pasted text
70 | nnoremap p p=`]
71 | nnoremap P P=`]
72 |
73 | filetype plugin on
74 | filetype indent on
75 |
76 | " Display tabs and trailing spaces visually
77 |
78 | set nowrap "Don't wrap lines
79 | set linebreak "Wrap lines at convenient points
80 |
81 | " ================ Folds ============================
82 |
83 | set foldmethod=indent "fold based on indent
84 | set foldnestmax=3 "deepest fold is 3 levels
85 | set nofoldenable        "don't fold by default
86 |
87 | " ================ Completion =======================
88 |
89 | set wildmode=list:longest
90 | set wildmenu "enable ctrl-n and ctrl-p to scroll thru matches
91 | set wildignore=*.o,*.obj,*~ "stuff to ignore when tab completing
92 | set wildignore+=*vim/backups*
93 | set wildignore+=*sass-cache*
94 | set wildignore+=*DS_Store*
95 | set wildignore+=vendor/rails/**
96 | set wildignore+=vendor/cache/**
97 | set wildignore+=*.gem
98 | set wildignore+=log/**
99 | set wildignore+=tmp/**
100 | set wildignore+=*.png,*.jpg,*.gif
101 |
102 | " ================ Scrolling ========================
103 |
104 | set scrolloff=8 "Start scrolling when we're 8 lines away from margins
105 | set sidescrolloff=15
106 | set sidescroll=1
107 |
108 | " ================ Search ===========================
109 |
110 | set incsearch " Find the next match as we type the search
111 | set hlsearch " Highlight searches by default
112 | set ignorecase " Ignore case when searching...
113 | set smartcase " ...unless we type a capital
114 |
115 | " ================ Security ==========================
116 | set modelines=0
117 | set nomodeline
118 |
119 | " ================ Custom Settings ========================
120 |
--------------------------------------------------------------------------------
/conf-files/zshrc:
--------------------------------------------------------------------------------
1 | # If you come from bash you might have to change your $PATH.
2 | # export PATH=$HOME/bin:/usr/local/bin:$PATH
3 |
4 | # Path to your oh-my-zsh installation.
5 | export ZSH="/root/ohmyzsh"
6 |
7 | HISTFILE=/workspace/.zsh_history
8 |
9 | # Set name of the theme to load --- if set to "random", it will
10 | # load a random theme each time oh-my-zsh is loaded, in which case,
11 | # to know which specific one was loaded, run: echo $RANDOM_THEME
12 | # See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
13 | ZSH_THEME="thebloke"
14 |
15 | # Set list of themes to pick from when loading at random
16 | # Setting this variable when ZSH_THEME=random will cause zsh to load
17 | # a theme from this variable instead of looking in $ZSH/themes/
18 | # If set to an empty array, this variable will have no effect.
19 | # ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
20 |
21 | # Uncomment the following line to use case-sensitive completion.
22 | # CASE_SENSITIVE="true"
23 |
24 | # Uncomment the following line to use hyphen-insensitive completion.
25 | # Case-sensitive completion must be off. _ and - will be interchangeable.
26 | # HYPHEN_INSENSITIVE="true"
27 |
28 | # Uncomment one of the following lines to change the auto-update behavior
29 | # zstyle ':omz:update' mode disabled # disable automatic updates
30 | # zstyle ':omz:update' mode auto # update automatically without asking
31 | zstyle ':omz:update' mode reminder # just remind me to update when it's time
32 |
33 | # Uncomment the following line to change how often to auto-update (in days).
34 | # zstyle ':omz:update' frequency 13
35 |
36 | # Uncomment the following line if pasting URLs and other text is messed up.
37 | # DISABLE_MAGIC_FUNCTIONS="true"
38 |
39 | # Uncomment the following line to disable colors in ls.
40 | # DISABLE_LS_COLORS="true"
41 |
42 | # Uncomment the following line to disable auto-setting terminal title.
43 | # DISABLE_AUTO_TITLE="true"
44 |
45 | # Uncomment the following line to enable command auto-correction.
46 | # ENABLE_CORRECTION="true"
47 |
48 | # Uncomment the following line to display red dots whilst waiting for completion.
49 | # You can also set it to another string to have that shown instead of the default red dots.
50 | # e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f"
51 | # Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765)
52 | COMPLETION_WAITING_DOTS="true"
53 |
54 | # Uncomment the following line if you want to disable marking untracked files
55 | # under VCS as dirty. This makes repository status check for large repositories
56 | # much, much faster.
57 | # DISABLE_UNTRACKED_FILES_DIRTY="true"
58 |
59 | # Uncomment the following line if you want to change the command execution time
60 | # stamp shown in the history command output.
61 | # You can set one of the optional three formats:
62 | # "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
63 | # or set a custom format using the strftime function format specifications,
64 | # see 'man strftime' for details.
65 | HIST_STAMPS="dd.mm.yyyy"
66 |
67 | # Would you like to use another custom folder than $ZSH/custom?
68 | # ZSH_CUSTOM=/path/to/new-custom-folder
69 |
70 | # Which plugins would you like to load?
71 | # Standard plugins can be found in $ZSH/plugins/
72 | # Custom plugins may be added to $ZSH_CUSTOM/plugins/
73 | # Example format: plugins=(rails git textmate ruby lighthouse)
74 | # Add wisely, as too many plugins slow down shell startup.
75 | plugins=(git python tmux iterm2 fasd pip virtualenv)
76 |
77 | zstyle :omz:plugins:iterm2 shell-integration yes
78 |
79 | export ZSH_TMUX_AUTOSTART=false
80 | export ZSH_DISABLE_COMPFIX=true
81 |
82 | source $ZSH/oh-my-zsh.sh
83 |
84 | LS_COLORS="*~=0;38;2;102;102;102:bd=0;38;2;154;237;254;48;2;51;51;51:ca=0:cd=0;38;2;255;106;193;48;2;51;51;51:di=0;38;2;87;199;255:do=0;38;2;0;0;0;48;2;255;106;193:ex=1;38;2;255;92;87:fi=0:ln=0;38;2;255;106;193:mh=0:mi=0;38;2;0;0;0;48;2;255;92;87:no=0:or=0;38;2;0;0;0;48;2;255;92;87:ow=0:pi=0;38;2;0;0;0;48;2;87;199;255:rs=0:sg=0:so=0;38;2;0;0;0;48;2;255;106;193:st=0:su=0:tw=0:*.a=1;38;2;255;92;87:*.c=0;38;2;90;247;142:*.d=0;38;2;90;247;142:*.h=0;38;2;90;247;142:*.m=0;38;2;90;247;142:*.o=0;38;2;102;102;102:*.p=0;38;2;90;247;142:*.r=0;38;2;90;247;142:*.t=0;38;2;90;247;142:*.z=4;38;2;154;237;254:*.7z=4;38;2;154;237;254:*.as=0;38;2;90;247;142:*.bc=0;38;2;102;102;102:*.bz=4;38;2;154;237;254:*.cc=0;38;2;90;247;142:*.cp=0;38;2;90;247;142:*.cr=0;38;2;90;247;142:*.cs=0;38;2;90;247;142:*.di=0;38;2;90;247;142:*.el=0;38;2;90;247;142:*.ex=0;38;2;90;247;142:*.fs=0;38;2;90;247;142:*.go=0;38;2;90;247;142:*.gv=0;38;2;90;247;142:*.gz=4;38;2;154;237;254:*.hh=0;38;2;90;247;142:*.hi=0;38;2;102;102;102:*.hs=0;38;2;90;247;142:*.jl=0;38;2;90;247;142:*.js=0;38;2;90;247;142:*.ko=1;38;2;255;92;87:*.kt=0;38;2;90;247;142:*.la=0;38;2;102;102;102:*.ll=0;38;2;90;247;142:*.lo=0;38;2;102;102;102:*.md=0;38;2;243;249;157:*.ml=0;38;2;90;247;142:*.mn=0;38;2;90;247;142:*.nb=0;38;2;90;247;142:*.pl=0;38;2;90;247;142:*.pm=0;38;2;90;247;142:*.pp=0;38;2;90;247;142:*.ps=0;38;2;255;92;87:*.py=0;38;2;90;247;142:*.rb=0;38;2;90;247;142:*.rm=0;38;2;255;180;223:*.rs=0;38;2;90;247;142:*.sh=0;38;2;90;247;142:*.so=1;38;2;255;92;87:*.td=0;38;2;90;247;142:*.ts=0;38;2;90;247;142:*.ui=0;38;2;243;249;157:*.vb=0;38;2;90;247;142:*.wv=0;38;2;255;180;223:*.xz=4;38;2;154;237;254:*.aif=0;38;2;255;180;223:*.ape=0;38;2;255;180;223:*.apk=4;38;2;154;237;254:*.arj=4;38;2;154;237;254:*.asa=0;38;2;90;247;142:*.aux=0;38;2;102;102;102:*.avi=0;38;2;255;180;223:*.awk=0;38;2;90;247;142:*.bag=4;38;2;154;237;254:*.bak=0;38;2;102;102;102:*.bat=1;38;2;255;92;87:*.bbl=0;38;2;102;102;102:*.bcf=0;38;2;102;102;102:*.bib=0;38;2;243;249;157:*.bin=
4;38;2;154;237;254:*.blg=0;38;2;102;102;102:*.bmp=0;38;2;255;180;223:*.bsh=0;38;2;90;247;142:*.bst=0;38;2;243;249;157:*.bz2=4;38;2;154;237;254:*.c++=0;38;2;90;247;142:*.cfg=0;38;2;243;249;157:*.cgi=0;38;2;90;247;142:*.clj=0;38;2;90;247;142:*.com=1;38;2;255;92;87:*.cpp=0;38;2;90;247;142:*.css=0;38;2;90;247;142:*.csv=0;38;2;243;249;157:*.csx=0;38;2;90;247;142:*.cxx=0;38;2;90;247;142:*.deb=4;38;2;154;237;254:*.def=0;38;2;90;247;142:*.dll=1;38;2;255;92;87:*.dmg=4;38;2;154;237;254:*.doc=0;38;2;255;92;87:*.dot=0;38;2;90;247;142:*.dox=0;38;2;165;255;195:*.dpr=0;38;2;90;247;142:*.elc=0;38;2;90;247;142:*.elm=0;38;2;90;247;142:*.epp=0;38;2;90;247;142:*.eps=0;38;2;255;180;223:*.erl=0;38;2;90;247;142:*.exe=1;38;2;255;92;87:*.exs=0;38;2;90;247;142:*.fls=0;38;2;102;102;102:*.flv=0;38;2;255;180;223:*.fnt=0;38;2;255;180;223:*.fon=0;38;2;255;180;223:*.fsi=0;38;2;90;247;142:*.fsx=0;38;2;90;247;142:*.gif=0;38;2;255;180;223:*.git=0;38;2;102;102;102:*.gvy=0;38;2;90;247;142:*.h++=0;38;2;90;247;142:*.hpp=0;38;2;90;247;142:*.htc=0;38;2;90;247;142:*.htm=0;38;2;243;249;157:*.hxx=0;38;2;90;247;142:*.ico=0;38;2;255;180;223:*.ics=0;38;2;255;92;87:*.idx=0;38;2;102;102;102:*.ilg=0;38;2;102;102;102:*.img=4;38;2;154;237;254:*.inc=0;38;2;90;247;142:*.ind=0;38;2;102;102;102:*.ini=0;38;2;243;249;157:*.inl=0;38;2;90;247;142:*.ipp=0;38;2;90;247;142:*.iso=4;38;2;154;237;254:*.jar=4;38;2;154;237;254:*.jpg=0;38;2;255;180;223:*.kex=0;38;2;255;92;87:*.kts=0;38;2;90;247;142:*.log=0;38;2;102;102;102:*.ltx=0;38;2;90;247;142:*.lua=0;38;2;90;247;142:*.m3u=0;38;2;255;180;223:*.m4a=0;38;2;255;180;223:*.m4v=0;38;2;255;180;223:*.mid=0;38;2;255;180;223:*.mir=0;38;2;90;247;142:*.mkv=0;38;2;255;180;223:*.mli=0;38;2;90;247;142:*.mov=0;38;2;255;180;223:*.mp3=0;38;2;255;180;223:*.mp4=0;38;2;255;180;223:*.mpg=0;38;2;255;180;223:*.nix=0;38;2;243;249;157:*.odp=0;38;2;255;92;87:*.ods=0;38;2;255;92;87:*.odt=0;38;2;255;92;87:*.ogg=0;38;2;255;180;223:*.org=0;38;2;243;249;157:*.otf=0;38;2;255;180;223:*.out=0;38;2;102;102;102:*.pas
=0;38;2;90;247;142:*.pbm=0;38;2;255;180;223:*.pdf=0;38;2;255;92;87:*.pgm=0;38;2;255;180;223:*.php=0;38;2;90;247;142:*.pid=0;38;2;102;102;102:*.pkg=4;38;2;154;237;254:*.png=0;38;2;255;180;223:*.pod=0;38;2;90;247;142:*.ppm=0;38;2;255;180;223:*.pps=0;38;2;255;92;87:*.ppt=0;38;2;255;92;87:*.pro=0;38;2;165;255;195:*.ps1=0;38;2;90;247;142:*.psd=0;38;2;255;180;223:*.pyc=0;38;2;102;102;102:*.pyd=0;38;2;102;102;102:*.pyo=0;38;2;102;102;102:*.rar=4;38;2;154;237;254:*.rpm=4;38;2;154;237;254:*.rst=0;38;2;243;249;157:*.rtf=0;38;2;255;92;87:*.sbt=0;38;2;90;247;142:*.sql=0;38;2;90;247;142:*.sty=0;38;2;102;102;102:*.svg=0;38;2;255;180;223:*.swf=0;38;2;255;180;223:*.swp=0;38;2;102;102;102:*.sxi=0;38;2;255;92;87:*.sxw=0;38;2;255;92;87:*.tar=4;38;2;154;237;254:*.tbz=4;38;2;154;237;254:*.tcl=0;38;2;90;247;142:*.tex=0;38;2;90;247;142:*.tgz=4;38;2;154;237;254:*.tif=0;38;2;255;180;223:*.tml=0;38;2;243;249;157:*.tmp=0;38;2;102;102;102:*.toc=0;38;2;102;102;102:*.tsx=0;38;2;90;247;142:*.ttf=0;38;2;255;180;223:*.txt=0;38;2;243;249;157:*.vcd=4;38;2;154;237;254:*.vim=0;38;2;90;247;142:*.vob=0;38;2;255;180;223:*.wav=0;38;2;255;180;223:*.wma=0;38;2;255;180;223:*.wmv=0;38;2;255;180;223:*.xcf=0;38;2;255;180;223:*.xlr=0;38;2;255;92;87:*.xls=0;38;2;255;92;87:*.xml=0;38;2;243;249;157:*.xmp=0;38;2;243;249;157:*.yml=0;38;2;243;249;157:*.zip=4;38;2;154;237;254:*.zsh=0;38;2;90;247;142:*.zst=4;38;2;154;237;254:*TODO=1:*hgrc=0;38;2;165;255;195:*.bash=0;38;2;90;247;142:*.conf=0;38;2;243;249;157:*.dart=0;38;2;90;247;142:*.diff=0;38;2;90;247;142:*.docx=0;38;2;255;92;87:*.epub=0;38;2;255;92;87:*.fish=0;38;2;90;247;142:*.flac=0;38;2;255;180;223:*.h264=0;38;2;255;180;223:*.hgrc=0;38;2;165;255;195:*.html=0;38;2;243;249;157:*.java=0;38;2;90;247;142:*.jpeg=0;38;2;255;180;223:*.json=0;38;2;243;249;157:*.less=0;38;2;90;247;142:*.lisp=0;38;2;90;247;142:*.lock=0;38;2;102;102;102:*.make=0;38;2;165;255;195:*.mpeg=0;38;2;255;180;223:*.opus=0;38;2;255;180;223:*.orig=0;38;2;102;102;102:*.pptx=0;38;2;255;92;87:*.psd1=0;38;2;9
0;247;142:*.psm1=0;38;2;90;247;142:*.purs=0;38;2;90;247;142:*.rlib=0;38;2;102;102;102:*.sass=0;38;2;90;247;142:*.scss=0;38;2;90;247;142:*.tbz2=4;38;2;154;237;254:*.tiff=0;38;2;255;180;223:*.toml=0;38;2;243;249;157:*.webm=0;38;2;255;180;223:*.webp=0;38;2;255;180;223:*.woff=0;38;2;255;180;223:*.xbps=4;38;2;154;237;254:*.xlsx=0;38;2;255;92;87:*.yaml=0;38;2;243;249;157:*.cabal=0;38;2;90;247;142:*.cache=0;38;2;102;102;102:*.class=0;38;2;102;102;102:*.cmake=0;38;2;165;255;195:*.dyn_o=0;38;2;102;102;102:*.ipynb=0;38;2;90;247;142:*.mdown=0;38;2;243;249;157:*.patch=0;38;2;90;247;142:*.scala=0;38;2;90;247;142:*.shtml=0;38;2;243;249;157:*.swift=0;38;2;90;247;142:*.toast=4;38;2;154;237;254:*.xhtml=0;38;2;243;249;157:*README=0;38;2;40;42;54;48;2;243;249;157:*passwd=0;38;2;243;249;157:*shadow=0;38;2;243;249;157:*.config=0;38;2;243;249;157:*.dyn_hi=0;38;2;102;102;102:*.flake8=0;38;2;165;255;195:*.gradle=0;38;2;90;247;142:*.groovy=0;38;2;90;247;142:*.ignore=0;38;2;165;255;195:*.matlab=0;38;2;90;247;142:*COPYING=0;38;2;153;153;153:*INSTALL=0;38;2;40;42;54;48;2;243;249;157:*LICENSE=0;38;2;153;153;153:*TODO.md=1:*.desktop=0;38;2;243;249;157:*.gemspec=0;38;2;165;255;195:*Doxyfile=0;38;2;165;255;195:*Makefile=0;38;2;165;255;195:*TODO.txt=1:*setup.py=0;38;2;165;255;195:*.DS_Store=0;38;2;102;102;102:*.cmake.in=0;38;2;165;255;195:*.fdignore=0;38;2;165;255;195:*.kdevelop=0;38;2;165;255;195:*.markdown=0;38;2;243;249;157:*.rgignore=0;38;2;165;255;195:*COPYRIGHT=0;38;2;153;153;153:*README.md=0;38;2;40;42;54;48;2;243;249;157:*configure=0;38;2;165;255;195:*.gitconfig=0;38;2;165;255;195:*.gitignore=0;38;2;165;255;195:*.localized=0;38;2;102;102;102:*.scons_opt=0;38;2;102;102;102:*CODEOWNERS=0;38;2;165;255;195:*Dockerfile=0;38;2;243;249;157:*INSTALL.md=0;38;2;40;42;54;48;2;243;249;157:*README.txt=0;38;2;40;42;54;48;2;243;249;157:*SConscript=0;38;2;165;255;195:*SConstruct=0;38;2;165;255;195:*.gitmodules=0;38;2;165;255;195:*.synctex.gz=0;38;2;102;102;102:*.travis.yml=0;38;2;90;247;142:*INSTALL.txt=0;
38;2;40;42;54;48;2;243;249;157:*LICENSE-MIT=0;38;2;153;153;153:*MANIFEST.in=0;38;2;165;255;195:*Makefile.am=0;38;2;165;255;195:*Makefile.in=0;38;2;102;102;102:*.applescript=0;38;2;90;247;142:*.fdb_latexmk=0;38;2;102;102;102:*CONTRIBUTORS=0;38;2;40;42;54;48;2;243;249;157:*appveyor.yml=0;38;2;90;247;142:*configure.ac=0;38;2;165;255;195:*.clang-format=0;38;2;165;255;195:*.gitattributes=0;38;2;165;255;195:*.gitlab-ci.yml=0;38;2;90;247;142:*CMakeCache.txt=0;38;2;102;102;102:*CMakeLists.txt=0;38;2;165;255;195:*LICENSE-APACHE=0;38;2;153;153;153:*CONTRIBUTORS.md=0;38;2;40;42;54;48;2;243;249;157:*.sconsign.dblite=0;38;2;102;102;102:*CONTRIBUTORS.txt=0;38;2;40;42;54;48;2;243;249;157:*requirements.txt=0;38;2;165;255;195:*package-lock.json=0;38;2;102;102;102:*.CFUserTextEncoding=0;38;2;102;102;102"
85 | export LS_COLORS
86 | # User configuration
87 |
88 | # export MANPATH="/usr/local/man:$MANPATH"
89 |
90 | # You may need to manually set your language environment
91 | # export LANG=en_US.UTF-8
92 |
93 | # Preferred editor for local and remote sessions
94 | # if [[ -n $SSH_CONNECTION ]]; then
95 | # export EDITOR='vim'
96 | # else
97 | # export EDITOR='mvim'
98 | # fi
99 | export EDITOR=vim
100 |
101 | # Compilation flags
102 | # export ARCHFLAGS="-arch x86_64"
103 |
104 | # Set personal aliases, overriding those provided by oh-my-zsh libs,
105 | # plugins, and themes. Aliases can be placed here, though oh-my-zsh
106 | # users are encouraged to define aliases within the ZSH_CUSTOM folder.
107 | # For a full list of active aliases, run `alias`.
108 | #
109 | # Example aliases
110 | # alias zshconfig="mate ~/.zshrc"
111 | # alias ohmyzsh="mate ~/.oh-my-zsh"
112 |
113 | alias python=python3
114 | alias pip=pip3
115 |
116 | source ~/.iterm2_shell_integration.zsh
117 |
118 | bindkey "^U" kill-region
119 |
120 | if [[ -d /workspace/venv/pytorch2 ]]
121 | then
122 | source /workspace/venv/pytorch2/bin/activate
123 | fi
124 |
125 | export PATH=/usr/local/cuda/bin:"$PATH"
126 |
127 | cd /workspace
128 |
129 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-chat/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG CUDA_VERSION="11.8.0"
2 | ARG CUDNN_VERSION="8"
3 | ARG UBUNTU_VERSION="22.04"
4 |
5 | # Base image
6 | FROM thebloke/cuda$CUDA_VERSION-ubuntu$UBUNTU_VERSION-oneclick:latest as base
7 |
8 | RUN pip3 install runpod requests
9 |
10 | COPY scripts/rp_handler.py /root/scripts
11 |
12 | COPY --chmod=755 start-with-ui.sh /start.sh
13 |
14 | WORKDIR /workspace
15 |
16 | CMD [ "/start.sh" ]
17 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-chat/scripts/rp_handler.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import runpod
4 | import requests
5 | from requests.adapters import HTTPAdapter, Retry
6 |
7 |
8 | automatic_session = requests.Session()
9 | retries = Retry(total=10, backoff_factor=0.1, status_forcelist=[502, 503, 504])
10 | automatic_session.mount('http://', HTTPAdapter(max_retries=retries))
11 |
12 |
13 | # ---------------------------------------------------------------------------- #
14 | # Automatic Functions #
15 | # ---------------------------------------------------------------------------- #
16 | def wait_for_service(url):
17 | '''
18 | Check if the service is ready to receive requests.
19 | '''
20 | while True:
21 | try:
22 | requests.get(url)
23 | return
24 | except requests.exceptions.RequestException:
25 | print("Service not ready yet. Retrying...")
26 | except Exception as err:
27 | print("Error: ", err)
28 |
29 | time.sleep(0.2)
30 |
31 |
32 | def run_inference(inference_request):
33 | '''
34 | Run inference on a request.
35 | '''
36 | response = automatic_session.post(url='http://127.0.0.1:5000/api/v1/chat',
37 | json=inference_request, timeout=600)
38 | return response.json()
39 |
40 |
41 | # ---------------------------------------------------------------------------- #
42 | # RunPod Handler #
43 | # ---------------------------------------------------------------------------- #
44 | def handler(event):
45 | '''
46 | This is the handler function that will be called by the serverless.
47 | '''
48 |
49 | json = run_inference(event["input"])
50 |
51 | # return the output that you want to be returned like pre-signed URLs to output artifacts
52 | return json
53 |
54 |
55 | if __name__ == "__main__":
56 | wait_for_service(url='http://127.0.0.1:5000/api/v1/generate')
57 |
58 | print("WebUI API Service is ready. Starting RunPod...")
59 | runpod.serverless.start({"handler": handler})
60 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-chat/start-with-ui.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "TheBloke's Local LLMs: Pod started"
3 |
4 | SCRIPTDIR=/root/scripts
5 | VOLUME=/workspace
6 |
7 | # If a volume is already defined, $VOLUME will already exist
8 | # If a volume is not being used, we'll still use /workspace to ensure everything is in a known place.
9 | mkdir -p $VOLUME/logs
10 |
11 | # Start build of llama-cpp-python in background
12 | if [[ ! -f /.built.llama-cpp-python ]]; then
13 | "$SCRIPTDIR"/build-llama-cpp-python.sh >>$VOLUME/logs/build-llama-cpp-python.log 2>&1 &
14 | fi
15 |
16 | if [[ $PUBLIC_KEY ]]; then
17 | mkdir -p ~/.ssh
18 | chmod 700 ~/.ssh
19 | cd ~/.ssh
20 | echo "$PUBLIC_KEY" >>authorized_keys
21 | chmod 700 -R ~/.ssh
22 | service ssh start
23 | fi
24 |
25 | # Move text-generation-webui's folder to $VOLUME so models and all config will persist
26 | "$SCRIPTDIR"/textgen-on-workspace.sh
27 |
28 | # If passed a MODEL variable from Runpod template, start it downloading
29 | # This will block the UI until completed
30 | # MODEL can be a HF repo name, eg 'TheBloke/guanaco-7B-GPTQ'
31 | # or it can be a direct link to a single GGML file, eg 'https://huggingface.co/TheBloke/tulu-7B-GGML/resolve/main/tulu-7b.ggmlv3.q2_K.bin'
32 | if [[ $MODEL ]]; then
33 | "$SCRIPTDIR"/fetch-model.py "$MODEL" $VOLUME/text-generation-webui/models >>$VOLUME/logs/fetch-model.log 2>&1
34 | fi
35 |
36 | # Update text-generation-webui to the latest commit
37 | cd /workspace/text-generation-webui && git pull
38 |
39 | # Move the script that launches text-gen to $VOLUME, so users can make persistent changes to CLI arguments
40 | if [[ ! -f $VOLUME/run-text-generation-webui.sh ]]; then
41 | mv "$SCRIPTDIR"/run-text-generation-webui.sh $VOLUME/run-text-generation-webui.sh
42 | fi
43 |
44 | python3 /root/scripts/rp_handler.py >/workspace/logs/rp_handler.log 2>&1 &
45 |
46 | ARGS=()
47 | while true; do
48 | # If the user wants to stop the UI from auto launching, they can run:
49 | # touch $VOLUME/do.not.launch.UI
50 | if [[ ! -f $VOLUME/do.not.launch.UI ]]; then
51 | # Launch the UI in a loop forever, allowing UI restart
52 | if [[ -f /tmp/text-gen-model ]]; then
53 | # If this file exists, we successfully downloaded a model file or folder
54 | # Therefore we auto load this model
55 | ARGS=(--model "$(&1) >>$VOLUME/logs/text-generation-webui.log
63 |
64 | fi
65 | sleep 2
66 | done
67 |
68 | # shouldn't actually reach this point
69 | sleep infinity
70 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-rp/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG CUDA_VERSION="11.8.0"
2 | ARG CUDNN_VERSION="8"
3 | ARG UBUNTU_VERSION="22.04"
4 |
5 | # Base image
6 | FROM thebloke/cuda$CUDA_VERSION-ubuntu$UBUNTU_VERSION-oneclick:latest as base
7 |
8 | RUN pip3 install runpod requests
9 |
10 | COPY scripts/rp_handler.py /root/scripts
11 |
12 | COPY --chmod=755 start-with-ui.sh /start.sh
13 |
14 | WORKDIR /workspace
15 |
16 | CMD [ "/start.sh" ]
17 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-rp/scripts/rp_handler.old2:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import runpod
4 | import requests
5 | import json
6 | from requests.adapters import HTTPAdapter, Retry
7 |
8 |
9 | automatic_session = requests.Session()
10 | retries = Retry(total=10, backoff_factor=0.1, status_forcelist=[502, 503, 504])
11 | automatic_session.mount('http://', HTTPAdapter(max_retries=retries))
12 |
13 |
14 | # ---------------------------------------------------------------------------- #
15 | # Automatic Functions #
16 | # ---------------------------------------------------------------------------- #
17 | def wait_for_service(url):
18 | '''
19 | Check if the service is ready to receive requests.
20 | '''
21 | while True:
22 | try:
23 | requests.get(url)
24 | return
25 | except requests.exceptions.RequestException:
26 | print("Service not ready yet. Retrying...")
27 | except Exception as err:
28 | print("Error: ", err)
29 |
30 | time.sleep(0.2)
31 |
32 |
33 | def run_inference(inference_request):
34 | '''
35 | Run inference on a request.
36 | '''
37 | response = automatic_session.post(url='http://127.0.0.1:5000/api/v1/generate',
38 | json=inference_request, timeout=600)
39 | print("Running inference:" + json.dumps(response.json()))
40 | return response.json()
41 |
42 |
43 | # ---------------------------------------------------------------------------- #
44 | # RunPod Handler #
45 | # ---------------------------------------------------------------------------- #
46 | def handler(event):
47 | '''
48 | This is the handler function that will be called by the serverless.
49 | '''
50 |
51 | json = run_inference(event["input"])
52 |
53 | print("Handler received request:" + json.dumps(json))
54 | # return the output that you want to be returned like pre-signed URLs to output artifacts
55 | return json
56 |
57 |
58 | if __name__ == "__main__":
59 | wait_for_service(url='http://127.0.0.1:5000/api/v1/generate')
60 |
61 | print("WebUI API Service is ready. Starting RunPod...")
62 |
63 | runpod.serverless.start({"handler": handler})
64 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-rp/scripts/rp_handler.orig.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import runpod
4 | import requests
5 | from requests.adapters import HTTPAdapter, Retry
6 |
7 |
8 | automatic_session = requests.Session()
9 | retries = Retry(total=10, backoff_factor=0.1, status_forcelist=[502, 503, 504])
10 | automatic_session.mount('http://', HTTPAdapter(max_retries=retries))
11 |
12 |
13 | # ---------------------------------------------------------------------------- #
14 | # Automatic Functions #
15 | # ---------------------------------------------------------------------------- #
16 | def wait_for_service(url):
17 | '''
18 | Check if the service is ready to receive requests.
19 | '''
20 | while True:
21 | try:
22 | requests.get(url)
23 | return
24 | except requests.exceptions.RequestException:
25 | print("Service not ready yet. Retrying...")
26 | except Exception as err:
27 | print("Error: ", err)
28 |
29 | time.sleep(0.2)
30 |
31 |
32 | def run_inference(inference_request):
33 | '''
34 | Run inference on a request.
35 | '''
36 | response = automatic_session.post(url='http://127.0.0.1:5000/api/v1/generate',
37 | json=inference_request, timeout=600)
38 | return response.json()
39 |
40 |
41 | # ---------------------------------------------------------------------------- #
42 | # RunPod Handler #
43 | # ---------------------------------------------------------------------------- #
44 | def handler(event):
45 | '''
46 | This is the handler function that will be called by the serverless.
47 | '''
48 |
49 | json = run_inference(event["input"])
50 |
51 | # return the output that you want to be returned like pre-signed URLs to output artifacts
52 | return json
53 |
54 |
55 | if __name__ == "__main__":
56 | wait_for_service(url='http://127.0.0.1:5000/api/v1/generate')
57 |
58 | print("WebUI API Service is ready. Starting RunPod...")
59 |
60 | runpod.serverless.start({"handler": handler})
61 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-rp/scripts/rp_handler.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import runpod
4 | import requests
5 | from requests.adapters import HTTPAdapter, Retry
6 |
7 |
8 | automatic_session = requests.Session()
9 | retries = Retry(total=10, backoff_factor=0.1, status_forcelist=[502, 503, 504])
10 | automatic_session.mount('http://', HTTPAdapter(max_retries=retries))
11 |
12 |
13 | # ---------------------------------------------------------------------------- #
14 | # Automatic Functions #
15 | # ---------------------------------------------------------------------------- #
16 | def wait_for_service(url):
17 | '''
18 | Check if the service is ready to receive requests.
19 | '''
20 | while True:
21 | try:
22 | requests.get(url)
23 | return
24 | except requests.exceptions.RequestException:
25 | print("Service not ready yet. Retrying...")
26 | except Exception as err:
27 | print("Error: ", err)
28 |
29 | time.sleep(0.2)
30 |
31 |
32 | def run_inference(inference_request):
33 | '''
34 | Run inference on a request.
35 | '''
36 | response = automatic_session.post(url='http://127.0.0.1:5000/api/v1/generate',
37 | json=inference_request, timeout=600)
38 | return response.json()
39 |
40 |
41 | # ---------------------------------------------------------------------------- #
42 | # RunPod Handler #
43 | # ---------------------------------------------------------------------------- #
44 | def handler(event):
45 | '''
46 | This is the handler function that will be called by the serverless.
47 | '''
48 |
49 | json = run_inference(event["input"])
50 |
51 | # return the output that you want to be returned like pre-signed URLs to output artifacts
52 | return json
53 |
54 |
55 | if __name__ == "__main__":
56 | wait_for_service(url='http://127.0.0.1:5000/api/v1/generate')
57 |
58 | print("WebUI API Service is ready. Starting RunPod...")
59 | runpod.serverless.start({"handler": handler})
60 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick-rp/start-with-ui.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "TheBloke's Local LLMs: Pod started"
3 |
4 | SCRIPTDIR=/root/scripts
5 | VOLUME=/workspace
6 |
7 | # If a volume is already defined, $VOLUME will already exist
8 | # If a volume is not being used, we'll still use /workspace to ensure everything is in a known place.
9 | mkdir -p $VOLUME/logs
10 |
11 | # Start build of llama-cpp-python in background
12 | if [[ ! -f /.built.llama-cpp-python ]]; then
13 | "$SCRIPTDIR"/build-llama-cpp-python.sh >>$VOLUME/logs/build-llama-cpp-python.log 2>&1 &
14 | fi
15 |
# If the RunPod template supplied a PUBLIC_KEY, install it and start sshd.
if [[ $PUBLIC_KEY ]]; then
	mkdir -p ~/.ssh
	chmod 700 ~/.ssh
	# Append via an absolute path; the previous version cd'd into ~/.ssh and
	# left the working directory changed for the rest of the script.
	echo "$PUBLIC_KEY" >>~/.ssh/authorized_keys
	# sshd expects the key file to be private: 600 for the file, 700 for the dir
	# (the old 'chmod 700 -R' also marked authorized_keys executable).
	chmod 600 ~/.ssh/authorized_keys
	service ssh start
fi
24 |
# Move text-generation-webui's folder to $VOLUME so models and all config will persist
"$SCRIPTDIR"/textgen-on-workspace.sh

# If passed a MODEL variable from Runpod template, start it downloading
# This will block the UI until completed
# MODEL can be a HF repo name, eg 'TheBloke/guanaco-7B-GPTQ'
# or it can be a direct link to a single GGML file, eg 'https://huggingface.co/TheBloke/tulu-7B-GGML/resolve/main/tulu-7b.ggmlv3.q2_K.bin'
if [[ $MODEL ]]; then
	"$SCRIPTDIR"/fetch-model.py "$MODEL" $VOLUME/text-generation-webui/models >>$VOLUME/logs/fetch-model.log 2>&1
fi

# Update text-generation-webui to the latest commit
cd /workspace/text-generation-webui && git pull

# Move the script that launches text-gen to $VOLUME, so users can make persistent changes to CLI arguments
if [[ ! -f $VOLUME/run-text-generation-webui.sh ]]; then
	mv "$SCRIPTDIR"/run-text-generation-webui.sh $VOLUME/run-text-generation-webui.sh
fi

# Start the RunPod serverless handler in the background, logging to the volume.
python3 /root/scripts/rp_handler.py >/workspace/logs/rp_handler.log 2>&1 &
45 |
# Main loop: (re)launch the web UI forever so users can restart it from inside
# the pod; a sentinel file lets them disable auto-launch entirely.
ARGS=()
while true; do
	# If the user wants to stop the UI from auto launching, they can run:
	# touch $VOLUME/do.not.launch.UI
	if [[ ! -f $VOLUME/do.not.launch.UI ]]; then
		# Launch the UI in a loop forever, allowing UI restart
		if [[ -f /tmp/text-gen-model ]]; then
			# If this file exists, we successfully downloaded a model file or folder
			# Therefore we auto load this model
			# NOTE(review): the next line appears truncated/garbled in this copy
			# (original lines 55-62 seem collapsed into one) — recover from
			# upstream; presumably it reads the model name from
			# /tmp/text-gen-model and runs run-text-generation-webui.sh with
			# output appended to the log. Confirm before shipping.
			ARGS=(--model "$(&1) >>$VOLUME/logs/text-generation-webui.log

	fi
	sleep 2
done

# shouldn't actually reach this point (the while loop above never exits)
sleep infinity
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/Dockerfile:
--------------------------------------------------------------------------------
# Build args: pinned CUDA/cuDNN/Ubuntu versions select the prebuilt textgen base image.
# NOTE(review): the CUDA_VERSION default is 12.1.1 although this directory is
# named cuda11.8.0 — presumably overridden at build time; confirm against the
# repo's build script.
ARG CUDA_VERSION="12.1.1"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG DOCKER_FROM=thebloke/cuda$CUDA_VERSION-ubuntu$UBUNTU_VERSION-textgen:latest

# Base image
FROM $DOCKER_FROM as base

# Extra command-line utilities layered on top of the base image.
ARG APTPKGS="zsh wget tmux tldr nvtop vim neovim curl rsync net-tools less iputils-ping 7zip zip unzip"

# Install useful command line utility software
# (single RUN so the apt cache cleanup happens in the same layer)
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends $APTPKGS && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Set up git to support LFS, and to store credentials; useful for Huggingface Hub
RUN git config --global credential.helper store && \
    git lfs install

# Install Oh My Zsh for better command line experience: https://github.com/ohmyzsh/ohmyzsh
RUN bash -c "ZSH=/root/ohmyzsh $(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" "" --unattended

# Add some config files for a nicer command line setup
COPY conf-files/vimrc /root/.vimrc
COPY conf-files/zshrc /root/.zshrc
COPY conf-files/thebloke.zsh-theme /root/ohmyzsh/custom/themes/
COPY conf-files/tmux.conf /root/.tmux.conf
# This file is for macOS users using iTerm2. It provides these features: https://iterm2.com/documentation-shell-integration.html
COPY conf-files/iterm2_shell_integration.zsh /root/.iterm2_shell_integration.zsh
# Set default shell to ZSH
COPY conf-files/passwd /etc/passwd

COPY scripts /root/scripts

# Entrypoint script; 755 so it is executable regardless of host checkout perms.
COPY --chmod=755 start-with-ui.sh /start.sh

WORKDIR /workspace

ENTRYPOINT [ "/start.sh" ]
41 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/conf-files/iterm2_shell_integration.zsh:
--------------------------------------------------------------------------------
1 | # This program is free software; you can redistribute it and/or
2 | # modify it under the terms of the GNU General Public License
3 | # as published by the Free Software Foundation; either version 2
4 | # of the License, or (at your option) any later version.
5 | #
6 | # This program is distributed in the hope that it will be useful,
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 | # GNU General Public License for more details.
10 | #
11 | # You should have received a copy of the GNU General Public License
12 | # along with this program; if not, write to the Free Software
13 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 |
15 | if [[ -o interactive ]]; then
16 | if [ "${ITERM_ENABLE_SHELL_INTEGRATION_WITH_TMUX-}""$TERM" != "tmux-256color" -a "${ITERM_ENABLE_SHELL_INTEGRATION_WITH_TMUX-}""$TERM" != "screen" -a "${ITERM_SHELL_INTEGRATION_INSTALLED-}" = "" -a "$TERM" != linux -a "$TERM" != dumb ]; then
17 | ITERM_SHELL_INTEGRATION_INSTALLED=Yes
18 | ITERM2_SHOULD_DECORATE_PROMPT="1"
19 | # Indicates start of command output. Runs just before command executes.
20 | iterm2_before_cmd_executes() {
21 | if [ "$TERM_PROGRAM" = "iTerm.app" ]; then
22 | printf "\033]133;C;\r\007"
23 | else
24 | printf "\033]133;C;\007"
25 | fi
26 | }
27 |
28 | iterm2_set_user_var() {
29 | printf "\033]1337;SetUserVar=%s=%s\007" "$1" $(printf "%s" "$2" | base64 | tr -d '\n')
30 | }
31 |
32 | # Users can write their own version of this method. It should call
33 | # iterm2_set_user_var but not produce any other output.
34 | # e.g., iterm2_set_user_var currentDirectory $PWD
35 | # Accessible in iTerm2 (in a badge now, elsewhere in the future) as
36 | # \(user.currentDirectory).
37 | whence -v iterm2_print_user_vars > /dev/null 2>&1
38 | if [ $? -ne 0 ]; then
39 | iterm2_print_user_vars() {
40 | true
41 | }
42 | fi
43 |
44 | iterm2_print_state_data() {
45 | local _iterm2_hostname="${iterm2_hostname-}"
46 | if [ -z "${iterm2_hostname:-}" ]; then
47 | _iterm2_hostname=$(hostname -f 2>/dev/null)
48 | fi
49 | printf "\033]1337;RemoteHost=%s@%s\007" "$USER" "${_iterm2_hostname-}"
50 | printf "\033]1337;CurrentDir=%s\007" "$PWD"
51 | iterm2_print_user_vars
52 | }
53 |
    # Report return code of command; runs after command finishes but before prompt
    # NOTE(review): iterm2_precmd passes "$STATUS" as $1, but $1 is never read
    # here — printf picks up the caller's `local STATUS` via zsh's dynamic
    # scoping of locals. Works, but the argument is effectively unused.
    iterm2_after_cmd_executes() {
      printf "\033]133;D;%s\007" "$STATUS"
      iterm2_print_state_data
    }
59 |
60 | # Mark start of prompt
61 | iterm2_prompt_mark() {
62 | printf "\033]133;A\007"
63 | }
64 |
65 | # Mark end of prompt
66 | iterm2_prompt_end() {
67 | printf "\033]133;B\007"
68 | }
69 |
70 | # There are three possible paths in life.
71 | #
72 | # 1) A command is entered at the prompt and you press return.
73 | # The following steps happen:
74 | # * iterm2_preexec is invoked
75 | # * PS1 is set to ITERM2_PRECMD_PS1
76 | # * ITERM2_SHOULD_DECORATE_PROMPT is set to 1
77 | # * The command executes (possibly reading or modifying PS1)
78 | # * iterm2_precmd is invoked
79 | # * ITERM2_PRECMD_PS1 is set to PS1 (as modified by command execution)
80 | # * PS1 gets our escape sequences added to it
81 | # * zsh displays your prompt
82 | # * You start entering a command
83 | #
84 | # 2) You press ^C while entering a command at the prompt.
85 | # The following steps happen:
86 | # * (iterm2_preexec is NOT invoked)
87 | # * iterm2_precmd is invoked
88 | # * iterm2_before_cmd_executes is called since we detected that iterm2_preexec was not run
89 | # * (ITERM2_PRECMD_PS1 and PS1 are not messed with, since PS1 already has our escape
90 | # sequences and ITERM2_PRECMD_PS1 already has PS1's original value)
91 | # * zsh displays your prompt
92 | # * You start entering a command
93 | #
94 | # 3) A new shell is born.
95 | # * PS1 has some initial value, either zsh's default or a value set before this script is sourced.
96 | # * iterm2_precmd is invoked
97 | # * ITERM2_SHOULD_DECORATE_PROMPT is initialized to 1
98 | # * ITERM2_PRECMD_PS1 is set to the initial value of PS1
99 | # * PS1 gets our escape sequences added to it
100 | # * Your prompt is shown and you may begin entering a command.
101 | #
102 | # Invariants:
103 | # * ITERM2_SHOULD_DECORATE_PROMPT is 1 during and just after command execution, and "" while the prompt is
104 | # shown and until you enter a command and press return.
105 | # * PS1 does not have our escape sequences during command execution
106 | # * After the command executes but before a new one begins, PS1 has escape sequences and
107 | # ITERM2_PRECMD_PS1 has PS1's original value.
108 | iterm2_decorate_prompt() {
109 | # This should be a raw PS1 without iTerm2's stuff. It could be changed during command
110 | # execution.
111 | ITERM2_PRECMD_PS1="$PS1"
112 | ITERM2_SHOULD_DECORATE_PROMPT=""
113 |
114 | # Add our escape sequences just before the prompt is shown.
      # Use ITERM2_SQUELCH_MARK for people who can't modify PS1 directly, like powerlevel9k users.
      # This is gross but I had a heck of a time writing a correct if statement for zsh 5.0.2.
117 | local PREFIX=""
118 | if [[ $PS1 == *"$(iterm2_prompt_mark)"* ]]; then
119 | PREFIX=""
120 | elif [[ "${ITERM2_SQUELCH_MARK-}" != "" ]]; then
121 | PREFIX=""
122 | else
123 | PREFIX="%{$(iterm2_prompt_mark)%}"
124 | fi
125 | PS1="$PREFIX$PS1%{$(iterm2_prompt_end)%}"
126 | ITERM2_DECORATED_PS1="$PS1"
127 | }
128 |
129 | iterm2_precmd() {
130 | local STATUS="$?"
131 | if [ -z "${ITERM2_SHOULD_DECORATE_PROMPT-}" ]; then
132 | # You pressed ^C while entering a command (iterm2_preexec did not run)
133 | iterm2_before_cmd_executes
134 | if [ "$PS1" != "${ITERM2_DECORATED_PS1-}" ]; then
135 | # PS1 changed, perhaps in another precmd. See issue 9938.
136 | ITERM2_SHOULD_DECORATE_PROMPT="1"
137 | fi
138 | fi
139 |
140 | iterm2_after_cmd_executes "$STATUS"
141 |
142 | if [ -n "$ITERM2_SHOULD_DECORATE_PROMPT" ]; then
143 | iterm2_decorate_prompt
144 | fi
145 | }
146 |
147 | # This is not run if you press ^C while entering a command.
148 | iterm2_preexec() {
149 | # Set PS1 back to its raw value prior to executing the command.
150 | PS1="$ITERM2_PRECMD_PS1"
151 | ITERM2_SHOULD_DECORATE_PROMPT="1"
152 | iterm2_before_cmd_executes
153 | }
154 |
155 | # If hostname -f is slow on your system set iterm2_hostname prior to
156 | # sourcing this script. We know it is fast on macOS so we don't cache
157 | # it. That lets us handle the hostname changing like when you attach
158 | # to a VPN.
159 | if [ -z "${iterm2_hostname-}" ]; then
160 | if [ "$(uname)" != "Darwin" ]; then
161 | iterm2_hostname=`hostname -f 2>/dev/null`
162 | # Some flavors of BSD (i.e. NetBSD and OpenBSD) don't have the -f option.
163 | if [ $? -ne 0 ]; then
164 | iterm2_hostname=`hostname`
165 | fi
166 | fi
167 | fi
168 |
169 | [[ -z ${precmd_functions-} ]] && precmd_functions=()
170 | precmd_functions=($precmd_functions iterm2_precmd)
171 |
172 | [[ -z ${preexec_functions-} ]] && preexec_functions=()
173 | preexec_functions=($preexec_functions iterm2_preexec)
174 |
175 | iterm2_print_state_data
176 | printf "\033]1337;ShellIntegrationVersion=14;shell=zsh\007"
177 | fi
178 | fi
179 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/conf-files/passwd:
--------------------------------------------------------------------------------
1 | root:x:0:0:root:/root:/usr/bin/zsh
2 | daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
3 | bin:x:2:2:bin:/bin:/usr/sbin/nologin
4 | sys:x:3:3:sys:/dev:/usr/sbin/nologin
5 | sync:x:4:65534:sync:/bin:/bin/sync
6 | games:x:5:60:games:/usr/games:/usr/sbin/nologin
7 | man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
8 | lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
9 | mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
10 | news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
11 | uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
12 | proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
13 | www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
14 | backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
15 | list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
16 | irc:x:39:39:ircd:/run/ircd:/usr/sbin/nologin
17 | gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
18 | nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
19 | _apt:x:100:65534::/nonexistent:/usr/sbin/nologin
20 | sshd:x:101:65534::/run/sshd:/usr/sbin/nologin
21 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/conf-files/thebloke.zsh-theme:
--------------------------------------------------------------------------------
1 | PROMPT="\$(virtualenv_prompt_info)${FG[133]} ${USER}@${HOST}:${FG[117]}%~%{$reset_color%}\$(git_prompt_info)\$(git_prompt_status) ${FG[077]}ᐅ%{$reset_color%} "
2 |
3 | ZSH_THEME_GIT_PROMPT_PREFIX=" ${FG[012]}("
4 | ZSH_THEME_GIT_PROMPT_SUFFIX="${FG[012]})%{$reset_color%}"
5 | ZSH_THEME_GIT_PROMPT_DIRTY=" ${FG[133]}✘"
6 | ZSH_THEME_GIT_PROMPT_CLEAN=" ${FG[118]}✔"
7 |
8 | ZSH_THEME_GIT_PROMPT_ADDED="${FG[082]}✚%{$reset_color%}"
9 | ZSH_THEME_GIT_PROMPT_MODIFIED="${FG[166]}✹%{$reset_color%}"
10 | ZSH_THEME_GIT_PROMPT_DELETED="${FG[160]}✖%{$reset_color%}"
11 | ZSH_THEME_GIT_PROMPT_RENAMED="${FG[220]}➜%{$reset_color%}"
12 | ZSH_THEME_GIT_PROMPT_UNMERGED="${FG[082]}═%{$reset_color%}"
13 | ZSH_THEME_GIT_PROMPT_UNTRACKED="${FG[190]}✭%{$reset_color%}"
14 |
15 | ZSH_THEME_VIRTUALENV_PREFIX=" ["
16 | ZSH_THEME_VIRTUALENV_SUFFIX="]"
17 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/conf-files/tmux.conf:
--------------------------------------------------------------------------------
1 | # Ring the bell if any background window rang a bell
2 | set -g bell-action any
3 |
4 | # Default termtype. If the rcfile sets $TERM, that overrides this value.
5 | set -g default-terminal screen-256color
6 |
7 | # Keep your finger on ctrl, or don't
8 | #bind-key ^D detach-client
9 |
10 | # Create splits and vertical splits
11 | bind-key v split-window -h -p 50 -c "#{pane_current_path}"
12 | bind-key ^V split-window -h -p 50 -c "#{pane_current_path}"
13 | bind-key s split-window -p 50 -c "#{pane_current_path}"
14 | bind-key ^S split-window -p 50 -c "#{pane_current_path}"
15 |
16 | # Pane resize in all four directions using vi bindings.
17 | # Can use these raw but I map them to shift-ctrl- in iTerm.
18 | bind -r H resize-pane -L 5
19 | bind -r J resize-pane -D 5
20 | bind -r K resize-pane -U 5
21 | bind -r L resize-pane -R 5
22 |
23 | # Smart pane switching with awareness of Vim splits.
24 | # See: https://github.com/christoomey/vim-tmux-navigator
25 | is_vim="ps -o state= -o comm= -t '#{pane_tty}' \
26 | | grep -iqE '^[^TXZ ]+ +(\\S+\\/)?g?(view|n?vim?x?)(diff)?$'"
27 | #bind-key -n 'C-h' if-shell "$is_vim" 'send-keys C-h' 'select-pane -L'
28 | #bind-key -n 'C-j' if-shell "$is_vim" 'send-keys C-j' 'select-pane -D'
29 | #bind-key -n 'C-k' if-shell "$is_vim" 'send-keys C-k' 'select-pane -U'
30 | #bind-key -n 'C-l' if-shell "$is_vim" 'send-keys C-l' 'select-pane -R'
31 |
32 | tmux_version='$(tmux -V | sed -En "s/^tmux ([0-9]+(.[0-9]+)?).*/\1/p")'
33 | if-shell -b '[ "$(echo "$tmux_version < 3.0" | bc)" = 1 ]' \
34 | "bind-key -n 'C-\\' if-shell \"$is_vim\" 'send-keys C-\\' 'select-pane -l'"
35 | if-shell -b '[ "$(echo "$tmux_version >= 3.0" | bc)" = 1 ]' \
36 | "bind-key -n 'C-\\' if-shell \"$is_vim\" 'send-keys C-\\\\' 'select-pane -l'"
37 |
38 | bind-key -T copy-mode-vi 'C-h' select-pane -L
39 | bind-key -T copy-mode-vi 'C-j' select-pane -D
40 | bind-key -T copy-mode-vi 'C-k' select-pane -U
41 | bind-key -T copy-mode-vi 'C-l' select-pane -R
42 | bind-key -T copy-mode-vi 'C-\' select-pane -l
43 |
44 | # Use vi keybindings for tmux commandline input.
45 | # Note that to get command mode you need to hit ESC twice...
46 | #set -g status-keys vi
47 |
48 | # Use vi keybindings in copy and choice modes
49 | #setw -g mode-keys vi
50 |
51 | # easily toggle synchronization (mnemonic: e is for echo)
52 | # sends input to all panes in a given window.
53 | bind e setw synchronize-panes on
54 | bind E setw synchronize-panes off
55 |
56 | # set first window to index 1 (not 0) to map more to the keyboard layout...
57 | set-option -g base-index 1
58 | set-window-option -g pane-base-index 1
59 | set-window-option -g mouse on
60 |
61 | # color scheme (styled as vim-powerline)
62 | set -g status-left-length 52
63 | set -g status-right-length 451
64 | set -g status-style fg=white,bg=colour234
65 | set -g pane-border-style fg=colour245
66 | set -g pane-active-border-style fg=colour39
67 | set -g message-style fg=colour16,bg=colour221,bold
68 | set -g status-left '#[fg=colour235,bg=colour252,bold] ❐ #S #[fg=colour252,bg=colour238,nobold]⮀#[fg=colour245,bg=colour238,bold] #(whoami) #[fg=colour238,bg=colour234,nobold]⮀'
69 | set -g window-status-format '#[fg=colour235,bg=colour252,bold] #I #(pwd="#{pane_current_path}"; echo ${pwd####*/}) #W '
70 | set -g window-status-current-format '#[fg=colour234,bg=colour39]⮀#[fg=black,bg=colour39,noreverse,bold] #{?window_zoomed_flag,#[fg=colour228],} #I #(pwd="#{pane_current_path}"; echo ${pwd####*/}) #W #[fg=colour39,bg=colour234,nobold]⮀'
71 | set-option -g status-interval 2
72 |
73 | # Patch for OS X pbpaste and pbcopy under tmux.
74 | #set-option -g default-command "which reattach-to-user-namespace > /dev/null && reattach-to-user-namespace -l $SHELL || $SHELL"
75 |
76 | # No escape time for vi mode
77 | set -sg escape-time 0
78 |
79 | # Screen like binding for last window
80 | unbind l
81 | bind C-a last-window
82 |
83 | # Bigger history
84 | set -g history-limit 50000
85 |
86 | # New windows/pane in $PWD
87 | bind c new-window -c "#{pane_current_path}"
88 |
89 | # Fix key bindings broken in tmux 2.1
90 | set -g assume-paste-time 0
91 |
92 | # force a reload of the config file
93 | unbind r
94 | bind r source-file ~/.tmux.conf \; display "Reloaded!"
95 |
96 | # Local config
97 | if-shell "[ -f ~/.tmux.conf.user ]" 'source ~/.tmux.conf.user'
98 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/conf-files/vimrc:
--------------------------------------------------------------------------------
1 | " Use Vim settings, rather then Vi settings (much better!).
2 | " This must be first, because it changes other options as a side effect.
3 | set nocompatible
4 |
5 | " TODO: this may not be in the correct place. It is intended to allow overriding .
6 | " source ~/.vimrc.before if it exists.
7 | if filereadable(expand("~/.vimrc.before"))
8 | source ~/.vimrc.before
9 | endif
10 |
11 | " ================ General Config ====================
12 |
13 | set number "Line numbers are good
14 | set backspace=indent,eol,start "Allow backspace in insert mode
15 | set history=1000 "Store lots of :cmdline history
16 | set showcmd "Show incomplete cmds down the bottom
17 | set showmode "Show current mode down the bottom
18 | set gcr=a:blinkon0 "Disable cursor blink
19 | set visualbell "No sounds
20 | set autoread "Reload files changed outside vim
21 |
22 | " This makes vim act like all other editors, buffers can
23 | " exist in the background without being in a window.
24 | " http://items.sjbach.com/319/configuring-vim-right
25 | set hidden
26 |
27 | "turn on syntax highlighting
28 | syntax on
29 |
30 | " Change leader to a comma because the backslash is too far away
31 | " That means all \x commands turn into ,x
32 | " The mapleader has to be set before vundle starts loading all
33 | " the plugins.
34 | let mapleader=","
35 |
36 | " =============== Vundle Initialization ===============
37 | " This loads all the plugins specified in ~/.vim/vundles.vim
38 | " Use Vundle plugin to manage all other plugins
39 | if filereadable(expand("~/.vim/vundles.vim"))
40 | source ~/.vim/vundles.vim
41 | endif
42 | au BufNewFile,BufRead *.vundle set filetype=vim
43 |
44 | " ================ Turn Off Swap Files ==============
45 |
46 | set noswapfile
47 | set nobackup
48 | set nowb
49 |
50 | " ================ Persistent Undo ==================
51 | " Keep undo history across sessions, by storing in file.
52 | " Only works all the time.
53 | if has('persistent_undo') && isdirectory(expand('~').'/.vim/backups')
54 | silent !mkdir ~/.vim/backups > /dev/null 2>&1
55 | set undodir=~/.vim/backups
56 | set undofile
57 | endif
58 |
59 | " ================ Indentation ======================
60 |
61 | set autoindent
62 | set smartindent
63 | set smarttab
64 | set shiftwidth=2
65 | set softtabstop=2
66 | set tabstop=2
67 | set expandtab
68 |
69 | " Auto indent pasted text
70 | nnoremap p p=`]
71 | nnoremap P P=`]
72 |
73 | filetype plugin on
74 | filetype indent on
75 |
76 | " Display tabs and trailing spaces visually
77 |
78 | set nowrap "Don't wrap lines
79 | set linebreak "Wrap lines at convenient points
80 |
81 | " ================ Folds ============================
82 |
83 | set foldmethod=indent "fold based on indent
84 | set foldnestmax=3 "deepest fold is 3 levels
85 | set nofoldenable "dont fold by default
86 |
87 | " ================ Completion =======================
88 |
89 | set wildmode=list:longest
90 | set wildmenu "enable ctrl-n and ctrl-p to scroll thru matches
91 | set wildignore=*.o,*.obj,*~ "stuff to ignore when tab completing
92 | set wildignore+=*vim/backups*
93 | set wildignore+=*sass-cache*
94 | set wildignore+=*DS_Store*
95 | set wildignore+=vendor/rails/**
96 | set wildignore+=vendor/cache/**
97 | set wildignore+=*.gem
98 | set wildignore+=log/**
99 | set wildignore+=tmp/**
100 | set wildignore+=*.png,*.jpg,*.gif
101 |
102 | " ================ Scrolling ========================
103 |
104 | set scrolloff=8 "Start scrolling when we're 8 lines away from margins
105 | set sidescrolloff=15
106 | set sidescroll=1
107 |
108 | " ================ Search ===========================
109 |
110 | set incsearch " Find the next match as we type the search
111 | set hlsearch " Highlight searches by default
112 | set ignorecase " Ignore case when searching...
113 | set smartcase " ...unless we type a capital
114 |
115 | " ================ Security ==========================
116 | set modelines=0
117 | set nomodeline
118 |
119 | " ================ Custom Settings ========================
120 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/conf-files/zshrc:
--------------------------------------------------------------------------------
1 | # If you come from bash you might have to change your $PATH.
2 | # export PATH=$HOME/bin:/usr/local/bin:$PATH
3 |
4 | # Path to your oh-my-zsh installation.
5 | export ZSH="/root/ohmyzsh"
6 |
7 | HISTFILE=/workspace/.zsh_history
8 |
9 | # Set name of the theme to load --- if set to "random", it will
10 | # load a random theme each time oh-my-zsh is loaded, in which case,
11 | # to know which specific one was loaded, run: echo $RANDOM_THEME
12 | # See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
13 | ZSH_THEME="thebloke"
14 |
15 | # Set list of themes to pick from when loading at random
16 | # Setting this variable when ZSH_THEME=random will cause zsh to load
17 | # a theme from this variable instead of looking in $ZSH/themes/
18 | # If set to an empty array, this variable will have no effect.
19 | # ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
20 |
21 | # Uncomment the following line to use case-sensitive completion.
22 | # CASE_SENSITIVE="true"
23 |
24 | # Uncomment the following line to use hyphen-insensitive completion.
25 | # Case-sensitive completion must be off. _ and - will be interchangeable.
26 | # HYPHEN_INSENSITIVE="true"
27 |
28 | # Uncomment one of the following lines to change the auto-update behavior
29 | # zstyle ':omz:update' mode disabled # disable automatic updates
30 | # zstyle ':omz:update' mode auto # update automatically without asking
31 | zstyle ':omz:update' mode reminder # just remind me to update when it's time
32 |
33 | # Uncomment the following line to change how often to auto-update (in days).
34 | # zstyle ':omz:update' frequency 13
35 |
36 | # Uncomment the following line if pasting URLs and other text is messed up.
37 | # DISABLE_MAGIC_FUNCTIONS="true"
38 |
39 | # Uncomment the following line to disable colors in ls.
40 | # DISABLE_LS_COLORS="true"
41 |
42 | # Uncomment the following line to disable auto-setting terminal title.
43 | # DISABLE_AUTO_TITLE="true"
44 |
45 | # Uncomment the following line to enable command auto-correction.
46 | # ENABLE_CORRECTION="true"
47 |
48 | # Uncomment the following line to display red dots whilst waiting for completion.
49 | # You can also set it to another string to have that shown instead of the default red dots.
50 | # e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f"
51 | # Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765)
52 | COMPLETION_WAITING_DOTS="true"
53 |
54 | # Uncomment the following line if you want to disable marking untracked files
55 | # under VCS as dirty. This makes repository status check for large repositories
56 | # much, much faster.
57 | # DISABLE_UNTRACKED_FILES_DIRTY="true"
58 |
59 | # Uncomment the following line if you want to change the command execution time
60 | # stamp shown in the history command output.
61 | # You can set one of the optional three formats:
62 | # "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
63 | # or set a custom format using the strftime function format specifications,
64 | # see 'man strftime' for details.
65 | HIST_STAMPS="dd.mm.yyyy"
66 |
67 | # Would you like to use another custom folder than $ZSH/custom?
68 | # ZSH_CUSTOM=/path/to/new-custom-folder
69 |
70 | # Which plugins would you like to load?
71 | # Standard plugins can be found in $ZSH/plugins/
72 | # Custom plugins may be added to $ZSH_CUSTOM/plugins/
73 | # Example format: plugins=(rails git textmate ruby lighthouse)
74 | # Add wisely, as too many plugins slow down shell startup.
75 | plugins=(git python tmux iterm2 fasd pip tmux virtualenv)
76 |
77 | zstyle :omz:plugins:iterm2 shell-integration yes
78 |
79 | export ZSH_TMUX_AUTOSTART=false
80 | export ZSH_DISABLE_COMPFIX=true
81 |
82 | source $ZSH/oh-my-zsh.sh
83 |
84 | LS_COLORS="*~=0;38;2;102;102;102:bd=0;38;2;154;237;254;48;2;51;51;51:ca=0:cd=0;38;2;255;106;193;48;2;51;51;51:di=0;38;2;87;199;255:do=0;38;2;0;0;0;48;2;255;106;193:ex=1;38;2;255;92;87:fi=0:ln=0;38;2;255;106;193:mh=0:mi=0;38;2;0;0;0;48;2;255;92;87:no=0:or=0;38;2;0;0;0;48;2;255;92;87:ow=0:pi=0;38;2;0;0;0;48;2;87;199;255:rs=0:sg=0:so=0;38;2;0;0;0;48;2;255;106;193:st=0:su=0:tw=0:*.a=1;38;2;255;92;87:*.c=0;38;2;90;247;142:*.d=0;38;2;90;247;142:*.h=0;38;2;90;247;142:*.m=0;38;2;90;247;142:*.o=0;38;2;102;102;102:*.p=0;38;2;90;247;142:*.r=0;38;2;90;247;142:*.t=0;38;2;90;247;142:*.z=4;38;2;154;237;254:*.7z=4;38;2;154;237;254:*.as=0;38;2;90;247;142:*.bc=0;38;2;102;102;102:*.bz=4;38;2;154;237;254:*.cc=0;38;2;90;247;142:*.cp=0;38;2;90;247;142:*.cr=0;38;2;90;247;142:*.cs=0;38;2;90;247;142:*.di=0;38;2;90;247;142:*.el=0;38;2;90;247;142:*.ex=0;38;2;90;247;142:*.fs=0;38;2;90;247;142:*.go=0;38;2;90;247;142:*.gv=0;38;2;90;247;142:*.gz=4;38;2;154;237;254:*.hh=0;38;2;90;247;142:*.hi=0;38;2;102;102;102:*.hs=0;38;2;90;247;142:*.jl=0;38;2;90;247;142:*.js=0;38;2;90;247;142:*.ko=1;38;2;255;92;87:*.kt=0;38;2;90;247;142:*.la=0;38;2;102;102;102:*.ll=0;38;2;90;247;142:*.lo=0;38;2;102;102;102:*.md=0;38;2;243;249;157:*.ml=0;38;2;90;247;142:*.mn=0;38;2;90;247;142:*.nb=0;38;2;90;247;142:*.pl=0;38;2;90;247;142:*.pm=0;38;2;90;247;142:*.pp=0;38;2;90;247;142:*.ps=0;38;2;255;92;87:*.py=0;38;2;90;247;142:*.rb=0;38;2;90;247;142:*.rm=0;38;2;255;180;223:*.rs=0;38;2;90;247;142:*.sh=0;38;2;90;247;142:*.so=1;38;2;255;92;87:*.td=0;38;2;90;247;142:*.ts=0;38;2;90;247;142:*.ui=0;38;2;243;249;157:*.vb=0;38;2;90;247;142:*.wv=0;38;2;255;180;223:*.xz=4;38;2;154;237;254:*.aif=0;38;2;255;180;223:*.ape=0;38;2;255;180;223:*.apk=4;38;2;154;237;254:*.arj=4;38;2;154;237;254:*.asa=0;38;2;90;247;142:*.aux=0;38;2;102;102;102:*.avi=0;38;2;255;180;223:*.awk=0;38;2;90;247;142:*.bag=4;38;2;154;237;254:*.bak=0;38;2;102;102;102:*.bat=1;38;2;255;92;87:*.bbl=0;38;2;102;102;102:*.bcf=0;38;2;102;102;102:*.bib=0;38;2;243;249;157:*.bin=
4;38;2;154;237;254:*.blg=0;38;2;102;102;102:*.bmp=0;38;2;255;180;223:*.bsh=0;38;2;90;247;142:*.bst=0;38;2;243;249;157:*.bz2=4;38;2;154;237;254:*.c++=0;38;2;90;247;142:*.cfg=0;38;2;243;249;157:*.cgi=0;38;2;90;247;142:*.clj=0;38;2;90;247;142:*.com=1;38;2;255;92;87:*.cpp=0;38;2;90;247;142:*.css=0;38;2;90;247;142:*.csv=0;38;2;243;249;157:*.csx=0;38;2;90;247;142:*.cxx=0;38;2;90;247;142:*.deb=4;38;2;154;237;254:*.def=0;38;2;90;247;142:*.dll=1;38;2;255;92;87:*.dmg=4;38;2;154;237;254:*.doc=0;38;2;255;92;87:*.dot=0;38;2;90;247;142:*.dox=0;38;2;165;255;195:*.dpr=0;38;2;90;247;142:*.elc=0;38;2;90;247;142:*.elm=0;38;2;90;247;142:*.epp=0;38;2;90;247;142:*.eps=0;38;2;255;180;223:*.erl=0;38;2;90;247;142:*.exe=1;38;2;255;92;87:*.exs=0;38;2;90;247;142:*.fls=0;38;2;102;102;102:*.flv=0;38;2;255;180;223:*.fnt=0;38;2;255;180;223:*.fon=0;38;2;255;180;223:*.fsi=0;38;2;90;247;142:*.fsx=0;38;2;90;247;142:*.gif=0;38;2;255;180;223:*.git=0;38;2;102;102;102:*.gvy=0;38;2;90;247;142:*.h++=0;38;2;90;247;142:*.hpp=0;38;2;90;247;142:*.htc=0;38;2;90;247;142:*.htm=0;38;2;243;249;157:*.hxx=0;38;2;90;247;142:*.ico=0;38;2;255;180;223:*.ics=0;38;2;255;92;87:*.idx=0;38;2;102;102;102:*.ilg=0;38;2;102;102;102:*.img=4;38;2;154;237;254:*.inc=0;38;2;90;247;142:*.ind=0;38;2;102;102;102:*.ini=0;38;2;243;249;157:*.inl=0;38;2;90;247;142:*.ipp=0;38;2;90;247;142:*.iso=4;38;2;154;237;254:*.jar=4;38;2;154;237;254:*.jpg=0;38;2;255;180;223:*.kex=0;38;2;255;92;87:*.kts=0;38;2;90;247;142:*.log=0;38;2;102;102;102:*.ltx=0;38;2;90;247;142:*.lua=0;38;2;90;247;142:*.m3u=0;38;2;255;180;223:*.m4a=0;38;2;255;180;223:*.m4v=0;38;2;255;180;223:*.mid=0;38;2;255;180;223:*.mir=0;38;2;90;247;142:*.mkv=0;38;2;255;180;223:*.mli=0;38;2;90;247;142:*.mov=0;38;2;255;180;223:*.mp3=0;38;2;255;180;223:*.mp4=0;38;2;255;180;223:*.mpg=0;38;2;255;180;223:*.nix=0;38;2;243;249;157:*.odp=0;38;2;255;92;87:*.ods=0;38;2;255;92;87:*.odt=0;38;2;255;92;87:*.ogg=0;38;2;255;180;223:*.org=0;38;2;243;249;157:*.otf=0;38;2;255;180;223:*.out=0;38;2;102;102;102:*.pas
=0;38;2;90;247;142:*.pbm=0;38;2;255;180;223:*.pdf=0;38;2;255;92;87:*.pgm=0;38;2;255;180;223:*.php=0;38;2;90;247;142:*.pid=0;38;2;102;102;102:*.pkg=4;38;2;154;237;254:*.png=0;38;2;255;180;223:*.pod=0;38;2;90;247;142:*.ppm=0;38;2;255;180;223:*.pps=0;38;2;255;92;87:*.ppt=0;38;2;255;92;87:*.pro=0;38;2;165;255;195:*.ps1=0;38;2;90;247;142:*.psd=0;38;2;255;180;223:*.pyc=0;38;2;102;102;102:*.pyd=0;38;2;102;102;102:*.pyo=0;38;2;102;102;102:*.rar=4;38;2;154;237;254:*.rpm=4;38;2;154;237;254:*.rst=0;38;2;243;249;157:*.rtf=0;38;2;255;92;87:*.sbt=0;38;2;90;247;142:*.sql=0;38;2;90;247;142:*.sty=0;38;2;102;102;102:*.svg=0;38;2;255;180;223:*.swf=0;38;2;255;180;223:*.swp=0;38;2;102;102;102:*.sxi=0;38;2;255;92;87:*.sxw=0;38;2;255;92;87:*.tar=4;38;2;154;237;254:*.tbz=4;38;2;154;237;254:*.tcl=0;38;2;90;247;142:*.tex=0;38;2;90;247;142:*.tgz=4;38;2;154;237;254:*.tif=0;38;2;255;180;223:*.tml=0;38;2;243;249;157:*.tmp=0;38;2;102;102;102:*.toc=0;38;2;102;102;102:*.tsx=0;38;2;90;247;142:*.ttf=0;38;2;255;180;223:*.txt=0;38;2;243;249;157:*.vcd=4;38;2;154;237;254:*.vim=0;38;2;90;247;142:*.vob=0;38;2;255;180;223:*.wav=0;38;2;255;180;223:*.wma=0;38;2;255;180;223:*.wmv=0;38;2;255;180;223:*.xcf=0;38;2;255;180;223:*.xlr=0;38;2;255;92;87:*.xls=0;38;2;255;92;87:*.xml=0;38;2;243;249;157:*.xmp=0;38;2;243;249;157:*.yml=0;38;2;243;249;157:*.zip=4;38;2;154;237;254:*.zsh=0;38;2;90;247;142:*.zst=4;38;2;154;237;254:*TODO=1:*hgrc=0;38;2;165;255;195:*.bash=0;38;2;90;247;142:*.conf=0;38;2;243;249;157:*.dart=0;38;2;90;247;142:*.diff=0;38;2;90;247;142:*.docx=0;38;2;255;92;87:*.epub=0;38;2;255;92;87:*.fish=0;38;2;90;247;142:*.flac=0;38;2;255;180;223:*.h264=0;38;2;255;180;223:*.hgrc=0;38;2;165;255;195:*.html=0;38;2;243;249;157:*.java=0;38;2;90;247;142:*.jpeg=0;38;2;255;180;223:*.json=0;38;2;243;249;157:*.less=0;38;2;90;247;142:*.lisp=0;38;2;90;247;142:*.lock=0;38;2;102;102;102:*.make=0;38;2;165;255;195:*.mpeg=0;38;2;255;180;223:*.opus=0;38;2;255;180;223:*.orig=0;38;2;102;102;102:*.pptx=0;38;2;255;92;87:*.psd1=0;38;2;9
0;247;142:*.psm1=0;38;2;90;247;142:*.purs=0;38;2;90;247;142:*.rlib=0;38;2;102;102;102:*.sass=0;38;2;90;247;142:*.scss=0;38;2;90;247;142:*.tbz2=4;38;2;154;237;254:*.tiff=0;38;2;255;180;223:*.toml=0;38;2;243;249;157:*.webm=0;38;2;255;180;223:*.webp=0;38;2;255;180;223:*.woff=0;38;2;255;180;223:*.xbps=4;38;2;154;237;254:*.xlsx=0;38;2;255;92;87:*.yaml=0;38;2;243;249;157:*.cabal=0;38;2;90;247;142:*.cache=0;38;2;102;102;102:*.class=0;38;2;102;102;102:*.cmake=0;38;2;165;255;195:*.dyn_o=0;38;2;102;102;102:*.ipynb=0;38;2;90;247;142:*.mdown=0;38;2;243;249;157:*.patch=0;38;2;90;247;142:*.scala=0;38;2;90;247;142:*.shtml=0;38;2;243;249;157:*.swift=0;38;2;90;247;142:*.toast=4;38;2;154;237;254:*.xhtml=0;38;2;243;249;157:*README=0;38;2;40;42;54;48;2;243;249;157:*passwd=0;38;2;243;249;157:*shadow=0;38;2;243;249;157:*.config=0;38;2;243;249;157:*.dyn_hi=0;38;2;102;102;102:*.flake8=0;38;2;165;255;195:*.gradle=0;38;2;90;247;142:*.groovy=0;38;2;90;247;142:*.ignore=0;38;2;165;255;195:*.matlab=0;38;2;90;247;142:*COPYING=0;38;2;153;153;153:*INSTALL=0;38;2;40;42;54;48;2;243;249;157:*LICENSE=0;38;2;153;153;153:*TODO.md=1:*.desktop=0;38;2;243;249;157:*.gemspec=0;38;2;165;255;195:*Doxyfile=0;38;2;165;255;195:*Makefile=0;38;2;165;255;195:*TODO.txt=1:*setup.py=0;38;2;165;255;195:*.DS_Store=0;38;2;102;102;102:*.cmake.in=0;38;2;165;255;195:*.fdignore=0;38;2;165;255;195:*.kdevelop=0;38;2;165;255;195:*.markdown=0;38;2;243;249;157:*.rgignore=0;38;2;165;255;195:*COPYRIGHT=0;38;2;153;153;153:*README.md=0;38;2;40;42;54;48;2;243;249;157:*configure=0;38;2;165;255;195:*.gitconfig=0;38;2;165;255;195:*.gitignore=0;38;2;165;255;195:*.localized=0;38;2;102;102;102:*.scons_opt=0;38;2;102;102;102:*CODEOWNERS=0;38;2;165;255;195:*Dockerfile=0;38;2;243;249;157:*INSTALL.md=0;38;2;40;42;54;48;2;243;249;157:*README.txt=0;38;2;40;42;54;48;2;243;249;157:*SConscript=0;38;2;165;255;195:*SConstruct=0;38;2;165;255;195:*.gitmodules=0;38;2;165;255;195:*.synctex.gz=0;38;2;102;102;102:*.travis.yml=0;38;2;90;247;142:*INSTALL.txt=0;
38;2;40;42;54;48;2;243;249;157:*LICENSE-MIT=0;38;2;153;153;153:*MANIFEST.in=0;38;2;165;255;195:*Makefile.am=0;38;2;165;255;195:*Makefile.in=0;38;2;102;102;102:*.applescript=0;38;2;90;247;142:*.fdb_latexmk=0;38;2;102;102;102:*CONTRIBUTORS=0;38;2;40;42;54;48;2;243;249;157:*appveyor.yml=0;38;2;90;247;142:*configure.ac=0;38;2;165;255;195:*.clang-format=0;38;2;165;255;195:*.gitattributes=0;38;2;165;255;195:*.gitlab-ci.yml=0;38;2;90;247;142:*CMakeCache.txt=0;38;2;102;102;102:*CMakeLists.txt=0;38;2;165;255;195:*LICENSE-APACHE=0;38;2;153;153;153:*CONTRIBUTORS.md=0;38;2;40;42;54;48;2;243;249;157:*.sconsign.dblite=0;38;2;102;102;102:*CONTRIBUTORS.txt=0;38;2;40;42;54;48;2;243;249;157:*requirements.txt=0;38;2;165;255;195:*package-lock.json=0;38;2;102;102;102:*.CFUserTextEncoding=0;38;2;102;102;102"
export LS_COLORS
# User configuration

# export MANPATH="/usr/local/man:$MANPATH"

# You may need to manually set your language environment
# export LANG=en_US.UTF-8

# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
#   export EDITOR='vim'
# else
#   export EDITOR='mvim'
# fi
export EDITOR=vim

# Compilation flags
# export ARCHFLAGS="-arch x86_64"

# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"

# Make bare `python` / `pip` resolve to the python3 toolchain.
alias python=python3
alias pip=pip3

source ~/.iterm2_shell_integration.zsh

# ^U kills the active region instead of zsh's default kill-whole-line.
bindkey "^U" kill-region

# Activate the persistent PyTorch venv if one exists on the volume.
if [[ -d /workspace/venv/pytorch2 ]]
then
    source /workspace/venv/pytorch2/bin/activate
fi

# Put nvcc and the CUDA toolchain on PATH.
export PATH=/usr/local/cuda/bin:"$PATH"

# Start new shells on the persistent volume.
cd /workspace
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/scripts/build-llama-cpp-python.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Install llama-cpp-python with GPU acceleration, overwriting the version installed by text-generation-webui
# We do this on boot, so as to ensure the binaries are built with the right CPU instructions for the machine being used

# Uninstall llama-cpp-python, which will have been auto installed by text-generation-webui's requirements.txt
pip3 uninstall -qy llama-cpp-python

# Check to see if this machine supports AVX2 instructions
if python3 /root/scripts/check_avx2.py; then
    export CMAKE_ARGS="-DLLAMA_CUBLAS=on -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc"
else
    # If it does not, we need to specifically tell llama-cpp-python not to use it,
    # as it's otherwise enabled by default. FMA and F16C are also disabled: they
    # default to ON in llama.cpp's CMake build and are almost never present on
    # CPUs that lack AVX2, so leaving them on would still produce a binary that
    # dies with SIGILL on this machine.
    export CMAKE_ARGS="-DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DLLAMA_F16C=OFF -DLLAMA_CUBLAS=on -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc"
fi

# nvcc must be on PATH for the cuBLAS build, and FORCE_CMAKE makes
# llama-cpp-python rebuild its native library from source.
export PATH=/usr/local/cuda/bin:"$PATH"
export FORCE_CMAKE=1

if pip3 install --no-cache-dir llama-cpp-python; then
    # touch this file so we don't build again on reboots
    touch /.built.llama-cpp-python
fi
25 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/scripts/check_avx2.py:
--------------------------------------------------------------------------------
import subprocess
import sys
import json


def _flags_from_lscpu():
    """Return the CPU flags list reported by `lscpu --json`, or None on failure.

    Newer lscpu versions nest fields under "children", so the search recurses
    into nested entries as well as scanning the top level.
    """
    try:
        result = subprocess.run(['lscpu', '--json'], stdout=subprocess.PIPE, check=True)
        lscpu_info = json.loads(result.stdout.decode('utf-8'))
    except (OSError, subprocess.CalledProcessError, ValueError):
        # lscpu missing, old (no --json), or emitted unparseable output.
        return None

    def search(entries):
        for info in entries:
            if info.get("field") == "Flags:":
                return info.get("data", "").split()
            found = search(info.get("children", []))
            if found is not None:
                return found
        return None

    return search(lscpu_info.get("lscpu", []))


def _flags_from_proc_cpuinfo():
    """Fallback: read the first 'flags' line from /proc/cpuinfo, or None on failure."""
    try:
        with open('/proc/cpuinfo') as f:
            for line in f:
                if line.startswith('flags'):
                    return line.split(':', 1)[1].split()
    except OSError:
        pass
    return None


def check_avx2_support():
    """Return True if this CPU advertises the AVX2 instruction set.

    Tries `lscpu --json` first, then /proc/cpuinfo. If neither source is
    readable, conservatively reports no AVX2 support (the original returned
    an implicit None when the Flags field was absent).
    """
    flags = _flags_from_lscpu()
    if flags is None:
        flags = _flags_from_proc_cpuinfo()

    supported = bool(flags) and "avx2" in flags
    if supported:
        print("This system supports AVX2.")
    else:
        print("This system does not support AVX2.")
    return supported


if __name__ == '__main__':
    # Exit status is consumed by build-llama-cpp-python.sh: 0 = AVX2 available.
    # Guarded so importing this module no longer exits the interpreter.
    sys.exit(0 if check_avx2_support() else 1)
25 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/scripts/download_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import base64
5 | import datetime
6 | import hashlib
7 | import json
8 | import re
9 | import sys
10 | from pathlib import Path
11 |
12 | import requests
13 | import tqdm
14 | from tqdm.contrib.concurrent import thread_map
15 |
def sanitize_model_and_branch_names(model, branch):
    """Normalise a Hugging Face model id and branch name.

    Strips trailing '/' from the model id, defaults the branch to "main",
    and validates that the branch contains only safe characters.

    Raises:
        ValueError: if the branch name contains disallowed characters.
    """
    # rstrip handles repeated trailing slashes and is safe on an empty string
    # (the original `model[-1]` check raised IndexError on "").
    model = model.rstrip('/')

    if branch is None:
        branch = "main"
    else:
        pattern = re.compile(r"^[a-zA-Z0-9._-]+$")
        if not pattern.match(branch):
            raise ValueError("Invalid branch name. Only alphanumeric characters, period, underscore and dash are allowed.")

    return model, branch
27 |
def get_download_links_from_huggingface(model, branch, text_only=False):
    """Walk the HF tree API for `model`@`branch` and collect download links.

    Returns (links, sha256, is_lora):
      links   -- list of resolve-URLs to download
      sha256  -- list of [filename, sha256-oid] pairs for LFS-tracked files
      is_lora -- True if the repo contains LoRA adapter files
    """
    base = "https://huggingface.co"
    page = f"/api/models/{model}/tree/{branch}"
    cursor = b""

    links = []
    sha256 = []
    classifications = []
    has_pytorch = False
    has_pt = False
    has_ggml = False
    has_safetensors = False
    is_lora = False
    while True:
        url = f"{base}{page}" + (f"?cursor={cursor.decode()}" if cursor else "")
        r = requests.get(url, timeout=10)
        r.raise_for_status()

        # Named `tree` rather than `dict` so the builtin is not shadowed.
        tree = json.loads(r.content)
        if len(tree) == 0:
            break

        for entry in tree:
            fname = entry['path']
            if not is_lora and fname.endswith(('adapter_config.json', 'adapter_model.bin')):
                is_lora = True

            # Raw strings: "\." in a plain string is an invalid escape
            # sequence (DeprecationWarning on modern Python).
            is_pytorch = re.match(r"(pytorch|adapter)_model.*\.bin", fname)
            is_safetensors = re.match(r".*\.safetensors", fname)
            is_pt = re.match(r".*\.pt", fname)
            is_ggml = re.match(r".*ggml.*\.bin", fname)
            is_tokenizer = re.match(r"(tokenizer|ice).*\.model", fname)
            is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer

            if any((is_pytorch, is_safetensors, is_pt, is_ggml, is_tokenizer, is_text)):
                if 'lfs' in entry:
                    sha256.append([fname, entry['lfs']['oid']])
                if is_text:
                    links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                    classifications.append('text')
                    continue
                if not text_only:
                    links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                    if is_safetensors:
                        has_safetensors = True
                        classifications.append('safetensors')
                    elif is_pytorch:
                        has_pytorch = True
                        classifications.append('pytorch')
                    elif is_pt:
                        has_pt = True
                        classifications.append('pt')
                    elif is_ggml:
                        has_ggml = True
                        classifications.append('ggml')

        # Build the pagination cursor from the last file seen. The tree API
        # expects a double-base64 cursor with '=' percent-encoded.
        cursor = base64.b64encode(f'{{"file_name":"{tree[-1]["path"]}"}}'.encode()) + b':50'
        cursor = base64.b64encode(cursor)
        cursor = cursor.replace(b'=', b'%3D')

    # If both pytorch and safetensors are available, download safetensors only
    if (has_pytorch or has_pt) and has_safetensors:
        for i in range(len(classifications) - 1, -1, -1):
            if classifications[i] in ['pytorch', 'pt']:
                links.pop(i)

    return links, sha256, is_lora
96 |
97 |
def get_output_folder(model, branch, is_lora, base_folder=None):
    """Pick the directory a download should land in.

    An explicit base_folder always wins; otherwise LoRA adapters go to
    'loras' and everything else to 'models'.
    """
    if base_folder is not None:
        return Path(base_folder)
    return Path('loras' if is_lora else 'models')
104 |
105 |
def get_single_file(url, output_folder, start_from_scratch=False):
    """Download one file into output_folder with a tqdm progress bar.

    Unless start_from_scratch is True, a partially-downloaded file is
    resumed with an HTTP Range request; a complete file is skipped.
    """
    filename = Path(url.rsplit('/', 1)[1])
    output_path = output_folder / filename
    if output_path.exists() and not start_from_scratch:
        # Check if the file has already been downloaded completely
        r = requests.get(url, stream=True, timeout=10)
        r.raise_for_status()
        total_size = int(r.headers.get('content-length', 0))
        if output_path.stat().st_size >= total_size:
            return
        # Otherwise, resume the download from where it left off
        headers = {'Range': f'bytes={output_path.stat().st_size}-'}
        mode = 'ab'
    else:
        headers = {}
        mode = 'wb'

    r = requests.get(url, stream=True, headers=headers, timeout=10)
    # Fail loudly on HTTP errors instead of writing an error page to disk.
    r.raise_for_status()
    if headers and r.status_code != 206:
        # Server ignored the Range request and sent the whole file from the
        # start; appending would corrupt it, so overwrite instead.
        mode = 'wb'
    with open(output_path, mode) as f:
        total_size = int(r.headers.get('content-length', 0))
        block_size = 1024
        with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True, bar_format='{l_bar}{bar}| {n_fmt:6}/{total_fmt:6} {rate_fmt:6}') as t:
            for data in r.iter_content(block_size):
                t.update(len(data))
                f.write(data)
130 |
131 |
def start_download_threads(file_list, output_folder, start_from_scratch=False, threads=1):
    """Download every URL in file_list into output_folder using a thread pool."""
    def fetch_one(url):
        # Each worker downloads a single file; resume behaviour is forwarded.
        get_single_file(url, output_folder, start_from_scratch=start_from_scratch)

    thread_map(fetch_one, file_list, max_workers=threads, disable=True)
134 |
135 |
def download_model_files(model, branch, links, sha256, output_folder, start_from_scratch=False, threads=1):
    """Create output_folder, write huggingface-metadata.txt, then download all links.

    The metadata file records the source URL, branch, download date and the
    expected sha256 of every LFS file so check_model_files can validate later.
    """
    # parents=True so a nested --output path (e.g. "a/b") doesn't crash with
    # FileNotFoundError; exist_ok replaces the original exists() pre-check.
    output_folder.mkdir(parents=True, exist_ok=True)
    with open(output_folder / 'huggingface-metadata.txt', 'w') as f:
        f.write(f'url: https://huggingface.co/{model}\n')
        f.write(f'branch: {branch}\n')
        f.write(f'download date: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n')
        sha256_str = ''.join(f'    {fhash} {fname}\n' for fname, fhash in sha256)
        if sha256_str != '':
            f.write(f'sha256sum:\n{sha256_str}')

    # Downloading the files
    print(f"Downloading the model to {output_folder}")
    start_download_threads(links, output_folder, start_from_scratch=start_from_scratch, threads=threads)
153 |
154 |
def check_model_files(model, branch, links, sha256, output_folder):
    """Validate the sha256 checksums of previously-downloaded model files.

    Prints a per-file report plus a summary line. Returns True when every
    file listed in `sha256` exists and matches its expected digest, else
    False (the original returned None; the boolean is backward-compatible).
    """
    validated = True
    for fname, expected in sha256:
        fpath = output_folder / fname

        if not fpath.exists():
            print(f"The following file is missing: {fpath}")
            validated = False
            continue

        # Hash in 1 MiB chunks so multi-GB model files are not read into
        # memory at once (the original buffered the whole file).
        hasher = hashlib.sha256()
        with open(fpath, "rb") as f:
            for chunk in iter(lambda: f.read(1024 * 1024), b""):
                hasher.update(chunk)
        if hasher.hexdigest() != expected:
            print(f'Checksum failed: {fname} {expected}')
            validated = False
        else:
            print(f'Checksum validated: {fname} {expected}')

    if validated:
        print('[+] Validated checksums of all model files!')
    else:
        print('[-] Invalid checksums. Rerun download-model.py with the --clean flag.')

    return validated
179 |
def download_model(model, base_folder, branch="main", threads=1, text_only=False, check=False):
    """Entry point: resolve the file list for an HF model, then download or verify it.

    Exits the process with a message if the branch name fails validation.
    """
    # Normalise and validate the model/branch names first.
    try:
        model, branch = sanitize_model_and_branch_names(model, branch)
    except ValueError as bad_branch:
        print(f"Error: {bad_branch}")
        sys.exit()

    # Work out what to fetch and where it should go.
    links, sha256, is_lora = get_download_links_from_huggingface(model, branch, text_only=text_only)
    output_folder = get_output_folder(model, branch, is_lora, base_folder)

    if check:
        # Validate files from an earlier run instead of downloading again.
        check_model_files(model, branch, links, sha256, output_folder)
    else:
        download_model_files(model, branch, links, sha256, output_folder, threads=threads)
200 |
if __name__ == '__main__':
    # Command-line interface: download_model.py <model> [--branch ...] [--output ...]
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str, default=None, nargs='?')
    parser.add_argument('--branch', type=str, default='main', help='Name of the Git branch to download from.')
    parser.add_argument('--threads', type=int, default=1, help='Number of files to download simultaneously.')
    parser.add_argument('--text-only', action='store_true', help='Only download text files (txt/json).')
    parser.add_argument('--output', type=str, default=None, help='The folder where the model should be saved.')
    parser.add_argument('--clean', action='store_true', help='Does not resume the previous download.')
    parser.add_argument('--check', action='store_true', help='Validates the checksums of model files.')
    opts = parser.parse_args()

    # NOTE(review): --clean is parsed but never forwarded to download_model,
    # so it currently has no effect — confirm intent before wiring it through.
    download_model(model=opts.model, base_folder=opts.output, branch=opts.branch,
                   threads=opts.threads, text_only=opts.text_only, check=opts.check)
214 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/scripts/fetch-model.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Fetch a model for text-generation-webui.

`model` may be a direct http(s) URL to a single file, or a Hugging Face repo
name (e.g. 'TheBloke/WizardLM-7B-Uncensored'). On success the downloaded
file or folder name is written to /tmp/text-gen-model so the launcher can
pass it via --model. Exits 0 on success, 1 on failure.
"""
import sys
import os
import argparse
import subprocess
from urllib.parse import urlparse
from huggingface_hub import model_info

parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('output_folder', type=str)
args = parser.parse_args()

SCRIPT_DIR = "/root/scripts"

model = args.model.strip()
output_folder = args.output_folder

success = False
retry_count = 0
while not success and retry_count < 10:
    os.makedirs(output_folder, exist_ok=True)
    os.chdir(output_folder)
    retry_count += 1
    print(f'Downloading {model} to {output_folder}, attempt {retry_count}')
    # startswith is deliberate: the original `'http' in model.lower()` would
    # also match repo names that merely contain "http".
    if model.lower().startswith(('http://', 'https://')):
        # We've been passed a URL to download
        parsed = urlparse(model)
        # split the path by '/' and get the filename
        filename = parsed.path.split("/")[-1]
        print(f"Passed URL: {model}", flush=True)
        # Argument-list form (no shell=True): the model string comes from a
        # user-supplied template variable and must never reach a shell.
        run = subprocess.run(['/usr/bin/wget', '--continue', '--progress=dot:giga', model],
                             check=False)
        write = filename
    elif model_info(model).id == model:
        # We've got an HF model, eg 'TheBloke/WizardLM-7B-Uncensored'
        print(f"Passed HF model: {model}", flush=True)
        model_folder = model.replace('/', '_')
        run = subprocess.run([f'{SCRIPT_DIR}/download_model.py', '--threads', '2',
                              '--output', os.path.join(output_folder, model_folder), args.model],
                             check=False)
        write = model_folder
    else:
        print(f"Error, {model} does not seem to be in a supported format.")
        success = False
        break
    if run.returncode == 0:
        # Successful download. Write the model file or folder name to /tmp for use in --model arg
        with open('/tmp/text-gen-model', 'w') as f:
            f.write(write + '\n')
        success = True

# Exit 0 for success, 1 for failure
sys.exit(not success)
52 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/scripts/restart-text-generation-webui.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Kill the running text-generation-webui server process; the launcher loop
# in start-with-ui.sh will bring it back up automatically.

echo -n "Restarting text-generation-webui: "

if pkill -f "python3 server.py"; then
    echo "DONE"
    echo "The UI will auto-restart in 2 seconds"
else
    echo "was not running"
fi
12 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/scripts/run-text-generation-webui.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Launch text-generation-webui. This script is moved onto the persistent
# volume so users can customise its arguments across pod restarts.

# Abort rather than running server.py from the wrong directory if cd fails.
cd /workspace/text-generation-webui || exit 1

# Edit these arguments if you want to customise text-generation-webui launch.
# Don't remove "$@" from the start unless you want to prevent automatic model loading from template arguments
ARGS=("$@" --listen --extensions openai)

echo "Launching text-generation-webui with args: ${ARGS[@]}"

python3 server.py "${ARGS[@]}"
11 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/scripts/textgen-on-workspace.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Relocate text-generation-webui onto the persistent /workspace volume and
# leave a symlink at the familiar /root location.

# Ensure we have /workspace in all scenarios
mkdir -p /workspace

if [[ ! -d /workspace/text-generation-webui ]]; then
    # If we don't already have /workspace/text-generation-webui, move it there
    mv /root/text-generation-webui /workspace
else
    # otherwise delete the default text-generation-webui folder which is always re-created on pod start from the Docker
    rm -rf /root/text-generation-webui
fi

# Then link /root/text-generation-webui folder to /workspace so it's available in that familiar location as well
# -sfn replaces any stale link left over from a previous run instead of failing.
ln -sfn /workspace/text-generation-webui /root/text-generation-webui
16 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-oneclick/start-with-ui.sh:
--------------------------------------------------------------------------------
#!/bin/bash
echo "Starting: Oobabooga's text-generation-webui. Container provided by TheBloke."

SCRIPTDIR=/root/scripts
VOLUME=/workspace

# If a volume is already defined, $VOLUME will already exist
# If a volume is not being used, we'll still use /workspace to ensure everything is in a known place.
mkdir -p $VOLUME/logs

# Start build of llama-cpp-python in background
if [[ ! -f /.built.llama-cpp-python ]]; then
    "$SCRIPTDIR"/build-llama-cpp-python.sh >>$VOLUME/logs/build-llama-cpp-python.log 2>&1 &
fi

# Install the user's SSH public key and start sshd if one was supplied.
if [[ $PUBLIC_KEY ]]; then
    mkdir -p ~/.ssh
    chmod 700 ~/.ssh
    cd ~/.ssh
    echo "$PUBLIC_KEY" >>authorized_keys
    chmod 700 -R ~/.ssh
    service ssh start
fi

# Move text-generation-webui's folder to $VOLUME so models and all config will persist
"$SCRIPTDIR"/textgen-on-workspace.sh

# If passed a MODEL variable from Runpod template, start it downloading
# This will block the UI until completed
# MODEL can be a HF repo name, eg 'TheBloke/guanaco-7B-GPTQ'
# or it can be a direct link to a single GGML file, eg 'https://huggingface.co/TheBloke/tulu-7B-GGML/resolve/main/tulu-7b.ggmlv3.q2_K.bin'
if [[ $MODEL ]]; then
    "$SCRIPTDIR"/fetch-model.py "$MODEL" $VOLUME/text-generation-webui/models >>$VOLUME/logs/fetch-model.log 2>&1
fi

# Update text-generation-webui to the latest commit
cd /workspace/text-generation-webui && git pull

# Update exllama to the latest commit
cd /workspace/text-generation-webui/repositories/exllama && git pull

# Move the script that launches text-gen to $VOLUME, so users can make persistent changes to CLI arguments
if [[ ! -f $VOLUME/run-text-generation-webui.sh ]]; then
    mv "$SCRIPTDIR"/run-text-generation-webui.sh $VOLUME/run-text-generation-webui.sh
fi

ARGS=()
while true; do
    # If the user wants to stop the UI from auto launching, they can run:
    # touch $VOLUME/do.not.launch.UI
    if [[ ! -f $VOLUME/do.not.launch.UI ]]; then
        # Launch the UI in a loop forever, allowing UI restart
        if [[ -f /tmp/text-gen-model ]]; then
            # If this file exists, we successfully downloaded a model file or folder
            # Therefore we auto load this model
            # NOTE(review): the launch block below was garbled in the text dump
            # this file was recovered from (an unescaped '<' swallowed several
            # lines); reconstructed from context — verify against upstream.
            ARGS=(--model "$(</tmp/text-gen-model)")
        fi
        (bash $VOLUME/run-text-generation-webui.sh "${ARGS[@]}" 2>&1) | tee -a $VOLUME/logs/text-generation-webui.log
    fi
    sleep 2
done

# shouldn't actually reach this point
sleep infinity
73 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/Dockerfile:
--------------------------------------------------------------------------------
# Build args for the base CUDA image; override with --build-arg to retarget.
ARG CUDA_VERSION="11.8.0"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"

# Base NVidia CUDA Ubuntu image
FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION AS base

ENV HOME /root
WORKDIR $HOME
ENV PYTHON_VERSION=3.10
ENV PATH="/usr/local/cuda/bin:${PATH}"
#
# Install Python plus openssh, which is our minimum set of required packages.
# Install useful command line utility software
ARG APTPKGS="zsh wget tmux tldr nvtop vim neovim curl rsync net-tools less iputils-ping 7zip zip unzip"
RUN apt-get update -y && \
    apt-get install -y python3 python3-pip python3-venv && \
    apt-get install -y --no-install-recommends openssh-server openssh-client git git-lfs && \
    python3 -m pip install --upgrade pip && \
    apt-get install -y --no-install-recommends $APTPKGS && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Install Miniconda for Python env management
# BASEPATH snapshots PATH before any conda env dir is prepended, so the env
# PATH below can be rebuilt from a clean baseline.
ENV PATH="${HOME}/miniconda3/bin:${PATH}"
ENV BASEPATH="${PATH}"
RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
    && mkdir ${HOME}/.conda \
    && bash Miniconda3-latest-Linux-x86_64.sh -b -p ${HOME}/miniconda3 \
    && rm -f Miniconda3-latest-Linux-x86_64.sh

# Make base conda environment
ENV CONDA=pytorch
RUN conda create -n "${CONDA}" python="${PYTHON_VERSION}"
# Put the env's bin dir first so pip/python below resolve inside the env.
ENV PATH="${HOME}/miniconda3/envs/${CONDA}/bin:${BASEPATH}"

# Install pytorch
ARG PYTORCH="2.0.1"
ARG CUDA="118"
RUN pip3 install --no-cache-dir -U torch==$PYTORCH torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu$CUDA

# Set up git to support LFS, and to store credentials; useful for Huggingface Hub
RUN git config --global credential.helper store && \
    git lfs install

# Install Oh My Zsh for better command line experience: https://github.com/ohmyzsh/ohmyzsh
RUN bash -c "ZSH=$HOME/ohmyzsh $(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" "" --unattended

# Add some config files for a nicer command line setup
COPY conf-files/vimrc $HOME/.vimrc
COPY conf-files/zshrc $HOME/.zshrc
COPY conf-files/thebloke.zsh-theme $HOME/ohmyzsh/custom/themes/
COPY conf-files/tmux.conf $HOME/.tmux.conf
# Additional ZSH config files
COPY conf-files/.zsh $HOME/.zsh
# Set default shell to ZSH
COPY conf-files/passwd /etc/passwd

# Install Neovim 0.9 and overwrite apt nvim with it (we still installed via apt as it installs various dependencies like LuaJIT)
RUN wget https://github.com/neovim/neovim/releases/download/v0.9.1/nvim-linux64.tar.gz && \
    tar xzvf $HOME/nvim-linux64.tar.gz && \
    rm nvim-linux64.tar.gz && \
    rm /usr/bin/nvim && \
    ln -s $HOME/nvim-linux64/bin/nvim /usr/bin/nvim

# Set up neovim lazy starter pack and launch nvim once to set up the plugins
RUN git clone https://github.com/LazyVim/starter $HOME/.config/nvim && \
    /usr/bin/nvim --headless -c ':q'

# start.sh only starts sshd; this image is a plain CUDA+PyTorch dev box.
COPY --chmod=755 start-ssh-only.sh /start.sh

WORKDIR /workspace

CMD [ "/start.sh" ]
75 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/conf-files/.zsh/conda.zsh:
--------------------------------------------------------------------------------
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!

# Default conda home is in the container
CONDA="$HOME/miniconda3"

# But if there's one on the volume, use that instead
if [[ -d /workspace/miniconda3 ]]; then
    CONDA=/workspace/miniconda3
fi

# Standard conda shell hook: prefer the generated hook, fall back to
# conda.sh, and as a last resort just put conda's bin dir on PATH.
__conda_setup="$('$CONDA/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "$CONDA/etc/profile.d/conda.sh" ]; then
        . "$CONDA/etc/profile.d/conda.sh"
    else
        export PATH="$CONDA/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Auto-activate the image's default env when it exists.
if [[ -d "$CONDA/envs/pytorch" ]]; then
    conda activate pytorch
fi
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/conf-files/.zsh/iterm2_shell_integration.zsh:
--------------------------------------------------------------------------------
1 | # This program is free software; you can redistribute it and/or
2 | # modify it under the terms of the GNU General Public License
3 | # as published by the Free Software Foundation; either version 2
4 | # of the License, or (at your option) any later version.
5 | #
6 | # This program is distributed in the hope that it will be useful,
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 | # GNU General Public License for more details.
10 | #
11 | # You should have received a copy of the GNU General Public License
12 | # along with this program; if not, write to the Free Software
13 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 |
15 | if [[ -o interactive ]]; then
16 | if [ "${ITERM_ENABLE_SHELL_INTEGRATION_WITH_TMUX-}""$TERM" != "tmux-256color" -a "${ITERM_ENABLE_SHELL_INTEGRATION_WITH_TMUX-}""$TERM" != "screen" -a "${ITERM_SHELL_INTEGRATION_INSTALLED-}" = "" -a "$TERM" != linux -a "$TERM" != dumb ]; then
17 | ITERM_SHELL_INTEGRATION_INSTALLED=Yes
18 | ITERM2_SHOULD_DECORATE_PROMPT="1"
19 | # Indicates start of command output. Runs just before command executes.
20 | iterm2_before_cmd_executes() {
21 | if [ "$TERM_PROGRAM" = "iTerm.app" ]; then
22 | printf "\033]133;C;\r\007"
23 | else
24 | printf "\033]133;C;\007"
25 | fi
26 | }
27 |
28 | iterm2_set_user_var() {
29 | printf "\033]1337;SetUserVar=%s=%s\007" "$1" $(printf "%s" "$2" | base64 | tr -d '\n')
30 | }
31 |
32 | # Users can write their own version of this method. It should call
33 | # iterm2_set_user_var but not produce any other output.
34 | # e.g., iterm2_set_user_var currentDirectory $PWD
35 | # Accessible in iTerm2 (in a badge now, elsewhere in the future) as
36 | # \(user.currentDirectory).
37 | whence -v iterm2_print_user_vars > /dev/null 2>&1
38 | if [ $? -ne 0 ]; then
39 | iterm2_print_user_vars() {
40 | true
41 | }
42 | fi
43 |
44 | iterm2_print_state_data() {
45 | local _iterm2_hostname="${iterm2_hostname-}"
46 | if [ -z "${iterm2_hostname:-}" ]; then
47 | _iterm2_hostname=$(hostname -f 2>/dev/null)
48 | fi
49 | printf "\033]1337;RemoteHost=%s@%s\007" "$USER" "${_iterm2_hostname-}"
50 | printf "\033]1337;CurrentDir=%s\007" "$PWD"
51 | iterm2_print_user_vars
52 | }
53 |
54 | # Report return code of command; runs after command finishes but before prompt
55 | iterm2_after_cmd_executes() {
56 | printf "\033]133;D;%s\007" "$STATUS"
57 | iterm2_print_state_data
58 | }
59 |
60 | # Mark start of prompt
61 | iterm2_prompt_mark() {
62 | printf "\033]133;A\007"
63 | }
64 |
65 | # Mark end of prompt
66 | iterm2_prompt_end() {
67 | printf "\033]133;B\007"
68 | }
69 |
70 | # There are three possible paths in life.
71 | #
72 | # 1) A command is entered at the prompt and you press return.
73 | # The following steps happen:
74 | # * iterm2_preexec is invoked
75 | # * PS1 is set to ITERM2_PRECMD_PS1
76 | # * ITERM2_SHOULD_DECORATE_PROMPT is set to 1
77 | # * The command executes (possibly reading or modifying PS1)
78 | # * iterm2_precmd is invoked
79 | # * ITERM2_PRECMD_PS1 is set to PS1 (as modified by command execution)
80 | # * PS1 gets our escape sequences added to it
81 | # * zsh displays your prompt
82 | # * You start entering a command
83 | #
84 | # 2) You press ^C while entering a command at the prompt.
85 | # The following steps happen:
86 | # * (iterm2_preexec is NOT invoked)
87 | # * iterm2_precmd is invoked
88 | # * iterm2_before_cmd_executes is called since we detected that iterm2_preexec was not run
89 | # * (ITERM2_PRECMD_PS1 and PS1 are not messed with, since PS1 already has our escape
90 | # sequences and ITERM2_PRECMD_PS1 already has PS1's original value)
91 | # * zsh displays your prompt
92 | # * You start entering a command
93 | #
94 | # 3) A new shell is born.
95 | # * PS1 has some initial value, either zsh's default or a value set before this script is sourced.
96 | # * iterm2_precmd is invoked
97 | # * ITERM2_SHOULD_DECORATE_PROMPT is initialized to 1
98 | # * ITERM2_PRECMD_PS1 is set to the initial value of PS1
99 | # * PS1 gets our escape sequences added to it
100 | # * Your prompt is shown and you may begin entering a command.
101 | #
102 | # Invariants:
103 | # * ITERM2_SHOULD_DECORATE_PROMPT is 1 during and just after command execution, and "" while the prompt is
104 | # shown and until you enter a command and press return.
105 | # * PS1 does not have our escape sequences during command execution
106 | # * After the command executes but before a new one begins, PS1 has escape sequences and
107 | # ITERM2_PRECMD_PS1 has PS1's original value.
108 | iterm2_decorate_prompt() {
109 | # This should be a raw PS1 without iTerm2's stuff. It could be changed during command
110 | # execution.
111 | ITERM2_PRECMD_PS1="$PS1"
112 | ITERM2_SHOULD_DECORATE_PROMPT=""
113 |
114 | # Add our escape sequences just before the prompt is shown.
115 | # Use ITERM2_SQUELCH_MARK for people who can't mdoify PS1 directly, like powerlevel9k users.
116 | # This is gross but I had a heck of a time writing a correct if statetment for zsh 5.0.2.
117 | local PREFIX=""
118 | if [[ $PS1 == *"$(iterm2_prompt_mark)"* ]]; then
119 | PREFIX=""
120 | elif [[ "${ITERM2_SQUELCH_MARK-}" != "" ]]; then
121 | PREFIX=""
122 | else
123 | PREFIX="%{$(iterm2_prompt_mark)%}"
124 | fi
125 | PS1="$PREFIX$PS1%{$(iterm2_prompt_end)%}"
126 | ITERM2_DECORATED_PS1="$PS1"
127 | }
128 |
# precmd hook: runs after a command finishes, before the next prompt is drawn.
# Reports the previous command's exit status to iTerm2 and (re-)decorates PS1
# if needed.
iterm2_precmd() {
  # Capture $? first; any command executed below would clobber it.
  local STATUS="$?"
  if [ -z "${ITERM2_SHOULD_DECORATE_PROMPT-}" ]; then
    # You pressed ^C while entering a command (iterm2_preexec did not run)
    iterm2_before_cmd_executes
    if [ "$PS1" != "${ITERM2_DECORATED_PS1-}" ]; then
      # PS1 changed, perhaps in another precmd. See issue 9938.
      ITERM2_SHOULD_DECORATE_PROMPT="1"
    fi
  fi

  iterm2_after_cmd_executes "$STATUS"

  if [ -n "$ITERM2_SHOULD_DECORATE_PROMPT" ]; then
    iterm2_decorate_prompt
  fi
}
146 |
# preexec hook: runs after you press return, just before the command executes.
# This is not run if you press ^C while entering a command.
iterm2_preexec() {
  # Set PS1 back to its raw value prior to executing the command.
  PS1="$ITERM2_PRECMD_PS1"
  # Mark that the next precmd must re-decorate the (now raw) prompt.
  ITERM2_SHOULD_DECORATE_PROMPT="1"
  iterm2_before_cmd_executes
}
154 |
155 | # If hostname -f is slow on your system set iterm2_hostname prior to
156 | # sourcing this script. We know it is fast on macOS so we don't cache
157 | # it. That lets us handle the hostname changing like when you attach
158 | # to a VPN.
159 | if [ -z "${iterm2_hostname-}" ]; then
160 | if [ "$(uname)" != "Darwin" ]; then
161 | iterm2_hostname=`hostname -f 2>/dev/null`
162 | # Some flavors of BSD (i.e. NetBSD and OpenBSD) don't have the -f option.
163 | if [ $? -ne 0 ]; then
164 | iterm2_hostname=`hostname`
165 | fi
166 | fi
167 | fi
168 |
169 | [[ -z ${precmd_functions-} ]] && precmd_functions=()
170 | precmd_functions=($precmd_functions iterm2_precmd)
171 |
172 | [[ -z ${preexec_functions-} ]] && preexec_functions=()
173 | preexec_functions=($preexec_functions iterm2_preexec)
174 |
175 | iterm2_print_state_data
176 | printf "\033]1337;ShellIntegrationVersion=14;shell=zsh\007"
177 | fi
178 | fi
179 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/conf-files/passwd:
--------------------------------------------------------------------------------
1 | root:x:0:0:root:/root:/usr/bin/zsh
2 | daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
3 | bin:x:2:2:bin:/bin:/usr/sbin/nologin
4 | sys:x:3:3:sys:/dev:/usr/sbin/nologin
5 | sync:x:4:65534:sync:/bin:/bin/sync
6 | games:x:5:60:games:/usr/games:/usr/sbin/nologin
7 | man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
8 | lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
9 | mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
10 | news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
11 | uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
12 | proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
13 | www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
14 | backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
15 | list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
16 | irc:x:39:39:ircd:/run/ircd:/usr/sbin/nologin
17 | gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
18 | nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
19 | _apt:x:100:65534::/nonexistent:/usr/sbin/nologin
20 | sshd:x:101:65534::/run/sshd:/usr/sbin/nologin
21 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/conf-files/thebloke.zsh-theme:
--------------------------------------------------------------------------------
1 | PROMPT="\$(virtualenv_prompt_info)${FG[133]} ${USER}@${HOST}:${FG[117]}%~%{$reset_color%}\$(git_prompt_info)\$(git_prompt_status) ${FG[077]}ᐅ%{$reset_color%} "
2 |
3 | ZSH_THEME_GIT_PROMPT_PREFIX=" ${FG[012]}("
4 | ZSH_THEME_GIT_PROMPT_SUFFIX="${FG[012]})%{$reset_color%}"
5 | ZSH_THEME_GIT_PROMPT_DIRTY=" ${FG[133]}✘"
6 | ZSH_THEME_GIT_PROMPT_CLEAN=" ${FG[118]}✔"
7 |
8 | ZSH_THEME_GIT_PROMPT_ADDED="${FG[082]}✚%{$reset_color%}"
9 | ZSH_THEME_GIT_PROMPT_MODIFIED="${FG[166]}✹%{$reset_color%}"
10 | ZSH_THEME_GIT_PROMPT_DELETED="${FG[160]}✖%{$reset_color%}"
11 | ZSH_THEME_GIT_PROMPT_RENAMED="${FG[220]}➜%{$reset_color%}"
12 | ZSH_THEME_GIT_PROMPT_UNMERGED="${FG[082]}═%{$reset_color%}"
13 | ZSH_THEME_GIT_PROMPT_UNTRACKED="${FG[190]}✭%{$reset_color%}"
14 |
15 | ZSH_THEME_VIRTUALENV_PREFIX=" ["
16 | ZSH_THEME_VIRTUALENV_SUFFIX="]"
17 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/conf-files/tmux.conf:
--------------------------------------------------------------------------------
1 | # Ring the bell if any background window rang a bell
2 | set -g bell-action any
3 |
4 | # Default termtype. If the rcfile sets $TERM, that overrides this value.
5 | set -g default-terminal screen-256color
6 |
7 | # Keep your finger on ctrl, or don't
8 | #bind-key ^D detach-client
9 |
10 | # Create splits and vertical splits
11 | bind-key v split-window -h -p 50 -c "#{pane_current_path}"
12 | bind-key ^V split-window -h -p 50 -c "#{pane_current_path}"
13 | bind-key s split-window -p 50 -c "#{pane_current_path}"
14 | bind-key ^S split-window -p 50 -c "#{pane_current_path}"
15 |
16 | # Pane resize in all four directions using vi bindings.
17 | # Can use these raw but I map them to shift-ctrl- in iTerm.
18 | bind -r H resize-pane -L 5
19 | bind -r J resize-pane -D 5
20 | bind -r K resize-pane -U 5
21 | bind -r L resize-pane -R 5
22 |
23 | # Smart pane switching with awareness of Vim splits.
24 | # See: https://github.com/christoomey/vim-tmux-navigator
25 | is_vim="ps -o state= -o comm= -t '#{pane_tty}' \
26 | | grep -iqE '^[^TXZ ]+ +(\\S+\\/)?g?(view|n?vim?x?)(diff)?$'"
27 | #bind-key -n 'C-h' if-shell "$is_vim" 'send-keys C-h' 'select-pane -L'
28 | #bind-key -n 'C-j' if-shell "$is_vim" 'send-keys C-j' 'select-pane -D'
29 | #bind-key -n 'C-k' if-shell "$is_vim" 'send-keys C-k' 'select-pane -U'
30 | #bind-key -n 'C-l' if-shell "$is_vim" 'send-keys C-l' 'select-pane -R'
31 |
32 | tmux_version='$(tmux -V | sed -En "s/^tmux ([0-9]+(.[0-9]+)?).*/\1/p")'
33 | if-shell -b '[ "$(echo "$tmux_version < 3.0" | bc)" = 1 ]' \
34 | "bind-key -n 'C-\\' if-shell \"$is_vim\" 'send-keys C-\\' 'select-pane -l'"
35 | if-shell -b '[ "$(echo "$tmux_version >= 3.0" | bc)" = 1 ]' \
36 | "bind-key -n 'C-\\' if-shell \"$is_vim\" 'send-keys C-\\\\' 'select-pane -l'"
37 |
38 | bind-key -T copy-mode-vi 'C-h' select-pane -L
39 | bind-key -T copy-mode-vi 'C-j' select-pane -D
40 | bind-key -T copy-mode-vi 'C-k' select-pane -U
41 | bind-key -T copy-mode-vi 'C-l' select-pane -R
42 | bind-key -T copy-mode-vi 'C-\' select-pane -l
43 |
44 | # Use vi keybindings for tmux commandline input.
45 | # Note that to get command mode you need to hit ESC twice...
46 | #set -g status-keys vi
47 |
48 | # Use vi keybindings in copy and choice modes
49 | #setw -g mode-keys vi
50 |
51 | # easily toggle synchronization (mnemonic: e is for echo)
52 | # sends input to all panes in a given window.
53 | bind e setw synchronize-panes on
54 | bind E setw synchronize-panes off
55 |
56 | # set first window to index 1 (not 0) to map more to the keyboard layout...
57 | set-option -g base-index 1
58 | set-window-option -g pane-base-index 1
59 | set-window-option -g mouse on
60 |
61 | # color scheme (styled as vim-powerline)
62 | set -g status-left-length 52
63 | set -g status-right-length 451
64 | set -g status-style fg=white,bg=colour234
65 | set -g pane-border-style fg=colour245
66 | set -g pane-active-border-style fg=colour39
67 | set -g message-style fg=colour16,bg=colour221,bold
68 | set -g status-left '#[fg=colour235,bg=colour252,bold] ❐ #S #[fg=colour252,bg=colour238,nobold]⮀#[fg=colour245,bg=colour238,bold] #(whoami) #[fg=colour238,bg=colour234,nobold]⮀'
69 | set -g window-status-format '#[fg=colour235,bg=colour252,bold] #I #(pwd="#{pane_current_path}"; echo ${pwd####*/}) #W '
70 | set -g window-status-current-format '#[fg=colour234,bg=colour39]⮀#[fg=black,bg=colour39,noreverse,bold] #{?window_zoomed_flag,#[fg=colour228],} #I #(pwd="#{pane_current_path}"; echo ${pwd####*/}) #W #[fg=colour39,bg=colour234,nobold]⮀'
71 | set-option -g status-interval 2
72 |
73 | # Patch for OS X pbpaste and pbcopy under tmux.
74 | #set-option -g default-command "which reattach-to-user-namespace > /dev/null && reattach-to-user-namespace -l $SHELL || $SHELL"
75 |
76 | # No escape time for vi mode
77 | set -sg escape-time 0
78 |
79 | # Screen like binding for last window
80 | unbind l
81 | bind C-a last-window
82 |
83 | # Bigger history
84 | set -g history-limit 50000
85 |
86 | # New windows/pane in $PWD
87 | bind c new-window -c "#{pane_current_path}"
88 |
89 | # Fix key bindings broken in tmux 2.1
90 | set -g assume-paste-time 0
91 |
92 | # force a reload of the config file
93 | unbind r
94 | bind r source-file ~/.tmux.conf \; display "Reloaded!"
95 |
96 | # Local config
97 | if-shell "[ -f ~/.tmux.conf.user ]" 'source ~/.tmux.conf.user'
98 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/conf-files/vimrc:
--------------------------------------------------------------------------------
" Use Vim settings, rather than Vi settings (much better!).
2 | " This must be first, because it changes other options as a side effect.
3 | set nocompatible
4 |
5 | " TODO: this may not be in the correct place. It is intended to allow overriding .
6 | " source ~/.vimrc.before if it exists.
7 | if filereadable(expand("~/.vimrc.before"))
8 | source ~/.vimrc.before
9 | endif
10 |
11 | " ================ General Config ====================
12 |
13 | set number "Line numbers are good
14 | set backspace=indent,eol,start "Allow backspace in insert mode
15 | set history=1000 "Store lots of :cmdline history
16 | set showcmd "Show incomplete cmds down the bottom
17 | set showmode "Show current mode down the bottom
18 | set gcr=a:blinkon0 "Disable cursor blink
19 | set visualbell "No sounds
20 | set autoread "Reload files changed outside vim
21 |
22 | " This makes vim act like all other editors, buffers can
23 | " exist in the background without being in a window.
24 | " http://items.sjbach.com/319/configuring-vim-right
25 | set hidden
26 |
27 | "turn on syntax highlighting
28 | syntax on
29 |
30 | " Change leader to a comma because the backslash is too far away
31 | " That means all \x commands turn into ,x
32 | " The mapleader has to be set before vundle starts loading all
33 | " the plugins.
34 | let mapleader=","
35 |
36 | " =============== Vundle Initialization ===============
37 | " This loads all the plugins specified in ~/.vim/vundles.vim
38 | " Use Vundle plugin to manage all other plugins
39 | if filereadable(expand("~/.vim/vundles.vim"))
40 | source ~/.vim/vundles.vim
41 | endif
42 | au BufNewFile,BufRead *.vundle set filetype=vim
43 |
44 | " ================ Turn Off Swap Files ==============
45 |
46 | set noswapfile
47 | set nobackup
48 | set nowb
49 |
50 | " ================ Persistent Undo ==================
51 | " Keep undo history across sessions, by storing in file.
52 | " Only works all the time.
53 | if has('persistent_undo') && isdirectory(expand('~').'/.vim/backups')
54 | silent !mkdir ~/.vim/backups > /dev/null 2>&1
55 | set undodir=~/.vim/backups
56 | set undofile
57 | endif
58 |
59 | " ================ Indentation ======================
60 |
61 | set autoindent
62 | set smartindent
63 | set smarttab
64 | set shiftwidth=2
65 | set softtabstop=2
66 | set tabstop=2
67 | set expandtab
68 |
69 | " Auto indent pasted text
70 | nnoremap p p=`]
71 | nnoremap P P=`]
72 |
73 | filetype plugin on
74 | filetype indent on
75 |
76 | " Display tabs and trailing spaces visually
77 |
78 | set nowrap "Don't wrap lines
79 | set linebreak "Wrap lines at convenient points
80 |
81 | " ================ Folds ============================
82 |
83 | set foldmethod=indent "fold based on indent
84 | set foldnestmax=3 "deepest fold is 3 levels
85 | set nofoldenable "dont fold by default
86 |
87 | " ================ Completion =======================
88 |
89 | set wildmode=list:longest
90 | set wildmenu "enable ctrl-n and ctrl-p to scroll thru matches
91 | set wildignore=*.o,*.obj,*~ "stuff to ignore when tab completing
92 | set wildignore+=*vim/backups*
93 | set wildignore+=*sass-cache*
94 | set wildignore+=*DS_Store*
95 | set wildignore+=vendor/rails/**
96 | set wildignore+=vendor/cache/**
97 | set wildignore+=*.gem
98 | set wildignore+=log/**
99 | set wildignore+=tmp/**
100 | set wildignore+=*.png,*.jpg,*.gif
101 |
102 | " ================ Scrolling ========================
103 |
104 | set scrolloff=8 "Start scrolling when we're 8 lines away from margins
105 | set sidescrolloff=15
106 | set sidescroll=1
107 |
108 | " ================ Search ===========================
109 |
110 | set incsearch " Find the next match as we type the search
111 | set hlsearch " Highlight searches by default
112 | set ignorecase " Ignore case when searching...
113 | set smartcase " ...unless we type a capital
114 |
115 | " ================ Security ==========================
116 | set modelines=0
117 | set nomodeline
118 |
119 | " ================ Custom Settings ========================
120 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/conf-files/zshrc:
--------------------------------------------------------------------------------
1 | # If you come from bash you might have to change your $PATH.
2 | # export PATH=$HOME/bin:/usr/local/bin:$PATH
3 |
4 | # Path to your oh-my-zsh installation.
5 | export ZSH="$HOME/ohmyzsh"
6 |
7 | HISTFILE=/workspace/.zsh_history
8 |
9 | # Set name of the theme to load --- if set to "random", it will
10 | # load a random theme each time oh-my-zsh is loaded, in which case,
11 | # to know which specific one was loaded, run: echo $RANDOM_THEME
12 | # See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
13 | ZSH_THEME="thebloke"
14 |
15 | # Set list of themes to pick from when loading at random
16 | # Setting this variable when ZSH_THEME=random will cause zsh to load
17 | # a theme from this variable instead of looking in $ZSH/themes/
18 | # If set to an empty array, this variable will have no effect.
19 | # ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
20 |
21 | # Uncomment the following line to use case-sensitive completion.
22 | # CASE_SENSITIVE="true"
23 |
24 | # Uncomment the following line to use hyphen-insensitive completion.
25 | # Case-sensitive completion must be off. _ and - will be interchangeable.
26 | # HYPHEN_INSENSITIVE="true"
27 |
28 | # Uncomment one of the following lines to change the auto-update behavior
29 | # zstyle ':omz:update' mode disabled # disable automatic updates
30 | # zstyle ':omz:update' mode auto # update automatically without asking
31 | zstyle ':omz:update' mode reminder # just remind me to update when it's time
32 |
33 | # Uncomment the following line to change how often to auto-update (in days).
34 | # zstyle ':omz:update' frequency 13
35 |
36 | # Uncomment the following line if pasting URLs and other text is messed up.
37 | # DISABLE_MAGIC_FUNCTIONS="true"
38 |
39 | # Uncomment the following line to disable colors in ls.
40 | # DISABLE_LS_COLORS="true"
41 |
42 | # Uncomment the following line to disable auto-setting terminal title.
43 | # DISABLE_AUTO_TITLE="true"
44 |
45 | # Uncomment the following line to enable command auto-correction.
46 | # ENABLE_CORRECTION="true"
47 |
48 | # Uncomment the following line to display red dots whilst waiting for completion.
49 | # You can also set it to another string to have that shown instead of the default red dots.
50 | # e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f"
51 | # Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765)
52 | COMPLETION_WAITING_DOTS="true"
53 |
54 | # Uncomment the following line if you want to disable marking untracked files
55 | # under VCS as dirty. This makes repository status check for large repositories
56 | # much, much faster.
57 | # DISABLE_UNTRACKED_FILES_DIRTY="true"
58 |
59 | # Uncomment the following line if you want to change the command execution time
60 | # stamp shown in the history command output.
61 | # You can set one of the optional three formats:
62 | # "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
63 | # or set a custom format using the strftime function format specifications,
64 | # see 'man strftime' for details.
65 | HIST_STAMPS="dd.mm.yyyy"
66 |
67 | # Would you like to use another custom folder than $ZSH/custom?
68 | # ZSH_CUSTOM=/path/to/new-custom-folder
69 |
# Which plugins would you like to load?
# Standard plugins can be found in $ZSH/plugins/
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
# NOTE: "tmux" was previously listed twice; each plugin should appear only
# once, otherwise oh-my-zsh sources it twice on startup.
plugins=(git python tmux iterm2 fasd pip virtualenv)

# Tell the oh-my-zsh iterm2 plugin to load iTerm2 shell integration.
zstyle :omz:plugins:iterm2 shell-integration yes

# Do not auto-start tmux with each new shell, and skip compfix's
# insecure-completion-directory checks (we run as root in a container).
export ZSH_TMUX_AUTOSTART=false
export ZSH_DISABLE_COMPFIX=true
81 |
82 | source $ZSH/oh-my-zsh.sh
83 |
84 | LS_COLORS="*~=0;38;2;102;102;102:bd=0;38;2;154;237;254;48;2;51;51;51:ca=0:cd=0;38;2;255;106;193;48;2;51;51;51:di=0;38;2;87;199;255:do=0;38;2;0;0;0;48;2;255;106;193:ex=1;38;2;255;92;87:fi=0:ln=0;38;2;255;106;193:mh=0:mi=0;38;2;0;0;0;48;2;255;92;87:no=0:or=0;38;2;0;0;0;48;2;255;92;87:ow=0:pi=0;38;2;0;0;0;48;2;87;199;255:rs=0:sg=0:so=0;38;2;0;0;0;48;2;255;106;193:st=0:su=0:tw=0:*.a=1;38;2;255;92;87:*.c=0;38;2;90;247;142:*.d=0;38;2;90;247;142:*.h=0;38;2;90;247;142:*.m=0;38;2;90;247;142:*.o=0;38;2;102;102;102:*.p=0;38;2;90;247;142:*.r=0;38;2;90;247;142:*.t=0;38;2;90;247;142:*.z=4;38;2;154;237;254:*.7z=4;38;2;154;237;254:*.as=0;38;2;90;247;142:*.bc=0;38;2;102;102;102:*.bz=4;38;2;154;237;254:*.cc=0;38;2;90;247;142:*.cp=0;38;2;90;247;142:*.cr=0;38;2;90;247;142:*.cs=0;38;2;90;247;142:*.di=0;38;2;90;247;142:*.el=0;38;2;90;247;142:*.ex=0;38;2;90;247;142:*.fs=0;38;2;90;247;142:*.go=0;38;2;90;247;142:*.gv=0;38;2;90;247;142:*.gz=4;38;2;154;237;254:*.hh=0;38;2;90;247;142:*.hi=0;38;2;102;102;102:*.hs=0;38;2;90;247;142:*.jl=0;38;2;90;247;142:*.js=0;38;2;90;247;142:*.ko=1;38;2;255;92;87:*.kt=0;38;2;90;247;142:*.la=0;38;2;102;102;102:*.ll=0;38;2;90;247;142:*.lo=0;38;2;102;102;102:*.md=0;38;2;243;249;157:*.ml=0;38;2;90;247;142:*.mn=0;38;2;90;247;142:*.nb=0;38;2;90;247;142:*.pl=0;38;2;90;247;142:*.pm=0;38;2;90;247;142:*.pp=0;38;2;90;247;142:*.ps=0;38;2;255;92;87:*.py=0;38;2;90;247;142:*.rb=0;38;2;90;247;142:*.rm=0;38;2;255;180;223:*.rs=0;38;2;90;247;142:*.sh=0;38;2;90;247;142:*.so=1;38;2;255;92;87:*.td=0;38;2;90;247;142:*.ts=0;38;2;90;247;142:*.ui=0;38;2;243;249;157:*.vb=0;38;2;90;247;142:*.wv=0;38;2;255;180;223:*.xz=4;38;2;154;237;254:*.aif=0;38;2;255;180;223:*.ape=0;38;2;255;180;223:*.apk=4;38;2;154;237;254:*.arj=4;38;2;154;237;254:*.asa=0;38;2;90;247;142:*.aux=0;38;2;102;102;102:*.avi=0;38;2;255;180;223:*.awk=0;38;2;90;247;142:*.bag=4;38;2;154;237;254:*.bak=0;38;2;102;102;102:*.bat=1;38;2;255;92;87:*.bbl=0;38;2;102;102;102:*.bcf=0;38;2;102;102;102:*.bib=0;38;2;243;249;157:*.bin=
4;38;2;154;237;254:*.blg=0;38;2;102;102;102:*.bmp=0;38;2;255;180;223:*.bsh=0;38;2;90;247;142:*.bst=0;38;2;243;249;157:*.bz2=4;38;2;154;237;254:*.c++=0;38;2;90;247;142:*.cfg=0;38;2;243;249;157:*.cgi=0;38;2;90;247;142:*.clj=0;38;2;90;247;142:*.com=1;38;2;255;92;87:*.cpp=0;38;2;90;247;142:*.css=0;38;2;90;247;142:*.csv=0;38;2;243;249;157:*.csx=0;38;2;90;247;142:*.cxx=0;38;2;90;247;142:*.deb=4;38;2;154;237;254:*.def=0;38;2;90;247;142:*.dll=1;38;2;255;92;87:*.dmg=4;38;2;154;237;254:*.doc=0;38;2;255;92;87:*.dot=0;38;2;90;247;142:*.dox=0;38;2;165;255;195:*.dpr=0;38;2;90;247;142:*.elc=0;38;2;90;247;142:*.elm=0;38;2;90;247;142:*.epp=0;38;2;90;247;142:*.eps=0;38;2;255;180;223:*.erl=0;38;2;90;247;142:*.exe=1;38;2;255;92;87:*.exs=0;38;2;90;247;142:*.fls=0;38;2;102;102;102:*.flv=0;38;2;255;180;223:*.fnt=0;38;2;255;180;223:*.fon=0;38;2;255;180;223:*.fsi=0;38;2;90;247;142:*.fsx=0;38;2;90;247;142:*.gif=0;38;2;255;180;223:*.git=0;38;2;102;102;102:*.gvy=0;38;2;90;247;142:*.h++=0;38;2;90;247;142:*.hpp=0;38;2;90;247;142:*.htc=0;38;2;90;247;142:*.htm=0;38;2;243;249;157:*.hxx=0;38;2;90;247;142:*.ico=0;38;2;255;180;223:*.ics=0;38;2;255;92;87:*.idx=0;38;2;102;102;102:*.ilg=0;38;2;102;102;102:*.img=4;38;2;154;237;254:*.inc=0;38;2;90;247;142:*.ind=0;38;2;102;102;102:*.ini=0;38;2;243;249;157:*.inl=0;38;2;90;247;142:*.ipp=0;38;2;90;247;142:*.iso=4;38;2;154;237;254:*.jar=4;38;2;154;237;254:*.jpg=0;38;2;255;180;223:*.kex=0;38;2;255;92;87:*.kts=0;38;2;90;247;142:*.log=0;38;2;102;102;102:*.ltx=0;38;2;90;247;142:*.lua=0;38;2;90;247;142:*.m3u=0;38;2;255;180;223:*.m4a=0;38;2;255;180;223:*.m4v=0;38;2;255;180;223:*.mid=0;38;2;255;180;223:*.mir=0;38;2;90;247;142:*.mkv=0;38;2;255;180;223:*.mli=0;38;2;90;247;142:*.mov=0;38;2;255;180;223:*.mp3=0;38;2;255;180;223:*.mp4=0;38;2;255;180;223:*.mpg=0;38;2;255;180;223:*.nix=0;38;2;243;249;157:*.odp=0;38;2;255;92;87:*.ods=0;38;2;255;92;87:*.odt=0;38;2;255;92;87:*.ogg=0;38;2;255;180;223:*.org=0;38;2;243;249;157:*.otf=0;38;2;255;180;223:*.out=0;38;2;102;102;102:*.pas
=0;38;2;90;247;142:*.pbm=0;38;2;255;180;223:*.pdf=0;38;2;255;92;87:*.pgm=0;38;2;255;180;223:*.php=0;38;2;90;247;142:*.pid=0;38;2;102;102;102:*.pkg=4;38;2;154;237;254:*.png=0;38;2;255;180;223:*.pod=0;38;2;90;247;142:*.ppm=0;38;2;255;180;223:*.pps=0;38;2;255;92;87:*.ppt=0;38;2;255;92;87:*.pro=0;38;2;165;255;195:*.ps1=0;38;2;90;247;142:*.psd=0;38;2;255;180;223:*.pyc=0;38;2;102;102;102:*.pyd=0;38;2;102;102;102:*.pyo=0;38;2;102;102;102:*.rar=4;38;2;154;237;254:*.rpm=4;38;2;154;237;254:*.rst=0;38;2;243;249;157:*.rtf=0;38;2;255;92;87:*.sbt=0;38;2;90;247;142:*.sql=0;38;2;90;247;142:*.sty=0;38;2;102;102;102:*.svg=0;38;2;255;180;223:*.swf=0;38;2;255;180;223:*.swp=0;38;2;102;102;102:*.sxi=0;38;2;255;92;87:*.sxw=0;38;2;255;92;87:*.tar=4;38;2;154;237;254:*.tbz=4;38;2;154;237;254:*.tcl=0;38;2;90;247;142:*.tex=0;38;2;90;247;142:*.tgz=4;38;2;154;237;254:*.tif=0;38;2;255;180;223:*.tml=0;38;2;243;249;157:*.tmp=0;38;2;102;102;102:*.toc=0;38;2;102;102;102:*.tsx=0;38;2;90;247;142:*.ttf=0;38;2;255;180;223:*.txt=0;38;2;243;249;157:*.vcd=4;38;2;154;237;254:*.vim=0;38;2;90;247;142:*.vob=0;38;2;255;180;223:*.wav=0;38;2;255;180;223:*.wma=0;38;2;255;180;223:*.wmv=0;38;2;255;180;223:*.xcf=0;38;2;255;180;223:*.xlr=0;38;2;255;92;87:*.xls=0;38;2;255;92;87:*.xml=0;38;2;243;249;157:*.xmp=0;38;2;243;249;157:*.yml=0;38;2;243;249;157:*.zip=4;38;2;154;237;254:*.zsh=0;38;2;90;247;142:*.zst=4;38;2;154;237;254:*TODO=1:*hgrc=0;38;2;165;255;195:*.bash=0;38;2;90;247;142:*.conf=0;38;2;243;249;157:*.dart=0;38;2;90;247;142:*.diff=0;38;2;90;247;142:*.docx=0;38;2;255;92;87:*.epub=0;38;2;255;92;87:*.fish=0;38;2;90;247;142:*.flac=0;38;2;255;180;223:*.h264=0;38;2;255;180;223:*.hgrc=0;38;2;165;255;195:*.html=0;38;2;243;249;157:*.java=0;38;2;90;247;142:*.jpeg=0;38;2;255;180;223:*.json=0;38;2;243;249;157:*.less=0;38;2;90;247;142:*.lisp=0;38;2;90;247;142:*.lock=0;38;2;102;102;102:*.make=0;38;2;165;255;195:*.mpeg=0;38;2;255;180;223:*.opus=0;38;2;255;180;223:*.orig=0;38;2;102;102;102:*.pptx=0;38;2;255;92;87:*.psd1=0;38;2;9
0;247;142:*.psm1=0;38;2;90;247;142:*.purs=0;38;2;90;247;142:*.rlib=0;38;2;102;102;102:*.sass=0;38;2;90;247;142:*.scss=0;38;2;90;247;142:*.tbz2=4;38;2;154;237;254:*.tiff=0;38;2;255;180;223:*.toml=0;38;2;243;249;157:*.webm=0;38;2;255;180;223:*.webp=0;38;2;255;180;223:*.woff=0;38;2;255;180;223:*.xbps=4;38;2;154;237;254:*.xlsx=0;38;2;255;92;87:*.yaml=0;38;2;243;249;157:*.cabal=0;38;2;90;247;142:*.cache=0;38;2;102;102;102:*.class=0;38;2;102;102;102:*.cmake=0;38;2;165;255;195:*.dyn_o=0;38;2;102;102;102:*.ipynb=0;38;2;90;247;142:*.mdown=0;38;2;243;249;157:*.patch=0;38;2;90;247;142:*.scala=0;38;2;90;247;142:*.shtml=0;38;2;243;249;157:*.swift=0;38;2;90;247;142:*.toast=4;38;2;154;237;254:*.xhtml=0;38;2;243;249;157:*README=0;38;2;40;42;54;48;2;243;249;157:*passwd=0;38;2;243;249;157:*shadow=0;38;2;243;249;157:*.config=0;38;2;243;249;157:*.dyn_hi=0;38;2;102;102;102:*.flake8=0;38;2;165;255;195:*.gradle=0;38;2;90;247;142:*.groovy=0;38;2;90;247;142:*.ignore=0;38;2;165;255;195:*.matlab=0;38;2;90;247;142:*COPYING=0;38;2;153;153;153:*INSTALL=0;38;2;40;42;54;48;2;243;249;157:*LICENSE=0;38;2;153;153;153:*TODO.md=1:*.desktop=0;38;2;243;249;157:*.gemspec=0;38;2;165;255;195:*Doxyfile=0;38;2;165;255;195:*Makefile=0;38;2;165;255;195:*TODO.txt=1:*setup.py=0;38;2;165;255;195:*.DS_Store=0;38;2;102;102;102:*.cmake.in=0;38;2;165;255;195:*.fdignore=0;38;2;165;255;195:*.kdevelop=0;38;2;165;255;195:*.markdown=0;38;2;243;249;157:*.rgignore=0;38;2;165;255;195:*COPYRIGHT=0;38;2;153;153;153:*README.md=0;38;2;40;42;54;48;2;243;249;157:*configure=0;38;2;165;255;195:*.gitconfig=0;38;2;165;255;195:*.gitignore=0;38;2;165;255;195:*.localized=0;38;2;102;102;102:*.scons_opt=0;38;2;102;102;102:*CODEOWNERS=0;38;2;165;255;195:*Dockerfile=0;38;2;243;249;157:*INSTALL.md=0;38;2;40;42;54;48;2;243;249;157:*README.txt=0;38;2;40;42;54;48;2;243;249;157:*SConscript=0;38;2;165;255;195:*SConstruct=0;38;2;165;255;195:*.gitmodules=0;38;2;165;255;195:*.synctex.gz=0;38;2;102;102;102:*.travis.yml=0;38;2;90;247;142:*INSTALL.txt=0;
38;2;40;42;54;48;2;243;249;157:*LICENSE-MIT=0;38;2;153;153;153:*MANIFEST.in=0;38;2;165;255;195:*Makefile.am=0;38;2;165;255;195:*Makefile.in=0;38;2;102;102;102:*.applescript=0;38;2;90;247;142:*.fdb_latexmk=0;38;2;102;102;102:*CONTRIBUTORS=0;38;2;40;42;54;48;2;243;249;157:*appveyor.yml=0;38;2;90;247;142:*configure.ac=0;38;2;165;255;195:*.clang-format=0;38;2;165;255;195:*.gitattributes=0;38;2;165;255;195:*.gitlab-ci.yml=0;38;2;90;247;142:*CMakeCache.txt=0;38;2;102;102;102:*CMakeLists.txt=0;38;2;165;255;195:*LICENSE-APACHE=0;38;2;153;153;153:*CONTRIBUTORS.md=0;38;2;40;42;54;48;2;243;249;157:*.sconsign.dblite=0;38;2;102;102;102:*CONTRIBUTORS.txt=0;38;2;40;42;54;48;2;243;249;157:*requirements.txt=0;38;2;165;255;195:*package-lock.json=0;38;2;102;102;102:*.CFUserTextEncoding=0;38;2;102;102;102"
85 | export LS_COLORS
86 | # User configuration
87 |
88 | # export MANPATH="/usr/local/man:$MANPATH"
89 |
90 | # You may need to manually set your language environment
91 | # export LANG=en_US.UTF-8
92 |
93 | # Preferred editor for local and remote sessions
94 | # if [[ -n $SSH_CONNECTION ]]; then
95 | # export EDITOR='vim'
96 | # else
97 | # export EDITOR='mvim'
98 | # fi
99 | export EDITOR=nvim
100 |
101 | # Compilation flags
102 | # export ARCHFLAGS="-arch x86_64"
103 |
104 | # Set personal aliases, overriding those provided by oh-my-zsh libs,
105 | # plugins, and themes. Aliases can be placed here, though oh-my-zsh
106 | # users are encouraged to define aliases within the ZSH_CUSTOM folder.
107 | # For a full list of active aliases, run `alias`.
108 | #
109 | # Example aliases
110 | # alias zshconfig="mate ~/.zshrc"
111 | # alias ohmyzsh="mate ~/.oh-my-zsh"
112 |
113 | alias python=python3
114 | alias pip=pip3
115 |
# When a persistent /workspace volume is present, keep the .zsh config
# directory there and point $HOME/.zsh at it via a symlink, so edits
# survive container restarts.
if [[ -d /workspace ]]; then
  if [[ -d /workspace/.zsh ]]; then
    # The volume already holds the config; discard the container-local copy.
    rm -rf $HOME/.zsh
  else
    # First boot with this volume: seed it from the image's copy.
    mv $HOME/.zsh /workspace
  fi
  ln -s /workspace/.zsh $HOME/.zsh
fi
125 |
126 | # Include additional ZSH config files
127 | for zshfile in $HOME/.zsh/*.zsh; do
128 | source "$zshfile"
129 | done
130 |
131 | bindkey "^U" kill-region
132 |
133 | # Make sure CUDA toolkit is in path
134 | export PATH=/usr/local/cuda/bin:"$PATH"
135 |
136 | [[ -d /workspace ]] && cd /workspace
137 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch-conda/start-ssh-only.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Pod entrypoint: optionally enable SSH access, then block forever so the
# container stays alive.

setup_ssh() {
  # Install the user-supplied public key and start the SSH daemon.
  mkdir -p ~/.ssh
  chmod 700 ~/.ssh
  cd ~/.ssh
  echo "${PUBLIC_KEY}" >> authorized_keys
  chmod -R 700 ~/.ssh
  cd /
  service ssh start
}

echo "TheBloke LLM: pod started"
echo "For more info, see: https://github.com/TheBlokeAI/dockerLLM"

# Only configure SSH when a public key was passed in via the environment.
[[ -n $PUBLIC_KEY ]] && setup_ssh

# Keep the container alive indefinitely.
sleep infinity
17 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch/Dockerfile:
--------------------------------------------------------------------------------
# Build arguments selecting the NVIDIA CUDA base image.
ARG CUDA_VERSION="11.8.0"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG DOCKER_FROM=nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION

# Base NVidia CUDA Ubuntu image
FROM $DOCKER_FROM AS base

# Install Python plus openssh, which is our minimum set of required packages.
RUN apt-get update -y && \
    apt-get install -y python3 python3-pip python3-venv && \
    apt-get install -y --no-install-recommends openssh-server openssh-client git git-lfs && \
    python3 -m pip install --upgrade pip && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Make the CUDA toolkit (nvcc etc.) available on PATH.
ENV PATH="/usr/local/cuda/bin:${PATH}"

# Install pytorch
ARG PYTORCH="2.1.0"
ARG CUDA="118"
RUN pip3 install --no-cache-dir -U torch==$PYTORCH torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu$CUDA

# Entrypoint script: starts sshd when PUBLIC_KEY is set, then sleeps forever.
COPY --chmod=755 start-ssh-only.sh /start.sh

WORKDIR /workspace

CMD [ "/start.sh" ]
29 |
--------------------------------------------------------------------------------
/cuda11.8.0-ubuntu22.04-pytorch/start-ssh-only.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Pod entrypoint: optionally install the user's SSH public key and start
# sshd, then block forever so the container stays alive.

echo "pod started"

if [[ $PUBLIC_KEY ]]
then
  mkdir -p ~/.ssh
  chmod 700 ~/.ssh
  cd ~/.ssh
  # Quote the expansion: an unquoted $PUBLIC_KEY undergoes word splitting
  # and glob expansion, which can mangle the key line (e.g. collapse
  # whitespace in the key comment). The conda variant of this script
  # already quotes it; this keeps the two consistent.
  echo "$PUBLIC_KEY" >> authorized_keys
  chmod 700 -R ~/.ssh
  cd /
  service ssh start
fi

# Keep the container running so the pod does not exit.
sleep infinity
17 |
--------------------------------------------------------------------------------
/cuda12.1.1-ubuntu22.04-pytorch/Dockerfile:
--------------------------------------------------------------------------------
# Build arguments selecting the NVIDIA CUDA base image.
ARG CUDA_VERSION="12.1.1"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG DOCKER_FROM=nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION

# Base NVidia CUDA Ubuntu image
FROM $DOCKER_FROM AS base

# Install Python plus openssh, which is our minimum set of required packages.
RUN apt-get update -y && \
    apt-get install -y python3 python3-pip python3-venv && \
    apt-get install -y --no-install-recommends openssh-server openssh-client git git-lfs && \
    python3 -m pip install --upgrade pip && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Make the CUDA toolkit (nvcc etc.) available on PATH.
ENV PATH="/usr/local/cuda/bin:${PATH}"

# Install pytorch
ARG PYTORCH="2.1.1"
ARG CUDA="121"
RUN pip3 install --no-cache-dir -U torch==$PYTORCH torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu$CUDA

# Entrypoint script: starts sshd when PUBLIC_KEY is set, then sleeps forever.
COPY --chmod=755 start-ssh-only.sh /start.sh

WORKDIR /workspace

CMD [ "/start.sh" ]
29 |
--------------------------------------------------------------------------------
/cuda12.1.1-ubuntu22.04-pytorch/start-ssh-only.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Pod entrypoint: optionally install the user's SSH public key and start
# sshd, then block forever so the container stays alive.

echo "pod started"

if [[ $PUBLIC_KEY ]]
then
  mkdir -p ~/.ssh
  chmod 700 ~/.ssh
  cd ~/.ssh
  # Quote the expansion: an unquoted $PUBLIC_KEY undergoes word splitting
  # and glob expansion, which can mangle the key line (e.g. collapse
  # whitespace in the key comment). The conda variant of this script
  # already quotes it; this keeps the two consistent.
  echo "$PUBLIC_KEY" >> authorized_keys
  chmod 700 -R ~/.ssh
  cd /
  service ssh start
fi

# Keep the container running so the pod does not exit.
sleep infinity
17 |
--------------------------------------------------------------------------------
/cuda12.1.1-ubuntu22.04-textgen/Dockerfile:
--------------------------------------------------------------------------------
# Build arguments selecting the pytorch base image built by this repo.
ARG CUDA_VERSION="12.1.1"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG DOCKER_FROM=thebloke/cuda$CUDA_VERSION-ubuntu$UBUNTU_VERSION-pytorch:latest

# Base pytorch image
# Uppercase AS for consistency with the other Dockerfiles in this repo;
# BuildKit warns when FROM/AS keyword casing is mixed (FromAsCasing).
FROM $DOCKER_FROM AS base

WORKDIR /root

# Install text-generation-webui, including all extensions
# Also includes exllama
# We remove the ExLlama automatically installed by text-generation-webui
# so we're always up-to-date with any ExLlama changes, which will auto compile its own extension
RUN git clone https://github.com/oobabooga/text-generation-webui && \
    cd text-generation-webui && \
    pip3 install -r requirements.txt && \
    bash -c 'for req in extensions/*/requirements.txt ; do pip3 install -r "$req" ; done' && \
    #pip3 uninstall -y exllama && \
    mkdir -p repositories && \
    cd repositories && \
    git clone https://github.com/turboderp/exllama && \
    pip3 install -r exllama/requirements.txt

# Install AutoGPTQ, overwriting the version automatically installed by text-generation-webui
# May not be needed any more? But just in case
#RUN pip3 uninstall -y auto-gptq && \
#    pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
29 |
--------------------------------------------------------------------------------
/imgs/RunpodTemplateAPI-TCPPortMapping.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TheBlokeAI/dockerLLM/c8dfb32e7fc7d2bbf17d37f486b574ebe9406727/imgs/RunpodTemplateAPI-TCPPortMapping.png
--------------------------------------------------------------------------------
/imgs/TheBlokeAI.header.800.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TheBlokeAI/dockerLLM/c8dfb32e7fc7d2bbf17d37f486b574ebe9406727/imgs/TheBlokeAI.header.800.jpg
--------------------------------------------------------------------------------
/scripts/build-llama-cpp-python.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Install llama-cpp-python with GPU acceleration, overwriting the version installed by text-generation-webui
# The build happens at boot so the binaries match the CPU features of the host we actually run on.

# Remove the copy that text-generation-webui's requirements.txt pulled in
pip3 uninstall -qy llama-cpp-python

# Common CMake flags: enable cuBLAS and point CMake at the CUDA compiler
CMAKE_ARGS="-DLLAMA_CUBLAS=on -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc"

# llama-cpp-python is otherwise hardcoded to build with AVX2, so on hosts
# whose CPU lacks AVX2 we must explicitly turn it off
if ! python3 /root/scripts/check_avx2.py; then
    CMAKE_ARGS="-DLLAMA_AVX2=OFF ${CMAKE_ARGS}"
fi
export CMAKE_ARGS

export PATH=/usr/local/cuda/bin:"$PATH"
export FORCE_CMAKE=1

if pip3 install --no-cache-dir llama-cpp-python; then
    # Marker file so we don't rebuild on subsequent reboots
    touch /.built.llama-cpp-python
fi
25 |
--------------------------------------------------------------------------------
/scripts/check_avx2.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import sys
3 | import json
4 |
def check_avx2_support():
    """Return True if the host CPU advertises the AVX2 instruction set.

    Parses ``lscpu --json`` output and searches for ``avx2`` in the CPU
    flags. Returns False whenever the flags cannot be determined, so
    callers conservatively build without AVX2 on an unknown CPU.

    Returns:
        bool: True if AVX2 is supported, False otherwise or on any
        parse/lookup failure.
    """
    result = subprocess.run(['lscpu', '--json'], stdout=subprocess.PIPE)
    lscpu_output = result.stdout.decode('utf-8')

    try:
        lscpu_info = json.loads(lscpu_output)
    except json.JSONDecodeError:
        # lscpu failed or emitted something unparseable
        print("Could not parse lscpu output; assuming no AVX2 support.")
        return False

    # Newer util-linux releases nest entries under "children"; walk the
    # whole tree so the "Flags:" field is found in either layout.
    entries = list(lscpu_info.get("lscpu", []))
    while entries:
        info = entries.pop(0)
        if info.get("field") == "Flags:":
            flags = info.get("data", "").split()
            if "avx2" in flags:
                print("This system supports AVX2.")
                return True
            print("This system does not support AVX2.")
            return False
        entries.extend(info.get("children", []))

    # Fix: the original implicitly returned None (falsy) when no "Flags:"
    # field was present; make that fallback explicit and visible.
    print("Could not determine CPU flags; assuming no AVX2 support.")
    return False
20 |
if __name__ == '__main__':
    # Guarding with __name__ means importing this module no longer kills
    # the interpreter; running it as a script behaves exactly as before.
    # Exit status communicates the result to shell callers:
    # 0 = AVX2 supported, 1 = not supported (or undeterminable).
    sys.exit(0 if check_avx2_support() else 1)
25 |
--------------------------------------------------------------------------------
/scripts/download_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import base64
5 | import datetime
6 | import hashlib
7 | import json
8 | import re
9 | import sys
10 | from pathlib import Path
11 |
12 | import requests
13 | import tqdm
14 | from tqdm.contrib.concurrent import thread_map
15 |
def sanitize_model_and_branch_names(model, branch):
    """Normalise a HF model name and validate the branch name.

    Drops a single trailing slash from the model name and defaults the
    branch to "main" when none is supplied.

    Raises:
        ValueError: if the branch contains characters outside
            alphanumerics, period, underscore and dash.
    """
    # e.g. "TheBloke/foo/" -> "TheBloke/foo"
    if model[-1] == '/':
        model = model[:-1]

    if branch is None:
        return model, "main"

    # Restrict branch names to a safe character set
    if not re.match(r"^[a-zA-Z0-9._-]+$", branch):
        raise ValueError("Invalid branch name. Only alphanumeric characters, period, underscore and dash are allowed.")
    return model, branch
27 |
def get_download_links_from_huggingface(model, branch, text_only=False):
    """List the files to fetch for a Hugging Face model repo.

    Walks the HF tree API (paginated via an opaque base64 cursor) and
    classifies each file by name/extension.

    Args:
        model: repo name, e.g. "TheBloke/WizardLM-7B-Uncensored".
        branch: git branch/revision to list.
        text_only: when True, only text files (txt/json/py/md/tokenizer)
            are included in the returned links.

    Returns:
        links: download URLs for the selected files.
        sha256: [filename, sha256-oid] pairs for LFS-tracked files.
        is_lora: True if the repo looks like a LoRA adapter.
    """
    base = "https://huggingface.co"
    page = f"/api/models/{model}/tree/{branch}"
    cursor = b""

    links = []
    sha256 = []
    classifications = []
    has_pytorch = False
    has_pt = False
    has_ggml = False
    has_safetensors = False
    is_lora = False
    while True:
        url = f"{base}{page}" + (f"?cursor={cursor.decode()}" if cursor else "")
        r = requests.get(url, timeout=10)
        r.raise_for_status()

        # Renamed from `dict`, which shadowed the builtin of the same name
        tree = json.loads(r.content)
        if len(tree) == 0:
            break

        for entry in tree:
            fname = entry['path']
            # LoRA adapters ship adapter_config.json / adapter_model.bin
            if not is_lora and fname.endswith(('adapter_config.json', 'adapter_model.bin')):
                is_lora = True

            # Fix: raw strings — the original non-raw patterns contained
            # invalid escape sequences like "\." (DeprecationWarning)
            is_pytorch = re.match(r"(pytorch|adapter)_model.*\.bin", fname)
            is_safetensors = re.match(r".*\.safetensors", fname)
            is_pt = re.match(r".*\.pt", fname)
            is_ggml = re.match(r".*ggml.*\.bin", fname)
            is_tokenizer = re.match(r"(tokenizer|ice).*\.model", fname)
            is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer

            if any((is_pytorch, is_safetensors, is_pt, is_ggml, is_tokenizer, is_text)):
                if 'lfs' in entry:
                    sha256.append([fname, entry['lfs']['oid']])
                if is_text:
                    links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                    classifications.append('text')
                    continue
                if not text_only:
                    links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                    if is_safetensors:
                        has_safetensors = True
                        classifications.append('safetensors')
                    elif is_pytorch:
                        has_pytorch = True
                        classifications.append('pytorch')
                    elif is_pt:
                        has_pt = True
                        classifications.append('pt')
                    elif is_ggml:
                        has_ggml = True
                        classifications.append('ggml')

        # Build the pagination cursor from the last filename in this page
        cursor = base64.b64encode(f'{{"file_name":"{tree[-1]["path"]}"}}'.encode()) + b':50'
        cursor = base64.b64encode(cursor)
        cursor = cursor.replace(b'=', b'%3D')

    # If both pytorch and safetensors are available, download safetensors only
    if (has_pytorch or has_pt) and has_safetensors:
        for i in range(len(classifications) - 1, -1, -1):
            if classifications[i] in ['pytorch', 'pt']:
                links.pop(i)

    return links, sha256, is_lora
96 |
97 |
def get_output_folder(model, branch, is_lora, base_folder=None):
    """Pick the directory the model files will be written to.

    An explicit base_folder always wins; otherwise LoRA adapters go to
    "loras" and full models to "models". (model and branch are accepted
    for interface compatibility but unused here.)
    """
    if base_folder is None:
        base_folder = 'loras' if is_lora else 'models'
    return Path(base_folder)
104 |
105 |
def get_single_file(url, output_folder, start_from_scratch=False):
    """Download one file into output_folder, resuming a partial download.

    If the file already exists and is at least as large as the remote
    content-length, nothing is downloaded; otherwise the download resumes
    from the current size via an HTTP Range request.
    """
    filename = Path(url.rsplit('/', 1)[1])
    output_path = output_folder / filename
    headers = {}
    mode = 'wb'
    if output_path.exists() and not start_from_scratch:
        # Probe the remote size to check whether the download is complete
        probe = requests.get(url, stream=True, timeout=10)
        try:
            total_size = int(probe.headers.get('content-length', 0))
        finally:
            # Fix: the original leaked this streamed connection
            probe.close()
        if output_path.stat().st_size >= total_size:
            return
        # Otherwise, resume the download from where it left off
        headers = {'Range': f'bytes={output_path.stat().st_size}-'}
        mode = 'ab'

    r = requests.get(url, stream=True, headers=headers, timeout=10)
    # Fix: fail loudly on HTTP errors instead of silently saving an
    # HTML error page as model data
    r.raise_for_status()
    with open(output_path, mode) as f:
        total_size = int(r.headers.get('content-length', 0))
        block_size = 1024
        with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True, bar_format='{l_bar}{bar}| {n_fmt:6}/{total_fmt:6} {rate_fmt:6}') as t:
            for data in r.iter_content(block_size):
                t.update(len(data))
                f.write(data)
130 |
131 |
def start_download_threads(file_list, output_folder, start_from_scratch=False, threads=1):
    """Download every URL in file_list using up to `threads` worker threads."""
    # Named inner function instead of a lambda, for readable tracebacks
    def _fetch(url):
        get_single_file(url, output_folder, start_from_scratch=start_from_scratch)

    thread_map(_fetch, file_list, max_workers=threads, disable=True)
134 |
135 |
def download_model_files(model, branch, links, sha256, output_folder, start_from_scratch=False, threads=1):
    """Create output_folder, write download metadata, then fetch all files.

    Metadata (source URL, branch, date, LFS sha256 sums) is only written
    when the folder did not already exist, matching the original behavior.
    """
    if not output_folder.exists():
        # Fix: parents=True — the original bare mkdir() raised
        # FileNotFoundError when the parent directory was missing
        output_folder.mkdir(parents=True)
        with open(output_folder / 'huggingface-metadata.txt', 'w') as f:
            f.write(f'url: https://huggingface.co/{model}\n')
            f.write(f'branch: {branch}\n')
            f.write(f'download date: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n')
            # Join instead of repeated string concatenation
            sha256_str = ''.join(f' {oid} {fname}\n' for fname, oid in sha256)
            if sha256_str != '':
                f.write(f'sha256sum:\n{sha256_str}')

    # Downloading the files
    print(f"Downloading the model to {output_folder}")
    start_download_threads(links, output_folder, start_from_scratch=start_from_scratch, threads=threads)
153 |
154 |
def check_model_files(model, branch, links, sha256, output_folder):
    """Validate previously downloaded files against their recorded sha256 sums.

    Prints one line per file (validated / failed / missing) and a final
    summary. (model, branch and links are accepted for interface
    compatibility but unused here.)
    """
    validated = True
    for fname, expected in sha256:
        fpath = output_folder / fname

        if not fpath.exists():
            print(f"The following file is missing: {fpath}")
            validated = False
            continue

        # Hash in 1 MiB chunks: model files can be tens of GB, so reading
        # the whole file into memory (as the original did) is not viable.
        # Also avoids shadowing the builtin `bytes`.
        hasher = hashlib.sha256()
        with open(fpath, "rb") as f:
            for chunk in iter(lambda: f.read(1024 * 1024), b""):
                hasher.update(chunk)

        if hasher.hexdigest() != expected:
            print(f'Checksum failed: {fname} {expected}')
            validated = False
        else:
            print(f'Checksum validated: {fname} {expected}')

    if validated:
        print('[+] Validated checksums of all model files!')
    else:
        print('[-] Invalid checksums. Rerun download-model.py with the --clean flag.')
179 |
def download_model(model, base_folder, branch="main", threads=1, text_only=False, check=False, start_from_scratch=False):
    """High-level entry point: resolve download links, then fetch or verify.

    Args:
        model: HF repo name, possibly with a trailing slash.
        base_folder: target folder, or None for the models/loras default.
        branch: git branch to download from.
        threads: number of concurrent downloads.
        text_only: only download text files.
        check: verify checksums of already-downloaded files instead of
            downloading.
        start_from_scratch: re-download files instead of resuming
            (new, backward-compatible parameter; default preserves the
            old behavior).
    """
    # Cleaning up the model/branch names
    try:
        model, branch = sanitize_model_and_branch_names(model, branch)
    except ValueError as err_branch:
        print(f"Error: {err_branch}")
        # Fix: bare sys.exit() exits with status 0, signalling success to
        # shell callers even though the branch name was invalid
        sys.exit(1)

    # Getting the download links from Hugging Face
    links, sha256, is_lora = get_download_links_from_huggingface(model, branch, text_only=text_only)

    # Getting the output folder
    output_folder = get_output_folder(model, branch, is_lora, base_folder)

    if check:
        # Check previously downloaded files
        check_model_files(model, branch, links, sha256, output_folder)
    else:
        # Download files
        download_model_files(model, branch, links, sha256, output_folder,
                             start_from_scratch=start_from_scratch, threads=threads)
200 |
if __name__ == '__main__':
    # Command-line interface for downloading a single HF model.
    cli = argparse.ArgumentParser()
    cli.add_argument('model', type=str, default=None, nargs='?')
    cli.add_argument('--branch', type=str, default='main', help='Name of the Git branch to download from.')
    cli.add_argument('--threads', type=int, default=1, help='Number of files to download simultaneously.')
    cli.add_argument('--text-only', action='store_true', help='Only download text files (txt/json).')
    cli.add_argument('--output', type=str, default=None, help='The folder where the model should be saved.')
    cli.add_argument('--clean', action='store_true', help='Does not resume the previous download.')
    cli.add_argument('--check', action='store_true', help='Validates the checksums of model files.')
    opts = cli.parse_args()

    # NOTE(review): --clean is parsed but never forwarded to download_model,
    # so it currently has no effect — confirm whether this is intentional.
    download_model(
        model=opts.model,
        base_folder=opts.output,
        branch=opts.branch,
        threads=opts.threads,
        text_only=opts.text_only,
        check=opts.check,
    )
214 |
--------------------------------------------------------------------------------
/scripts/fetch-model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import sys
3 | import os
4 | import argparse
5 | import subprocess
6 | from urllib.parse import urlparse
7 | from huggingface_hub import model_info
8 |
# Command-line arguments: model (HF repo name or direct URL) and target folder
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('output_folder', type=str)
args = parser.parse_args()

SCRIPT_DIR = "/root/scripts"

model = args.model.strip()
output_folder = args.output_folder

# Retry the download up to 10 times; on success the model file/folder name
# is written to /tmp/text-gen-model for the launcher's --model argument.
success = False
retry_count = 0
while not success and retry_count < 10:
    os.makedirs(output_folder, exist_ok=True)
    os.chdir(output_folder)
    retry_count += 1
    print(f'Downloading {model} to {output_folder}, attempt {retry_count}')
    if model.lower().startswith(('http://', 'https://')):
        # Fix: the original used `'http' in model.lower()`, which also
        # misclassified any HF model name merely containing "http" as a URL
        parsed = urlparse(model)
        # split the path by '/' and get the filename
        filename = parsed.path.split("/")[-1]
        print(f"Passed URL: {model}", flush=True)
        run = subprocess.run(f'/usr/bin/wget --continue --progress=dot:giga "{model}"', shell=True, check=False)
        write = filename
    elif model_info(model).id == model:
        # We've got an HF model, eg 'TheBloke/WizardLM-7B-Uncensored'
        print(f"Passed HF model: {model}", flush=True)
        model_folder = model.replace('/', '_')
        run = subprocess.run(f'{SCRIPT_DIR}/download_model.py --threads 2 --output "{output_folder}/{model_folder}" "{args.model}"', shell=True, check=False)
        write = model_folder
    else:
        print(f"Error, {model} does not seem to be in a supported format.")
        success = False
        break
    if run.returncode == 0:
        # Successful download. Write the model file or folder name to /tmp for use in --model arg
        with open('/tmp/text-gen-model', 'w') as f:
            f.write(write + '\n')
        success = True

# Exit 0 for success, 1 for failure
sys.exit(not success)
52 |
--------------------------------------------------------------------------------
/scripts/restart-text-generation-webui.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Kill the running text-generation-webui server process; the supervising
# start script relaunches it shortly afterwards.

echo -n "Restarting text-generation-webui: "

if ! pkill -f "python3 server.py"; then
    # pkill returns non-zero when no matching process existed
    echo "was not running"
else
    echo "DONE"
    echo "The UI will auto-restart in 2 seconds"
fi
12 |
--------------------------------------------------------------------------------
/scripts/run-text-generation-webui.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Launch text-generation-webui with any template-provided arguments plus
# the flags needed for remote access (--listen) and the API (--api).

# Fix: abort if the directory is missing (ShellCheck SC2164) instead of
# launching server.py from whatever the current directory happens to be
cd /workspace/text-generation-webui || exit 1

# Edit these arguments if you want to customise text-generation-webui launch.
# Don't remove "$@" from the start unless you want to prevent automatic model loading from template arguments
ARGS=("$@" --listen --api)

echo "Launching text-generation-webui with args: ${ARGS[@]}"

python3 server.py "${ARGS[@]}"
11 |
--------------------------------------------------------------------------------
/scripts/start-ssh-only.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Runpod entrypoint for an SSH-only pod: install the user's public key
# (when supplied via the PUBLIC_KEY env var), start sshd, then idle
# forever so the container stays alive.

echo "pod started"

if [[ $PUBLIC_KEY ]]
then
    mkdir -p ~/.ssh
    chmod 700 ~/.ssh
    # Quote the expansion so the key is appended verbatim, without
    # word-splitting or glob expansion (ShellCheck SC2086)
    echo "$PUBLIC_KEY" >> ~/.ssh/authorized_keys
    # Fix: the original `chmod 700 -R ~/.ssh` made authorized_keys
    # executable; convention is 700 for the directory, 600 for the file
    chmod 700 ~/.ssh
    chmod 600 ~/.ssh/authorized_keys
    service ssh start
fi

sleep infinity
17 |
--------------------------------------------------------------------------------
/scripts/textgen-on-workspace.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Move text-generation-webui onto the persistent /workspace volume and keep
# a symlink at /root/text-generation-webui so it stays reachable from both paths.

# Ensure we have /workspace in all scenarios
mkdir -p /workspace

if [[ ! -d /workspace/text-generation-webui ]]; then
    # If we don't already have /workspace/text-generation-webui, move it there
    mv /root/text-generation-webui /workspace
else
    # otherwise delete the default text-generation-webui folder which is always re-created on pod start from the Docker
    rm -rf /root/text-generation-webui
fi

# Then link /root/text-generation-webui folder to /workspace so it's available in that familiar location as well
# Fix: -sfn makes the link idempotent — replace a stale link instead of
# failing when one already exists (e.g. after a pod restart)
ln -sfn /workspace/text-generation-webui /root/text-generation-webui
16 |
--------------------------------------------------------------------------------
/wheels/torch-2.0.0a0+gite9ebda2-cp310-cp310-linux_x86_64.whl:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:287bf7a4359ac676434656e64f4ed3e27899f39bee6f5b2750ee262386ad3d3c
3 | size 355170953
4 |
--------------------------------------------------------------------------------