├── host-index.bat
├── host-index.sh
├── .github
│ ├── dependabot.yml
│ └── workflows
│ │ ├── build-wheels-oobabooga-rocm.yml
│ │ ├── build-everything-tgw.yml
│ │ ├── build-wheels-cpu.yml
│ │ ├── build-wheels-macos.yml
│ │ └── build-wheels-oobabooga.yml
├── index
│ ├── AVX
│ │ ├── rocm5.4.2
│ │ │ └── index.html
│ │ ├── rocm5.5.1
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python
│ │ │ │ └── index.html
│ │ ├── rocm5.5
│ │ │ └── index.html
│ │ ├── rocm5.6.1
│ │ │ └── index.html
│ │ ├── cpu
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu116
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu117
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu118
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu120
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu121
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu122
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ └── index.html
│ ├── AVX2
│ │ ├── rocm5.4.2
│ │ │ └── index.html
│ │ ├── rocm5.5.1
│ │ │ └── index.html
│ │ ├── rocm5.5
│ │ │ └── index.html
│ │ ├── rocm5.6.1
│ │ │ └── index.html
│ │ ├── cpu
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu116
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu117
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu118
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu120
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu121
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu122
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ └── index.html
│ ├── basic
│ │ ├── rocm5.5
│ │ │ └── index.html
│ │ ├── rocm5.4.2
│ │ │ └── index.html
│ │ ├── rocm5.5.1
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python
│ │ │ │ └── index.html
│ │ ├── rocm5.6.1
│ │ │ └── index.html
│ │ ├── cpu
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu116
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu117
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu118
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu120
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu121
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu122
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ └── index.html
│ ├── textgen
│ │ ├── AVX
│ │ │ ├── rocm5.5
│ │ │ │ └── index.html
│ │ │ ├── rocm5.4.2
│ │ │ │ └── index.html
│ │ │ ├── rocm5.5.1
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── rocm5.6.1
│ │ │ │ └── index.html
│ │ │ ├── cu117
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu118
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu120
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu121
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu122
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ └── index.html
│ │ ├── AVX2
│ │ │ ├── rocm5.4.2
│ │ │ │ └── index.html
│ │ │ ├── rocm5.5.1
│ │ │ │ └── index.html
│ │ │ ├── rocm5.5
│ │ │ │ └── index.html
│ │ │ ├── rocm5.6.1
│ │ │ │ └── index.html
│ │ │ ├── cu117
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu118
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu120
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu121
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu122
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ └── index.html
│ │ ├── basic
│ │ │ ├── rocm5.4.2
│ │ │ │ └── index.html
│ │ │ ├── rocm5.5.1
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── rocm5.5
│ │ │ │ └── index.html
│ │ │ ├── rocm5.6.1
│ │ │ │ └── index.html
│ │ │ ├── cu117
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu118
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu120
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu121
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ ├── cu122
│ │ │ │ ├── index.html
│ │ │ │ └── llama-cpp-python-ggml-cuda
│ │ │ │ │ └── index.html
│ │ │ └── index.html
│ │ └── index.html
│ ├── AVX512
│ │ ├── cpu
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu116
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu117
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu118
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu120
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu121
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ ├── cu122
│ │ │ ├── index.html
│ │ │ └── llama-cpp-python-ggml
│ │ │ │ └── index.html
│ │ └── index.html
│ └── index.html
├── LICENSE
├── README.md
├── workflows.md
├── generate-textgen-html.ps1
└── generate-html.ps1
/host-index.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | cd index
4 | python -m http.server 7860
5 |
--------------------------------------------------------------------------------
/host-index.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd index
4 | python -m http.server 7860
5 |
--------------------------------------------------------------------------------
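
These two helper scripts serve the contents of `index/` over HTTP on port 7860, which lets pip treat the generated pages as a package index. A minimal usage sketch (the curl check is illustrative, not part of the repo):

    # Serve the index (use host-index.bat on Windows):
    bash host-index.sh
    # In another terminal, sanity-check the root page; it should list
    # the AVX / AVX2 / AVX512 / basic variant directories:
    curl http://localhost:7860/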
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | schedule:
6 | interval: "daily"
7 |
--------------------------------------------------------------------------------
/index/AVX/rocm5.4.2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/AVX/rocm5.5.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/AVX/rocm5.5/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/AVX/rocm5.6.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/AVX2/rocm5.4.2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/AVX2/rocm5.5.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/AVX2/rocm5.5/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/AVX2/rocm5.6.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/basic/rocm5.5/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/basic/rocm5.4.2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/basic/rocm5.5.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/basic/rocm5.6.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX/rocm5.5/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX/rocm5.4.2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX/rocm5.5.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX/rocm5.6.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/rocm5.4.2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/rocm5.5.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/rocm5.5/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/rocm5.6.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/basic/rocm5.4.2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/basic/rocm5.5.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/basic/rocm5.5/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/basic/rocm5.6.1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 |
6 |
7 |
--------------------------------------------------------------------------------
/index/textgen/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | AVX
5 | AVX2
6 | basic
7 |
8 |
9 |
--------------------------------------------------------------------------------
/index/AVX/cpu/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX/cu116/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX/cu117/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX/cu118/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX/cu120/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX/cu121/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX/cu122/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX2/cpu/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX2/cu116/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX2/cu117/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX2/cu118/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX2/cu120/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX2/cu121/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX2/cu122/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/cpu/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/cu116/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/cu117/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/cu118/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/cu120/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/cu121/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/cu122/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/basic/cpu/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/basic/cu116/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/basic/cu117/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/basic/cu118/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/basic/cu120/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/basic/cu121/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/basic/cu122/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python
5 | llama_cpp_python_ggml
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | AVX
5 | AVX2
6 | AVX512
7 | basic
8 |
9 |
10 |
--------------------------------------------------------------------------------
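
Together these pages form a static, PEP 503-style package index: the root page links to CPU-feature variants (AVX, AVX2, AVX512, basic), each variant page links to CUDA/ROCm/cpu builds, and each leaf `llama-cpp-python*` page lists the actual wheels. A sketch of installing from one such leaf, assuming the index is served locally as above (the pinned version matches the wheel filenames shown later in this dump):

    # index/AVX2/cu117/ maps to http://localhost:7860/AVX2/cu117/
    pip install llama-cpp-python-ggml==0.1.78+cu117 --prefer-binary \
        --extra-index-url http://localhost:7860/AVX2/cu117/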
/index/textgen/AVX/cu117/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu118/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu120/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu121/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu122/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu117/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu118/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu120/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu121/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu122/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu117/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu118/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu120/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu121/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu122/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_cuda
5 | llama_cpp_python_ggml_cuda
6 |
7 |
8 |
--------------------------------------------------------------------------------
/index/AVX512/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | CUDA 11.6
5 | CUDA 11.7
6 | CUDA 11.8
7 | CUDA 12.0
8 | CUDA 12.1
9 | CUDA 12.2
10 | cpu
11 |
12 |
13 |
--------------------------------------------------------------------------------
/index/textgen/AVX/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | CUDA 11.7
5 | CUDA 11.8
6 | CUDA 12.0
7 | CUDA 12.1
8 | CUDA 12.2
9 | ROCm 5.4.2
10 | ROCm 5.5
11 | ROCm 5.5.1
12 | ROCm 5.6.1
13 |
14 |
15 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | CUDA 11.7
5 | CUDA 11.8
6 | CUDA 12.0
7 | CUDA 12.1
8 | CUDA 12.2
9 | ROCm 5.4.2
10 | ROCm 5.5
11 | ROCm 5.5.1
12 | ROCm 5.6.1
13 |
14 |
15 |
--------------------------------------------------------------------------------
/index/textgen/basic/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | CUDA 11.7
5 | CUDA 11.8
6 | CUDA 12.0
7 | CUDA 12.1
8 | CUDA 12.2
9 | ROCm 5.4.2
10 | ROCm 5.5
11 | ROCm 5.5.1
12 | ROCm 5.6.1
13 |
14 |
15 |
--------------------------------------------------------------------------------
/index/AVX/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | CUDA 11.6
5 | CUDA 11.7
6 | CUDA 11.8
7 | CUDA 12.0
8 | CUDA 12.1
9 | CUDA 12.2
10 | ROCm 5.4.2
11 | ROCm 5.5
12 | ROCm 5.5.1
13 | ROCm 5.6.1
14 | cpu
15 |
16 |
17 |
--------------------------------------------------------------------------------
/index/AVX2/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | CUDA 11.6
5 | CUDA 11.7
6 | CUDA 11.8
7 | CUDA 12.0
8 | CUDA 12.1
9 | CUDA 12.2
10 | ROCm 5.4.2
11 | ROCm 5.5
12 | ROCm 5.5.1
13 | ROCm 5.6.1
14 | cpu
15 |
16 |
17 |
--------------------------------------------------------------------------------
/index/basic/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | CUDA 11.6
5 | CUDA 11.7
6 | CUDA 11.8
7 | CUDA 12.0
8 | CUDA 12.1
9 | CUDA 12.2
10 | ROCm 5.4.2
11 | ROCm 5.5
12 | ROCm 5.5.1
13 | ROCm 5.6.1
14 | cpu
15 |
16 |
17 |
--------------------------------------------------------------------------------
/.github/workflows/build-wheels-oobabooga-rocm.yml:
--------------------------------------------------------------------------------
1 | name: Build ROCm Wheels
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | version:
7 | description: 'Version tag of llama-cpp-python to build: v0.2.9'
8 | default: 'v0.2.9'
9 | required: true
10 | type: string
11 | workflow_call:
12 | inputs:
13 | version:
14 | description: 'Version tag of llama-cpp-python to build: v0.2.9'
15 | default: 'v0.2.9'
16 | required: true
17 | type: string
18 |
19 | permissions:
20 | contents: write
21 |
22 | jobs:
23 | build_wheels_rocm:
24 | name: ROCm Wheels
25 | uses: ./.github/workflows/build-wheels-rocm-full.yml
26 | with:
27 | version: ${{ inputs.version }}
28 | config: 'rename:1'
29 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This is free and unencumbered software released into the public domain.
2 |
3 | Anyone is free to copy, modify, publish, use, compile, sell, or
4 | distribute this software, either in source code form or as a compiled
5 | binary, for any purpose, commercial or non-commercial, and by any
6 | means.
7 |
8 | In jurisdictions that recognize copyright laws, the author or authors
9 | of this software dedicate any and all copyright interest in the
10 | software to the public domain. We make this dedication for the benefit
11 | of the public at large and to the detriment of our heirs and
12 | successors. We intend this dedication to be an overt act of
13 | relinquishment in perpetuity of all present and future rights to this
14 | software under copyright law.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 | OTHER DEALINGS IN THE SOFTWARE.
23 |
24 | For more information, please refer to <https://unlicense.org>
25 |
--------------------------------------------------------------------------------
/.github/workflows/build-everything-tgw.yml:
--------------------------------------------------------------------------------
1 | name: Build Everything TGW
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | version:
7 | description: 'Version tag of llama-cpp-python to build: v0.2.14'
8 | default: 'v0.2.14'
9 | required: true
10 | type: string
11 |
12 | permissions:
13 | contents: write
14 |
15 | jobs:
16 | build_textgen_wheels_windows:
17 | name: CUDA Wheels Windows
18 | uses: ./.github/workflows/build-wheels-oobabooga.yml
19 | with:
20 | version: ${{ inputs.version }}
21 | config: 'os:windows-2019'
22 |
23 | build_textgen_wheels_linux:
24 | name: CUDA Wheels Linux
25 | uses: ./.github/workflows/build-wheels-oobabooga.yml
26 | with:
27 | version: ${{ inputs.version }}
28 | config: 'os:ubuntu-22.04'
29 |
30 | build_wheels_tensorcores_windows:
31 | name: Tensor Core Windows
32 | uses: ./.github/workflows/build-wheels-tensorcores.yml
33 | with:
34 | version: ${{ inputs.version }}
35 | config: 'os:windows-2019'
36 |
37 | build_wheels_tensorcores_linux:
38 | name: Tensor Core Linux
39 | uses: ./.github/workflows/build-wheels-tensorcores.yml
40 | with:
41 | version: ${{ inputs.version }}
42 | config: 'os:ubuntu-22.04'
43 |
44 | build_wheels_cpu:
45 | name: CPU-only Wheels
46 | uses: ./.github/workflows/build-wheels-cpu.yml
47 | with:
48 | version: ${{ inputs.version }}
49 |
50 | build_wheels_macos:
51 | name: MacOS Metal Wheels
52 | uses: ./.github/workflows/build-wheels-macos.yml
53 | with:
54 | version: ${{ inputs.version }}
55 |
56 | build_wheels_rocm:
57 | name: ROCm Wheels
58 | uses: ./.github/workflows/build-wheels-oobabooga-rocm.yml
59 | with:
60 | version: ${{ inputs.version }}
61 |
--------------------------------------------------------------------------------
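
This dispatcher fans the single `version` input out to the per-target reusable workflows (CUDA on Windows/Linux, tensor-core builds, CPU-only, macOS Metal, ROCm). Because it defines only a `workflow_dispatch` trigger, it has to be started manually; one way to do that, assuming the GitHub CLI is installed and authenticated (a usage sketch, not something in the repo):

    # From a checkout of this repository (or add -R <owner>/<repo>):
    gh workflow run build-everything-tgw.yml -f version=v0.2.14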
/index/textgen/AVX2/cu117/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu117-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu118/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu118-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu120/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu120-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu121/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu121-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX2/cu122/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu122-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu117/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu117avx-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu118/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu118avx-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu120/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu120avx-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu121/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu121avx-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/AVX/cu122/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu122avx-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu117/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu117basic-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu118/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu118basic-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu120/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu120basic-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu121/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu121basic-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/textgen/basic/cu122/llama-cpp-python-ggml-cuda/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp38-cp38-linux_x86_64.whl
5 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp38-cp38-win_amd64.whl
6 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp39-cp39-linux_x86_64.whl
7 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp39-cp39-win_amd64.whl
8 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp310-cp310-linux_x86_64.whl
9 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp310-cp310-win_amd64.whl
10 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp311-cp311-linux_x86_64.whl
11 | llama_cpp_python_ggml_cuda-0.1.78+cu122basic-cp311-cp311-win_amd64.whl
12 |
13 |
14 |
--------------------------------------------------------------------------------
/index/AVX/cu116/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX/cu117/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX/cu118/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX/cu120/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX/cu121/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX/cu122/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX/cpu/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cpuavx-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cpuavx-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cpuavx-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cpuavx-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cpuavx-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cpuavx-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cpuavx-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cpuavx-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cpuavx-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cpuavx-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/basic/cu116/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/basic/cu117/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/basic/cu118/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/basic/cu120/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/basic/cu121/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/basic/cu122/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX2/cu116/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX2/cu117/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX2/cu118/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX2/cu120/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX2/cu121/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX2/cu122/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX512/cu116/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu116-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu116-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu116-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu116-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu116-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX512/cu117/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu117-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu117-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu117-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu117-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu117-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX512/cu118/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu118-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu118-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu118-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu118-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu118-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX512/cu120/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu120-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu120-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu120-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu120-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu120-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX512/cu121/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu121-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu121-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu121-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu121-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu121-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX512/cu122/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cu122-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cu122-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cu122-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cu122-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cu122-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX2/cpu/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cpuavx2-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/basic/cpu/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cpubasic-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cpubasic-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cpubasic-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cpubasic-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cpubasic-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cpubasic-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cpubasic-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cpubasic-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cpubasic-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cpubasic-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/index/AVX512/cpu/llama-cpp-python-ggml/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp37-cp37m-linux_x86_64.whl
5 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp37-cp37m-win_amd64.whl
6 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp38-cp38-linux_x86_64.whl
7 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp38-cp38-win_amd64.whl
8 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp39-cp39-linux_x86_64.whl
9 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp39-cp39-win_amd64.whl
10 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp310-cp310-linux_x86_64.whl
11 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp310-cp310-win_amd64.whl
12 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp311-cp311-linux_x86_64.whl
13 | llama_cpp_python_ggml-0.1.78+cpuavx512-cp311-cp311-win_amd64.whl
14 |
15 |
16 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # llama-cpp-python cuBLAS wheels
2 | Wheels for [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) compiled with cuBLAS support.
3 |
4 | Requirements:
5 | - Windows x64, Linux x64, or MacOS 11.0+
6 | - CUDA 11.6 - 12.2
7 | - CPython 3.8 - 3.11
8 |
9 | > [!WARNING]
10 | > MacOS 11 and Windows ROCm wheels are unavailable for 0.2.22+.
11 | > This is due to build issues with llama.cpp that are not yet resolved.
12 |
13 | ROCm builds for AMD GPUs: https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/tag/rocm
14 | Metal builds for MacOS 11.0+: https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/tag/metal
15 |
16 | Installation instructions:
17 | ---
18 | To install, you can use this command:
19 | ```
20 | python -m pip install llama-cpp-python --prefer-binary --extra-index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117
21 | ```
22 | This installs the latest llama-cpp-python version available from this index, built for CUDA 11.7. Change `cu117` in the URL to select a different CUDA version.
23 | You can also change `AVX2` to `AVX`, `AVX512` or `basic` based on what your CPU supports.
24 | `basic` is a build without `AVX`, `FMA` and `F16C` instructions for old or basic CPUs.
25 | CPU-only builds are also available by changing `cu117` to `cpu`.
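26 | For example, a CPU-only build for an AVX2 CPU would use:
27 | ```
28 | python -m pip install llama-cpp-python --prefer-binary --extra-index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cpu
29 | ```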
26 |
27 | You can install a specific version with:
28 | ```
29 | python -m pip install llama-cpp-python== --prefer-binary --extra-index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117
30 | ```
31 | An example of installing 0.1.62 for CUDA 12.1 on a CPU without AVX2 support:
32 | ```
33 | python -m pip install llama-cpp-python==0.1.62 --prefer-binary --extra-index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX/cu121
34 | ```
35 | List of available versions:
36 | ```
37 | python -m pip index versions llama-cpp-python --index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117
38 | ```
39 |
40 | If you are replacing an existing installation, you may need to uninstall it before running the command above.
41 | You can also replace the existing version in one command, like so:
42 | ```
43 | python -m pip install llama-cpp-python --force-reinstall --no-deps --index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117
44 | -OR-
45 | python -m pip install llama-cpp-python==0.1.66 --force-reinstall --no-deps --index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117
46 | -OR-
47 | python -m pip install llama-cpp-python --prefer-binary --upgrade --extra-index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117
48 | ```
49 |
50 | Wheels can be manually downloaded from: https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels
51 |
52 | ---
53 | Renamed llama-cpp-python packages are available to ease the transition to GGUF.
54 | The renamed package is installed alongside the main llama-cpp-python package.
55 | This allows applications to maintain GGML support while also supporting GGUF.
56 | ```
57 | python -m pip install llama-cpp-python-ggml --prefer-binary --extra-index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117
58 | ```
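59 | As a rough sketch of how an application might use the two packages side by side (assuming the renamed wheel exposes its module as `llama_cpp_ggml`; check the installed wheel if unsure):
60 | ```
61 | def get_llama_class(model_path: str):
62 |     # GGUF models use the main package; legacy GGML models use the renamed one.
63 |     if model_path.endswith('.gguf'):
64 |         from llama_cpp import Llama
65 |     else:
66 |         from llama_cpp_ggml import Llama  # assumed module name of the renamed package
67 |     return Llama
68 | ```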
59 |
60 | ---
61 | ### All wheels are compiled using GitHub Actions
62 |
--------------------------------------------------------------------------------
/workflows.md:
--------------------------------------------------------------------------------
1 | All workflows are configured to accept a llama-cpp-python release tag to build a specific version of the package.
2 | For the most part, they are written to account for changes in every version since 0.1.62.
3 |
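4 | For example, a specific version can be dispatched manually with the GitHub CLI (an illustrative invocation, not part of the repo):
5 | ```
6 | gh workflow run build-wheels-cpu.yml -f version=v0.2.14
7 | ```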
4 | Primary workflows used for new llama-cpp-python releases
5 | ----
6 | - `build-wheels.yml`
7 | - This workflow builds around 192 wheels for various CUDA, Python and CPU configurations, then calls the `build-wheels-cpu.yml` workflow.
8 | - `build-wheels-full-release.yml`
9 | - This workflow calls these workflows in order: `build-wheels.yml`, `build-wheels-oobabooga.yml`, `build-wheels-rocm-full.yml`, `build-wheels-macos.yml`
10 | - Somewhere around 370 wheels are produced in total, last I checked. This number will likely increase as additional builds, such as MacOS Metal, are eventually included.
11 | - `build-wheels-prioritized-release.yml`
12 | - This workflow is much like `build-wheels-full-release.yml`, except that slightly modified copies of `build-wheels.yml` and `build-wheels-oobabooga.yml` are incorporated directly into the workflow instead of being called.
13 | - It is configured to build the wheels used by [text-generation-webui](https://github.com/oobabooga/text-generation-webui) first, because the workflow's long runtime (currently 5 - 6 hours) was causing significant delays in updating the project.
14 | - `build-wheels-cpu.yml`
15 | - This workflow builds CPU-only wheels for all of the CPU configurations supported by the other workflows.
16 | - It was made because the wheels in the main repo are only built to support the default configuration of `AVX2`.
17 |
18 | ~~These workflows, and their dependents, were recently optimized to significantly reduce run times from 6 hours for the longest down to around 2 hours.~~
19 | These optimizations are incompatible with llama-cpp-python 0.2.X+ because abetlen switched to a build backend that does not support modifying the build process.
20 | Copies of the optimized workflows can be found in the `old_workflows` directory.
21 |
22 | Renamed package workflows
23 | ----
24 | These workflows produce packages renamed under different namespaces so that they can be installed alongside the main package.
25 | - `build-wheels-oobabooga*.yml`
26 | - These workflows build wheels with the package renamed to `llama_cpp_python_cuda`.
27 | - These wheels allow applications to support both CPU and CUDA builds of llama-cpp-python simultaneously.
28 | - As the name implies, this was made for text-generation-webui.
29 | - `build-wheels-ggml*.yml`
30 | - These workflows produce wheels for llama-cpp-python 0.1.78 under the name `llama_cpp_python_ggml` (see the install example below).
31 | - This allows applications to maintain GGML support while updating to newer versions of llama-cpp-python.
32 | - Intended as a temporary measure until more models are converted to GGUF.
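33 | As an example, the renamed CUDA package can be installed from the `textgen` portion of this repo's index (the normalized package name comes from the generator scripts below):
34 | ```
35 | python -m pip install llama-cpp-python-cuda --prefer-binary --extra-index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/textgen/AVX2/cu117
36 | ```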
33 |
34 | Configuration-specific workflows
35 | ----
36 | - `*-basic.yml`
37 | - `*-avx.yml`
38 |
39 | These are copies of other workflows with build matrices limited to specific configurations.
40 | For the most part, I made these to rebuild previous versions of llama-cpp-python as needed to support new configurations that were added to the main workflows.
41 |
42 | Batch build workflows
43 | ----
44 | - `build-wheels-batch-*.yml`
45 |
46 | These workflows accept a comma-separated string of llama-cpp-python release tags.
47 | They then use PowerShell to parse the input and construct a JSON string that is used to form a job matrix.
48 | Associated workflows are then called as needed to build each version.
49 | Only one workflow is executed at a time due to the large number of jobs that can be generated.
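50 | A minimal sketch of that parsing step (variable names are illustrative, not the exact workflow code):
51 | ```
52 | # Turn a comma-separated tag string into a compact JSON job matrix
53 | $tags = 'v0.2.18,v0.2.19,v0.2.20'
54 | $matrix = @{ 'version' = @($tags.Split(',').Trim()) }
55 | Write-Output ('matrix=' + (ConvertTo-Json $matrix -Compress)) >> $env:GITHUB_OUTPUT
56 | ```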
50 |
51 | Experimental workflows used for more specialized builds
52 | ----
53 | - `build-wheel-rocm.yml`
54 | - This workflow builds Linux and Windows wheels for AMD GPUs using ROCm.
55 | - Linux wheels are built using these ROCm versions: `5.4.2 5.5 5.6.1`
56 | - Currently considered experimental until someone with an AMD GPU can confirm that the resulting wheels work.
57 | - `build-wheels-oobabooga-rocm.yml`
58 | - This workflow is much like the previous one, but additionally builds `llama_cpp_python_cuda` wheels.
59 | - `build-wheels-rocm-full.yml`
60 | - This workflow is essentially a combination of the previous two.
61 | - `build-wheels-macos.yml`
62 | - This workflow builds wheels with MacOS Metal support for MacOS 11, 12 and 13 (see the snippet after this list).
63 | - It builds separate wheels for Intel and Apple Silicon CPUs.
64 | - It is currently experimental and may not produce functional Metal wheels; I do not have a Mac to test with, so I can only go off of build logs.
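65 | The core of the Metal build reduces to the following (simplified from the workflow file further down; the real step also pins the compilers, SDK path and deployment target):
66 | ```
67 | export CMAKE_ARGS="-DGGML_NATIVE=off -DGGML_METAL=on"
68 | VERBOSE=1 python -m build --wheel
69 | ```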
65 |
66 | Utility workflows
67 | ----
68 | - `deploy-index.yml`
69 | - This workflow is for deploying the package index to GitHub Pages.
70 | - It is configured to run automatically when the index's HTML files are altered.
71 | - `build-wheels-test.yml`
72 | - This workflow is entirely for testing new workflow code and is changed frequently as needed.
73 |
--------------------------------------------------------------------------------
/.github/workflows/build-wheels-cpu.yml:
--------------------------------------------------------------------------------
1 | name: Build CPU-only Wheels
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | version:
7 | description: 'Version tag of llama-cpp-python to build: v0.2.14'
8 | default: 'v0.2.14'
9 | required: true
10 | type: string
11 | config:
12 | description: 'Override configurations to build: key1:item1-1,item1-2;key2:item2-1,item2-2'
13 | default: 'Default'
14 | required: false
15 | type: string
16 | exclude:
17 | description: 'Exclude build configurations: key1-1:item1-1,key1-2:item1-2;key2-1:item2-1,key2-2:item2-2'
18 | default: 'None'
19 | required: false
20 | type: string
21 | workflow_call:
22 | inputs:
23 | version:
24 | description: 'Version tag of llama-cpp-python to build: v0.2.14'
25 | default: 'v0.2.14'
26 | required: true
27 | type: string
28 | config:
29 | description: 'Configurations to build: key1:item1-1,item1-2;key2:item2-1,item2-2'
30 | default: 'Default'
31 | required: false
32 | type: string
33 | exclude:
34 | description: 'Exclude build configurations: key1-1:item1-1,key1-2:item1-2;key2-1:item2-1,key2-2:item2-2'
35 | default: 'None'
36 | required: false
37 | type: string
38 |
39 | permissions:
40 | contents: write
41 |
42 | jobs:
43 | define_matrix:
44 | name: Define Build Matrix
45 | runs-on: ubuntu-latest
46 | outputs:
47 | matrix: ${{ steps.set-matrix.outputs.matrix }}
48 | defaults:
49 | run:
50 | shell: pwsh
51 | env:
52 | CONFIGIN: ${{ inputs.config }}
53 | EXCLUDEIN: ${{ inputs.exclude }}
54 |
55 | steps:
56 | - name: Define Job Output
57 | id: set-matrix
58 | run: |
59 | $matrix = @{
60 | 'os' = @('ubuntu-22.04', 'windows-2019')
61 | 'pyver' = @("3.11")
62 | 'avx' = @("AVX", "AVX2")
63 | }
64 |
65 | if ($env:CONFIGIN -ne 'Default') {$env:CONFIGIN.split(';').foreach({$matrix[$_.split(':')[0]] = $_.split(':')[1].split(',')})}
66 |
67 | if ($env:EXCLUDEIN -ne 'None') {
68 | $exclusions = @()
69 | $exclusions += $env:EXCLUDEIN.split(';').replace(':','=').replace(',',"`n") | ConvertFrom-StringData
70 | $matrix['exclude'] = $exclusions
71 | }
72 |
73 | $matrixOut = ConvertTo-Json $matrix -Compress
74 | Write-Output ('matrix=' + $matrixOut) >> $env:GITHUB_OUTPUT
75 |
76 | build_wheels:
77 | name: ${{ matrix.os }} ${{ matrix.pyver }} CPU ${{ matrix.avx }}
78 | needs: define_matrix
79 | runs-on: ${{ matrix.os }}
80 | strategy:
81 | matrix: ${{ fromJSON(needs.define_matrix.outputs.matrix) }}
82 | defaults:
83 | run:
84 | shell: pwsh
85 | env:
86 | AVXVER: ${{ matrix.avx }}
87 | PCKGVER: ${{ inputs.version }}
88 |
89 | steps:
90 | - uses: actions/checkout@v4
91 | with:
92 | repository: 'abetlen/llama-cpp-python'
93 | ref: ${{ inputs.version }}
94 | submodules: 'recursive'
95 |
96 | - uses: actions/setup-python@v4
97 | with:
98 | python-version: ${{ matrix.pyver }}
99 |
100 | - name: Setup Mamba
101 | uses: conda-incubator/setup-miniconda@v3.1.0
102 | with:
103 | activate-environment: "TGW"
104 | python-version: ${{ matrix.pyver }}
105 | miniforge-version: latest
106 | add-pip-as-python-dependency: true
107 | auto-activate-base: false
108 |
109 | - name: Install Dependencies
110 | run: |
111 | python -m pip install build wheel
112 |
113 | - name: Build Wheel
114 | run: |
115 | $packageVersion = [version]$env:PCKGVER.TrimStart('v')
116 | $env:VERBOSE = '1'
117 | if ($env:AVXVER -eq 'AVX') {$env:CMAKE_ARGS = '-DGGML_AVX2=off -DGGML_FMA=off -DGGML_F16C=off'}
118 | if ($env:AVXVER -eq 'AVX512') {$env:CMAKE_ARGS = '-DGGML_AVX512=on'}
119 | if ($env:AVXVER -eq 'basic') {$env:CMAKE_ARGS = '-DGGML_AVX=off -DGGML_AVX2=off -DGGML_FMA=off -DGGML_F16C=off'}
120 | if ($packageVersion -gt [version]'0.2.13') {$env:CMAKE_ARGS = "-DGGML_NATIVE=off $env:CMAKE_ARGS"}
121 | $buildtag = "+cpu$env:AVXVER"
122 | if ($packageVersion -lt [version]'0.2.0') {
123 | $env:FORCE_CMAKE = '1'
124 | python -m build --wheel -C--build-option=egg_info "-C--build-option=--tag-build=$buildtag"
125 | } else {
126 | $initpath = Join-Path '.' 'llama_cpp' '__init__.py' -resolve
127 | $initcontent = Get-Content $initpath -raw
128 | $regexstr = '(?s)(?<=__version__ \= ")\d+(?:\.\d+)*(?=")' # locate the bare version string in __init__.py
129 | $regexmatch = [Regex]::Matches($initcontent,$regexstr)
130 | if (!($regexmatch[0].Success)) {throw '__init__.py parsing failed'}
131 | $newinit = $regexmatch[0].Result(('$`' + '$&' + $buildtag + '$''')) # append the local build tag: $` = prefix, $& = matched version, $' = suffix
132 | New-Item $initpath -itemType File -value $newinit -force
133 | python -m build --wheel
134 | }
135 |
136 | - name: Upload files to a GitHub release
137 | id: upload-release
138 | uses: svenstaro/upload-release-action@2.7.0
139 | continue-on-error: true
140 | with:
141 | repo_token: ${{ secrets.GITHUB_TOKEN }}
142 | file: ./dist/*.whl
143 | tag: 'cpu'
144 | file_glob: true
145 | make_latest: false
146 | overwrite: true
147 |
148 | - uses: actions/upload-artifact@v4
149 | if: steps.upload-release.outcome == 'failure'
150 | with:
151 | name: cpu
152 | path: ./dist/*.whl
153 |
--------------------------------------------------------------------------------
/.github/workflows/build-wheels-macos.yml:
--------------------------------------------------------------------------------
1 | name: Build MacOS Wheels
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | version:
7 | description: 'Version tag of llama-cpp-python to build: v0.2.20'
8 | default: 'v0.2.20'
9 | required: true
10 | type: string
11 | config:
12 | description: 'Override configurations to build: key1:item1-1,item1-2;key2:item2-1,item2-2'
13 | default: 'Default'
14 | required: false
15 | type: string
16 | exclude:
17 | description: 'Exclude build configurations: key1-1:item1-1,key1-2:item1-2;key2-1:item2-1,key2-2:item2-2'
18 | default: 'None'
19 | required: false
20 | type: string
21 | workflow_call:
22 | inputs:
23 | version:
24 | description: 'Version tag of llama-cpp-python to build: v0.2.20'
25 | default: 'v0.2.20'
26 | required: true
27 | type: string
28 | config:
29 | description: 'Configurations to build: key1:item1-1,item1-2;key2:item2-1,item2-2'
30 | default: 'Default'
31 | required: false
32 | type: string
33 | exclude:
34 | description: 'Exclude build configurations: key1-1:item1-1,key1-2:item1-2;key2-1:item2-1,key2-2:item2-2'
35 | default: 'None'
36 | required: false
37 | type: string
38 |
39 | permissions:
40 | contents: write
41 |
42 | jobs:
43 | define_matrix:
44 | name: Define Build Matrix
45 | runs-on: ubuntu-latest
46 | outputs:
47 | matrix: ${{ steps.set-matrix.outputs.matrix }}
48 | defaults:
49 | run:
50 | shell: pwsh
51 | env:
52 | CONFIGIN: ${{ inputs.config }}
53 | EXCLUDEIN: ${{ inputs.exclude }}
54 |
55 | steps:
56 | - name: Define Job Output
57 | id: set-matrix
58 | run: |
59 | $matrix = @{
60 | 'os' = @("macos-13", "macos-14", "macos-15")
61 | 'pyver' = @("3.11")
62 | }
63 |
64 | if ($env:CONFIGIN -ne 'Default') {$env:CONFIGIN.split(';').foreach({$matrix[$_.split(':')[0]] = $_.split(':')[1].split(',')})}
65 |
66 | if ($env:EXCLUDEIN -ne 'None') {
67 | $exclusions = @()
68 | $exclusions += $env:EXCLUDEIN.split(';').replace(':','=').replace(',',"`n") | ConvertFrom-StringData
69 | $matrix['exclude'] = $exclusions
70 | }
71 |
72 | $matrixOut = ConvertTo-Json $matrix -Compress
73 | Write-Output ('matrix=' + $matrixOut) >> $env:GITHUB_OUTPUT
74 |
75 | build_wheels:
76 | name: ${{ matrix.os }} Python ${{ matrix.pyver }}
77 | needs: define_matrix
78 | runs-on: ${{ matrix.os }}
79 | strategy:
80 | matrix: ${{ fromJSON(needs.define_matrix.outputs.matrix) }}
81 | env:
82 | OSVER: ${{ matrix.os }}
83 |
84 | steps:
85 | - uses: actions/checkout@v4
86 | with:
87 | repository: 'abetlen/llama-cpp-python'
88 | ref: ${{ inputs.version }}
89 | submodules: 'recursive'
90 |
91 | - uses: actions/setup-python@v4
92 | with:
93 | python-version: ${{ matrix.pyver }}
94 |
95 | - name: Install Dependencies
96 | run: |
97 | python -m pip install build wheel cmake
98 |
99 | - name: Build Wheel
100 | run: |
101 | # Find the actual Xcode path once
102 | XCODE_PATH=$(ls -d /Applications/Xcode*.app | head -1)
103 | echo "Using Xcode at: ${XCODE_PATH}"
104 | XCODEDEV="${XCODE_PATH}/Contents/Developer"
105 | XCODEBINPATH="${XCODEDEV}/Toolchains/XcodeDefault.xctoolchain/usr/bin"
106 |
107 | # Set common arguments
108 | export CMAKE_ARGS="-DGGML_NATIVE=off -DGGML_METAL=on"
109 |
110 | # Always set the SDK root to avoid 'System' library not found errors
111 | export SDKROOT="${XCODEDEV}/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk"
112 |
113 | # Set correct compiler paths
114 | export CC="${XCODEBINPATH}/cc"
115 | export CXX="${XCODEBINPATH}/c++"
116 |
117 | # Set deployment target based on OS version
118 | [[ "$OSVER" == "macos-15" ]] && export MACOSX_DEPLOYMENT_TARGET="15.0"
119 | [[ "$OSVER" == "macos-14" ]] && export MACOSX_DEPLOYMENT_TARGET="14.0"
120 | [[ "$OSVER" == "macos-13" ]] && export MACOSX_DEPLOYMENT_TARGET="13.0"
121 | [[ "$OSVER" == "macos-12" ]] && export MACOSX_DEPLOYMENT_TARGET="12.0"
122 | [[ "$OSVER" == "macos-11" ]] && export MACOSX_DEPLOYMENT_TARGET="11.0"
123 |
124 | # ARM64 build
125 | export CMAKE_OSX_ARCHITECTURES="arm64" && export ARCHFLAGS="-arch arm64"
126 | VERBOSE=1 python -m build --wheel
127 |
128 | # Create aarch64 copies
129 | for file in ./dist/*.whl; do cp "$file" "${file/arm64.whl/aarch64.whl}"; done
130 |
131 | # Explicitly reset the SDK path for the second build
132 | export SDKROOT="${XCODEDEV}/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk"
133 |
134 | # x86_64 build with specific flags
135 | export CMAKE_OSX_ARCHITECTURES="x86_64"
136 | export CMAKE_ARGS="-DGGML_NATIVE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_METAL=on"
137 | export ARCHFLAGS="-arch x86_64"
138 | VERBOSE=1 python -m build --wheel
139 |
140 | - name: Upload files to a GitHub release
141 | id: upload-release
142 | uses: svenstaro/upload-release-action@2.7.0
143 | continue-on-error: true
144 | with:
145 | repo_token: ${{ secrets.GITHUB_TOKEN }}
146 | file: ./dist/*.whl
147 | tag: 'metal'
148 | file_glob: true
149 | make_latest: false
150 | overwrite: true
151 |
152 | - uses: actions/upload-artifact@v4
153 | if: steps.upload-release.outcome == 'failure'
154 | with:
155 | name: macos-wheels
156 | path: ./dist/*.whl
157 |
--------------------------------------------------------------------------------
/generate-textgen-html.ps1:
--------------------------------------------------------------------------------
1 | Set-Location $PSScriptRoot
2 |
3 | $destinationDir = if (Test-Path $(Join-Path $(Resolve-Path '.') 'index')) {Join-Path '.' 'index' -resolve} else {(New-Item 'index' -ItemType 'Directory').fullname}
4 | $destinationDir = if (Test-Path $(Join-Path $destinationDir 'textgen')) {Join-Path $destinationDir 'textgen'} else {(New-Item $(Join-Path $destinationDir 'textgen') -ItemType 'Directory').fullname}
5 | $avxVersions = "AVX","AVX2","basic"
6 | $cudaVersions = "11.7","11.8","12.0","12.1","12.2","rocm5.4.2","rocm5.5","rocm5.5.1","rocm5.6.1"
7 | $packageVersions = (73..74+76..85).foreach({"$_".Insert(0,'0.1.')}) + (0..11+14..20+@(23)).foreach({"$_".Insert(0,'0.2.')})
8 | $pythonVersions = "3.8","3.9","3.10","3.11"
9 | $supportedSystems = 'linux_x86_64','win_amd64'
10 | $wheelSource = 'https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download'
11 | $packageName = 'llama_cpp_python_cuda'
12 | $packageNameNormalized = 'llama-cpp-python-cuda'
13 | $packageNameAlt = 'llama_cpp_python_ggml_cuda'
14 | $packageNameAltNormalized = 'llama-cpp-python-ggml-cuda'
15 | $packageAltVersions = @("0.1.78")
16 |
17 | $avxVersions.foreach({Set-Variable "$_`Dir" $(if (Test-Path $(Join-Path $destinationDir $_)) {Join-Path $destinationDir $_} else {(New-Item $(Join-Path $destinationDir $_) -ItemType 'Directory').fullname})})
18 |
19 | $indexContent = "`n`n `n "
20 | Foreach ($avxVersion in $avxVersions)
21 | {
22 | $wheelURL = $wheelSource.TrimEnd('/') + '/textgen-webui'
23 | $subIndexContent = "`n`n `n "
24 | ForEach ($cudaVersion in $cudaVersions)
25 | {
26 | if ($cudaVersion.StartsWith('rocm')) {$wheelURL = $wheelSource.TrimEnd('/') + '/rocm'}
27 | $cu = if ($cudaVersion.StartsWith('rocm')) {$cudaVersion} else {'cu' + $cudaVersion.replace('.','')}
28 | $cuContent = "`n`n `n "
29 | $cuContentAlt = "`n`n `n "
30 | ForEach ($packageVersion in $packageVersions)
31 | {
32 | if ($avxVersion -eq 'basic' -and $packageVersion -eq '0.1.73') {continue}
33 | if ($cudaVersion.StartsWith('rocm') -and [version]$packageVersion -lt [version]"0.1.80") {continue}
34 | if ($cudaVersion.StartsWith('rocm') -and $avxVersion -ne 'AVX2' -and [version]$packageVersion -lt [version]"0.2.7") {continue}
35 | ForEach ($pythonVersion in $pythonVersions)
36 | {
37 | $pyVer = $pythonVersion.replace('.','')
38 | ForEach ($supportedSystem in $supportedSystems)
39 | {
40 | if ($cudaVersion.StartsWith('rocm') -and [version]$packageVersion -gt [version]"0.2.21" -and $supportedSystem -eq 'win_amd64') {continue}
41 | if ($cudaVersion.StartsWith('rocm') -and $cudaVersion.Split('rocm')[-1] -ne '5.5.1' -and $supportedSystem -eq 'win_amd64') {continue}
42 | if ($cudaVersion.StartsWith('rocm') -and $cudaVersion.Split('rocm')[-1] -eq '5.5.1' -and $supportedSystem -eq 'linux_x86_64') {continue}
43 | if ([version]$packageVersion -gt [version]"0.1.85" -and $supportedSystem -eq 'linux_x86_64') {$supportedSystem = 'manylinux_2_31_x86_64'}
44 | $wheel = if ($avxVersion -eq 'AVX') { "$packageName-$packageVersion+$cu$('avx')-cp$pyVer-cp$pyVer-$supportedSystem.whl"
45 | } elseif ($avxVersion -eq 'basic') { "$packageName-$packageVersion+$cu$('basic')-cp$pyVer-cp$pyVer-$supportedSystem.whl"
46 | } else {"$packageName-$packageVersion+$cu-cp$pyVer-cp$pyVer-$supportedSystem.whl"}
47 | $wheelAlt = if ($avxVersion -eq 'AVX') { "$packageNameAlt-$packageVersion+$cu$('avx')-cp$pyVer-cp$pyVer-$supportedSystem.whl"
48 | } elseif ($avxVersion -eq 'basic') { "$packageNameAlt-$packageVersion+$cu$('basic')-cp$pyVer-cp$pyVer-$supportedSystem.whl"
49 | } else {"$packageNameAlt-$packageVersion+$cu-cp$pyVer-cp$pyVer-$supportedSystem.whl"}
50 | if (!($packageVersion -eq '0.1.73' -and $avxVersion -eq 'AVX')) {$cuContent += "$wheel
`n "}
51 | if ($packageVersion -in $packageAltVersions -and !$cudaVersion.StartsWith('rocm')) {$cuContentAlt += "$wheelAlt
`n "}
52 | }
53 | }
54 | if (!($packageVersion -eq '0.1.73' -and $avxVersion -eq 'AVX')) {$cuContent += "`n "}
55 | if ($packageVersion -in $packageAltVersions) {$cuContentAlt += "`n "}
56 | }
57 | $cuDir = if (Test-Path $(Join-Path $(Get-Variable "$avxVersion`Dir").Value $cu)) {Join-Path $(Get-Variable "$avxVersion`Dir").Value $cu} else {(New-Item $(Join-Path $(Get-Variable "$avxVersion`Dir").Value $cu) -ItemType 'Directory').fullname}
58 | $packageDir = if (Test-Path $(Join-Path $cuDir $packageNameNormalized)) {Join-Path $cuDir $packageNameNormalized} else {(New-Item $(Join-Path $cuDir $packageNameNormalized) -ItemType 'Directory').fullname}
59 | $packageAltDir = if (Test-Path $(Join-Path $cuDir $packageNameAltNormalized)) {Join-Path $cuDir $packageNameAltNormalized} elseif (!$cudaVersion.StartsWith('rocm')) {(New-Item $(Join-Path $cuDir $packageNameAltNormalized) -ItemType 'Directory').fullname}
60 | $cuLabel = if ($cudaVersion.StartsWith('rocm')) {'ROCm'+' '+$cudaVersion.Split('rocm')[-1]} else {"CUDA $cudaVersion"}
61 | $subIndexContent += "$cuLabel
`n "
62 | New-Item $(Join-Path $packageDir "index.html") -itemType File -value $($cuContent.TrimEnd() + "`n `n`n") -force > $null
63 | if (!$cudaVersion.StartsWith('rocm')) {New-Item $(Join-Path $packageAltDir "index.html") -itemType File -value $($cuContentAlt.TrimEnd() + "`n `n`n") -force > $null}
64 | $packageIndexContent = "`n`n `n $packageName
`n"
65 | if (!$cudaVersion.StartsWith('rocm')) {$packageIndexContent += " $packageNameAlt`n"}
66 | New-Item $(Join-Path $cuDir "index.html") -itemType File -value $($packageIndexContent + " `n`n") -force > $null
67 | }
68 | $indexContent += "$avxVersion
`n "
69 | New-Item $(Join-Path $(Get-Variable "$avxVersion`Dir").Value "index.html") -itemType File -value $($subIndexContent.TrimEnd() + "`n `n`n") -force > $null
70 | }
71 | New-Item $(Join-Path $destinationDir "index.html") -itemType File -value $($indexContent.TrimEnd() + "`n `n`n") -force > $null
72 | #"`n`n `n `n `n `n CUDA $cudaVersion
`n `n"
73 |
74 | pause
75 |
--------------------------------------------------------------------------------
/generate-html.ps1:
--------------------------------------------------------------------------------
1 | Set-Location $PSScriptRoot
2 |
3 | $destinationDir = if (Test-Path $(Join-Path $(Resolve-Path '.') 'index')) {Join-Path '.' 'index' -resolve} else {(New-Item 'index' -ItemType 'Directory').fullname}
4 | $avxVersions = "AVX","AVX2","AVX512","basic"
5 | $cudaVersions = "11.6","11.7","11.8","12.0","12.1","12.2","rocm5.4.2","rocm5.5","rocm5.5.1","rocm5.6.1","cpu"
6 | $packageVersions = (@(62)+66..74+76..85).foreach({"$_".Insert(0,'0.1.')}) + (0..11+14..20+@(23)).foreach({"$_".Insert(0,'0.2.')})
7 | $pythonVersions = "3.7","3.8","3.9","3.10","3.11"
8 | $supportedSystems = 'linux_x86_64','win_amd64','macosx_11_0_x86_64','macosx_12_0_x86_64','macosx_13_0_x86_64','macosx_14_0_x86_64','macosx_11_0_arm64','macosx_12_0_arm64','macosx_13_0_arm64','macosx_14_0_arm64','macosx_11_0_aarch64','macosx_12_0_aarch64','macosx_13_0_aarch64','macosx_14_0_aarch64'
9 | $wheelSource = 'https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download'
10 | $packageName = 'llama_cpp_python'
11 | $packageNameNormalized = 'llama-cpp-python'
12 | $packageNameAlt = 'llama_cpp_python_ggml'
13 | $packageNameAltNormalized = 'llama-cpp-python-ggml'
14 | $packageAltVersions = @("0.1.78")
15 |
16 | $avxVersions.foreach({Set-Variable "$_`Dir" $(if (Test-Path $(Join-Path $destinationDir $_)) {Join-Path $destinationDir $_} else {(New-Item $(Join-Path $destinationDir $_) -ItemType 'Directory').fullname})})
17 |
18 | $indexContent = "`n`n `n "
19 | Foreach ($avxVersion in $avxVersions)
20 | {
21 | if ($avxVersion -eq 'AVX2') {$wheelURL = $wheelSource.TrimEnd('/') + '/wheels'} else {$wheelURL = $wheelSource.TrimEnd('/') + "/$avxVersion"}
22 | $wheelMacosURL = $wheelSource.TrimEnd('/') + '/metal'
23 | $subIndexContent = "`n`n `n "
24 | ForEach ($cudaVersion in $cudaVersions)
25 | {
26 | if ($cudaVersion.StartsWith('rocm') -and $avxVersion -eq 'AVX512') {continue}
27 | $cu = if ($cudaVersion -in 'cpu' -or $cudaVersion.StartsWith('rocm')) {$cudaVersion} else {'cu' + $cudaVersion.replace('.','')}
28 | if ($cudaVersion -eq 'cpu') {$wheelURL = $wheelSource.TrimEnd('/') + '/cpu'}
29 | if ($cudaVersion.StartsWith('rocm')) {$wheelURL = $wheelSource.TrimEnd('/') + '/rocm'}
30 | $cuContent = "`n`n `n "
31 | $cuContentAlt = "`n`n `n "
32 | ForEach ($packageVersion in $packageVersions)
33 | {
34 | if (($avxVersion -eq 'basic' -or $cudaVersion -eq 'cpu') -and [version]$packageVersion -lt [version]"0.1.70") {continue}
35 | if ($cudaVersion.StartsWith('rocm') -and [version]$packageVersion -lt [version]"0.1.80") {continue}
36 | if ($cudaVersion.StartsWith('rocm') -and $avxVersion -ne 'AVX2' -and [version]$packageVersion -lt [version]"0.2.7") {continue}
37 | ForEach ($pythonVersion in $pythonVersions)
38 | {
39 | if (($cudaVersion.StartsWith('rocm') -or [version]$packageVersion -gt [version]"0.1.85") -and $pythonVersion -eq "3.7") {continue} # parenthesized: -and outranks -or in PowerShell; skip Python 3.7 for ROCm and for versions after 0.1.85
40 | $pyVer = $pythonVersion.replace('.','')
41 | ForEach ($supportedSystem in $supportedSystems)
42 | {
43 | $doMacos = $avxVersion -eq 'basic' -and $cudaVersion -eq 'cpu' -and $supportedSystem.contains('macosx') -and (($packageVersion -eq '0.1.85' -and !$supportedSystem.contains('macosx_14_0')) -or [version]$packageVersion -gt [version]'0.2.4')
44 | if ([version]$packageVersion -gt '0.2.20' -and $supportedSystem.contains('macosx_11_0')) {$doMacos = $false}
45 | if ($cudaVersion.StartsWith('rocm') -and [version]$packageVersion -gt [version]"0.2.21" -and $supportedSystem -eq 'win_amd64') {continue}
46 | if ($cudaVersion.StartsWith('rocm') -and $cudaVersion.Split('rocm')[-1] -ne '5.5.1' -and $supportedSystem -eq 'win_amd64') {continue}
47 | if ($cudaVersion.StartsWith('rocm') -and $cudaVersion.Split('rocm')[-1] -eq '5.5.1' -and $supportedSystem -eq 'linux_x86_64') {continue}
48 | if ([version]$packageVersion -gt [version]"0.1.85" -and $supportedSystem -eq 'linux_x86_64') {$supportedSystem = 'manylinux_2_31_x86_64'}
49 | $wheelTag = if ($cudaVersion -eq 'cpu' -and !$supportedSystem.contains('macosx')) {"+cpu$($avxVersion.ToLower())"} elseif ($cudaVersion.StartsWith('rocm') -and $avxVersion -ne 'AVX2') {"+$cu$($avxVersion.ToLower())"} elseif (!$supportedSystem.contains('macosx')) {"+$cu"} else {''}
50 | $wheel = if ($pyVer -eq '37') {"$packageName-$packageVersion$wheelTag-cp$pyVer-cp$pyVer`m-$supportedSystem.whl"} else {"$packageName-$packageVersion$wheelTag-cp$pyVer-cp$pyVer-$supportedSystem.whl"}
51 | $wheelAlt = if ($pyVer -eq '37') {"$packageNameAlt-$packageVersion$wheelTag-cp$pyVer-cp$pyVer`m-$supportedSystem.whl"} else {"$packageNameAlt-$packageVersion$wheelTag-cp$pyVer-cp$pyVer-$supportedSystem.whl"}
52 | if (!$supportedSystem.contains('macosx')) {$cuContent += "$wheel
`n "} elseif ($doMacos) {$cuContent += "$wheel
`n "}
53 | if ($packageVersion -in $packageAltVersions -and !$cudaVersion.StartsWith('rocm') -and !$supportedSystem.contains('macosx')) {$cuContentAlt += "$wheelAlt
`n "}
54 | }
55 | }
56 | $cuContent += "`n "
57 | if ($packageVersion -in $packageAltVersions) {$cuContentAlt += "`n "}
58 | }
59 | $cuDir = if (Test-Path $(Join-Path $(Get-Variable "$avxVersion`Dir").Value "$cu")) {Join-Path $(Get-Variable "$avxVersion`Dir").Value "$cu"} else {(New-Item $(Join-Path $(Get-Variable "$avxVersion`Dir").Value "$cu") -ItemType 'Directory').fullname}
60 | $packageDir = if (Test-Path $(Join-Path $cuDir $packageNameNormalized)) {Join-Path $cuDir $packageNameNormalized} else {(New-Item $(Join-Path $cuDir $packageNameNormalized) -ItemType 'Directory').fullname}
61 | $packageAltDir = if (Test-Path $(Join-Path $cuDir $packageNameAltNormalized)) {Join-Path $cuDir $packageNameAltNormalized} elseif (!$cudaVersion.StartsWith('rocm')) {(New-Item $(Join-Path $cuDir $packageNameAltNormalized) -ItemType 'Directory').fullname}
62 | $cuLabel = if ($cudaVersion -eq 'cpu') {$cudaVersion} elseif ($cudaVersion.StartsWith('rocm')) {'ROCm'+' '+$cudaVersion.Split('rocm')[-1]} else {"CUDA $cudaVersion"}
63 | $subIndexContent += "$cuLabel
`n "
64 | New-Item $(Join-Path $packageDir "index.html") -itemType File -value $($cuContent.TrimEnd() + "`n `n`n") -force > $null
65 | if (!$cudaVersion.StartsWith('rocm')) {New-Item $(Join-Path $packageAltDir "index.html") -itemType File -value $($cuContentAlt.TrimEnd() + "`n `n`n") -force > $null}
66 | $packageIndexContent = "`n`n `n $packageName
`n"
67 | if (!$cudaVersion.StartsWith('rocm')) {$packageIndexContent += " $packageNameAlt`n"}
68 | New-Item $(Join-Path $cuDir "index.html") -itemType File -value $($packageIndexContent + " `n`n") -force > $null
69 | if ($avxVersion -eq 'AVX2') {New-Item $(Join-Path $destinationDir "$cu.html") -itemType File -value $($cuContent.TrimEnd() + "`n `n`n") -force > $null}
70 | }
71 | $indexContent += "$avxVersion
`n "
72 | New-Item $(Join-Path $(Get-Variable "$avxVersion`Dir").Value "index.html") -itemType File -value $($subIndexContent.TrimEnd() + "`n `n`n") -force > $null
73 | }
74 | New-Item $(Join-Path $destinationDir "index.html") -itemType File -value $($indexContent.TrimEnd() + "`n