├── .dockerignore ├── chat-templates └── mistral.jinja ├── .github └── workflows │ ├── create-release.yml │ └── build-push.yml ├── test-image.sh ├── Dockerfile.tpu ├── README.md ├── Dockerfile.cpu ├── Dockerfile.cuda-arm └── LICENSE /.dockerignore: -------------------------------------------------------------------------------- 1 | test-image.sh -------------------------------------------------------------------------------- /chat-templates/mistral.jinja: -------------------------------------------------------------------------------- 1 | {# source: https://github.com/vllm-project/vllm/discussions/2112#discussioncomment-9224099 #} 2 | {%- for message in messages %} 3 | {%- if message['role'] == 'system' -%} 4 | {{- message['content'] -}} 5 | {%- else -%} 6 | {%- if message['role'] == 'user' -%} 7 | {{-'[INST] ' + message['content'].rstrip() + ' [/INST]'-}} 8 | {%- else -%} 9 | {{-'' + message['content'] + '' -}} 10 | {%- endif -%} 11 | {%- endif -%} 12 | {%- endfor -%} 13 | {%- if add_generation_prompt -%} 14 | {{-''-}} 15 | {%- endif -%} -------------------------------------------------------------------------------- /.github/workflows/create-release.yml: -------------------------------------------------------------------------------- 1 | name: Create GitHub Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | 8 | permissions: 9 | contents: write 10 | 11 | jobs: 12 | release: 13 | name: Release pushed tag 14 | runs-on: ubuntu-22.04 15 | steps: 16 | - name: Create release 17 | env: 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | tag: ${{ github.ref_name }} 20 | run: | 21 | gh release create "$tag" \ 22 | --repo="$GITHUB_REPOSITORY" \ 23 | --title="${GITHUB_REPOSITORY#*/} ${tag#v}" \ 24 | --generate-notes 25 | -------------------------------------------------------------------------------- /test-image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xe 4 | 5 | 
IMAGE_TAG="${IMAGE_TAG:-vllm/vllm-openai:latest}" 6 | MODEL_NAME="${MODEL_NAME:-facebook/opt-125m}" 7 | 8 | docker run --rm -d --name vllm -p 8000:8000 ${IMAGE_TAG} \ 9 | --model ${MODEL_NAME} ${ARGS} 10 | 11 | # Wait for up to 120 seconds for the Docker container to be ready 12 | echo "Waiting for the container to be ready..." 13 | timeout=120 14 | while ! curl -sf http://localhost:8000/v1/models; do 15 | sleep 5 16 | timeout=$((timeout-5)) 17 | if [ "$timeout" -le 0 ]; then 18 | echo "Timed out waiting for container to respond." 19 | docker logs vllm 20 | exit 1 21 | fi 22 | done 23 | echo "Container is ready." 24 | 25 | # Use MODEL_NAME (not a hard-coded model) so the test exercises whichever model was served. 26 | # The `|| CURL_EXIT_CODE=$?` capture is required: under `set -e` a bare failing curl 27 | # would abort the script before we could dump the container logs below. 28 | curl -v http://localhost:8000/v1/completions \ 26 | -H "Content-Type: application/json" \ 27 | -d '{ 28 | "model": "'"${MODEL_NAME}"'", 29 | "prompt": "San Francisco is a", 30 | "max_tokens": 7, 31 | "temperature": 0 32 | }' || CURL_EXIT_CODE=$? 33 | CURL_EXIT_CODE=${CURL_EXIT_CODE:-0} 34 | if [ $CURL_EXIT_CODE -ne 0 ]; then 35 | echo "Curl command failed with exit code $CURL_EXIT_CODE" 36 | echo "Outputting Docker logs:" 37 | docker logs vllm 38 | fi 39 | docker stop vllm >/dev/null 2>&1 || true; exit $CURL_EXIT_CODE -------------------------------------------------------------------------------- /Dockerfile.tpu: -------------------------------------------------------------------------------- 1 | ARG NIGHTLY_DATE="20241017" 2 | ARG BASE_IMAGE="us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:nightly_3.10_tpuvm_$NIGHTLY_DATE" 3 | ARG VERSION=0.6.6.post1 4 | FROM $BASE_IMAGE 5 | ARG VERSION 6 | WORKDIR /workspace 7 | 8 | # Install some basic utilities 9 | RUN apt-get update && apt-get install -y \ 10 | git \ 11 | ffmpeg libsm6 libxext6 libgl1 12 | 13 | # Install the TPU and Pallas dependencies. 
14 | RUN --mount=type=cache,target=/root/.cache/pip \ 15 | python3 -m pip install torch_xla[tpu] -f https://storage.googleapis.com/libtpu-releases/index.html 16 | RUN --mount=type=cache,target=/root/.cache/pip \ 17 | python3 -m pip install torch_xla[pallas] -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html 18 | 19 | # Build vLLM. 20 | RUN git clone https://github.com/vllm-project/vllm.git && cd vllm && git checkout v${VERSION} 21 | ENV VLLM_TARGET_DEVICE="tpu" 22 | RUN --mount=type=cache,target=/root/.cache/pip \ 23 | cd /workspace/vllm && \ 24 | python3 -m pip install \ 25 | -r requirements-tpu.txt 26 | RUN cd /workspace/vllm && python3 setup.py develop 27 | 28 | CMD ["/bin/bash"] 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vLLM Docker Container Image 2 | vLLM is a fast and easy-to-use library for LLM inference and serving. 3 | This container image runs the OpenAI API server of vLLM. 4 | 5 | The image is only for TPU and CPU inference. For GPU inference, please use the upstream image from vLLM. 6 | 7 | Image URLs: 8 | 9 | - `substratusai/vllm` (Docker Hub) 10 | - `ghcr.io/substratusai/vllm` (GitHub Container Registry) 11 | 12 | This image only publishes the TPU and CPU images for vLLM: 13 | - `substratusai/vllm:main-tpu` 14 | - `substratusai/vllm:main-cpu` 15 | 16 | There are also tags available such as `v0.6.3-tpu` and `v0.6.3-cpu`. 17 | 18 | Please use the upstream GPU image from vLLM directly: 19 | ``` 20 | vllm/vllm-openai:latest 21 | ``` 22 | 23 | Support the project by adding a star! 
❤️ 24 | 25 | Join us on Discord: 26 | 27 | discord-invite 28 | 29 | 30 | ## Quickstart 31 | Deploy Mistral 7B Instruct using Docker: 32 | ```bash 33 | docker run -d -p 8000:8000 --gpus=all \ 34 | substratusai/vllm \ 35 | --model=mistralai/Mistral-7B-Instruct-v0.1 36 | ``` 37 | 38 | ## Building 39 | ``` 40 | docker build -t ghcr.io/substratusai/vllm . 41 | ``` 42 | -------------------------------------------------------------------------------- /Dockerfile.cpu: -------------------------------------------------------------------------------- 1 | # This vLLM Dockerfile is used to construct image that can build and run vLLM on x86 CPU platform. 2 | ARG VERSION=0.6.4.post1 3 | FROM ubuntu:22.04 AS cpu-test-1 4 | ARG VERSION 5 | 6 | ENV CCACHE_DIR=/root/.cache/ccache 7 | 8 | ENV CMAKE_CXX_COMPILER_LAUNCHER=ccache 9 | 10 | RUN --mount=type=cache,target=/var/cache/apt \ 11 | apt-get update -y \ 12 | && apt-get install -y curl ccache git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev \ 13 | && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \ 14 | && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 15 | 16 | # https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html 17 | # intel-openmp provides additional performance improvement vs. openmp 18 | # tcmalloc provides better memory allocation efficiency, e.g., holding memory in caches to speed up access of commonly-used objects. 
19 | RUN --mount=type=cache,target=/root/.cache/pip \ 20 | pip install intel-openmp 21 | 22 | ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so" 23 | 24 | RUN echo 'ulimit -c 0' >> ~/.bashrc 25 | 26 | RUN pip install intel_extension_for_pytorch==2.5.0 27 | 28 | WORKDIR /workspace 29 | RUN git clone https://github.com/vllm-project/vllm.git && cd vllm && git checkout v${VERSION} 30 | WORKDIR /workspace/vllm 31 | 32 | ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu" 33 | ENV PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL} 34 | RUN --mount=type=cache,target=/root/.cache/pip \ 35 | pip install --upgrade pip && \ 36 | pip install -r requirements-build.txt 37 | 38 | FROM cpu-test-1 AS build 39 | WORKDIR /workspace/vllm 40 | 41 | RUN --mount=type=cache,target=/root/.cache/pip \ 42 | pip install -v -r requirements-cpu.txt 43 | 44 | # Support for building with non-AVX512 vLLM: docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" ... 45 | ARG VLLM_CPU_DISABLE_AVX512 46 | ENV VLLM_CPU_DISABLE_AVX512=${VLLM_CPU_DISABLE_AVX512} 47 | 48 | RUN --mount=type=cache,target=/root/.cache/pip \ 49 | --mount=type=cache,target=/root/.cache/ccache \ 50 | VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel && \ 51 | pip install dist/*.whl && \ 52 | rm -rf dist 53 | 54 | WORKDIR /workspace/ 55 | 56 | RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks 57 | 58 | ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] 59 | -------------------------------------------------------------------------------- /.github/workflows/build-push.yml: -------------------------------------------------------------------------------- 1 | name: Create and publish a Docker image 2 | 3 | # Configures this workflow to run every time a change is pushed to the branch called `release`. 
4 | on: 5 | push: 6 | branches: 7 | - main 8 | tags: 9 | - "v*.*.*" 10 | paths-ignore: 11 | - '**/README.md' 12 | pull_request: 13 | 14 | env: 15 | REGISTRY: ghcr.io 16 | IMAGE_NAME: substratusai/vllm 17 | 18 | jobs: 19 | cpu: 20 | runs-on: ubuntu-latest 21 | # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job. 22 | permissions: 23 | contents: read 24 | packages: write 25 | steps: 26 | - name: Checkout repository 27 | uses: actions/checkout@v4 28 | - name: Set up Docker Buildx 29 | uses: docker/setup-buildx-action@v3 30 | # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. 31 | - name: Log in to the Container registry 32 | if: github.event_name == 'push' 33 | uses: docker/login-action@v3 34 | with: 35 | registry: ${{ env.REGISTRY }} 36 | username: ${{ github.actor }} 37 | password: ${{ secrets.GITHUB_TOKEN }} 38 | - name: Login to docker.io 39 | if: github.event_name == 'push' 40 | uses: docker/login-action@v3 41 | with: 42 | username: ${{ vars.DOCKERHUB_USERNAME }} 43 | password: ${{ secrets.DOCKERHUB_TOKEN }} 44 | - name: Extract metadata (tags, labels) for Docker 45 | id: meta 46 | uses: docker/metadata-action@v5 47 | with: 48 | flavor: | 49 | latest=false 50 | suffix=-cpu 51 | images: | 52 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 53 | ${{ env.IMAGE_NAME }} 54 | - name: Build Docker image 55 | uses: docker/build-push-action@v6 56 | with: 57 | context: . 
58 | file: Dockerfile.cpu 59 | load: true 60 | push: false 61 | tags: ${{ steps.meta.outputs.tags }} 62 | labels: ${{ steps.meta.outputs.labels }} 63 | cache-from: type=gha 64 | cache-to: type=gha,mode=max 65 | - name: Test Docker image using curl 66 | run: | 67 | IMAGE_TAG=$(echo '${{ steps.meta.outputs.tags }}' | head -n 1) \ 68 | bash test-image.sh 69 | - name: Optionally push Docker image 70 | if: github.event_name == 'push' 71 | uses: docker/build-push-action@v6 72 | with: 73 | context: . 74 | file: Dockerfile.cpu 75 | push: true 76 | tags: ${{ steps.meta.outputs.tags }} 77 | labels: ${{ steps.meta.outputs.labels }} 78 | cache-from: type=gha 79 | cache-to: type=gha,mode=max 80 | tpu: 81 | runs-on: ubuntu-latest 82 | # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job. 83 | permissions: 84 | contents: read 85 | packages: write 86 | steps: 87 | - name: Checkout repository 88 | uses: actions/checkout@v4 89 | - name: Set up Docker Buildx 90 | uses: docker/setup-buildx-action@v3 91 | # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. 
92 | - name: Log in to the Container registry 93 | if: github.event_name == 'push' 94 | uses: docker/login-action@v3 95 | with: 96 | registry: ${{ env.REGISTRY }} 97 | username: ${{ github.actor }} 98 | password: ${{ secrets.GITHUB_TOKEN }} 99 | 100 | - name: Login to docker.io 101 | if: github.event_name == 'push' 102 | uses: docker/login-action@v3 103 | with: 104 | username: ${{ vars.DOCKERHUB_USERNAME }} 105 | password: ${{ secrets.DOCKERHUB_TOKEN }} 106 | - name: Extract metadata (tags, labels) for Docker 107 | id: meta 108 | uses: docker/metadata-action@v5 109 | with: 110 | flavor: | 111 | latest=false 112 | suffix=-tpu 113 | images: | 114 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 115 | ${{ env.IMAGE_NAME }} 116 | - name: Build and push Docker image 117 | uses: docker/build-push-action@v6 118 | with: 119 | context: . 120 | file: Dockerfile.tpu 121 | push: ${{ github.event_name == 'push' }} 122 | tags: ${{ steps.meta.outputs.tags }} 123 | labels: ${{ steps.meta.outputs.labels }} 124 | cache-from: type=gha 125 | cache-to: type=gha,mode=max -------------------------------------------------------------------------------- /Dockerfile.cuda-arm: -------------------------------------------------------------------------------- 1 | # Source: https://github.com/LambdaLabsML/vllm-builder 2 | # Had to switch to 12.4.1 because 12.6.3 was causing Segmentation fault when vLLM builds flash-attn 3 | ARG CUDA_VERSION=12.4.1 4 | ARG IMAGE_DISTRO=ubuntu22.04 5 | ARG PYTHON_VERSION=3.12 6 | 7 | # ---------- Builder Base ---------- 8 | FROM nvcr.io/nvidia/cuda:${CUDA_VERSION}-devel-${IMAGE_DISTRO} AS base 9 | 10 | ARG TORCH_CUDA_ARCH_LIST="9.0a" 11 | ENV TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST} 12 | ARG VLLM_FA_CMAKE_GPU_ARCHES="90a-real" 13 | ENV VLLM_FA_CMAKE_GPU_ARCHES=${VLLM_FA_CMAKE_GPU_ARCHES} 14 | ENV UV_HTTP_TIMEOUT=500 15 | 16 | # Update apt packages and install dependencies 17 | ENV DEBIAN_FRONTEND=noninteractive 18 | RUN apt update 19 | RUN apt upgrade -y 20 | RUN 
apt install -y --no-install-recommends \ 21 | curl \ 22 | git \ 23 | libibverbs-dev \ 24 | zlib1g-dev 25 | 26 | # Clean apt cache 27 | RUN apt clean 28 | RUN rm -rf /var/lib/apt/lists/* 29 | RUN rm -rf /var/cache/apt/archives 30 | 31 | # Set compiler paths 32 | ENV CC=/usr/bin/gcc 33 | ENV CXX=/usr/bin/g++ 34 | 35 | # Install uv 36 | RUN curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR=/usr/local/bin sh 37 | 38 | # Setup build workspace 39 | WORKDIR /workspace 40 | 41 | # Prep build venv 42 | ARG PYTHON_VERSION 43 | RUN uv venv -p ${PYTHON_VERSION} --seed --python-preference only-managed 44 | ENV VIRTUAL_ENV=/workspace/.venv 45 | ENV PATH=${VIRTUAL_ENV}/bin:${PATH} 46 | ENV CUDA_HOME=/usr/local/cuda 47 | ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH} 48 | 49 | # Install pytorch nightly 50 | RUN --mount=type=cache,target=/root/.cache/uv \ 51 | uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu126 52 | 53 | FROM base AS build-base 54 | RUN mkdir /wheels 55 | 56 | # Install build deps that aren't in project requirements files 57 | # Make sure to upgrade setuptools to avoid triton build bug 58 | RUN --mount=type=cache,target=/root/.cache/uv \ 59 | uv pip install -U build cmake ninja pybind11 setuptools wheel 60 | 61 | FROM build-base AS build-triton 62 | ARG TRITON_REF=release/3.2.x 63 | ARG TRITON_BUILD_SUFFIX=+cu126 64 | ENV TRITON_WHEEL_VERSION_SUFFIX=${TRITON_BUILD_SUFFIX:-} 65 | RUN git clone https://github.com/triton-lang/triton.git 66 | RUN cd triton && \ 67 | git checkout ${TRITON_REF} && \ 68 | git submodule sync && \ 69 | git submodule update --init --recursive -j 8 && \ 70 | # Unclear why this is needed, makes no sense, but without it kept on hitting Connection timeout downloading the tar 71 | curl -O https://anaconda.org/nvidia/cuda-nvcc/12.4.99/download/linux-aarch64/cuda-nvcc-12.4.99-0.tar.bz2 && \ 72 | uv build python --wheel --no-build-isolation -o /wheels 73 | 74 | FROM build-base 
AS build-xformers 75 | ARG XFORMERS_REF=v0.0.29.post2 76 | ARG XFORMERS_BUILD_VERSION=0.0.29.post2+cu126 77 | ENV BUILD_VERSION=${XFORMERS_BUILD_VERSION:-${XFORMERS_REF#v}} 78 | RUN git clone https://github.com/facebookresearch/xformers.git 79 | RUN cd xformers && \ 80 | git checkout ${XFORMERS_REF} && \ 81 | git submodule sync && \ 82 | git submodule update --init --recursive -j 8 && \ 83 | uv build --wheel --no-build-isolation -o /wheels 84 | 85 | FROM build-base AS build-flashinfer 86 | ARG FLASHINFER_ENABLE_AOT=1 87 | ARG FLASHINFER_REF=v0.2.2.post1 88 | ARG FLASHINFER_BUILD_SUFFIX=cu126 89 | ENV FLASHINFER_LOCAL_VERSION=${FLASHINFER_BUILD_SUFFIX:-} 90 | # Flashinfer only supports sm75+, removing 7.0 from arch list 91 | # ENV TORCH_CUDA_ARCH_LIST='7.5 8.0 8.6 8.9 9.0+PTX' 92 | RUN git clone https://github.com/flashinfer-ai/flashinfer.git 93 | RUN cd flashinfer && \ 94 | git checkout ${FLASHINFER_REF} && \ 95 | git submodule sync && \ 96 | git submodule update --init --recursive -j 8 && \ 97 | NVCC_THREADS=2 \ 98 | MAX_JOBS=32 \ 99 | uv build --wheel --no-build-isolation -o /wheels 100 | # Re-assert the arch list in case the override above is re-enabled (variable names are case-sensitive; ${torch_cuda_arch_list} was undefined and set it to empty) 101 | ENV TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST} 102 | 103 | FROM build-base AS build-vllm 104 | ARG VLLM_REF=v0.8.2 105 | RUN git clone https://github.com/vllm-project/vllm.git 106 | RUN cd vllm && \ 107 | git checkout ${VLLM_REF} && \ 108 | git submodule sync && \ 109 | git submodule update --init --recursive -j 8 && \ 110 | uv pip install -r requirements/build.txt && \ 111 | NVCC_THREADS=2 \ 112 | MAX_JOBS=32 \ 113 | uv build --wheel --no-build-isolation -o /wheels 114 | 115 | FROM base AS vllm-openai 116 | COPY --from=build-flashinfer /wheels/* wheels/ 117 | COPY --from=build-triton /wheels/* wheels/ 118 | COPY --from=build-vllm /wheels/* wheels/ 119 | COPY --from=build-xformers /wheels/* wheels/ 120 | 121 | # Install triton using upstream wheel 122 | # RUN --mount=type=cache,target=/root/.cache/uv \ 123 | # uv 
pip install --index-url https://download.pytorch.org/whl/nightly/cu128 --pre pytorch_triton==3.3.0+gitab727c40; 124 | 125 | # Install and cleanup wheels 126 | RUN --mount=type=cache,target=/root/.cache/uv \ 127 | uv pip install wheels/* 128 | RUN rm -r wheels 129 | 130 | # Install pynvml 131 | RUN --mount=type=cache,target=/root/.cache/uv \ 132 | uv pip install pynvml 133 | 134 | # Add additional packages for vLLM OpenAI 135 | RUN --mount=type=cache,target=/root/.cache/uv \ 136 | uv pip install accelerate hf_transfer 'modelscope!=1.15.0' 'timm==0.9.10' bitsandbytes boto3 runai-model-streamer runai-model-streamer[s3] tensorizer 137 | 138 | # Clean uv cache 139 | RUN uv clean 140 | 141 | # Enable hf-transfer 142 | ENV HF_HUB_ENABLE_HF_TRANSFER=1 143 | ENV VLLM_USAGE_SOURCE=kubeai 144 | 145 | # API server entrypoint 146 | ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------