├── src
└── whisperx_api_server
│ ├── __init__.py
│ ├── routers
│ ├── __init__.py
│ ├── misc.py
│ ├── models.py
│ └── transcriptions.py
│ ├── .gitignore
│ ├── api-keys
│ └── keys.json
│ ├── logger.py
│ ├── dependencies.py
│ ├── formatters.py
│ ├── main.py
│ ├── config.py
│ ├── transcriber.py
│ └── models.py
├── constraints.txt
├── requirements-cpu.txt
├── requirements-cuda.txt
├── .dockerignore
├── requirements.txt
├── cuda-docker-entrypoint.sh
├── Dockerfile.cpu
├── compose.yaml
├── .github
└── workflows
│ ├── docker-build-and-push-cpu.yml
│ └── docker-build-and-push-cuda.yml
├── Dockerfile.cuda
├── README.md
├── .gitignore
└── LICENSE
/src/whisperx_api_server/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/whisperx_api_server/routers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/constraints.txt:
--------------------------------------------------------------------------------
1 | torch~=2.6.0
2 | torchaudio~=2.6.0
3 | onnxruntime~=1.21.0
--------------------------------------------------------------------------------
/src/whisperx_api_server/.gitignore:
--------------------------------------------------------------------------------
1 | **/__pycache__/
2 | venv/
3 | env/
4 | .venv/
--------------------------------------------------------------------------------
/src/whisperx_api_server/api-keys/keys.json:
--------------------------------------------------------------------------------
1 | // {
2 | // "some-key": "some-client"
3 | // }
--------------------------------------------------------------------------------
/requirements-cpu.txt:
--------------------------------------------------------------------------------
1 | --extra-index-url https://download.pytorch.org/whl/cpu
2 | torch~=2.6.0
3 | torchaudio~=2.6.0
--------------------------------------------------------------------------------
/requirements-cuda.txt:
--------------------------------------------------------------------------------
1 | --extra-index-url https://download.pytorch.org/whl/cu124
2 | torch~=2.6.0
3 | torchaudio~=2.6.0
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | .git
3 | .gitignore
4 | README.md
5 | models
6 | venv
7 | scripts
8 | .vscode
9 | whisperx/hf-home
10 | whisperx/torch-home
11 | whisperx/test-files
12 | Dockerfile*
13 | .scannerwork
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi>=0.120.4
2 | uvicorn>=0.38.0
3 | pydantic>=2.12.3
4 | pydantic-settings>=2.11.0
5 | whisperx @ git+https://github.com/m-bain/whisperX.git@429658d4ccefa55244bcdccd5d179795436093e4
6 | python-multipart>=0.0.20
--------------------------------------------------------------------------------
/src/whisperx_api_server/routers/misc.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from fastapi import APIRouter
3 | from fastapi.responses import JSONResponse
4 |
5 | from whisperx_api_server.config import (
6 | MediaType,
7 | )
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 | router = APIRouter()
12 |
13 | @router.get(
14 | "/healthcheck",
15 | description="Check the health of the API server",
16 | tags=["Misc"],
17 | )
18 | def health_check():
19 | return JSONResponse(content={"status": "healthy"}, media_type=MediaType.APPLICATION_JSON)
--------------------------------------------------------------------------------
/cuda-docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | readarray -t gpu_free_mem < <(nvidia-smi --query-gpu=memory.free --format=csv,noheader,nounits)
3 |
4 | if [ "${#gpu_free_mem[@]}" -eq 0 ]; then
5 | echo "Error: Could not retrieve GPU memory information from nvidia-smi."
6 | exit 1
7 | fi
8 |
9 | max_free=-1
10 | max_idx=-1
11 |
12 | for i in "${!gpu_free_mem[@]}"; do
13 | mem="${gpu_free_mem[$i]}"
14 | if [ "$mem" -gt "$max_free" ]; then
15 | max_free="$mem"
16 | max_idx="$i"
17 | fi
18 | done
19 |
20 | export CUDA_VISIBLE_DEVICES="$max_idx"
21 | echo "GPU with index $max_idx has the most available memory (${max_free} MiB)."
22 | echo "Setting CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
23 |
24 | exec "$@"
--------------------------------------------------------------------------------
/Dockerfile.cpu:
--------------------------------------------------------------------------------
1 | ARG PYTHON_VERSION=3.10
2 |
3 | FROM python:${PYTHON_VERSION}-slim-bookworm AS base
4 |
5 | ENV DEBIAN_FRONTEND=noninteractive
6 |
7 | RUN apt-get update && apt-get install -y --no-install-recommends \
8 | build-essential \
9 | curl \
10 | ffmpeg \
11 | git \
12 | && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
13 |
14 | WORKDIR /workspace
15 |
16 | FROM base AS python-env
17 |
18 | RUN python3 -m venv /workspace/venv
19 |
20 | ENV PATH="/workspace/venv/bin:$PATH"
21 |
22 | COPY requirements-cpu.txt ./
23 |
24 | RUN pip install --upgrade pip && \
25 | pip install --no-cache-dir -r requirements-cpu.txt
26 |
27 | COPY requirements.txt constraints.txt ./
28 |
29 | RUN pip install --no-cache-dir -c constraints.txt -r requirements.txt
30 |
31 | FROM base AS runtime
32 |
33 | COPY --from=python-env /workspace/venv /workspace/venv
34 |
35 | ENV PATH="/workspace/venv/bin:$PATH"
36 |
37 | WORKDIR /workspace
38 |
39 | COPY src/whisperx_api_server ./whisperx_api_server
40 |
41 | ENV UVICORN_HOST=0.0.0.0
42 | ENV UVICORN_PORT=8000
43 |
44 | CMD ["uvicorn", "--factory", "whisperx_api_server.main:create_app"]
--------------------------------------------------------------------------------
/src/whisperx_api_server/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import logging.config
3 |
4 |
5 | def setup_logger(log_level: str) -> None:
6 | assert log_level.upper() in {
7 | "DEBUG",
8 | "INFO",
9 | "WARNING",
10 | "ERROR",
11 | "CRITICAL",
12 | }, log_level
13 |
14 | logging_config = {
15 | "version": 1, # required
16 | "disable_existing_loggers": False,
17 | "formatters": {
18 | "default": {
19 | "()": "uvicorn.logging.DefaultFormatter",
20 | "fmt": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
21 | "use_colors": True
22 | },
23 | },
24 | "handlers": {
25 | "default": {
26 | "formatter": "default",
27 | "class": "logging.StreamHandler",
28 | "stream": "ext://sys.stdout",
29 | },
30 | },
31 | "loggers": {
32 | "root": {
33 | "level": log_level.upper(),
34 | "handlers": ["default"],
35 | },
36 | },
37 | }
38 |
39 | logging.config.dictConfig(logging_config)
--------------------------------------------------------------------------------
/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | whisperx-api-server-cuda:
3 | image: whisperx-api-server-cuda
4 | build:
5 | context: .
6 | dockerfile: Dockerfile.cuda
7 | healthcheck:
8 | test: ["CMD-SHELL", "curl --fail http://localhost:8000/healthcheck || exit 1"]
9 | command: uvicorn --factory whisperx_api_server.main:create_app
10 | ports:
11 | - 8000:8000
12 | volumes:
13 | - hugging_face_cache:/root/.cache/huggingface
14 | - torch_cache:/root/.cache/torch
15 | deploy:
16 | resources:
17 | reservations:
18 | devices:
19 | - driver: nvidia
20 | count: 1
21 | capabilities: [gpu]
22 | whisperx-api-server-cpu:
23 | image: whisperx-api-server-cpu
24 | build:
25 | context: .
26 | dockerfile: Dockerfile.cpu
27 | healthcheck:
28 | test: ["CMD-SHELL", "curl --fail http://localhost:8000/healthcheck || exit 1"]
29 | command: uvicorn --factory whisperx_api_server.main:create_app
30 | ports:
31 | - 8000:8000
32 | volumes:
33 | - hugging_face_cache:/root/.cache/huggingface
34 | - torch_cache:/root/.cache/torch
35 | volumes:
36 | hugging_face_cache:
37 | torch_cache:
--------------------------------------------------------------------------------
/.github/workflows/docker-build-and-push-cpu.yml:
--------------------------------------------------------------------------------
1 | name: Deploy CPU image to GHCR
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | - 'v[0-9]+.[0-9]+.[0-9]+.[0-9]+'
8 |
9 | jobs:
10 | push-image:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v4
15 | - name: Set up Docker Buildx
16 | uses: docker/setup-buildx-action@v3
17 | - name: 'Login to GitHub Container Registry'
18 | uses: docker/login-action@v1
19 | with:
20 | registry: ghcr.io
21 | username: ${{github.actor}}
22 | password: ${{secrets.GITHUB_TOKEN}}
23 | - name: Get package version
24 | id: get_version
25 | run: |
26 | EPOCH_TIME=$(date +%s)
27 | COMMIT_HASH=$(git rev-parse --short HEAD)
28 | BRANCH_NAME=$(git rev-parse --abbrev-ref HEAD)
29 |
30 | if [[ "$BRANCH_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
31 | VERSION=$BRANCH_NAME
32 | else
33 | VERSION="${EPOCH_TIME}-${COMMIT_HASH}"
34 | fi
35 |
36 | echo "version=$VERSION" >> $GITHUB_OUTPUT
37 | - name: Build and push CPU image
38 | uses: docker/build-push-action@v6
39 | with:
40 | context: .
41 | file: ./Dockerfile.cpu
42 | cache-from: type=gha
43 | cache-to: type=gha,mode=max
44 | push: true
45 | tags: ghcr.io/nyralei/whisperx-api-server:${{ steps.get_version.outputs.version }}-cpu
--------------------------------------------------------------------------------
/.github/workflows/docker-build-and-push-cuda.yml:
--------------------------------------------------------------------------------
1 | name: Deploy CUDA image to GHCR
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | - 'v[0-9]+.[0-9]+.[0-9]+'
8 |
9 | jobs:
10 | push-image:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v4
15 | - name: Set up Docker Buildx
16 | uses: docker/setup-buildx-action@v3
17 | - name: 'Login to GitHub Container Registry'
18 | uses: docker/login-action@v1
19 | with:
20 | registry: ghcr.io
21 | username: ${{github.actor}}
22 | password: ${{secrets.GITHUB_TOKEN}}
23 | - name: Get package version
24 | id: get_version
25 | run: |
26 | EPOCH_TIME=$(date +%s)
27 | COMMIT_HASH=$(git rev-parse --short HEAD)
28 | BRANCH_NAME=$(git rev-parse --abbrev-ref HEAD)
29 |
30 | if [[ "$BRANCH_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
31 | VERSION=$BRANCH_NAME
32 | else
33 | VERSION="${EPOCH_TIME}-${COMMIT_HASH}"
34 | fi
35 |
36 | echo "version=$VERSION" >> $GITHUB_OUTPUT
37 | - name: Build and push CUDA image
38 | uses: docker/build-push-action@v6
39 | with:
40 | context: .
41 | file: ./Dockerfile.cuda
42 | cache-from: type=gha
43 | cache-to: type=gha,mode=max
44 | push: true
45 | tags: ghcr.io/nyralei/whisperx-api-server:${{ steps.get_version.outputs.version }}-cuda
--------------------------------------------------------------------------------
/Dockerfile.cuda:
--------------------------------------------------------------------------------
1 | ARG UBUNTU_VERSION=22.04
2 | ARG CUDA_VERSION=12.4.1
3 | ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
4 |
5 | FROM ${BASE_CUDA_RUN_CONTAINER} AS base
6 |
7 | ENV DEBIAN_FRONTEND=noninteractive
8 |
9 | RUN apt-get update && apt-get install -y --no-install-recommends \
10 | build-essential \
11 | ca-certificates \
12 | curl \
13 | ffmpeg \
14 | git \
15 | libcudnn8 \
16 | python3 \
17 | python3-pip \
18 | python3-venv \
19 | && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
20 |
21 | WORKDIR /workspace
22 |
23 | FROM base AS python-env
24 |
25 | RUN python3 -m venv /workspace/venv
26 |
27 | ENV PATH="/workspace/venv/bin:$PATH"
28 |
29 | COPY requirements-cuda.txt ./
30 |
31 | RUN pip install --upgrade pip && \
32 | pip install --no-cache-dir -r requirements-cuda.txt
33 |
34 | COPY requirements.txt constraints.txt ./
35 |
36 | RUN pip install --no-cache-dir -c constraints.txt -r requirements.txt
37 |
38 | FROM base AS runtime
39 |
40 | COPY --from=python-env /workspace/venv /workspace/venv
41 |
42 | ENV PATH="/workspace/venv/bin:$PATH"
43 |
44 | WORKDIR /workspace
45 |
46 | COPY src/whisperx_api_server ./whisperx_api_server
47 |
48 | ENV UVICORN_HOST=0.0.0.0
49 | ENV UVICORN_PORT=8000
50 |
51 | COPY ./cuda-docker-entrypoint.sh /workspace/docker-entrypoint.sh
52 | RUN chmod +x /workspace/docker-entrypoint.sh
53 |
54 | ENTRYPOINT [ "/workspace/docker-entrypoint.sh" ]
55 |
56 | CMD ["uvicorn", "--factory", "whisperx_api_server.main:create_app"]
57 |
--------------------------------------------------------------------------------
/src/whisperx_api_server/dependencies.py:
--------------------------------------------------------------------------------
1 | from functools import lru_cache
2 | from typing import Annotated
3 | import json
4 | import logging
5 | from fastapi import (
6 | Depends,
7 | HTTPException,
8 | status
9 | )
10 | from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
11 |
12 | from whisperx_api_server.config import Config
13 |
14 | @lru_cache
15 | def get_config() -> Config:
16 | return Config()
17 |
18 | ConfigDependency = Annotated[Config, Depends(get_config)]
19 |
20 | security = HTTPBearer()
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | async def verify_api_key(
25 | config: ConfigDependency, credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]
26 | ) -> None:
27 | api_keys = {}
28 |
29 | if config.api_keys_file:
30 | try:
31 | with open(config.api_keys_file, 'r') as f:
32 | api_keys = json.load(f)
33 | except (FileNotFoundError, json.JSONDecodeError) as e:
34 | raise HTTPException(
35 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
36 | detail="API keys file error",
37 | ) from e
38 |
39 | client_name = api_keys.get(credentials.credentials)
40 |
41 | if credentials.credentials != config.api_key and client_name is None:
42 | raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid API Key")
43 |
44 | if client_name:
45 | logger.info(f"Authorized request from client: '{client_name}'")
46 | else:
47 | logger.info("Authorized request using the default API key")
48 |
49 | ApiKeyDependency = Depends(verify_api_key)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 |
3 | WhisperX API Server is a FastAPI-based server designed to transcribe audio files using the Whisper ASR (Automatic Speech Recognition) model based on WhisperX (https://github.com/m-bain/WhisperX) Python library. The API offers an OpenAI-like interface that allows users to upload audio files and receive transcription results in various formats. It supports customizable options such as different models, languages, temperature settings, and more.
4 |
5 | Features
6 | 1. Audio Transcription: Transcribe audio files using the Whisper ASR model.
7 | 2. Model Caching: Load and cache models for reusability and faster performance.
8 | 3. OpenAI-like API, based on https://platform.openai.com/docs/api-reference/audio/createTranscription and https://platform.openai.com/docs/api-reference/audio/createTranslation
9 |
10 | ## API Endpoints
11 |
12 | ### `POST /v1/audio/transcriptions`
13 | https://platform.openai.com/docs/api-reference/audio/createTranscription
14 |
15 | **Parameters**:
16 | - `file`: The audio file to transcribe.
17 | - `model (str)`: The Whisper model to use. Default is `config.whisper.model`.
18 | - `language (str)`: The language for transcription. Default is `config.default_language`.
19 | - `prompt (str)`: Optional transcription prompt.
20 | - `response_format (str)`: The format of the transcription output. Defaults to `json`.
21 | - `temperature (float)`: Temperature setting for transcription. Default is `0.0`.
22 | - `timestamp_granularities (list)`: Granularity of timestamps, either `segment` or `word`. Default is `["segment"]`.
23 | - `stream (bool)`: Enable streaming mode for real-time transcription. (Doesn't work.)
24 | - `hotwords (str)`: Optional hotwords for transcription.
25 | - `suppress_numerals (bool)`: Option to suppress numerals in the transcription. Default is `True`.
26 | - `highlight_words (bool)`: Highlight words in the transcription output for formats like VTT and SRT.
27 | - `align (bool)`: Option to do transcription timings alignment. Default is `True`.
28 | - `diarize (bool)`: Option to diarize the transcription. Default is `False`.
29 |
30 | **Returns**: Transcription results in the specified format.
31 |
32 | ### `POST /v1/audio/translations`
33 | https://platform.openai.com/docs/api-reference/audio/createTranslation
34 |
35 | **Parameters**:
36 | - `file`: The audio file to translate.
37 | - `model (str)`: The Whisper model to use. Default is `config.whisper.model`.
38 | - `prompt (str)`: Optional translation prompt.
39 | - `response_format (str)`: The format of the translation output. Defaults to `json`.
40 | - `temperature (float)`: Temperature setting for translation. Default is `0.0`.
41 |
42 | **Returns**: Translation results in the specified format.
43 |
44 | ### `GET /healthcheck`
45 | Returns the current health status of the API server.
46 |
47 | ### `GET /models/list`
48 | Lists all loaded models currently available on the server.
49 |
50 | ### `POST /models/unload`
51 | Unloads a specific model from memory cache.
52 |
53 | ### `POST /models/load`
54 | Loads a specified model into memory.
55 |
56 | ### Running the API
57 |
58 | **With Docker**:
59 |
60 | For CPU:
61 | ```bash
62 | docker compose build whisperx-api-server-cpu
63 |
64 | docker compose up whisperx-api-server-cpu
65 | ```
66 |
67 | For CUDA (GPU):
68 | ```bash
69 | docker compose build whisperx-api-server-cuda
70 |
71 | docker compose up whisperx-api-server-cuda
72 |
73 | ```
74 |
75 | ## Contributing
76 |
77 | Feel free to submit issues, fork the repository, and send pull requests to contribute to the project.
78 |
79 | ## License
80 |
81 | This project is licensed under the GNU GENERAL PUBLIC LICENSE Version 3. See the `LICENSE` file for details.
--------------------------------------------------------------------------------
/src/whisperx_api_server/formatters.py:
--------------------------------------------------------------------------------
1 | from whisperx.utils import WriteSRT, WriteVTT, WriteAudacity
2 | from fastapi.responses import JSONResponse, Response
3 | from whisperx_api_server.config import MediaType
4 |
5 | class ListWriter:
6 | """Helper class to store written lines in memory."""
7 | def __init__(self):
8 | self.lines = []
9 |
10 | def write(self, text):
11 | self.lines.append(text)
12 |
13 | def get_output(self):
14 | return ''.join(self.lines)
15 |
16 | def flush(self):
17 | pass
18 |
19 | def update_options(kwargs, defaults):
20 | """
21 | Helper function to update default options with values from kwargs.
22 |
23 | :param kwargs: Keyword arguments from the function call.
24 | :param defaults: Dictionary of default values.
25 | :return: Updated options dictionary.
26 | """
27 | options = defaults.copy()
28 | options.update({key: kwargs.get(key, value) for key, value in defaults.items()})
29 | return options
30 |
31 | def handle_whisperx_format(transcript, writer_class, options):
32 | """
33 | Helper function to handle "srt", "vtt" and "aud" formats using whisperx writers.
34 |
35 | :param transcript: The transcript dictionary.
36 | :param writer_class: The writer class (WriteSRT, WriteVTT or WriteAudacity).
37 | :param options: Options for the writer.
38 | :return: Formatted output as a string.
39 | """
40 | writer = writer_class(output_dir=None)
41 | output = ListWriter()
42 |
43 | transcript["segments"]["language"] = transcript["language"]
44 |
45 | writer.write_result(transcript["segments"], output, options)
46 |
47 | return output.get_output()
48 |
49 | def format_transcription(transcript, format, **kwargs) -> Response:
50 | """
51 | Format a transcript into a given format and return a FastAPI Response object.
52 |
53 | :param transcript: The transcript to format, a dictionary with a "segments" key that contains a list of segments with start and end times and text.
54 | :param format: The format to generate the transcript in. Supported formats are "json", "text", "srt", "vtt" and "aud".
55 | :param kwargs: Additional keyword arguments to pass to the formatter.
56 | :return: A FastAPI Response or JSONResponse object with the formatted transcript and appropriate media type.
57 | """
58 | # Default options, used for formats imported from whisperx.utils
59 | defaults = {
60 | "max_line_width": 1000,
61 | "max_line_count": None,
62 | "highlight_words": kwargs.get("highlight_words", False),
63 | }
64 | options = update_options(kwargs, defaults)
65 |
66 | if format == "json":
67 | response_data = {"text": transcript.get("text", "")}
68 | return JSONResponse(content=response_data, media_type=MediaType.APPLICATION_JSON)
69 | elif format == "verbose_json":
70 | return JSONResponse(content=transcript, media_type=MediaType.APPLICATION_JSON)
71 | elif format == "vtt_json":
72 | transcript["vtt_text"] = handle_whisperx_format(transcript, WriteVTT, options)
73 | return JSONResponse(content=transcript, media_type=MediaType.APPLICATION_JSON)
74 | elif format == "text":
75 | return Response(content=transcript.get("text", ""), media_type=MediaType.TEXT_PLAIN)
76 | elif format == "srt":
77 | content = handle_whisperx_format(transcript, WriteSRT, options)
78 | return Response(content=content, media_type=MediaType.TEXT_PLAIN)
79 | elif format == "vtt":
80 | content = handle_whisperx_format(transcript, WriteVTT, options)
81 | return Response(content=content, media_type=MediaType.TEXT_VTT)
82 | elif format == "aud":
83 | content = handle_whisperx_format(transcript, WriteAudacity, options)
84 | return Response(content=content, media_type=MediaType.TEXT_PLAIN)
85 | else:
86 | raise ValueError(f"Unsupported format: {format}")
87 |
--------------------------------------------------------------------------------
/src/whisperx_api_server/main.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import uuid
3 | import asyncio
4 | from fastapi import (
5 | FastAPI,
6 | Request
7 | )
8 | from contextlib import asynccontextmanager
9 | from fastapi.middleware.cors import CORSMiddleware
10 | from starlette.middleware.base import BaseHTTPMiddleware
11 |
12 | from whisperx_api_server.dependencies import ApiKeyDependency, get_config
13 |
14 | from whisperx_api_server.logger import setup_logger
15 |
16 | from whisperx_api_server.models import (
17 | load_model_instance,
18 | load_align_model_cached,
19 | load_diarize_model_cached,
20 | load_transcribe_pipeline_cached,
21 | )
22 |
23 | from whisperx_api_server.routers.misc import (
24 | router as misc_router,
25 | )
26 |
27 | from whisperx_api_server.routers.models import (
28 | router as models_router,
29 | )
30 |
31 | from whisperx_api_server.routers.transcriptions import (
32 | router as transcribe_router,
33 | )
34 |
35 | class RequestIDMiddleware(BaseHTTPMiddleware):
36 | async def dispatch(self, request: Request, call_next):
37 | request_id = request.headers.get("X-Request-ID", str(uuid.uuid4()))
38 | request.state.request_id = request_id
39 | response = await call_next(request)
40 | response.headers["X-Request-ID"] = request_id
41 | return response
42 |
43 | @asynccontextmanager
44 | async def lifespan(app: FastAPI):
45 | config = get_config()
46 | logger = logging.getLogger(__name__)
47 |
48 | if config.whisper.preload_model is not None:
49 | logger.info(f"Loading model {config.whisper.preload_model}")
50 | model_instance = await load_model_instance(config.whisper.preload_model)
51 | try:
52 | await load_transcribe_pipeline_cached(
53 | whispermodel=model_instance,
54 | language=getattr(config.default_language, "value", config.default_language),
55 | task="transcribe",
56 | )
57 | except Exception:
58 | logger.exception("Failed to preload transcribe pipeline; will build on first request")
59 | try:
60 | if config.alignment.preload_model is not None:
61 | logger.info(f"Loading model {config.alignment.preload_model}")
62 | await load_align_model_cached(config.alignment.preload_model)
63 | elif config.alignment.whitelist:
64 | for lang in config.alignment.whitelist:
65 | logger.info(f"Loading model {lang}")
66 | await load_align_model_cached(lang)
67 | except Exception:
68 | logger.exception("Failed to preload alignment model(s); will load on demand")
69 |
70 | try:
71 | if config.diarization.preload_model is not None:
72 | logger.info(f"Loading model {config.diarization.preload_model}")
73 | await load_diarize_model_cached(config.diarization.preload_model)
74 | except Exception:
75 | logger.exception("Failed to preload diarization model; will load on demand")
76 |
77 | yield
78 |
79 | def create_app() -> FastAPI:
80 | config = get_config()
81 | setup_logger(config.log_level)
82 | logger = logging.getLogger(__name__)
83 |
84 | logger.debug(f"Config: {config}")
85 |
86 | dependencies = []
87 | if config.api_key is not None or config.api_keys_file is not None:
88 | dependencies.append(ApiKeyDependency)
89 |
90 | app = FastAPI(lifespan=lifespan)
91 |
92 | # Misc router is for not protected endpoints like healthcheck
93 | app.include_router(misc_router)
94 |
95 | app.include_router(models_router, dependencies=dependencies)
96 | app.include_router(transcribe_router, dependencies=dependencies)
97 |
98 | if config.allow_origins is not None:
99 | app.add_middleware(
100 | CORSMiddleware,
101 | allow_origins=config.allow_origins,
102 | allow_credentials=True,
103 | allow_methods=["*"],
104 | allow_headers=["*"],
105 | )
106 |
107 | app.add_middleware(RequestIDMiddleware)
108 |
109 | return app
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # IDEs
7 | .vscode/
8 | .idea/
9 |
10 | # Custom
11 | models
12 | compose-dev.yaml
13 | Dockerfile.cuda-dev
14 | scripts
15 | whisperx/
16 | .scannerwork
17 | Dockerfile.test-dev
18 |
19 | # C extensions
20 | *.so
21 |
22 | # Distribution / packaging
23 | .Python
24 | build/
25 | develop-eggs/
26 | dist/
27 | downloads/
28 | eggs/
29 | .eggs/
30 | lib/
31 | lib64/
32 | parts/
33 | sdist/
34 | var/
35 | wheels/
36 | share/python-wheels/
37 | *.egg-info/
38 | .installed.cfg
39 | *.egg
40 | MANIFEST
41 |
42 | # PyInstaller
43 | # Usually these files are written by a python script from a template
44 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
45 | *.manifest
46 | *.spec
47 |
48 | # Installer logs
49 | pip-log.txt
50 | pip-delete-this-directory.txt
51 |
52 | # Unit test / coverage reports
53 | htmlcov/
54 | .tox/
55 | .nox/
56 | .coverage
57 | .coverage.*
58 | .cache
59 | nosetests.xml
60 | coverage.xml
61 | *.cover
62 | *.py,cover
63 | .hypothesis/
64 | .pytest_cache/
65 | cover/
66 |
67 | # Translations
68 | *.mo
69 | *.pot
70 |
71 | # Django stuff:
72 | *.log
73 | local_settings.py
74 | db.sqlite3
75 | db.sqlite3-journal
76 |
77 | # Flask stuff:
78 | instance/
79 | .webassets-cache
80 |
81 | # Scrapy stuff:
82 | .scrapy
83 |
84 | # Sphinx documentation
85 | docs/_build/
86 |
87 | # PyBuilder
88 | .pybuilder/
89 | target/
90 |
91 | # Jupyter Notebook
92 | .ipynb_checkpoints
93 |
94 | # IPython
95 | profile_default/
96 | ipython_config.py
97 |
98 | # pyenv
99 | # For a library or package, you might want to ignore these files since the code is
100 | # intended to run in multiple environments; otherwise, check them in:
101 | # .python-version
102 |
103 | # pipenv
104 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
105 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
106 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
107 | # install all needed dependencies.
108 | #Pipfile.lock
109 |
110 | # poetry
111 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
112 | # This is especially recommended for binary packages to ensure reproducibility, and is more
113 | # commonly ignored for libraries.
114 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
115 | #poetry.lock
116 |
117 | # pdm
118 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
119 | #pdm.lock
120 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
121 | # in version control.
122 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
123 | .pdm.toml
124 | .pdm-python
125 | .pdm-build/
126 |
127 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
128 | __pypackages__/
129 |
130 | # Celery stuff
131 | celerybeat-schedule
132 | celerybeat.pid
133 |
134 | # SageMath parsed files
135 | *.sage.py
136 |
137 | # Environments
138 | .env
139 | .venv
140 | env/
141 | venv/
142 | ENV/
143 | env.bak/
144 | venv.bak/
145 |
146 | # Spyder project settings
147 | .spyderproject
148 | .spyproject
149 |
150 | # Rope project settings
151 | .ropeproject
152 |
153 | # mkdocs documentation
154 | /site
155 |
156 | # mypy
157 | .mypy_cache/
158 | .dmypy.json
159 | dmypy.json
160 |
161 | # Pyre type checker
162 | .pyre/
163 |
164 | # pytype static type analyzer
165 | .pytype/
166 |
167 | # Cython debug symbols
168 | cython_debug/
169 |
170 | # PyCharm
171 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
172 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
173 | # and can be added to the global gitignore or merged into this file. For a more nuclear
174 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
175 | #.idea/
176 |
--------------------------------------------------------------------------------
/src/whisperx_api_server/config.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | from pydantic import BaseModel, Field
4 | from pydantic_settings import BaseSettings, SettingsConfigDict
5 |
6 |
7 | class ResponseFormat(str, Enum):
8 | TEXT = "text"
9 | JSON = "json"
10 | VERBOSE_JSON = "verbose_json"
11 | VTT_JSON = "vtt_json"
12 | SRT = "srt"
13 | VTT = "vtt"
14 | AUD = "aud" # Audacity
15 |
16 | class MediaType(str, Enum):
17 | APPLICATION_JSON = "application/json"
18 | TEXT_PLAIN = "text/plain"
19 | TEXT_VTT = "text/vtt"
20 |
21 | class Language(str, Enum):
22 | AF = "af"
23 | AM = "am"
24 | AR = "ar"
25 | AS = "as"
26 | AZ = "az"
27 | BA = "ba"
28 | BE = "be"
29 | BG = "bg"
30 | BN = "bn"
31 | BO = "bo"
32 | BR = "br"
33 | BS = "bs"
34 | CA = "ca"
35 | CS = "cs"
36 | CY = "cy"
37 | DA = "da"
38 | DE = "de"
39 | EL = "el"
40 | EN = "en"
41 | ES = "es"
42 | ET = "et"
43 | EU = "eu"
44 | FA = "fa"
45 | FI = "fi"
46 | FO = "fo"
47 | FR = "fr"
48 | GL = "gl"
49 | GU = "gu"
50 | HA = "ha"
51 | HAW = "haw"
52 | HE = "he"
53 | HI = "hi"
54 | HR = "hr"
55 | HT = "ht"
56 | HU = "hu"
57 | HY = "hy"
58 | ID = "id"
59 | IS = "is"
60 | IT = "it"
61 | JA = "ja"
62 | JW = "jw"
63 | KA = "ka"
64 | KK = "kk"
65 | KM = "km"
66 | KN = "kn"
67 | KO = "ko"
68 | LA = "la"
69 | LB = "lb"
70 | LN = "ln"
71 | LO = "lo"
72 | LT = "lt"
73 | LV = "lv"
74 | MG = "mg"
75 | MI = "mi"
76 | MK = "mk"
77 | ML = "ml"
78 | MN = "mn"
79 | MR = "mr"
80 | MS = "ms"
81 | MT = "mt"
82 | MY = "my"
83 | NE = "ne"
84 | NL = "nl"
85 | NN = "nn"
86 | NO = "no"
87 | OC = "oc"
88 | PA = "pa"
89 | PL = "pl"
90 | PS = "ps"
91 | PT = "pt"
92 | RO = "ro"
93 | RU = "ru"
94 | SA = "sa"
95 | SD = "sd"
96 | SI = "si"
97 | SK = "sk"
98 | SL = "sl"
99 | SN = "sn"
100 | SO = "so"
101 | SQ = "sq"
102 | SR = "sr"
103 | SU = "su"
104 | SV = "sv"
105 | SW = "sw"
106 | TA = "ta"
107 | TE = "te"
108 | TG = "tg"
109 | TH = "th"
110 | TK = "tk"
111 | TL = "tl"
112 | TR = "tr"
113 | TT = "tt"
114 | UK = "uk"
115 | UR = "ur"
116 | UZ = "uz"
117 | VI = "vi"
118 | YI = "yi"
119 | YO = "yo"
120 | YUE = "yue"
121 | ZH = "zh"
122 |
123 | # https://github.com/OpenNMT/CTranslate2/blob/master/docs/quantization.md
124 | class Quantization(str, Enum):
125 | INT8 = "int8"
126 | INT8_FLOAT16 = "int8_float16"
127 | INT8_BFLOAT16 = "int8_bfloat16"
128 | INT8_FLOAT32 = "int8_float32"
129 | INT16 = "int16"
130 | FLOAT16 = "float16"
131 | BFLOAT16 = "bfloat16"
132 | FLOAT32 = "float32"
133 | DEFAULT = "default"
134 |
135 |
136 | class Device(str, Enum):
137 | CPU = "cpu"
138 | CUDA = "cuda"
139 | AUTO = "auto"
140 |
141 | class VadMethod(str, Enum):
142 | SILERO = "silero"
143 | PYANNOTE = "pyannote"
144 |
145 | class WhisperConfig(BaseModel):
146 | """See https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/transcribe.py#L599."""
147 |
148 | model: str = Field(default="large-v3")
149 | """
150 | Default Huggingface model to use for transcription. Note, the model must support being ran using CTranslate2.
151 | This model will be used if no model is specified in the request.
152 |
153 | Models created by authors of `faster-whisper` can be found at https://huggingface.co/Systran
154 | You can find other supported models at https://huggingface.co/models?p=2&sort=trending&search=ctranslate2 and https://huggingface.co/models?sort=trending&search=ct2
155 | """
156 | inference_device: Device = Field(default=Device.AUTO)
157 | device_index: int | list[int] = Field(default=0)
158 | compute_type: Quantization = Field(default=Quantization.DEFAULT)
159 | cpu_threads: int = Field(default=0)
160 | num_workers: int = Field(default=1)
161 | vad_method: VadMethod = Field(default=VadMethod.PYANNOTE)
162 | vad_model: str = Field(default=None)
163 | vad_options: dict = Field(default=None)
164 | cache: bool = Field(default=True)
165 | preload_model: str = Field(default=None)
166 | local_files_only: bool = Field(default=False)
167 | download_root: str = Field(default=None)
168 |
169 | class AlignConfig(BaseModel):
170 | models: dict = Field(default_factory=dict)
171 | whitelist: list = Field(default_factory=list)
172 | cache: bool = Field(default=True)
173 | preload_model: str = Field(default=None)
174 |
175 | class DiarizeConfig(BaseModel):
176 | cache: bool = Field(default=True)
177 | preload_model: str = Field(default=None)
178 |
179 | class Config(BaseSettings):
180 | """
181 | Configuration for the application. Values can be set via environment variables.
182 |
183 | Pydantic will automatically handle mapping uppercased environment variables to the corresponding fields.
184 | To populate nested, the environment should be prefixed with the nested field name and an underscore. For example,
185 | the environment variable `LOG_LEVEL` will be mapped to `log_level`, `WHISPER__MODEL`(note the double underscore) to `whisper.model`, to set quantization to int8, use `WHISPER__COMPUTE_TYPE=int8`, etc.
186 | """
187 |
188 | model_config = SettingsConfigDict(env_nested_delimiter="__")
189 |
190 | api_key: str | None = None
191 |
192 | api_keys_file: str | None = None
193 |
194 | log_level: str = "DEBUG"
195 |
196 | host: str = Field(alias="UVICORN_HOST", default="0.0.0.0")
197 | port: int = Field(alias="UVICORN_PORT", default=8000)
198 | allow_origins: list[str] | None = None
199 |
200 | default_language: Language | None = None
201 |
202 | default_response_format: ResponseFormat = ResponseFormat.JSON
203 |
204 | batch_size: int = 12
205 |
206 | whisper: WhisperConfig = WhisperConfig()
207 |
208 | alignment: AlignConfig = AlignConfig()
209 |
210 | diarization: DiarizeConfig = DiarizeConfig()
211 |
212 | cache_cleanup: bool = True
213 |
214 | audio_cleanup: bool = True
--------------------------------------------------------------------------------
/src/whisperx_api_server/routers/models.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from fastapi import APIRouter, Form
3 | from fastapi.responses import JSONResponse
4 | from typing import Annotated
5 | from pydantic import AfterValidator
6 |
7 | import whisperx_api_server.transcriber as transcriber
8 | from whisperx_api_server.dependencies import get_config
9 | from whisperx_api_server.config import (
10 | Language,
11 | MediaType,
12 | )
13 | from whisperx_api_server.models import (
14 | load_model_instance,
15 | load_align_model_cached,
16 | load_diarize_model_cached,
17 | model_instances,
18 | align_model_instances,
19 | diarize_model_instances,
20 | unload_model_object,
21 | )
22 |
23 | logger = logging.getLogger(__name__)
24 |
25 | router = APIRouter()
26 |
27 | def handle_default_openai_model(
28 | model_name: str
29 | ) -> str:
30 | """Adjust the model name if it defaults to 'whisper-1'."""
31 | config = get_config()
32 | if model_name == "whisper-1":
33 | logger.info(f"{model_name} is not a valid model name. Using {config.whisper.model} instead.")
34 | return config.whisper.model
35 | return model_name
36 |
37 | ModelName = Annotated[str, AfterValidator(handle_default_openai_model)]
38 |
39 | @router.get(
40 | "/models/list",
41 | description="List loaded models",
42 | tags=["models", "transcribe"],
43 | )
44 | def list_models():
45 | global model_instances
46 | return JSONResponse(content={"models": list(model_instances.keys())}, media_type=MediaType.APPLICATION_JSON)
47 |
48 | @router.post(
49 | "/models/unload",
50 | description="Unload a model",
51 | tags=["models", "transcribe"],
52 | )
53 | def unload_model(model: Annotated[ModelName, Form()]):
54 | try:
55 | if model in model_instances:
56 | model_data = model_instances.pop(model, None)
57 | if model_data is not None:
58 | unload_model_object(model_data)
59 | response_data = {"status": "success"}
60 | else:
61 | response_data = {"status": "error", "message": f"Model {model} not found"}
62 | return JSONResponse(content=response_data, media_type=MediaType.APPLICATION_JSON)
63 | except Exception as e:
64 | return JSONResponse(content={"status": "error", "message": str(e)}, media_type=MediaType.APPLICATION_JSON)
65 |
66 | @router.post(
67 | "/models/load",
68 | description="Load a model",
69 | tags=["models", "transcribe"],
70 | )
71 | async def load_model(model: Annotated[ModelName, Form()]):
72 | try:
73 | await load_model_instance(model)
74 | return JSONResponse(content={"status": "success", "model": model}, media_type=MediaType.APPLICATION_JSON)
75 | except Exception as e:
76 | return JSONResponse(content={"status": "error", "message": str(e)}, media_type=MediaType.APPLICATION_JSON)
77 |
78 | @router.get(
79 | "/align_models/list",
80 | description="List loaded align models",
81 | tags=["models", "align"],
82 | )
83 | def list_align_models():
84 | global align_model_instances
85 | return JSONResponse(content={"models": list(align_model_instances.keys())}, media_type=MediaType.APPLICATION_JSON)
86 |
87 | @router.post(
88 | "/align_models/unload",
89 | description="Unload an align model",
90 | tags=["models", "align"],
91 | )
92 | def unload_align_model(language: Annotated[Language, Form()]):
93 | try:
94 | if language in align_model_instances:
95 | align_model_data = align_model_instances.pop(language, None)
96 | if align_model_data is not None:
97 | unload_model_object(align_model_data.get("model"))
98 | del align_model_data
99 | response_data = {"status": "success"}
100 | else:
101 | response_data = {"status": "error", "message": f"Model with language {language} not found"}
102 | return JSONResponse(content=response_data, media_type=MediaType.APPLICATION_JSON)
103 | except Exception as e:
104 | return JSONResponse(content={"status": "error", "message": str(e)}, media_type=MediaType.APPLICATION_JSON)
105 |
106 | @router.post(
107 | "/align_models/load",
108 | description="Load an align model",
109 | tags=["models", "align"],
110 | )
111 | async def load_align_model(language: Annotated[Language, Form()]):
112 | try:
113 | await load_align_model_cached(language)
114 | return JSONResponse(content={"status": "success", "model": language}, media_type=MediaType.APPLICATION_JSON)
115 | except Exception as e:
116 | return JSONResponse(content={"status": "error", "message": str(e)}, media_type=MediaType.APPLICATION_JSON)
117 |
118 | @router.get(
119 | "/diarize_models/list",
120 | description="List loaded diarize models",
121 | tags=["models", "diarize"],
122 | )
123 | def list_diarize_models():
124 | global diarize_model_instances
125 | return JSONResponse(content={"models": list(diarize_model_instances.keys())}, media_type=MediaType.APPLICATION_JSON)
126 |
127 | @router.post(
128 | "/diarize_models/unload",
129 | description="Unload a diarize model",
130 | tags=["models", "diarize"],
131 | )
132 | def unload_diarize_model(model: Annotated[ModelName, Form()]):
133 | try:
134 | if model in diarize_model_instances:
135 | diarize_model_data = diarize_model_instances.pop(model, None)
136 | if diarize_model_data is not None:
137 | unload_model_object(diarize_model_data)
138 | response_data = {"status": "success"}
139 | else:
140 | response_data = {"status": "error", "message": f"Model {model} not found"}
141 | return JSONResponse(content=response_data, media_type=MediaType.APPLICATION_JSON)
142 | except Exception as e:
143 | return JSONResponse(content={"status": "error", "message": str(e)}, media_type=MediaType.APPLICATION_JSON)
144 |
145 | @router.post(
146 | "/diarize_models/load",
147 | description="Load a diarize model",
148 | tags=["models", "diarize"],
149 | )
150 | async def load_diarize_model(model: Annotated[ModelName, Form()]):
151 | try:
152 | await load_diarize_model_cached(model)
153 | return JSONResponse(content={"status": "success", "model": model}, media_type=MediaType.APPLICATION_JSON)
154 | except Exception as e:
155 | return JSONResponse(content={"status": "error", "message": str(e)}, media_type=MediaType.APPLICATION_JSON)
--------------------------------------------------------------------------------
/src/whisperx_api_server/transcriber.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import os
3 | from whisperx import transcribe as whisperx_transcribe
4 | from whisperx import audio as whisperx_audio
5 | from whisperx import alignment as whisperx_alignment
6 | from whisperx import diarize as whisperx_diarize
7 | from whisperx import types as whisperx_types
8 | from fastapi import UploadFile
9 | import logging
10 | import time
11 | import tempfile
12 | import asyncio
13 | import torch
14 | import gc
15 |
16 | from whisperx_api_server.config import (
17 | Language,
18 | )
19 | from whisperx_api_server.dependencies import get_config
20 | from whisperx_api_server.models import (
21 | CustomWhisperModel,
22 | load_align_model_cached,
23 | load_diarize_model_cached,
24 | load_transcribe_pipeline_cached,
25 | )
26 |
27 | logger = logging.getLogger(__name__)
28 |
29 | config = get_config()
30 |
31 | _concurrency_semaphore = None
32 |
33 | def _get_concurrency_semaphore() -> asyncio.Semaphore | None:
34 | """Return a semaphore only if running on GPU."""
35 | global _concurrency_semaphore
36 | if not torch.cuda.is_available():
37 | return None
38 | if _concurrency_semaphore is None:
39 | max_concurrent = int(os.getenv("MAX_CONCURRENT_TRANSCRIPTIONS", "1"))
40 | _concurrency_semaphore = asyncio.Semaphore(max_concurrent)
41 | return _concurrency_semaphore
42 |
43 | def _cleanup_cache_only():
44 | gc.collect()
45 |
46 | if torch.cuda.is_available():
47 | torch.cuda.empty_cache()
48 |
49 | async def _save_upload_to_temp(audio_file: UploadFile, request_id: str) -> str:
50 | loop = asyncio.get_running_loop()
51 | try:
52 | file_bytes = await audio_file.read()
53 | except Exception as e:
54 | logger.error(f"Request ID: {request_id} - Failed to read uploaded file: {e}")
55 | raise
56 |
57 | def _write_temp_file(data: bytes) -> str:
58 | with tempfile.NamedTemporaryFile(delete=False, suffix=f"_{audio_file.filename}") as temp_file:
59 | temp_file.write(data)
60 | return temp_file.name
61 |
62 | try:
63 | file_path = await loop.run_in_executor(None, _write_temp_file, file_bytes)
64 | except Exception as e:
65 | logger.error(f"Request ID: {request_id} - Failed to write temp file: {e}")
66 | raise
67 |
68 | return file_path
69 |
70 |
71 | async def _load_audio(file_path: str, request_id: str):
72 | loop = asyncio.get_running_loop()
73 | try:
74 | audio = await loop.run_in_executor(None, whisperx_audio.load_audio, file_path)
75 | logger.info(f"Request ID: {request_id} - Audio loaded from {file_path}")
76 | return audio
77 | except Exception as e:
78 | logger.error(f"Request ID: {request_id} - Failed to load audio: {e}")
79 | raise
80 |
81 | async def _transcribe_audio(model, audio, batch_size, chunk_size, language, task, request_id):
82 | loop = asyncio.get_running_loop()
83 |
84 | def _run_transcription():
85 | with torch.inference_mode():
86 | return model.transcribe(
87 | audio=audio,
88 | batch_size=batch_size,
89 | chunk_size=chunk_size,
90 | num_workers=config.whisper.num_workers,
91 | language=language,
92 | task=task,
93 | )
94 |
95 | result = await loop.run_in_executor(None, _run_transcription)
96 |
97 | logger.info(f"Request ID: {request_id} - Transcription completed")
98 | return result
99 |
100 |
101 | async def _align_audio(result, audio, whispermodel, request_id):
102 | loop = asyncio.get_running_loop()
103 | try:
104 | alignment_model_start = time.time()
105 | logger.info(f"Request ID: {request_id} - Loading alignment model")
106 | model_a, metadata = await load_align_model_cached(language_code=result["language"])
107 | logger.info(f"Request ID: {request_id} - Alignment model loaded")
108 | logger.info(f"Request ID: {request_id} - Loading alignment model took {time.time() - alignment_model_start:.2f} seconds")
109 |
110 | def _run_alignment():
111 | with torch.inference_mode():
112 | return whisperx_alignment.align(
113 | transcript=result["segments"],
114 | model=model_a,
115 | align_model_metadata=metadata,
116 | audio=audio,
117 | device=whispermodel.device,
118 | return_char_alignments=False
119 | )
120 | alignment_start = time.time()
121 | result["segments"] = await loop.run_in_executor(None, _run_alignment)
122 | logger.info(f"Request ID: {request_id} - Alignment took {time.time() - alignment_start:.2f} seconds")
123 | return result
124 | except Exception as e:
125 | logger.error(f"Request ID: {request_id} - Alignment failed: {e}")
126 | raise
127 |
128 |
129 | async def _diarize_audio(result, audio, request_id):
130 | loop = asyncio.get_running_loop()
131 | try:
132 | diarization_model_start = time.time()
133 | logger.info(f"Request ID: {request_id} - Loading diarization model")
134 | diarize_model = await load_diarize_model_cached(model_name="tensorlake/speaker-diarization-3.1")
135 | logger.info(f"Request ID: {request_id} - Diarization model loaded. Loading took {time.time() - diarization_model_start:.2f} seconds. Starting diarization")
136 |
137 | def _run_diarization():
138 | with torch.inference_mode():
139 | return diarize_model(audio)
140 | diarize_start = time.time()
141 | diarize_segments = await loop.run_in_executor(None, _run_diarization)
142 | result["segments"] = whisperx_diarize.assign_word_speakers(diarize_segments, result["segments"])
143 | logger.info(f"Request ID: {request_id} - Diarization took {time.time() - diarize_start:.2f} seconds")
144 | return result
145 | except Exception as e:
146 | logger.error(f"Request ID: {request_id} - Diarization failed: {e}")
147 | raise
148 |
149 | def _finalize_text(result, align_or_diarize: bool):
150 | segments = result.get("segments", [])
151 | if align_or_diarize and isinstance(segments, dict):
152 | segments = segments.get("segments", [])
153 |
154 | result["text"] = '\n'.join([s.get("text", "").strip() for s in segments if s.get("text")])
155 | return result
156 |
157 | async def transcribe(
158 | audio_file: UploadFile,
159 | batch_size: int = config.batch_size,
160 | chunk_size: int = 30,
161 | asr_options: dict = {},
162 | language: Language = config.default_language,
163 | whispermodel: CustomWhisperModel = config.whisper.model,
164 | align: bool = False,
165 | diarize: bool = False,
166 | request_id: str = "",
167 | task: str = "transcribe",
168 | ) -> whisperx_types.TranscriptionResult:
169 | start_time = time.time()
170 | file_path = None
171 | audio = None
172 | concurrency_sem = _get_concurrency_semaphore()
173 |
174 | try:
175 | file_path = await _save_upload_to_temp(audio_file, request_id)
176 | logger.info(f"Request ID: {request_id} - Saving uploaded file took {time.time() - start_time:.2f} seconds")
177 |
178 | if concurrency_sem:
179 | await concurrency_sem.acquire()
180 | logger.debug(f"Request ID: {request_id} - Acquired GPU concurrency semaphore")
181 |
182 | logger.info(f"Request ID: {request_id} - Transcribing {audio_file.filename} with model: {whispermodel.model_size_or_path} and options: {asr_options}, language: {language}, task: {task}")
183 |
184 | model_loading_start = time.time()
185 |
186 | model = await load_transcribe_pipeline_cached(
187 | whispermodel=whispermodel,
188 | language=language,
189 | task=task,
190 | )
191 |
192 | logger.info(f"Request ID: {request_id} - Loading model took {time.time() - model_loading_start:.2f} seconds (cached)")
193 |
194 | audio_loading_start = time.time()
195 |
196 | audio = await _load_audio(file_path, request_id)
197 |
198 | logger.info(f"Request ID: {request_id} - Loading audio took {time.time() - audio_loading_start:.2f} seconds")
199 |
200 | transcription_start = time.time()
201 |
202 | result = await _transcribe_audio(model, audio, batch_size, chunk_size, language, task, request_id)
203 |
204 | logger.info(f"Request ID: {request_id} - Transcription took {time.time() - transcription_start:.2f} seconds")
205 |
206 | if align or diarize:
207 | result = await _align_audio(result, audio, whispermodel, request_id)
208 |
209 | if diarize:
210 | result = await _diarize_audio(result, audio, request_id)
211 |
212 | result = _finalize_text(result, align or diarize)
213 |
214 | logger.info(f"Request ID: {request_id} - Transcription completed for {audio_file.filename}")
215 |
216 | return result
217 | except Exception as e:
218 | logger.error(f"Request ID: {request_id} - Transcription failed for {audio_file.filename} with error: {e}")
219 | raise
220 | finally:
221 | with contextlib.suppress(Exception):
222 | if concurrency_sem:
223 | concurrency_sem.release()
224 | with contextlib.suppress(Exception):
225 | if file_path and os.path.exists(file_path):
226 | os.remove(file_path)
227 | if config.audio_cleanup and audio is not None:
228 | del audio
229 | logger.info(f"Request ID: {request_id} - Audio data cleaned up")
230 | if config.cache_cleanup:
231 | _cleanup_cache_only()
232 | logger.info(f"Request ID: {request_id} - Cache cleanup completed")
--------------------------------------------------------------------------------
/src/whisperx_api_server/routers/transcriptions.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import uuid
3 | from .models import handle_default_openai_model
4 | from fastapi import (
5 | APIRouter,
6 | UploadFile,
7 | Form,
8 | HTTPException,
9 | Request,
10 | status
11 | )
12 | from fastapi.responses import Response
13 | from starlette.middleware.base import BaseHTTPMiddleware
14 | from typing import Literal, Annotated
15 | from pydantic import AfterValidator
16 | import time
17 |
18 |
19 | import whisperx_api_server.transcriber as transcriber
20 | from whisperx_api_server.dependencies import ConfigDependency
21 | from whisperx_api_server.formatters import format_transcription
22 | from whisperx_api_server.config import (
23 | Language,
24 | ResponseFormat,
25 | )
26 | from whisperx_api_server.models import (
27 | load_model_instance,
28 | )
29 |
30 | logger = logging.getLogger(__name__)
31 |
32 | router = APIRouter()
33 |
34 | # Annotated ModelName for validation and defaults
35 | ModelName = Annotated[str, AfterValidator(handle_default_openai_model)]
36 |
37 | class RequestIDMiddleware(BaseHTTPMiddleware):
38 | async def dispatch(self, request: Request, call_next):
39 | request_id = request.headers.get("X-Request-ID", str(uuid.uuid4()))
40 | request.state.request_id = request_id
41 | response = await call_next(request)
42 | response.headers["X-Request-ID"] = request_id
43 | return response
44 |
45 | async def get_timestamp_granularities(request: Request) -> list[Literal["segment", "word"]]:
46 | TIMESTAMP_GRANULARITIES_COMBINATIONS = [
47 | [],
48 | ["segment"],
49 | ["word"],
50 | ["word", "segment"],
51 | ["segment", "word"],
52 | ]
53 | form = await request.form()
54 | if form.get("timestamp_granularities[]") is None:
55 | return ["segment"]
56 | timestamp_granularities = form.getlist("timestamp_granularities[]")
57 | assert timestamp_granularities in TIMESTAMP_GRANULARITIES_COMBINATIONS, (
58 | f"{timestamp_granularities} is not a valid value for `timestamp_granularities[]`."
59 | )
60 | return timestamp_granularities
61 |
62 | def apply_defaults(config, model, language=None, response_format=None):
63 | if model is None:
64 | model = config.whisper.model
65 | if language is None:
66 | language = config.default_language
67 | if response_format is None:
68 | response_format = config.default_response_format
69 | return model, language, response_format
70 |
71 | """
72 | OpenAI-like endpoint to transcribe audio files using the Whisper ASR model.
73 |
74 | Args:
75 | request (Request): The HTTP request object.
76 | file (UploadFile): The audio file to transcribe.
77 | model (ModelName): The model to use for the transcription.
78 | language (Language): The language to use for the transcription. Defaults to "en".
79 | prompt (str): The prompt to use for the transcription.
80 | response_format (ResponseFormat): The response format to use for the transcription. Defaults to "json".
81 | temperature (float): The temperature to use for the transcription. Defaults to 0.0.
82 | timestamp_granularities (list[Literal["segment", "word"]]): The timestamp granularities to use for the transcription. Defaults to ["segment"].
83 | stream (bool): Whether to enable streaming mode. Defaults to False.
84 | hotwords (str): The hotwords to use for the transcription.
85 | suppress_numerals (bool): Whether to suppress numerals in the transcription. Defaults to True.
86 | highlight_words (bool): Whether to highlight words in the transcription (Applies only to VTT and SRT). Defaults to False.
87 | align (bool): Whether to do transcription timings alignment. Defaults to True.
88 | diarize (bool): Whether to diarize the transcription. Defaults to False.
89 | chunk_size (int): Chunk size in seconds for merging VAD segments. Defaults to 30.
90 |
91 | Returns:
92 | Transcription: The transcription of the audio file.
93 | """
94 | @router.post(
95 | "/v1/audio/transcriptions",
96 | description="Transcribe audio files using the Whisper ASR model.",
97 | tags=["Transcription"],
98 | )
99 | async def transcribe_audio(
100 | config: ConfigDependency,
101 | request: Request,
102 | file: UploadFile,
103 | model: Annotated[ModelName, Form()] = None,
104 | language: Annotated[Language, Form()] = None,
105 | prompt: Annotated[str, Form()] = None,
106 | response_format: Annotated[ResponseFormat, Form()] = None,
107 | temperature: Annotated[float, Form()] = 0.0,
108 | timestamp_granularities: Annotated[
109 | list[Literal["segment", "word"]],
110 | Form(alias="timestamp_granularities[]"),
111 | ] = ["segment"],
112 | stream: Annotated[bool, Form()] = False,
113 | hotwords: Annotated[str, Form()] = None,
114 | suppress_numerals: Annotated[bool, Form()] = True,
115 | highlight_words: Annotated[bool, Form()] = False,
116 | align: Annotated[bool, Form()] = True,
117 | diarize: Annotated[bool, Form()] = False,
118 | chunk_size: Annotated[int, Form()] = 30,
119 | ) -> Response:
120 | model, language, response_format = apply_defaults(config, model, language, response_format)
121 | timestamp_granularities = await get_timestamp_granularities(request)
122 | request_id = request.state.request_id
123 | logger.info(f"Request ID: {request_id} - Received transcription request")
124 | start_time = time.time() # Start the timer
125 | logger.info(f"Request ID: {request_id} - Received request to transcribe {file.filename} with parameters: \
126 | model: {model}, \
127 | language: {language}, \
128 | prompt: {prompt}, \
129 | response_format: {response_format}, \
130 | temperature: {temperature}, \
131 | timestamp_granularities: {timestamp_granularities}, \
132 | stream: {stream}, \
133 | hotwords: {hotwords}, \
134 | suppress_numerals: {suppress_numerals}, \
135 | highlight_words: {highlight_words}, \
136 | align: {align}, \
137 | diarize: {diarize}, \
138 | chunk_size: {chunk_size}")
139 |
140 | if not align:
141 | if response_format in ('vtt', 'srt', 'aud', 'vtt_json'):
142 | raise HTTPException(
143 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
144 | detail="Subtitles format ('vtt', 'srt', 'aud', 'vtt_json') requires alignment to be enabled."
145 | )
146 |
147 | if diarize:
148 | raise HTTPException(
149 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
150 | detail="Diarization requires alignment to be enabled."
151 | )
152 |
153 | # Determine if word timestamps are required
154 | word_timestamps = "word" in timestamp_granularities
155 |
156 | # Build ASR options
157 | asr_options = {
158 | "suppress_numerals": suppress_numerals,
159 | "temperatures": temperature,
160 | "word_timestamps": word_timestamps,
161 | "initial_prompt": prompt,
162 | "hotwords": hotwords,
163 | }
164 |
165 | model_load_time = time.time()
166 | # Get model instance (reuse if cached)
167 | model_instance = await load_model_instance(model)
168 |
169 | logger.info(f"Loaded model {model} in {time.time() - model_load_time:.2f} seconds")
170 |
171 | try:
172 | transcription = await transcriber.transcribe(
173 | audio_file=file,
174 | asr_options=asr_options,
175 | language=language,
176 | whispermodel=model_instance,
177 | align=align,
178 | diarize=diarize,
179 | chunk_size=chunk_size,
180 | request_id=request_id
181 | )
182 | except Exception as e:
183 | logger.exception(f"Request ID: {request_id} - Transcription failed: {e}")
184 | raise HTTPException(
185 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
186 | detail="An unexpected error occurred while processing the transcription request."
187 | ) from e
188 |
189 | total_time = time.time() - start_time
190 | logger.info(f"Request ID: {request_id} - Transcription process took {total_time:.2f} seconds")
191 |
192 | return format_transcription(transcription, response_format, highlight_words=highlight_words)
193 |
194 | """
195 | OpenAI-like endpoint to translate audio files using the Whisper ASR model.
196 |
197 | Args:
198 | request (Request): The HTTP request object.
199 | file (UploadFile): The audio file to translate.
200 | model (ModelName): The model to use for the translation.
201 | prompt (str): The prompt to use for the translation.
202 | response_format (ResponseFormat): The response format to use for the translation. Defaults to "json".
203 | temperature (float): The temperature to use for the translation. Defaults to 0.0.
204 | chunk_size (int): Chunk size in seconds for merging VAD segments. Defaults to 30.
205 |
206 | Returns:
207 | Translation: The translation of the audio file.
208 | """
209 | @router.post(
210 | "/v1/audio/translations",
211 | description="Translate audio files using the Whisper ASR model",
212 | tags=["Translation"],
213 | )
214 | async def translate_audio(
215 | config: ConfigDependency,
216 | request: Request,
217 | file: UploadFile,
218 | model: Annotated[ModelName, Form()] = None,
219 | prompt: Annotated[str, Form()] = "",
220 | response_format: Annotated[ResponseFormat, Form()] = None,
221 | temperature: Annotated[float, Form()] = 0.0,
222 | chunk_size: Annotated[int, Form()] = 30,
223 | ) -> Response:
224 | model, _, response_format = apply_defaults(config, model, language=None, response_format=response_format)
225 | request_id = request.state.request_id
226 | logger.info(f"Request ID: {request_id} - Received translation request")
227 | start_time = time.time() # Start the timer
228 | logger.info(f"Request ID: {request_id} - Received request to translate {file.filename} with parameters: \
229 | model: {model}, \
230 | prompt: {prompt}, \
231 | response_format: {response_format}, \
232 | temperature: {temperature}, \
233 | chunk_size: {chunk_size}")
234 |
235 | # Build ASR options
236 | asr_options = {
237 | "initial_prompt": prompt,
238 | "temperatures": temperature,
239 | }
240 |
241 | model_load_time = time.time()
242 | # Get model instance (reuse if cached)
243 | model_instance = await load_model_instance(model)
244 |
245 | logger.info(f"Loaded model {model} in {time.time() - model_load_time:.2f} seconds")
246 |
247 | try:
248 | translation = await transcriber.transcribe(
249 | audio_file=file,
250 | asr_options=asr_options,
251 | whispermodel=model_instance,
252 | chunk_size=chunk_size,
253 | request_id=request_id,
254 | task="translate"
255 | )
256 | except Exception as e:
257 | logger.exception(f"Request ID: {request_id} - Translation failed: {e}")
258 | raise HTTPException(
259 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
260 | detail="An unexpected error occurred while processing the translation request."
261 | ) from e
262 |
263 | total_time = time.time() - start_time
264 | logger.info(f"Request ID: {request_id} - Translation process took {total_time:.2f} seconds")
265 |
266 | return format_transcription(translation, response_format)
--------------------------------------------------------------------------------
/src/whisperx_api_server/models.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import asyncio
3 | import contextlib
4 | import torch
5 | import gc
6 | from collections import defaultdict
7 | from asyncio import Lock
8 | from typing import Union, List, Optional, Tuple, Any
9 |
10 | from whisperx import asr as whisperx_asr
11 | from whisperx import transcribe as whisperx_transcribe
12 | from whisperx import alignment as whisperx_alignment
13 | from whisperx import diarize as whisperx_diarize
14 |
15 | from whisperx_api_server.dependencies import get_config
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 | # Global caches
20 | model_instances = {}
21 | model_locks = defaultdict(Lock)
22 |
23 | align_model_instances = {}
24 | alignment_locks = defaultdict(Lock)
25 |
26 | diarize_model_instances = {}
27 | diarization_locks = defaultdict(Lock)
28 |
29 | alignment_cache_mod_lock = Lock()
30 |
31 | # Transcribe pipeline cache
32 | transcribe_pipeline_instances = {}
33 | transcribe_locks = defaultdict(Lock)
34 |
35 | def unload_model_object(model_obj: Any):
36 | if model_obj is None:
37 | return
38 | # 1) Move to CPU
39 | with contextlib.suppress(Exception):
40 | model_obj.to("cpu")
41 | # 2) Delete reference
42 | del model_obj
43 |
44 | # 3) Force GC and empty cache
45 | gc.collect()
46 | torch.cuda.empty_cache()
47 |
48 | class CustomWhisperModel(whisperx_asr.WhisperModel):
49 | def __init__(
50 | self,
51 | model_size_or_path: str,
52 | device: str = "auto",
53 | device_index: Union[int, List[int]] = 0,
54 | compute_type: str = "default",
55 | cpu_threads: int = 0,
56 | num_workers: int = 1,
57 | download_root: Optional[str] = None,
58 | local_files_only: bool = False,
59 | files: dict = None,
60 | **model_kwargs,
61 | ):
62 | super().__init__(
63 | model_size_or_path=model_size_or_path,
64 | device=device,
65 | device_index=device_index,
66 | compute_type=compute_type,
67 | cpu_threads=cpu_threads,
68 | num_workers=num_workers,
69 | download_root=download_root,
70 | local_files_only=local_files_only,
71 | files=files,
72 | **model_kwargs
73 | )
74 | self.model_size_or_path = model_size_or_path
75 | self.device = device
76 | self.device_index = device_index
77 | self.compute_type = compute_type
78 | self.cpu_threads = cpu_threads
79 | self.num_workers = num_workers
80 | self.download_root = download_root
81 | self.local_files_only = local_files_only
82 | self.files = files
83 | self.model_kwargs = model_kwargs
84 |
85 |
86 | def check_device():
87 | """
88 | Original device-checking function used by whisper.
89 | """
90 | try:
91 | return "cuda" if torch.cuda.is_available() else "cpu"
92 | except Exception:
93 | logger.error("Could not determine device. Using 'cpu' instead.")
94 | return "cpu"
95 |
96 |
97 | def _determine_inference_device():
98 | """
99 | Helper to fetch `inference_device` from config or default to CPU/GPU auto-check.
100 | """
101 | config = get_config()
102 | inference_device = config.whisper.inference_device.value
103 | if inference_device == "auto":
104 | inference_device = check_device()
105 | return inference_device
106 |
107 |
108 | def initialize_model(model_name: str) -> CustomWhisperModel:
109 | """
110 | Initializes a CustomWhisperModel with the config settings.
111 | """
112 | config = get_config()
113 | inference_device = _determine_inference_device()
114 | return CustomWhisperModel(
115 | model_size_or_path=model_name,
116 | device=inference_device,
117 | device_index=config.whisper.device_index,
118 | compute_type=config.whisper.compute_type.value,
119 | cpu_threads=config.whisper.cpu_threads,
120 | num_workers=config.whisper.num_workers,
121 | local_files_only=config.whisper.local_files_only,
122 | download_root=config.whisper.download_root
123 | )
124 |
125 |
126 | async def _get_or_init_model(
127 | key: str,
128 | cache_dict: dict,
129 | lock_dict: dict,
130 | init_func,
131 | log_reuse: str = "Reusing cached model instance for {key}",
132 | log_init: str = "Initializing model: {key}",
133 | ) -> Any:
134 | """
135 | Generic helper:
136 | 1) Check if `key` is in `cache_dict`.
137 | 2) If not, acquire lock_dict[key] and load the model.
138 | 3) Return the cached/loaded model.
139 | """
140 | if key in cache_dict:
141 | logger.info(log_reuse.format(key=key))
142 | return cache_dict[key]
143 |
144 | async with lock_dict[key]:
145 | # Double-check after acquiring the lock
146 | if key not in cache_dict:
147 | logger.info(log_init.format(key=key))
148 | cache_dict[key] = await init_func()
149 | return cache_dict[key]
150 |
151 |
152 | # -------------------------------------------------------------------------
153 | # Main Whisper model loading
154 | # -------------------------------------------------------------------------
155 | async def load_model_instance(model_name: str):
156 | """
157 | Async function to get the main Whisper model instance from cache, or initialize if needed.
158 | """
159 | return await _get_or_init_model(
160 | key=model_name,
161 | cache_dict=model_instances,
162 | lock_dict=model_locks,
163 | init_func=lambda: asyncio.to_thread(initialize_model, model_name),
164 | )
165 |
166 | # -------------------------------------------------------------------------
167 | # Transcribe pipeline loading
168 | # -------------------------------------------------------------------------
169 | def _hashable_vad_options(vad_options: Any) -> Any:
170 | if vad_options is None:
171 | return None
172 | if isinstance(vad_options, dict):
173 | return tuple(sorted((k, _hashable_vad_options(v)) for k, v in vad_options.items()))
174 | if isinstance(vad_options, (list, tuple)):
175 | return tuple(_hashable_vad_options(v) for v in vad_options)
176 | return vad_options
177 |
178 | async def load_transcribe_pipeline_cached(
179 | whispermodel: CustomWhisperModel,
180 | language: Optional[str] = None,
181 | task: str = "transcribe",
182 | ):
183 | config = get_config()
184 | key = (
185 | whispermodel.model_size_or_path,
186 | whispermodel.device,
187 | whispermodel.compute_type,
188 | config.whisper.vad_method.value if hasattr(config.whisper.vad_method, "value") else config.whisper.vad_method,
189 | config.whisper.vad_model,
190 | _hashable_vad_options(config.whisper.vad_options),
191 | )
192 |
193 | def _init_pipeline():
194 | return whisperx_transcribe.load_model(
195 | whisper_arch=whispermodel.model_size_or_path,
196 | device=whispermodel.device,
197 | compute_type=whispermodel.compute_type,
198 | language=language,
199 | vad_model=config.whisper.vad_model,
200 | vad_method=config.whisper.vad_method,
201 | vad_options=config.whisper.vad_options,
202 | task=task,
203 | )
204 |
205 | pipeline = await _get_or_init_model(
206 | key=str(key),
207 | cache_dict=transcribe_pipeline_instances,
208 | lock_dict=transcribe_locks,
209 | init_func=lambda: asyncio.to_thread(_init_pipeline),
210 | log_reuse="Reusing cached transcribe pipeline: {key}",
211 | log_init="Initializing transcribe pipeline: {key}",
212 | )
213 |
214 | if not config.whisper.cache:
215 | removed = transcribe_pipeline_instances.pop(str(key), None)
216 | if removed is not None:
217 | logger.info(f"Unloading transcribe pipeline from cache (disabled): {key}")
218 | unload_model_object(removed)
219 |
220 | return pipeline
221 |
222 | # -------------------------------------------------------------------------
223 | # Alignment model loading
224 | # -------------------------------------------------------------------------
225 | async def _cleanup_alignment_cache_whitelist():
226 | """
227 | If config.alignment.whitelist is set, remove any alignment models from
228 | `align_model_instances` that are not in the whitelist.
229 | This happens under a dedicated lock to avoid race conditions.
230 | """
231 | config = get_config()
232 | whitelist = config.alignment.whitelist
233 | if not whitelist:
234 | return
235 |
236 | async with alignment_cache_mod_lock:
237 | for key in list(align_model_instances.keys()): # noqa: S7504
238 | if key not in whitelist:
239 | logger.info(f"Unloading alignment model for {key} (not in whitelist).")
240 | align_model_data = align_model_instances.pop(key, None)
241 | if align_model_data is not None:
242 | unload_model_object(align_model_data.get("model"))
243 | del align_model_data
244 |
245 | async def load_align_model_cached(
246 | language_code: str,
247 | model_name: Optional[str] = None,
248 | model_dir: Optional[str] = None
249 | ) -> Tuple[Any, Any]:
250 | """
251 | Loads and caches alignment models based on language codes (or "multilingual")
252 | while respecting the config whitelisting and caching settings.
253 | """
254 | config = get_config()
255 |
256 | # Clean up out-of-whitelist models
257 | await _cleanup_alignment_cache_whitelist()
258 |
259 | inference_device = _determine_inference_device()
260 |
261 | selected_model_name = model_name
262 | if "multilingual" in config.alignment.models:
263 | selected_model_name = config.alignment.models["multilingual"]
264 | logger.info(f"Overriding with 'multilingual' alignment model: {selected_model_name}")
265 | elif language_code in config.alignment.models:
266 | selected_model_name = config.alignment.models[language_code]
267 | logger.info(f"Using configured alignment model for '{language_code}': {selected_model_name}")
268 |
269 | # Decide how to key the cache
270 | if (selected_model_name is not None
271 | and selected_model_name == config.alignment.models.get("multilingual")):
272 | cache_key = "multilingual"
273 | else:
274 | cache_key = language_code
275 |
276 | logger.debug(f"config.alignment.models = {config.alignment.models}")
277 | logger.debug(f"Incoming language_code = {language_code}, model_name param = {model_name}")
278 |
279 | async def _init_alignment():
280 | try:
281 | loop = asyncio.get_running_loop()
282 | align_model, align_metadata = await loop.run_in_executor(
283 | None, # or a custom ThreadPoolExecutor
284 | lambda: whisperx_alignment.load_align_model(
285 | language_code=language_code,
286 | device=inference_device,
287 | model_name=selected_model_name,
288 | model_dir=model_dir
289 | )
290 | )
291 | except Exception as e:
292 | logger.error(f"Failed to load alignment model for language '{language_code}': {e}")
293 | raise
294 |
295 | return {"model": align_model, "metadata": align_metadata}
296 |
297 | # Fetch or initialize the alignment model under a lock:
298 | model_data = await _get_or_init_model(
299 | key=cache_key,
300 | cache_dict=align_model_instances,
301 | lock_dict=alignment_locks,
302 | init_func=_init_alignment,
303 | log_reuse="Reusing cached alignment model for key: {key}",
304 | log_init="Initializing alignment model for key: {key}",
305 | )
306 |
307 | # If caching is disabled, remove it immediately and free GPU memory
308 | if not config.alignment.cache:
309 | async with alignment_cache_mod_lock:
310 | removed_data = align_model_instances.pop(cache_key, None)
311 | if removed_data is not None:
312 | logger.info(f"Unloading alignment model from cache (disabled): {cache_key}")
313 | model_obj = removed_data.get("model")
314 | if model_obj is not None:
315 | unload_model_object(model_obj)
316 | del removed_data
317 |
318 | return model_data["model"], model_data["metadata"]
319 |
320 |
321 | # -------------------------------------------------------------------------
322 | # Diarization model loading
323 | # -------------------------------------------------------------------------
324 | async def load_diarize_model_cached(model_name: str):
325 | """
326 | Loads and caches a diarization pipeline model, if not already present.
327 | Clears from cache after use if `config.diarization.cache` is False.
328 | """
329 | config = get_config()
330 | inference_device = _determine_inference_device()
331 |
332 | def _init_diarization():
333 | logger.info(f"Loading diarization pipeline for model: {model_name} with device: {inference_device}")
334 | return whisperx_diarize.DiarizationPipeline(model_name=model_name, device=inference_device)
335 |
336 | diarize_model = await _get_or_init_model(
337 | key=model_name,
338 | cache_dict=diarize_model_instances,
339 | lock_dict=diarization_locks,
340 | init_func=lambda: asyncio.to_thread(_init_diarization),
341 | log_reuse="Reusing cached diarization model for: {key}",
342 | log_init="Initializing diarization model: {key}",
343 | )
344 |
345 | if not config.diarization.cache:
346 | # Immediately remove from cache, unload from GPU memory
347 | removed_model = diarize_model_instances.pop(model_name, None)
348 | if removed_model is not None:
349 | logger.info(f"Unloading diarization model from cache (disabled): {model_name}")
350 | unload_model_object(removed_model)
351 |
352 | return diarize_model
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
635 | Copyright (C)
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see .
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | Copyright (C)
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | .
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | .
675 |
--------------------------------------------------------------------------------