├── tests └── __init__.py ├── src └── insanely_fast_whisper │ ├── __init__.py │ ├── utils │ ├── __init__.py │ ├── result.py │ ├── diarization_pipeline.py │ └── diarize.py │ └── cli.py ├── pyproject.toml ├── convert_output.py ├── .gitignore ├── README.md ├── insanely_fast_whisper_colab.ipynb └── LICENSE /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/insanely_fast_whisper/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/insanely_fast_whisper/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/insanely_fast_whisper/utils/result.py: -------------------------------------------------------------------------------- 1 | from typing import TypedDict 2 | 3 | 4 | class JsonTranscriptionResult(TypedDict): 5 | speakers: list 6 | chunks: list 7 | text: str 8 | 9 | 10 | def build_result(transcript, outputs) -> JsonTranscriptionResult: 11 | return { 12 | "speakers": transcript, 13 | "chunks": outputs["chunks"], 14 | "text": outputs["text"], 15 | } 16 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "insanely-fast-whisper" 3 | version = "0.0.15" 4 | description = "An insanely fast whisper CLI" 5 | authors = [ 6 | { name = "VB", email = "reachvaibhavs10@gmail.com" }, 7 | { name = "Patrick Arminio", email = "patrick.arminio@gmail.com" }, 8 | ] 9 | dependencies = [ 10 | "transformers", 11 | "accelerate", 12 | "pyannote-audio>=3.1.0", 13 | "setuptools>=68.2.2", 14 | "rich>=13.7.0", 15 | ] 16 | requires-python = ">=3.8" 17 | readme = "README.md" 18 | license = { text = "MIT" } 19 | 20 | 21 | [build-system] 22 | requires = ["pdm-backend"] 23 | build-backend = "pdm.backend" 24 | 25 | [project.scripts] 26 | insanely-fast-whisper = "insanely_fast_whisper.cli:main" 27 | 28 | [project.urls] 29 | # Name based 30 | Homepage = "https://github.com/Vaibhavs10/insanely-fast-whisper" 31 | Twitter = "https://twitter.com/reach_vb" 32 | -------------------------------------------------------------------------------- /src/insanely_fast_whisper/utils/diarization_pipeline.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from pyannote.audio import Pipeline 3 | from rich.progress import Progress, TimeElapsedColumn, BarColumn, TextColumn 4 | 5 | from .diarize import post_process_segments_and_transcripts, diarize_audio, \ 6 | preprocess_inputs 7 | 8 | 9 | def diarize(args, outputs): 10 | diarization_pipeline = Pipeline.from_pretrained( 11 | checkpoint_path=args.diarization_model, 12 | use_auth_token=args.hf_token, 13 | ) 14 | diarization_pipeline.to( 15 | torch.device("mps" if args.device_id == "mps" else f"cuda:{args.device_id}") 16 | ) 17 | 18 | with Progress( 19 | TextColumn("🤗 [progress.description]{task.description}"), 20 | BarColumn(style="yellow1", pulse_style="white"), 21 | TimeElapsedColumn(), 22 | ) as progress: 23 | progress.add_task("[yellow]Segmenting...", total=None) 24 | 25 | inputs, diarizer_inputs = preprocess_inputs(inputs=args.file_name) 26 | 27 | segments = diarize_audio(diarizer_inputs, 
diarization_pipeline, args.num_speakers, args.min_speakers, args.max_speakers) 28 | 29 | return post_process_segments_and_transcripts( 30 | segments, outputs["chunks"], group_by_speaker=False 31 | ) 32 | -------------------------------------------------------------------------------- /convert_output.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | 5 | 6 | class TxtFormatter: 7 | @classmethod 8 | def preamble(cls): 9 | return "" 10 | 11 | @classmethod 12 | def format_chunk(cls, chunk, index): 13 | text = chunk['text'] 14 | return f"{text}\n" 15 | 16 | 17 | class SrtFormatter: 18 | @classmethod 19 | def preamble(cls): 20 | return "" 21 | 22 | @classmethod 23 | def format_seconds(cls, seconds): 24 | whole_seconds = int(seconds) 25 | milliseconds = int((seconds - whole_seconds) * 1000) 26 | 27 | hours = whole_seconds // 3600 28 | minutes = (whole_seconds % 3600) // 60 29 | seconds = whole_seconds % 60 30 | 31 | return f"{hours:02d}:{minutes:02d}:{seconds:02d},{milliseconds:03d}" 32 | 33 | @classmethod 34 | def format_chunk(cls, chunk, index): 35 | text = chunk['text'] 36 | start, end = chunk['timestamp'][0], chunk['timestamp'][1] 37 | start_format, end_format = cls.format_seconds(start), cls.format_seconds(end) 38 | return f"{index}\n{start_format} --> {end_format}\n{text}\n\n" 39 | 40 | 41 | class VttFormatter: 42 | @classmethod 43 | def preamble(cls): 44 | return "WEBVTT\n\n" 45 | 46 | @classmethod 47 | def format_seconds(cls, seconds): 48 | whole_seconds = int(seconds) 49 | milliseconds = int((seconds - whole_seconds) * 1000) 50 | 51 | hours = whole_seconds // 3600 52 | minutes = (whole_seconds % 3600) // 60 53 | seconds = whole_seconds % 60 54 | 55 | return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}" 56 | 57 | @classmethod 58 | def format_chunk(cls, chunk, index): 59 | text = chunk['text'] 60 | start, end = chunk['timestamp'][0], chunk['timestamp'][1] 61 | start_format, end_format = cls.format_seconds(start), cls.format_seconds(end) 62 | return f"{index}\n{start_format} --> {end_format}\n{text}\n\n" 63 | 64 | 65 | def convert(input_path, output_format, output_dir, verbose): 66 | with open(input_path, 'r') as file: 67 | data = json.load(file) 68 | 69 | formatter_class = { 70 | 'srt': SrtFormatter, 71 | 'vtt': VttFormatter, 72 | 'txt': TxtFormatter 73 | }.get(output_format) 74 | 75 | string = formatter_class.preamble() 76 | for index, chunk in enumerate(data['chunks'], 1): 77 | entry = formatter_class.format_chunk(chunk, index) 78 | 79 | if verbose: 80 | print(entry) 81 | 82 | string += entry 83 | 84 | with open(os.path.join(output_dir, f"output.{output_format}"), 'w', encoding='utf-8') as file: 85 | file.write(string) 86 | 87 | def main(): 88 | parser = argparse.ArgumentParser(description="Convert JSON to an output format.") 89 | parser.add_argument("input_file", help="Input JSON file path") 90 | parser.add_argument("-f", "--output_format", default="srt", help="Format of the output file (default: srt)", choices=["txt", "vtt", "srt"])  # default was "all", which is not a valid formatter key and crashed the lookup 91 | parser.add_argument("-o", "--output_dir", default=".", help="Directory where the output file/s is/are saved") 92 | parser.add_argument("--verbose", action="store_true", help="Print each entry as it's added") 93 | 94 | args = parser.parse_args() 95 | convert(args.input_file, args.output_format, args.output_dir, args.verbose) 96 | 97 | if __name__ == "__main__": 98 | # Example Usage: 99 | # python convert_output.py output.json -f vtt -o
/tmp/my/output/dir 100 | main() 101 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | 164 | .vscode/ 165 | .idea/ -------------------------------------------------------------------------------- /src/insanely_fast_whisper/utils/diarize.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import torch 3 | import numpy as np 4 | from torchaudio import functional as F 5 | from transformers.pipelines.audio_utils import ffmpeg_read 6 | import sys 7 | 8 | 9 | # Code lifted from https://github.com/huggingface/speechbox/blob/main/src/speechbox/diarize.py 10 | # and from https://github.com/m-bain/whisperX/blob/main/whisperx/diarize.py 11 | 12 | 13 | def preprocess_inputs(inputs): 14 | if isinstance(inputs, str): 15 | if inputs.startswith("http://") or inputs.startswith("https://"): 16 | # We need to actually check for a real protocol, otherwise it's impossible to use a local file 17 | # like http_huggingface_co.png 18 | inputs = requests.get(inputs).content 19 | else: 20 | with open(inputs, "rb") as f: 21 | inputs = f.read() 22 | 23 | if isinstance(inputs, bytes): 24 | inputs = ffmpeg_read(inputs, 16000) 25 | 26 | if isinstance(inputs, dict): 27 | # Accepting `"array"` which is the key defined in `datasets` for better integration 28 | if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)): 29 | raise ValueError( 30 | "When passing a dictionary to ASRDiarizePipeline, the dict needs to contain a " 31 | '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, ' 32 | "containing the sampling_rate associated with that array" 33 | ) 34 | 35 | _inputs = inputs.pop("raw", None) 36 | if _inputs is None: 37 | # Remove path which will not be used from `datasets`. 
38 | inputs.pop("path", None) 39 | _inputs = inputs.pop("array", None) 40 | in_sampling_rate = inputs.pop("sampling_rate") 41 | inputs = _inputs 42 | if in_sampling_rate != 16000: 43 | inputs = F.resample( 44 | torch.from_numpy(inputs), in_sampling_rate, 16000 45 | ).numpy() 46 | 47 | if not isinstance(inputs, np.ndarray): 48 | raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`") 49 | if len(inputs.shape) != 1: 50 | raise ValueError( 51 | "We expect a single channel audio input for ASRDiarizePipeline" 52 | ) 53 | 54 | # diarization model expects float32 torch tensor of shape `(channels, seq_len)` 55 | diarizer_inputs = torch.from_numpy(inputs).float() 56 | diarizer_inputs = diarizer_inputs.unsqueeze(0) 57 | 58 | return inputs, diarizer_inputs 59 | 60 | 61 | def diarize_audio(diarizer_inputs, diarization_pipeline, num_speakers, min_speakers, max_speakers): 62 | diarization = diarization_pipeline( 63 | {"waveform": diarizer_inputs, "sample_rate": 16000}, 64 | num_speakers=num_speakers, 65 | min_speakers=min_speakers, 66 | max_speakers=max_speakers, 67 | ) 68 | 69 | segments = [] 70 | for segment, track, label in diarization.itertracks(yield_label=True): 71 | segments.append( 72 | { 73 | "segment": {"start": segment.start, "end": segment.end}, 74 | "track": track, 75 | "label": label, 76 | } 77 | ) 78 | 79 | # diarizer output may contain consecutive segments from the same speaker (e.g. {(0 -> 1, speaker_1), (1 -> 1.5, speaker_1), ...}) 80 | # we combine these segments to give overall timestamps for each speaker's turn (e.g. {(0 -> 1.5, speaker_1), ...}) 81 | new_segments = [] 82 | prev_segment = cur_segment = segments[0] 83 | 84 | for i in range(1, len(segments)): 85 | cur_segment = segments[i] 86 | 87 | # check if we have changed speaker ("label") 88 | if cur_segment["label"] != prev_segment["label"]: 89 | # add the start/end times for the super-segment to the new list 90 | new_segments.append( 91 | { 92 | "segment": { 93 | "start": prev_segment["segment"]["start"], 94 | "end": cur_segment["segment"]["start"], 95 | }, 96 | "speaker": prev_segment["label"], 97 | } 98 | ) 99 | prev_segment = segments[i] 100 | 101 | # add the last segment(s) if there was no speaker change 102 | new_segments.append( 103 | { 104 | "segment": { 105 | "start": prev_segment["segment"]["start"], 106 | "end": cur_segment["segment"]["end"], 107 | }, 108 | "speaker": prev_segment["label"], 109 | } 110 | ) 111 | 112 | return new_segments 113 | 114 | 115 | def post_process_segments_and_transcripts(new_segments, transcript, group_by_speaker) -> list: 116 | # get the end timestamps for each chunk from the ASR output 117 | end_timestamps = np.array( 118 | [chunk["timestamp"][-1] if chunk["timestamp"][-1] is not None else sys.float_info.max for chunk in transcript]) 119 | segmented_preds = [] 120 | 121 | # align the diarizer timestamps and the ASR timestamps 122 | for segment in new_segments: 123 | # get the diarizer end timestamp 124 | end_time = segment["segment"]["end"] 125 | # find the ASR end timestamp that is closest to the diarizer's end timestamp and cut the transcript to here 126 | upto_idx = np.argmin(np.abs(end_timestamps - end_time)) 127 | 128 | if group_by_speaker: 129 | segmented_preds.append( 130 | { 131 | "speaker": segment["speaker"], 132 | "text": "".join( 133 | [chunk["text"] for chunk in transcript[: upto_idx + 1]] 134 | ), 135 | "timestamp": ( 136 | transcript[0]["timestamp"][0], 137 | transcript[upto_idx]["timestamp"][1], 138 | ), 139 | } 140 | ) 141 | else:
142 | for i in range(upto_idx + 1): 143 | segmented_preds.append({"speaker": segment["speaker"], **transcript[i]}) 144 | 145 | # crop the transcripts and timestamp lists according to the latest timestamp (for faster argmin) 146 | transcript = transcript[upto_idx + 1:] 147 | end_timestamps = end_timestamps[upto_idx + 1:] 148 | 149 | if len(end_timestamps) == 0: 150 | break 151 | 152 | return segmented_preds 153 | -------------------------------------------------------------------------------- /src/insanely_fast_whisper/cli.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | from transformers import pipeline 4 | from rich.progress import Progress, TimeElapsedColumn, BarColumn, TextColumn 5 | import torch 6 | 7 | from .utils.diarization_pipeline import diarize 8 | from .utils.result import build_result 9 | 10 | parser = argparse.ArgumentParser(description="Automatic Speech Recognition") 11 | parser.add_argument( 12 | "--file-name", 13 | required=True, 14 | type=str, 15 | help="Path or URL to the audio file to be transcribed.", 16 | ) 17 | parser.add_argument( 18 | "--device-id", 19 | required=False, 20 | default="0", 21 | type=str, 22 | help='Device ID for your GPU. Just pass the device number when using CUDA, or "mps" for Macs with Apple Silicon. (default: "0")', 23 | ) 24 | parser.add_argument( 25 | "--transcript-path", 26 | required=False, 27 | default="output.json", 28 | type=str, 29 | help="Path to save the transcription output. (default: output.json)", 30 | ) 31 | parser.add_argument( 32 | "--model-name", 33 | required=False, 34 | default="openai/whisper-large-v3", 35 | type=str, 36 | help="Name of the pretrained model/checkpoint to perform ASR. (default: openai/whisper-large-v3)", 37 | ) 38 | parser.add_argument( 39 | "--task", 40 | required=False, 41 | default="transcribe", 42 | type=str, 43 | choices=["transcribe", "translate"], 44 | help="Task to perform: transcribe or translate to another language. (default: transcribe)", 45 | ) 46 | parser.add_argument( 47 | "--language", 48 | required=False, 49 | type=str, 50 | default="None", 51 | help='Language of the input audio. (default: "None" (Whisper auto-detects the language))', 52 | ) 53 | parser.add_argument( 54 | "--batch-size", 55 | required=False, 56 | type=int, 57 | default=24, 58 | help="Number of parallel batches you want to compute. Reduce if you face OOMs. (default: 24)", 59 | ) 60 | parser.add_argument( 61 | "--flash", 62 | required=False, 63 | type=lambda x: str(x).lower() in ("true", "1", "yes"),  # argparse's bare type=bool treats any non-empty string (even "False") as True 64 | default=False, 65 | help="Use Flash Attention 2. Read the FAQs to see how to install FA2 correctly. (default: False)", 66 | ) 67 | parser.add_argument( 68 | "--timestamp", 69 | required=False, 70 | type=str, 71 | default="chunk", 72 | choices=["chunk", "word"], 73 | help="Whisper supports both chunked as well as word level timestamps. (default: chunk)", 74 | ) 75 | parser.add_argument( 76 | "--hf-token", 77 | required=False, 78 | default="no_token", 79 | type=str, 80 | help="Provide a hf.co/settings/token for Pyannote.audio to diarise the audio clips", 81 | ) 82 | parser.add_argument( 83 | "--diarization_model", 84 | required=False, 85 | default="pyannote/speaker-diarization-3.1", 86 | type=str, 87 | help="Name of the pretrained model/checkpoint to perform diarization. (default: pyannote/speaker-diarization-3.1)", 88 | ) 89 | parser.add_argument( 90 | "--num-speakers", 91 | required=False, 92 | default=None, 93 | type=int, 94 | help="Specifies the exact number of speakers present in the audio file. 
Useful when the exact number of participants in the conversation is known. Must be at least 1. Cannot be used together with --min-speakers or --max-speakers. (default: None)", 95 | ) 96 | parser.add_argument( 97 | "--min-speakers", 98 | required=False, 99 | default=None, 100 | type=int, 101 | help="Sets the minimum number of speakers that the system should consider during diarization. Must be at least 1. Cannot be used together with --num-speakers. Must be less than or equal to --max-speakers if both are specified. (default: None)", 102 | ) 103 | parser.add_argument( 104 | "--max-speakers", 105 | required=False, 106 | default=None, 107 | type=int, 108 | help="Defines the maximum number of speakers that the system should consider in diarization. Must be at least 1. Cannot be used together with --num-speakers. Must be greater than or equal to --min-speakers if both are specified. (default: None)", 109 | ) 110 | 111 | def main(): 112 | args = parser.parse_args() 113 | 114 | if args.num_speakers is not None and (args.min_speakers is not None or args.max_speakers is not None): 115 | parser.error("--num-speakers cannot be used together with --min-speakers or --max-speakers.") 116 | 117 | if args.num_speakers is not None and args.num_speakers < 1: 118 | parser.error("--num-speakers must be at least 1.") 119 | 120 | if args.min_speakers is not None and args.min_speakers < 1: 121 | parser.error("--min-speakers must be at least 1.") 122 | 123 | if args.max_speakers is not None and args.max_speakers < 1: 124 | parser.error("--max-speakers must be at least 1.") 125 | 126 | if args.min_speakers is not None and args.max_speakers is not None and args.min_speakers > args.max_speakers: 127 | parser.error("--min-speakers cannot be greater than --max-speakers.") 128 | 129 | 130 | pipe = pipeline( 131 | "automatic-speech-recognition", 132 | model=args.model_name, 133 | torch_dtype=torch.float16, 134 | device="mps" if args.device_id == "mps" else f"cuda:{args.device_id}", 135 | model_kwargs={"attn_implementation": "flash_attention_2"} if args.flash else {"attn_implementation": "sdpa"}, 136 | ) 137 | 138 | if args.device_id == "mps": 139 | torch.mps.empty_cache() 140 | # elif not args.flash: 141 | # pipe.model = pipe.model.to_bettertransformer() 142 | 143 | ts = "word" if args.timestamp == "word" else True 144 | 145 | language = None if args.language == "None" else args.language 146 | 147 | generate_kwargs = {"task": args.task, "language": language} 148 | 149 | if args.model_name.split(".")[-1] == "en": 150 | generate_kwargs.pop("task") 151 | 152 | with Progress( 153 | TextColumn("🤗 [progress.description]{task.description}"), 154 | BarColumn(style="yellow1", pulse_style="white"), 155 | TimeElapsedColumn(), 156 | ) as progress: 157 | progress.add_task("[yellow]Transcribing...", total=None) 158 | 159 | outputs = pipe( 160 | args.file_name, 161 | chunk_length_s=30, 162 | batch_size=args.batch_size, 163 | generate_kwargs=generate_kwargs, 164 | return_timestamps=ts, 165 | ) 166 | 167 | if args.hf_token != "no_token": 168 | speakers_transcript = diarize(args, outputs) 169 | with open(args.transcript_path, "w", encoding="utf8") as fp: 170 | result = build_result(speakers_transcript, outputs) 171 | json.dump(result, fp, ensure_ascii=False) 172 | 173 | print( 174 | f"Voila!✨ Your file has been transcribed & speaker segmented, go check it out over here 👉 {args.transcript_path}" 175 | ) 176 | else: 177 | with open(args.transcript_path, "w", encoding="utf8") as fp: 178 | result = 
build_result([], outputs) 179 | json.dump(result, fp, ensure_ascii=False) 180 | 181 | print( 182 | f"Voila!✨ Your file has been transcribed, go check it out over here 👉 {args.transcript_path}" 183 | ) 184 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Insanely Fast Whisper 2 | 3 | An opinionated CLI to transcribe audio files w/ Whisper on-device! Powered by 🤗 *Transformers*, *Optimum* & *flash-attn* 4 | 5 | **TL;DR** - Transcribe **150** minutes (2.5 hours) of audio in less than **98** seconds - with [OpenAI's Whisper Large v3](https://huggingface.co/openai/whisper-large-v3). Blazingly fast transcription is now a reality!⚡️ 6 | 7 | ``` 8 | pipx install insanely-fast-whisper==0.0.15 --force 9 | ``` 10 | 11 |
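Once installed, transcription is a single command away (the audio file name below is just a placeholder; the full set of flags is covered in the CLI section further down):

```bash
insanely-fast-whisper --file-name audio.mp3
```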
14 | 15 | Not convinced? Here are some benchmarks we ran on an NVIDIA A100 - 80GB 👇 16 | 17 | | Optimisation type | Time to Transcribe (150 mins of Audio) | 18 | |------------------|------------------| 19 | | large-v3 (Transformers) (`fp32`) | ~31 (*31 min 1 sec*) | 20 | | large-v3 (Transformers) (`fp16` + `batching [24]` + `bettertransformer`) | ~5 (*5 min 2 sec*) | 21 | | **large-v3 (Transformers) (`fp16` + `batching [24]` + `Flash Attention 2`)** | **~2 (*1 min 38 sec*)** | 22 | | distil-large-v2 (Transformers) (`fp16` + `batching [24]` + `bettertransformer`) | ~3 (*3 min 16 sec*) | 23 | | **distil-large-v2 (Transformers) (`fp16` + `batching [24]` + `Flash Attention 2`)** | **~1 (*1 min 18 sec*)** | 24 | | large-v2 (Faster Whisper) (`fp16` + `beam_size [1]`) | ~9.23 (*9 min 23 sec*) | 25 | | large-v2 (Faster Whisper) (`8-bit` + `beam_size [1]`) | ~8 (*8 min 15 sec*) | 26 | 27 | P.S. We ran the benchmarks on a [Google Colab T4 GPU](/notebooks/) instance too! 28 | 29 | P.P.S. This project originally started as a way to showcase benchmarks for Transformers, but has since evolved into a lightweight CLI for people to use. This is purely community-driven. We add whatever the community seems to have a strong demand for! 30 | 31 | ## 🆕 Blazingly fast transcriptions via your terminal! ⚡️ 32 | 33 | We've added a CLI to enable fast transcriptions. Here's how you can use it: 34 | 35 | Install `insanely-fast-whisper` with `pipx` (`pip install pipx` or `brew install pipx`): 36 | 37 | ```bash 38 | pipx install insanely-fast-whisper 39 | ``` 40 | 41 | ⚠️ If you have Python 3.11.XX installed, `pipx` may parse the version incorrectly and install a very old version of `insanely-fast-whisper` without telling you (version `0.0.8`, which won't work anymore with the current `BetterTransformers`). In that case, you can install the latest version by passing `--ignore-requires-python` to `pip`: 42 | 43 | ```bash 44 | pipx install insanely-fast-whisper --force --pip-args="--ignore-requires-python" 45 | ``` 46 | 47 | If you're installing with `pip`, you can pass the argument directly: `pip install insanely-fast-whisper --ignore-requires-python`. 48 | 49 | 50 | Run inference from any path on your computer: 51 | 52 | ```bash 53 | insanely-fast-whisper --file-name <filename or URL> 54 | ``` 55 | *Note: if you are running on macOS, you also need to add the `--device-id mps` flag.* 56 | 57 | 🔥 You can run [Whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) w/ [Flash Attention 2](https://github.com/Dao-AILab/flash-attention) from this CLI too: 58 | 59 | ```bash 60 | insanely-fast-whisper --file-name <filename or URL> --flash True 61 | ``` 62 | 63 | 🌟 You can run [distil-whisper](https://huggingface.co/distil-whisper) directly from this CLI too: 64 | 65 | ```bash 66 | insanely-fast-whisper --model-name distil-whisper/large-v2 --file-name <filename or URL> 67 | ``` 68 | 69 | Don't want to install `insanely-fast-whisper`? Just use `pipx run`: 70 | 71 | ```bash 72 | pipx run insanely-fast-whisper --file-name <filename or URL> 73 | ``` 74 | 75 | > [!NOTE] 76 | > The CLI is highly opinionated and only works on NVIDIA GPUs & Mac. Make sure to check out the defaults and the list of options you can play around with to maximise your transcription throughput. Run `insanely-fast-whisper --help` or `pipx run insanely-fast-whisper --help` to get all the CLI arguments along with their defaults. 77 | 78 | 79 | ## CLI Options 80 | 81 | The `insanely-fast-whisper` repo provides all-round support for running Whisper in various settings. Note that, as of 26th Nov, `insanely-fast-whisper` works on both CUDA- and mps-enabled (Mac) devices.
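For example, a speaker-diarised run might look like the sketch below (the file name and token are placeholders; the pyannote checkpoints also require accepting their terms on hf.co):

```bash
insanely-fast-whisper --file-name meeting.wav \
  --hf-token hf_your_token_here \
  --min-speakers 2 --max-speakers 4 \
  --transcript-path meeting.json
```

The full list of options: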
82 | ``` 83 | -h, --help show this help message and exit 84 | --file-name FILE_NAME 85 | Path or URL to the audio file to be transcribed. 86 | --device-id DEVICE_ID 87 | Device ID for your GPU. Just pass the device number when using CUDA, or "mps" for Macs with Apple Silicon. (default: "0") 88 | --transcript-path TRANSCRIPT_PATH 89 | Path to save the transcription output. (default: output.json) 90 | --model-name MODEL_NAME 91 | Name of the pretrained model/checkpoint to perform ASR. (default: openai/whisper-large-v3) 92 | --task {transcribe,translate} 93 | Task to perform: transcribe or translate to another language. (default: transcribe) 94 | --language LANGUAGE 95 | Language of the input audio. (default: "None" (Whisper auto-detects the language)) 96 | --batch-size BATCH_SIZE 97 | Number of parallel batches you want to compute. Reduce if you face OOMs. (default: 24) 98 | --flash FLASH 99 | Use Flash Attention 2. Read the FAQs to see how to install FA2 correctly. (default: False) 100 | --timestamp {chunk,word} 101 | Whisper supports both chunked as well as word level timestamps. (default: chunk) 102 | --hf-token HF_TOKEN 103 | Provide a hf.co/settings/token for Pyannote.audio to diarise the audio clips 104 | --diarization_model DIARIZATION_MODEL 105 | Name of the pretrained model/checkpoint to perform diarization. (default: pyannote/speaker-diarization-3.1) 106 | --num-speakers NUM_SPEAKERS 107 | Specifies the exact number of speakers present in the audio file. Useful when the exact number of participants in the conversation is known. Must be at least 1. Cannot be used together with --min-speakers or --max-speakers. (default: None) 108 | --min-speakers MIN_SPEAKERS 109 | Sets the minimum number of speakers that the system should consider during diarization. Must be at least 1. Cannot be used together with --num-speakers. Must be less than or equal to --max-speakers if both are specified. (default: None) 110 | --max-speakers MAX_SPEAKERS 111 | Defines the maximum number of speakers that the system should consider in diarization. Must be at least 1. Cannot be used together with --num-speakers. Must be greater than or equal to --min-speakers if both are specified. (default: None) 112 | ``` 113 | 114 | ## Frequently Asked Questions 115 | 116 | **How to correctly install flash-attn to make it work with `insanely-fast-whisper`?** 117 | 118 | Make sure to install it via `pipx runpip insanely-fast-whisper install flash-attn --no-build-isolation`. Massive kudos to @li-yifei for helping with this. 119 | 120 | **How to solve an `AssertionError: Torch not compiled with CUDA enabled` error on Windows?** 121 | 122 | The root cause of this problem is still unknown; however, you can resolve it by manually installing torch in the virtualenv like `python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121`. Thanks to @pto2k for all the debugging help. 123 | 124 | **How to avoid Out-Of-Memory (OOM) exceptions on Mac?** 125 | 126 | The *mps* backend isn't as optimised as CUDA, so it is far more memory-hungry. Typically you can run with `--batch-size 4` without any issues (should use roughly 12GB GPU VRAM). Don't forget to set `--device-id mps`. 127 |
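Putting the two flags from that answer together, a typical Apple Silicon invocation might look like this (the file name is a placeholder):

```bash
insanely-fast-whisper --file-name audio.mp3 --device-id mps --batch-size 4
```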
128 | ## How to use Whisper without a CLI? 129 | 130 | 131 | All you need to run is the snippet below: 132 | 133 | ``` 134 | pip install --upgrade transformers optimum accelerate 135 | ``` 136 | 137 | ```python 138 | import torch 139 | from transformers import pipeline 140 | from transformers.utils import is_flash_attn_2_available 141 | 142 | pipe = pipeline( 143 | "automatic-speech-recognition", 144 | model="openai/whisper-large-v3", # select checkpoint from https://huggingface.co/openai/whisper-large-v3#model-details 145 | torch_dtype=torch.float16, 146 | device="cuda:0", # or mps for Mac devices 147 | model_kwargs={"attn_implementation": "flash_attention_2"} if is_flash_attn_2_available() else {"attn_implementation": "sdpa"}, 148 | ) 149 | 150 | outputs = pipe( 151 | "<file_name or URL>", 152 | chunk_length_s=30, 153 | batch_size=24, 154 | return_timestamps=True, 155 | ) 156 | 157 | outputs 158 | ``` 159 |
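If you then want to feed the result to the bundled `convert_output.py`, here is a minimal sketch (continuing from the `outputs` variable above and mirroring the JSON shape the CLI itself writes):

```python
import json

# outputs["text"] is the full transcript; outputs["chunks"] holds the
# timestamped segments that the SRT/VTT/TXT converters iterate over.
with open("output.json", "w", encoding="utf8") as fp:
    json.dump(
        {"speakers": [], "chunks": outputs["chunks"], "text": outputs["text"]},
        fp,
        ensure_ascii=False,
    )
```

After that, `python convert_output.py output.json -f srt` writes `output.srt` to the current directory.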
160 | 161 | ## Acknowledgements 162 | 163 | 1. [OpenAI Whisper](https://github.com/openai/whisper) team for open-sourcing such a brilliant checkpoint. 164 | 2. Hugging Face Transformers team, specifically [Arthur](https://github.com/ArthurZucker), [Patrick](https://github.com/patrickvonplaten), [Sanchit](https://github.com/sanchit-gandhi) & [Yoach](https://github.com/ylacombe) (alphabetical order) for continuing to maintain Whisper in Transformers. 165 | 3. Hugging Face [Optimum](https://github.com/huggingface/optimum) team for making the BetterTransformer API so easily accessible. 166 | 4. [Patrick Arminio](https://github.com/patrick91) for helping me tremendously to put together this CLI. 167 | 168 | ## Community showcase 169 | 170 | 1. @ochen1 created a brilliant MVP for a CLI here: https://github.com/ochen1/insanely-fast-whisper-cli (Try it out now!) 171 | 2. @arihanv created an app (Shush) using NextJS (Frontend) & Modal (Backend): https://github.com/arihanv/Shush (Check it outtt!) 172 | 3. @kadirnar created a Python package on top of Transformers with optimisations: https://github.com/kadirnar/whisper-plus (Go go go!!!) 173 | -------------------------------------------------------------------------------- /insanely_fast_whisper_colab.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [], 7 | "gpuType": "T4", 8 | "authorship_tag": "ABX9TyNO3mkZ+HMQrvkMHRtFpKvj", 9 | "include_colab_link": true 10 | }, 11 | "kernelspec": { 12 | "name": "python3", 13 | "display_name": "Python 3" 14 | }, 15 | "language_info": { 16 | "name": "python" 17 | }, 18 | "accelerator": "GPU" 19 | }, 20 | "cells": [ 21 | { 22 | "cell_type": "markdown", 23 | "metadata": { 24 | "id": "view-in-github", 25 | "colab_type": "text" 26 | }, 27 | "source": [ 28 | "\"Open" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "source": [ 34 | "# [Insanely Fast Whisper](https://github.com/Vaibhavs10/insanely-fast-whisper)\n", 35 | "\n", 36 | "By VB (https://twitter.com/reach_vb)\n", 37 | "\n", 38 | "P.S. Make sure you're on a GPU run-time 🤗" 39 | ], 40 | "metadata": { 41 | "id": "q0MBgZKbhdII" 42 | } 43 | }, 44 | { 45 | "cell_type": "code", 46 | "source": [ 47 | "!pip install -q pipx && apt install python3.10-venv" 48 | ], 49 | "metadata": { 50 | "colab": { 51 | "base_uri": "https://localhost:8080/" 52 | }, 53 | "id": "VF-qp-FWJmyD", 54 | "outputId": "10712868-be6e-4b82-b8c2-95e43c591173" 55 | }, 56 | "execution_count": null, 57 | "outputs": [ 58 | { 59 | "output_type": "stream", 60 | "name": "stdout", 61 | "text": [ 62 | "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/57.8 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.8/57.8 kB\u001b[0m \u001b[31m2.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 63 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.7/41.7 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 64 | "Reading package lists... Done\n", 65 | "Building dependency tree... Done\n", 66 | "Reading state information... 
Done\n", 67 | "The following additional packages will be installed:\n", 68 | " python3-pip-whl python3-setuptools-whl\n", 69 | "The following NEW packages will be installed:\n", 70 | " python3-pip-whl python3-setuptools-whl python3.10-venv\n", 71 | "0 upgraded, 3 newly installed, 0 to remove and 9 not upgraded.\n", 72 | "Need to get 2,473 kB of archives.\n", 73 | "After this operation, 2,884 kB of additional disk space will be used.\n", 74 | "Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/universe amd64 python3-pip-whl all 22.0.2+dfsg-1ubuntu0.4 [1,680 kB]\n", 75 | "Get:2 http://archive.ubuntu.com/ubuntu jammy-updates/universe amd64 python3-setuptools-whl all 59.6.0-1.2ubuntu0.22.04.1 [788 kB]\n", 76 | "Get:3 http://archive.ubuntu.com/ubuntu jammy-updates/universe amd64 python3.10-venv amd64 3.10.12-1~22.04.2 [5,724 B]\n", 77 | "Fetched 2,473 kB in 2s (1,635 kB/s)\n", 78 | "Selecting previously unselected package python3-pip-whl.\n", 79 | "(Reading database ... 120880 files and directories currently installed.)\n", 80 | "Preparing to unpack .../python3-pip-whl_22.0.2+dfsg-1ubuntu0.4_all.deb ...\n", 81 | "Unpacking python3-pip-whl (22.0.2+dfsg-1ubuntu0.4) ...\n", 82 | "Selecting previously unselected package python3-setuptools-whl.\n", 83 | "Preparing to unpack .../python3-setuptools-whl_59.6.0-1.2ubuntu0.22.04.1_all.deb ...\n", 84 | "Unpacking python3-setuptools-whl (59.6.0-1.2ubuntu0.22.04.1) ...\n", 85 | "Selecting previously unselected package python3.10-venv.\n", 86 | "Preparing to unpack .../python3.10-venv_3.10.12-1~22.04.2_amd64.deb ...\n", 87 | "Unpacking python3.10-venv (3.10.12-1~22.04.2) ...\n", 88 | "Setting up python3-setuptools-whl (59.6.0-1.2ubuntu0.22.04.1) ...\n", 89 | "Setting up python3-pip-whl (22.0.2+dfsg-1ubuntu0.4) ...\n", 90 | "Setting up python3.10-venv (3.10.12-1~22.04.2) ...\n" 91 | ] 92 | } 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "source": [ 98 | "!pipx run insanely-fast-whisper --file-name https://huggingface.co/datasets/reach-vb/random-audios/resolve/main/ted_60.wav" 99 | ], 100 | "metadata": { 101 | "colab": { 102 | "base_uri": "https://localhost:8080/" 103 | }, 104 | "id": "i_H9Dm89Jj0-", 105 | "outputId": "f737b9fd-d625-4ccd-d8a1-1895cdf1b22f" 106 | }, 107 | "execution_count": null, 108 | "outputs": [ 109 | { 110 | "output_type": "stream", 111 | "name": "stdout", 112 | "text": [ 113 | "config.json: 100% 1.25k/1.25k [00:00<00:00, 6.33MB/s]\n", 114 | "model.safetensors: 100% 3.09G/3.09G [00:12<00:00, 242MB/s]\n", 115 | "generation_config.json: 100% 3.87k/3.87k [00:00<00:00, 17.3MB/s]\n", 116 | "tokenizer_config.json: 100% 283k/283k [00:00<00:00, 2.15MB/s]\n", 117 | "vocab.json: 100% 1.04M/1.04M [00:00<00:00, 5.28MB/s]\n", 118 | "tokenizer.json: 100% 2.48M/2.48M [00:00<00:00, 9.49MB/s]\n", 119 | "merges.txt: 100% 494k/494k [00:00<00:00, 3.74MB/s]\n", 120 | "normalizer.json: 100% 52.7k/52.7k [00:00<00:00, 97.3MB/s]\n", 121 | "added_tokens.json: 100% 34.6k/34.6k [00:00<00:00, 110MB/s]\n", 122 | "special_tokens_map.json: 100% 2.07k/2.07k [00:00<00:00, 8.95MB/s]\n", 123 | "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n", 124 | "preprocessor_config.json: 100% 340/340 [00:00<00:00, 1.98MB/s]\n", 125 | "The BetterTransformer implementation does not support padding during training, as the fused kernels do not support attention masks. Beware that passing padded batched data during training may result in unexpected outputs. 
Please refer to https://huggingface.co/docs/optimum/bettertransformer/overview for more details.\n", 126 | "\u001b[2K🤗 \u001b[33mTranscribing...\u001b[0m \u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[93m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m\u001b[37m━\u001b[0m \u001b[33m0:00:09\u001b[0m\n", 127 | "\u001b[?25hVoila! Your file has been transcribed go check it out over here! output.json\n" 128 | ] 129 | } 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "source": [ 135 | "!head output.json" 136 | ], 137 | "metadata": { 138 | "colab": { 139 | "base_uri": "https://localhost:8080/" 140 | }, 141 | "id": "NDFrydpsvu57", 142 | "outputId": "de3d9635-5cf1-46ca-d401-e6c78c5659dc" 143 | }, 144 | "execution_count": null, 145 | "outputs": [ 146 | { 147 | "output_type": "stream", 148 | "name": "stdout", 149 | "text": [ 150 | "{\"text\": \" So in college, I was a government major, which means I had to write a lot of papers. Now, when a normal student writes a paper, they might spread the work out a little like this. So, you know, you get started maybe a little slowly, but you get enough done in the first week that with some heavier days later on, everything gets done and things stay civil. And I would want to do that, like that. That would be the plan. I would have it all ready to go, but then actually the paper would come along, and then I would kind of do this. And that would happen to every single paper. But then came my 90-page senior thesis, a paper you're supposed to spend a year on. I knew for a paper like that, my normal workflow was not an option. It was way too big a project. So I planned things out, and I decided it kind of had to go something like this. This is how the year would go. 
So I'd start off light,\", \"chunks\": [{\"timestamp\": [0.0, 4.48], \"text\": \" So in college, I was a government major,\"}, {\"timestamp\": [4.88, 6.62], \"text\": \" which means I had to write a lot of papers.\"}, {\"timestamp\": [7.42, 8.86], \"text\": \" Now, when a normal student writes a paper,\"}, {\"timestamp\": [8.94, 10.6], \"text\": \" they might spread the work out a little like this.\"}, {\"timestamp\": [11.74, 16.3], \"text\": \" So, you know, you get started maybe a little slowly,\"}, {\"timestamp\": [16.36, 17.86], \"text\": \" but you get enough done in the first week\"}, {\"timestamp\": [17.86, 19.76], \"text\": \" that with some heavier days later on,\"}, {\"timestamp\": [20.28, 21.98], \"text\": \" everything gets done and things stay civil.\"}, {\"timestamp\": [23.64, 25.8], \"text\": \" And I would want to do that, like that.\"}, {\"timestamp\": [26.12, 26.94], \"text\": \" That would be the plan.\"}, {\"timestamp\": [27.22, 29.84], \"text\": \" I would have it all ready to go,\"}, {\"timestamp\": [29.96, 32.42], \"text\": \" but then actually the paper would come along,\"}, {\"timestamp\": [32.46, 33.6], \"text\": \" and then I would kind of do this.\"}, {\"timestamp\": [36.48, 38.44], \"text\": \" And that would happen to every single paper.\"}, {\"timestamp\": [39.32, 43.04], \"text\": \" But then came my 90-page senior thesis,\"}, {\"timestamp\": [43.54, 46.0], \"text\": \" a paper you're supposed to spend a year on.\"}, {\"timestamp\": [46.0, 50.0], \"text\": \" I knew for a paper like that, my normal workflow was not an option.\"}, {\"timestamp\": [50.0, 52.0], \"text\": \" It was way too big a project.\"}, {\"timestamp\": [52.0, 56.0], \"text\": \" So I planned things out, and I decided it kind of had to go something like this.\"}, {\"timestamp\": [56.0, 58.0], \"text\": \" This is how the year would go.\"}, {\"timestamp\": [58.0, 60.0], \"text\": \" So I'd start off light,\"}]}" 151 | ] 152 | } 153 | ] 154 | } 155 | ] 156 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------