├── assets │   └── screenshot.png ├── requirements.txt ├── init_env.bat ├── LICENSE ├── .gitignore ├── README.md ├── hebrew_wispher.ipynb └── app.py /assets/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShmuelRonen/hebrew_whisper/HEAD/assets/screenshot.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | gradio 2 | numpy 3 | transformers 4 | librosa 5 | soundfile 6 | tqdm 7 | googletrans==4.0.0-rc1 8 | openai-whisper 9 | deep_translator 10 | nltk 11 | ffmpeg-python 12 | requests 13 | anthropic 14 | httpx -------------------------------------------------------------------------------- /init_env.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | rem Check if virtual environment exists 4 | if not exist "venv" ( 5 | echo Creating virtual environment... 6 | python -m venv venv 7 | echo Virtual environment created. 8 | ) 9 | 10 | rem Activate the virtual environment 11 | call venv\Scripts\activate 12 | 13 | rem Install requirements 14 | echo Installing requirements... 15 | pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 16 | pip install -r requirements.txt 17 | echo Requirements installed. 18 | 19 | rem Run the Python script 20 | python app.py 21 | 22 | pause -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Shmuel Ronen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

<h1 align="center">GUI for Unlimited Transcription and Translation with Whisper Hebrew</h1>

2 | 3 | A powerful transcription and translation tool that leverages the ivrit-ai/whisper-v2-d3-e3 and Whisper large-v2 models for high-quality, unlimited-length audio processing, with enhanced paragraph splitting and temporary-file management for a clean workspace. 4 | 5 | ![V2](https://github.com/ShmuelRonen/hebrew_whisper/assets/80190186/1ef67524-a063-472d-a752-0a61a495d824) 6 | 7 | 8 | ## Features 9 | 10 | - **Unlimited Length Transcription**: Transcribe audio files of any length without limitations. 11 | - **Support for Multiple Languages**: Choose from Hebrew, English, Spanish, French, German, Portuguese, and Arabic for translation. 12 | - **General and Hebrew Models**: Use either the general model (`large-v2`) for accurate timestamp generation or the specialized Hebrew model (`ivrit-ai/whisper-v2-d3-e3`) for improved Hebrew transcription. 13 | - **Proportional Timings**: Segment timings from the general model are rescaled proportionally so subtitles align with the actual audio length (a short sketch of the arithmetic appears after the installation steps). 14 | - **SRT File Generation**: Option to generate SRT files with synchronized subtitles. 15 | - **HTML Paragraph Formatting**: Proper formatting of transcriptions and translations into paragraphs for better readability. 16 | - **GPU Support**: Leverage a GPU for faster transcription and translation if one is available. 17 | 18 | ## Usage 19 | 20 | ### Transcription and Translation 21 | 22 | 1. **Upload Audio File**: Click the "Upload Audio File" button to select your audio file. 23 | 2. **Select Target Language**: Choose the desired language for translation from the dropdown menu. The default is Hebrew. 24 | 3. **Select Model Choice**: Choose between the "General Model" and the "Hebrew Model". The default is the "Hebrew Model". 25 | 4. **Generate SRT File**: Check the box if you want to generate an SRT file with subtitles. 26 | 5. **Submit**: Click the "Submit" button to start the transcription and translation process. 27 | 28 | ### Example 29 | 30 | 1. Upload an audio file (e.g., `example_audio.wav`). 31 | 2. Select "English" as the target language. 32 | 3. Select "General Model" to ensure accurate timing. 33 | 4. Optionally, check the "Generate Hebrew SRT File" box. 34 | 5. Click "Submit" to process the audio. 35 | 36 | The output will display the transcription and translation in the chosen language, formatted into paragraphs for easy readability. If the SRT option is selected, an SRT file is also generated and made available for download (a sample entry is shown after the installation steps). 37 | 38 | ## Installation steps 39 | 40 | It's recommended to install in a virtual environment to manage dependencies efficiently. 41 | 42 | Clone the repository: 43 | 44 | ``` 45 | git clone https://github.com/ShmuelRonen/hebrew_whisper.git 46 | cd hebrew_whisper 47 | ``` 48 | 49 | #### NEW - One-click installer and launcher: 50 | 51 | ``` 52 | Double click on: 53 | 54 | init_env.bat 55 | ``` 56 | 57 | ### Manual installation: 58 | 59 | Create and activate a virtual environment, then install the requirements: 60 | ``` 61 | 62 | python -m venv venv 63 | 64 | venv\Scripts\activate 65 | 66 | pip install -r requirements.txt 67 | 68 | # For PyTorch with CUDA 11.8 support, use the following command: 69 | pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 70 | ``` 71 | 72 | 73 | After the installation, run the app by navigating to the directory containing `app.py` and executing: 74 | ``` 75 | 76 | python app.py 77 | ``` 78 | 79 | _____________ 80 | 81 |
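The proportional-timing step mentioned in the features list amounts to rescaling each general-model segment's duration so that, together, the segments span the measured audio length exactly. Below is a minimal standalone sketch of that arithmetic; the function name and the sample segment values are illustrative, while the same loop lives in `transcribe_and_translate` in `app.py`:

```python
def proportional_timings(segments, audio_length):
    """Rescale segment durations so they exactly cover audio_length seconds.

    segments: dicts with 'start'/'end' in seconds, as Whisper returns them.
    audio_length: the true clip length in seconds.
    """
    total = sum(seg['end'] - seg['start'] for seg in segments)
    timings, cursor = [], 0.0
    for seg in segments:
        # Each segment keeps its share of the total, stretched to the real length.
        share = (seg['end'] - seg['start']) / total * audio_length
        timings.append((cursor, cursor + share))
        cursor += share
    return timings

# Two 10-second segments stretched over a 30-second file:
# -> [(0.0, 15.0), (15.0, 30.0)]
print(proportional_timings([{'start': 0, 'end': 10},
                            {'start': 10, 'end': 20}], 30.0))
```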
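For reference, each entry in the generated `output.srt` follows the standard SubRip shape produced by `format_srt_entry`: a counter, a time range, and up to two subtitle lines, separated from the next entry by a blank line. The text and timestamps below are placeholders, not output from a real run:

```
1
0:00:00,000 --> 0:00:04,000
First subtitle line
Second subtitle line

2
0:00:04,000 --> 0:00:09,000
Next subtitle text
```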
82 | 83 | <h3 align="center">Audio Transcription and Translation<br>Powered by the Hebrew whisper-v2-d3-e3 and Large-v2 Whisper models</h3> 84 | 85 | <div align="center"> 86 | New Hebrew Whisper Model 87 | </div> 88 | 
89 | 90 | ## Acknowledgement 91 | Special thanks to Kinneret Wm, Yam Peleg, Yair Lifshitz, and Yanir Marmor from ivrit-ai for providing the new, improved Hebrew Whisper model, 92 | making high-quality transcription and translation accessible to developers. 93 | 94 | Special thanks to the creators of the Whisper Large-v2 model for their contribution to the development of high-quality transcription and translation technologies. 95 | 96 | 97 | ## Disclaimer 98 | This project is intended for educational and development purposes. It leverages publicly available models and APIs. Please comply with the terms of use of the underlying models and frameworks. 99 | -------------------------------------------------------------------------------- /hebrew_wispher.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "!pip install gradio numpy torch transformers librosa soundfile tqdm googletrans==4.0.0-rc1 openai-whisper deep_translator nltk ffmpeg-python requests anthropic httpx" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "from googletrans import Translator\n", 19 | "import gradio as gr\n", 20 | "import librosa\n", 21 | "import numpy as np\n", 22 | "import os\n", 23 | "import re\n", 24 | "import shutil\n", 25 | "import soundfile as sf\n", 26 | "import tempfile\n", 27 | "import torch\n", 28 | "import time\n", 29 | "from transformers import WhisperProcessor, WhisperForConditionalGeneration\n", 30 | "import whisper\n", 31 | "import datetime\n", 32 | "\n", 33 | "SAMPLING_RATE = 16000\n", 34 | "model_name = 'ivrit-ai/whisper-large-v2-tuned'\n", 35 | "device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n", 36 | "model = WhisperForConditionalGeneration.from_pretrained(model_name).to(device)\n", 37 | "processor = WhisperProcessor.from_pretrained(model_name)\n", 38 | "translator = Translator()\n", 39 | "\n", 40 | "def transcribe(audio_numpy, sampling_rate=16000):\n", 41 | " if audio_numpy.ndim > 1:\n", 42 | " audio_numpy = audio_numpy.mean(axis=1)\n", 43 | "\n", 44 | " temp_dir = tempfile.mkdtemp()\n", 45 | " chunks = np.array_split(audio_numpy, indices_or_sections=int(np.ceil(len(audio_numpy) / (sampling_rate * 30)))) # 30s chunks\n", 46 | " transcribed_text = \"\"\n", 47 | "\n", 48 | " for i, chunk in enumerate(chunks):\n", 49 | " chunk_path = os.path.join(temp_dir, f\"chunk_{i}.wav\")\n", 50 | " sf.write(chunk_path, chunk, samplerate=sampling_rate)\n", 51 | " input_features = processor(chunk, sampling_rate=sampling_rate, return_tensors=\"pt\").input_features.to(device)\n", 52 | " predicted_ids = model.generate(input_features, num_beams=5)\n", 53 | " chunk_text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]\n", 54 | " transcribed_text += chunk_text + \" \"\n", 55 | "\n", 56 | " shutil.rmtree(temp_dir)\n", 57 | " return transcribed_text\n", 58 | "\n", 59 | "def translate_text(text, target_lang):\n", 60 | " translations = {'Hebrew': 'he', 'English': 'en', 'Spanish': 'es', 'French': 'fr'}\n", 61 | " translated_text = translator.translate(text, dest=translations[target_lang]).text\n", 62 | " return translated_text\n", 63 | "\n", 64 | "def split_into_paragraphs(text, min_words_per_paragraph=20):\n", 65 | " sentences = re.split(r'(?<=[.!?])\\s+', text.strip())\n", 66 | " paragraphs = []\n", 67 | " 
current_paragraph = []\n", 68 | "\n", 69 | " for sentence in sentences:\n", 70 | " words_in_sentence = sentence.split()\n", 71 | " current_paragraph.extend(words_in_sentence)\n", 72 | " if len(current_paragraph) >= min_words_per_paragraph:\n", 73 | " paragraphs.append(' '.join(current_paragraph))\n", 74 | " current_paragraph = []\n", 75 | "\n", 76 | " if current_paragraph:\n", 77 | " paragraphs.append(' '.join(current_paragraph))\n", 78 | "\n", 79 | " return '\\n\\n'.join(paragraphs)\n", 80 | "\n", 81 | "from pydub import AudioSegment\n", 82 | "\n", 83 | "def generate_srt_content(audio_file_path, target_language='Hebrew', max_line_length=50):\n", 84 | " print(\"Starting transcription and translation process...\")\n", 85 | "\n", 86 | " audio = AudioSegment.from_file(audio_file_path)\n", 87 | " audio_numpy = np.array(audio.get_array_of_samples(), dtype=np.float32) / 32768.0\n", 88 | " audio_numpy = librosa.resample(audio_numpy, orig_sr=audio.frame_rate, target_sr=16000)\n", 89 | "\n", 90 | " temp_file_name = None\n", 91 | " try:\n", 92 | " with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False) as tmpfile:\n", 93 | " temp_file_name = tmpfile.name\n", 94 | " sf.write(tmpfile.name, audio_numpy, 16000)\n", 95 | "\n", 96 | " transcription_result = whisper.load_model(\"large\").transcribe(audio=temp_file_name)\n", 97 | "\n", 98 | " srt_content = \"\"\n", 99 | " for segment in transcription_result['segments']:\n", 100 | " start_time = str(datetime.timedelta(seconds=int(segment['start']))) + ',000'\n", 101 | " end_time = str(datetime.timedelta(seconds=int(segment['end']))) + ',000'\n", 102 | " text = segment['text']\n", 103 | " segment_id = segment['id'] + 1\n", 104 | "\n", 105 | " lines = []\n", 106 | " while len(text) > max_line_length:\n", 107 | " split_index = text.rfind(' ', 0, max_line_length)\n", 108 | " if split_index == -1:\n", 109 | " split_index = max_line_length\n", 110 | " lines.append(text[:split_index].strip())\n", 111 | " text = text[split_index:].strip()\n", 112 | " lines.append(text)\n", 113 | "\n", 114 | " srt_entry = f\"{segment_id}\\n{start_time} --> {end_time}\\n\"\n", 115 | " \n", 116 | " translated_lines = []\n", 117 | " for line in lines:\n", 118 | " for attempt in range(3): # Retry translation up to 3 times\n", 119 | " try:\n", 120 | " translated_line = translator.translate(line, dest='he').text\n", 121 | " translated_lines.append(translated_line)\n", 122 | " break\n", 123 | " except Exception as e:\n", 124 | " print(f\"Translation failed (attempt {attempt+1}): {str(e)}\")\n", 125 | " if attempt < 2:\n", 126 | " time.sleep(1) # Delay before retrying\n", 127 | " else:\n", 128 | " translated_lines.append(line) # Use original English line if translation fails\n", 129 | "\n", 130 | " srt_entry += \"\\n\".join(translated_lines) + \"\\n\\n\"\n", 131 | " srt_content += srt_entry\n", 132 | "\n", 133 | " os.makedirs(\"output\", exist_ok=True)\n", 134 | " srt_file_path = os.path.join(\"output\", \"output.srt\")\n", 135 | " with open(srt_file_path, \"w\", encoding=\"utf-8\") as srt_file:\n", 136 | " srt_file.write(srt_content)\n", 137 | "\n", 138 | " return srt_content\n", 139 | "\n", 140 | " finally:\n", 141 | " if temp_file_name:\n", 142 | " os.remove(temp_file_name)\n", 143 | "\n", 144 | "def transcribe_and_translate(audio_file, target_language, generate_srt_checkbox):\n", 145 | " if not target_language:\n", 146 | " return \"Please choose a Target Language\"\n", 147 | "\n", 148 | " translations = {'Hebrew': 'he', 'English': 'en', 'Spanish': 'es', 'French': 'fr'}\n", 149 | 
"\n", 150 | " audio = AudioSegment.from_file(audio_file)\n", 151 | " audio_numpy = np.array(audio.get_array_of_samples(), dtype=np.float32) / 32768.0\n", 152 | " audio_numpy = librosa.resample(audio_numpy, orig_sr=audio.frame_rate, target_sr=16000)\n", 153 | "\n", 154 | " transcribed_text = transcribe(audio_numpy)\n", 155 | "\n", 156 | " if generate_srt_checkbox:\n", 157 | " srt_result = generate_srt_content(audio_file, target_language)\n", 158 | " return srt_result\n", 159 | " else:\n", 160 | " if isinstance(target_language, list):\n", 161 | " target_language = target_language[0]\n", 162 | "\n", 163 | " if translations.get(target_language) != 'he':\n", 164 | " translated_text = translate_text(transcribed_text, target_language)\n", 165 | " final_text = split_into_paragraphs(translated_text)\n", 166 | " else:\n", 167 | " final_text = split_into_paragraphs(transcribed_text)\n", 168 | "\n", 169 | " os.makedirs(\"output\", exist_ok=True)\n", 170 | " result_file_path = os.path.join(\"output\", \"result.txt\")\n", 171 | " with open(result_file_path, \"w\", encoding=\"utf-8\") as result_file:\n", 172 | " result_file.write(final_text)\n", 173 | "\n", 174 | " return final_text\n", 175 | "\n", 176 | "title = \"Unlimited Length Transcription and Translation\"\n", 177 | "description = \"With ivrit-ai/whisper-large-v2-tuned | GUI by Shmuel Ronen\"\n", 178 | "\n", 179 | "interface = gr.Interface(\n", 180 | " fn=transcribe_and_translate,\n", 181 | " inputs=[\n", 182 | " gr.Audio(type=\"filepath\", label=\"Upload Audio File\"),\n", 183 | " gr.Dropdown(choices=['Hebrew', 'English', 'Spanish', 'French'], label=\"Target Language\"),\n", 184 | " gr.Checkbox(label=\"Generate Hebrew SRT File\")\n", 185 | " ],\n", 186 | " outputs=gr.Textbox(label=\"Transcription / Translation / SRT Result\"),\n", 187 | " title=title,\n", 188 | " description=description\n", 189 | ")\n", 190 | "\n", 191 | "if __name__ == \"__main__\":\n", 192 | " interface.launch()" 193 | ] 194 | } 195 | ], 196 | "metadata": { 197 | "kernelspec": { 198 | "display_name": "Python 3", 199 | "name": "python3" 200 | }, 201 | "language_info": { 202 | "name": "python" 203 | } 204 | }, 205 | "nbformat": 4, 206 | "nbformat_minor": 0 207 | } -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from googletrans import Translator 2 | import gradio as gr 3 | import librosa 4 | import numpy as np 5 | import os 6 | import re 7 | import shutil 8 | import soundfile as sf 9 | import tempfile 10 | import torch 11 | import time 12 | import whisper 13 | import datetime 14 | from pydub import AudioSegment 15 | from transformers import WhisperProcessor, WhisperForConditionalGeneration 16 | 17 | SAMPLING_RATE = 16000 18 | general_model_name = 'large-v2' 19 | hebrew_model_name = 'ivrit-ai/whisper-v2-d3-e3' 20 | device = "cuda:0" if torch.cuda.is_available() else "cpu" 21 | general_model = whisper.load_model(general_model_name, device=device) 22 | hebrew_processor = WhisperProcessor.from_pretrained(hebrew_model_name) 23 | hebrew_model = WhisperForConditionalGeneration.from_pretrained(hebrew_model_name).to(device) 24 | translator = Translator() 25 | 26 | def is_hebrew(text): 27 | return bool(re.search(r'[\u0590-\u05FF]', text)) 28 | 29 | def is_arabic(text): 30 | return bool(re.search(r'[\u0600-\u06FF]', text)) 31 | 32 | def format_text(text): 33 | if is_hebrew(text): 34 | return f'
<div dir="rtl">{text}</div>' 35 | elif is_arabic(text): 36 | return f'<div dir="rtl">{text}</div>' 37 | else: return f'<div dir="ltr">{text}</div>
' 39 | 40 | def get_audio_length(audio_file_path): 41 | audio = AudioSegment.from_file(audio_file_path) 42 | return len(audio) / 1000.0 43 | 44 | def split_lines(text, max_line_length=40): 45 | words = text.split() 46 | lines = [] 47 | current_line = [] 48 | 49 | for word in words: 50 | if len(' '.join(current_line + [word])) <= max_line_length: 51 | current_line.append(word) 52 | else: 53 | lines.append(' '.join(current_line)) 54 | current_line = [word] 55 | 56 | if current_line: 57 | lines.append(' '.join(current_line)) 58 | 59 | return lines 60 | 61 | def format_srt_entry(segment_id, start_time, end_time, lines): 62 | srt_entry = f"{segment_id}\n{start_time} --> {end_time}\n" + "\n".join(lines) + "\n\n" 63 | return srt_entry 64 | 65 | def transcribe_with_model(audio_file_path, model_choice): 66 | if model_choice == 'General Model': 67 | return transcribe_with_general_model(audio_file_path) 68 | else: 69 | return transcribe_with_hebrew_model(audio_file_path) 70 | 71 | def transcribe_with_general_model(audio_file_path): 72 | audio = AudioSegment.from_file(audio_file_path) 73 | audio_numpy = np.array(audio.get_array_of_samples(), dtype=np.float32) / 32768.0 74 | audio_numpy = librosa.resample(audio_numpy, orig_sr=audio.frame_rate, target_sr=16000) 75 | 76 | temp_file_name = None 77 | try: 78 | with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile: 79 | temp_file_name = tmpfile.name 80 | sf.write(temp_file_name, audio_numpy, 16000) 81 | 82 | transcription_result = general_model.transcribe(temp_file_name, language="he") 83 | return transcription_result 84 | finally: 85 | if temp_file_name: 86 | os.remove(temp_file_name) 87 | 88 | def transcribe_with_hebrew_model(audio_file_path): 89 | audio, sr = librosa.load(audio_file_path, sr=SAMPLING_RATE) 90 | audio_numpy = np.array(audio) 91 | temp_dir = tempfile.mkdtemp() 92 | transcribed_segments = [] 93 | 94 | for i in range(0, len(audio_numpy), SAMPLING_RATE * 30): 95 | chunk = audio_numpy[i:i + SAMPLING_RATE * 30] 96 | chunk_path = os.path.join(temp_dir, f"chunk_{i // (SAMPLING_RATE * 30)}.wav") 97 | sf.write(chunk_path, chunk, samplerate=SAMPLING_RATE) 98 | input_features = hebrew_processor(chunk, sampling_rate=SAMPLING_RATE, return_tensors="pt").input_features.to(device) 99 | predicted_ids = hebrew_model.generate(input_features, num_beams=5) 100 | chunk_text = hebrew_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0] 101 | transcribed_segments.append({ 102 | "start": i / SAMPLING_RATE, 103 | "end": min((i + SAMPLING_RATE * 30) / SAMPLING_RATE, len(audio_numpy) / SAMPLING_RATE), 104 | "text": chunk_text, 105 | "id": i // (SAMPLING_RATE * 30) 106 | }) 107 | 108 | shutil.rmtree(temp_dir) 109 | return {"segments": transcribed_segments} 110 | 111 | def translate_text(text, target_lang): 112 | translations = {'Hebrew': 'he', 'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Portuguese': 'pt', 'Arabic': 'ar'} 113 | translated_text = translator.translate(text, dest=translations[target_lang]).text 114 | return translated_text 115 | 116 | def transcribe(audio_numpy, sampling_rate=16000): 117 | if audio_numpy.ndim > 1: 118 | audio_numpy = audio_numpy.mean(axis=1) 119 | 120 | temp_dir = tempfile.mkdtemp() 121 | chunks = np.array_split(audio_numpy, indices_or_sections=int(np.ceil(len(audio_numpy) / (sampling_rate * 10)))) 122 | transcribed_text = "" 123 | 124 | for i, chunk in enumerate(chunks): 125 | chunk_path = os.path.join(temp_dir, f"chunk_{i}.wav") 126 | sf.write(chunk_path, chunk, 
samplerate=sampling_rate) 127 | input_features = hebrew_processor(chunk, sampling_rate=sampling_rate, return_tensors="pt").input_features.to(device) 128 | predicted_ids = hebrew_model.generate(input_features, num_beams=5) 129 | chunk_text = hebrew_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0] 130 | transcribed_text += chunk_text + " " 131 | 132 | shutil.rmtree(temp_dir) 133 | return transcribed_text 134 | 135 | def transcribe_and_translate(audio_file, target_language, model_choice, generate_srt_checkbox): 136 | if not target_language: 137 | return format_text("Please choose a Target Language") 138 | 139 | audio_length = get_audio_length(audio_file) 140 | 141 | if torch.cuda.is_available(): 142 | print("GPU is available") 143 | gpu_info = torch.cuda.get_device_properties(0) 144 | print(f"GPU name: {gpu_info.name}") 145 | print(f"GPU memory usage before transcription:") 146 | print(torch.cuda.memory_summary(device=None, abbreviated=False)) 147 | 148 | # Always transcribe with the general model to get accurate timestamps 149 | general_transcription_result = transcribe_with_general_model(audio_file) 150 | general_segments = general_transcription_result['segments'] 151 | total_general_duration = sum([segment['end'] - segment['start'] for segment in general_segments]) 152 | 153 | # Accumulator for proportional timings 154 | proportional_timings = [] 155 | cumulative_duration = 0.0 156 | for general_segment in general_segments: 157 | general_duration = general_segment['end'] - general_segment['start'] 158 | proportional_duration = (general_duration / total_general_duration) * audio_length 159 | proportional_timings.append((cumulative_duration, cumulative_duration + proportional_duration)) 160 | cumulative_duration += proportional_duration 161 | 162 | if model_choice == 'General Model': 163 | return process_general_model(general_segments, target_language, generate_srt_checkbox, audio_length, proportional_timings) 164 | else: 165 | return process_hebrew_model(audio_file, target_language, generate_srt_checkbox, audio_length, proportional_timings) 166 | 167 | 168 | def process_general_model(general_segments, target_language, generate_srt_checkbox, audio_length, proportional_timings): 169 | transcribed_text = ' '.join([segment['text'] for segment in general_segments]) 170 | 171 | if generate_srt_checkbox: 172 | srt_content = "" 173 | segment_id = 1 174 | max_line_length = 40 175 | 176 | for i, (start_time_seconds, end_time_seconds) in enumerate(proportional_timings): 177 | if i < len(general_segments): 178 | text = general_segments[i]['text'] 179 | else: 180 | text = "" 181 | 182 | lines = split_lines(text, max_line_length=max_line_length) 183 | while lines: 184 | current_lines = lines[:2] 185 | lines = lines[2:] 186 | 187 | start_time = str(datetime.timedelta(seconds=start_time_seconds)).split(".")[0] + ',000' 188 | end_time = str(datetime.timedelta(seconds=end_time_seconds)).split(".")[0] + ',000' 189 | translated_lines = [translate_text(line, target_language) for line in current_lines] 190 | 191 | srt_entry = format_srt_entry(segment_id, start_time, end_time, translated_lines) 192 | srt_content += srt_entry 193 | segment_id += 1 194 | 195 | os.makedirs("output", exist_ok=True) 196 | srt_file_path = os.path.join("output", "output.srt") 197 | with open(srt_file_path, "w", encoding="utf-8") as srt_file: 198 | srt_file.write(srt_content) 199 | 200 | srt_html_content = "" 201 | for line in srt_content.split('\n'): 202 | srt_html_content += f"
<div>{line}</div>
" 203 | 204 | return f"Audio Length: {audio_length} seconds\n\n" + format_text(srt_html_content) 205 | else: 206 | paragraphs = split_text_into_paragraphs_by_sentence(transcribed_text) 207 | translated_paragraphs = [translate_text(paragraph, target_language) for paragraph in paragraphs] 208 | final_text = '\n\n'.join(translated_paragraphs) 209 | html_paragraphs = ''.join([f'

<p>{paragraph}</p>
' for paragraph in translated_paragraphs]) 210 | os.makedirs("output", exist_ok=True) 211 | result_file_path = os.path.join("output", "result.txt") 212 | with open(result_file_path, "w", encoding="utf-8") as result_file: 213 | result_file.write(final_text) 214 | return f"Audio Length: {audio_length} seconds\n\n" + format_text(html_paragraphs) 215 | 216 | def split_text_into_paragraphs_by_sentence(text, max_sentences_per_paragraph=5): 217 | # Split the text into sentences using a regular expression 218 | sentences = re.split(r'(?<=[.!?])\s+', text.strip()) 219 | paragraphs = [] 220 | current_paragraph = [] 221 | 222 | for sentence in sentences: 223 | current_paragraph.append(sentence) 224 | if len(current_paragraph) >= max_sentences_per_paragraph: 225 | paragraphs.append(' '.join(current_paragraph)) 226 | current_paragraph = [] 227 | 228 | if current_paragraph: 229 | paragraphs.append(' '.join(current_paragraph)) 230 | 231 | return paragraphs 232 | 233 | def process_hebrew_model(audio_file, target_language, generate_srt_checkbox, audio_length, proportional_timings): 234 | # Transcribe with the Hebrew model 235 | hebrew_transcription_result = transcribe_with_hebrew_model(audio_file) 236 | hebrew_segments = hebrew_transcription_result['segments'] 237 | transcribed_text = ' '.join([segment['text'] for segment in hebrew_segments]) # join chunk texts with spaces so words at chunk boundaries don't run together 238 | 239 | if generate_srt_checkbox: 240 | # Split the Hebrew transcription into parts corresponding to the general model timings 241 | hebrew_text_parts = split_text_into_parts(transcribed_text, len(proportional_timings)) 242 | srt_content = "" 243 | segment_id = 1 244 | max_line_length = 40 245 | 246 | for i, (start_time_seconds, end_time_seconds) in enumerate(proportional_timings): 247 | if i < len(hebrew_text_parts): 248 | text = hebrew_text_parts[i] 249 | else: 250 | text = "" 251 | 252 | lines = split_lines(text, max_line_length=max_line_length) 253 | while lines: 254 | current_lines = lines[:2] 255 | lines = lines[2:] 256 | 257 | start_time = str(datetime.timedelta(seconds=start_time_seconds)).split(".")[0] + ',000' 258 | end_time = str(datetime.timedelta(seconds=end_time_seconds)).split(".")[0] + ',000' 259 | translated_lines = [translate_text(line, target_language) for line in current_lines] 260 | 261 | srt_entry = format_srt_entry(segment_id, start_time, end_time, translated_lines) 262 | srt_content += srt_entry 263 | segment_id += 1 264 | 265 | os.makedirs("output", exist_ok=True) 266 | srt_file_path = os.path.join("output", "output.srt") 267 | with open(srt_file_path, "w", encoding="utf-8") as srt_file: 268 | srt_file.write(srt_content) 269 | 270 | srt_html_content = "" 271 | for line in srt_content.split('\n'): 272 | srt_html_content += f"
<div>{line}</div>
" 273 | 274 | return f"Audio Length: {audio_length} seconds\n\n" + format_text(srt_html_content) 275 | else: 276 | # In the non-SRT case, ensure the full text is used and split into paragraphs by sentence 277 | paragraphs = split_text_into_paragraphs_by_sentence(transcribed_text) 278 | translated_paragraphs = [translate_text(paragraph, target_language) for paragraph in paragraphs] 279 | final_text = '\n\n'.join(translated_paragraphs) 280 | html_paragraphs = ''.join([f'

<p>{paragraph}</p>
' for paragraph in translated_paragraphs]) 281 | os.makedirs("output", exist_ok=True) 282 | result_file_path = os.path.join("output", "result.txt") 283 | with open(result_file_path, "w", encoding="utf-8") as result_file: 284 | result_file.write(final_text) 285 | return f"Audio Length: {audio_length} seconds\n\n" + format_text(html_paragraphs) 286 | 287 | def split_text_into_parts(text, num_parts): 288 | words = text.split() 289 | avg_words_per_part = max(1, len(words) // num_parts) 290 | parts = [] 291 | for i in range(num_parts): 292 | part = ' '.join(words[i * avg_words_per_part:(i + 1) * avg_words_per_part]) 293 | parts.append(part) 294 | if words[num_parts * avg_words_per_part:]: # fold any leftover words into the last part instead of dropping them 295 | parts[-1] = (parts[-1] + ' ' + ' '.join(words[num_parts * avg_words_per_part:])).strip() 296 | return parts 297 | 298 | 299 | def generate_srt(segments, target_language): 300 | srt_content = "" 301 | segment_id = 1 302 | max_line_length = 40 303 | 304 | for segment in segments: 305 | start_time_seconds = segment['start'] 306 | end_time_seconds = segment['end'] 307 | text = segment['text'] 308 | 309 | lines = split_lines(text, max_line_length=max_line_length) 310 | while lines: 311 | current_lines = lines[:2] 312 | lines = lines[2:] 313 | 314 | start_time = str(datetime.timedelta(seconds=start_time_seconds)).split(".")[0] + ',000' 315 | end_time = str(datetime.timedelta(seconds=end_time_seconds)).split(".")[0] + ',000' 316 | translated_lines = [translate_text(line, target_language) for line in current_lines] 317 | 318 | srt_entry = format_srt_entry(segment_id, start_time, end_time, translated_lines) 319 | srt_content += srt_entry 320 | segment_id += 1 321 | 322 | os.makedirs("output", exist_ok=True) 323 | srt_file_path = os.path.join("output", "output.srt") 324 | with open(srt_file_path, "w", encoding="utf-8") as srt_file: 325 | srt_file.write(srt_content) 326 | 327 | srt_html_content = "" 328 | for line in srt_content.split('\n'): 329 | srt_html_content += f"
<div>{line}</div>
" 330 | 331 | return srt_html_content 332 | 333 | title = "Unlimited Length Transcription and Translation" 334 | description = "With: whisper-large-v2 or ivrit-ai/whisper-v2-d3-e3 models | GUI by Shmuel Ronen" 335 | 336 | interface = gr.Interface( 337 | fn=transcribe_and_translate, 338 | inputs=[ 339 | gr.Audio(type="filepath", label="Upload Audio File"), 340 | gr.Dropdown(choices=['Hebrew', 'English', 'Spanish', 'French', 'German', 'Portuguese', 'Arabic'], 341 | label="Target Language", value='Hebrew'), 342 | gr.Dropdown(choices=['General Model', 'Hebrew Model'], 343 | label="Model Choice", value='Hebrew Model'), 344 | gr.Checkbox(label="Generate Hebrew SRT File") 345 | ], 346 | outputs=gr.HTML(label="Transcription / Translation / SRT Result"), 347 | title=title, 348 | description=description 349 | ) 350 | 351 | interface.css = """ 352 | #output_text, #output_text * { 353 | text-align: right !important; 354 | direction: rtl !important; 355 | } 356 | """ 357 | 358 | if __name__ == "__main__": 359 | interface.launch() 360 | --------------------------------------------------------------------------------