├── .gitattributes ├── .gitignore ├── AudioRecorder.py ├── AudioTranscriber.py ├── GPTResponder.py ├── LICENSE ├── README.md ├── TranscriberModels.py ├── ZhipuAiResponder.py ├── custom_speech_recognition ├── __init__.py ├── __main__.py ├── audio.py ├── exceptions.py ├── flac-linux-x86 ├── flac-linux-x86_64 ├── flac-mac ├── flac-win32.exe ├── pocketsphinx-data │ └── en-US │ │ ├── LICENSE.txt │ │ ├── acoustic-model │ │ ├── README │ │ ├── feat.params │ │ ├── mdef │ │ ├── means │ │ ├── noisedict │ │ ├── sendump │ │ ├── transition_matrices │ │ └── variances │ │ ├── language-model.lm.bin │ │ └── pronounciation-dictionary.dict └── recognizers │ ├── __init__.py │ └── whisper.py ├── keys.py ├── main.py ├── pictures ├── RCLogo.png ├── img.png ├── img_1.png └── img_2.png ├── prompts.py ├── requirements.txt └── whisper_models └── tiny.pt /.gitattributes: -------------------------------------------------------------------------------- 1 | *.pt filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.wav 3 | .venv/ 4 | venv/ 5 | .idea/ 6 | .vscode 7 | whisper_models/large-v3.pt 8 | whisper_models/base.pt 9 | whisper_models/medium.pt 10 | whisper_models/small.pt 11 | -------------------------------------------------------------------------------- /AudioRecorder.py: -------------------------------------------------------------------------------- 1 | """ 2 | AudioRecorder.py 3 | ------ 4 | 这个脚本定义了音频录制相关的类,用于从默认麦克风和扬声器捕获音频数据,并将音频数据放入队列中进行处理。包括基类 BaseRecorder 和两个子类 DefaultMicRecorder 和 DefaultSpeakerRecorder。 5 | """ 6 | 7 | import custom_speech_recognition as sr 8 | import pyaudiowpatch as pyaudio 9 | from datetime import datetime 10 | 11 | RECORD_TIMEOUT = 3 12 | ENERGY_THRESHOLD = 1000 13 | DYNAMIC_ENERGY_THRESHOLD = False 14 | 15 | #BaseRecorder 是一个基类,用于记录音频 16 | class BaseRecorder: 17 | 18 | #初始化 BaseRecorder 对象。它接受一个音频源(source)和一个源名称(source_name)作为参数。它创建了一个 Recognizer 对象,并设置了能量阈值和动态能量阈值。如果音频源为 None,则会引发 ValueError。 19 | 20 | def __init__(self, source, source_name): 21 | self.recorder = sr.Recognizer() 22 | self.recorder.energy_threshold = ENERGY_THRESHOLD 23 | self.recorder.dynamic_energy_threshold = DYNAMIC_ENERGY_THRESHOLD 24 | 25 | if source is None: 26 | raise ValueError("audio source can't be None") 27 | 28 | self.source = source 29 | self.source_name = source_name 30 | #此方法用于调整环境噪音。它打印一条信息消息,然后使用 self.source 对象调整环境噪音。 31 | def adjust_for_noise(self, device_name, msg): 32 | print(f"[INFO] Adjusting for ambient noise from {device_name}. 
" + msg) 33 | with self.source: 34 | self.recorder.adjust_for_ambient_noise(self.source) 35 | print(f"[INFO] Completed ambient noise adjustment for {device_name}.") 36 | #此方法用于在后台监听音频,并将音频数据放入队列。它定义了一个内部回调函数 record_callback,该函数在检测到音频时将音频数据放入队列。然后,它使用 self.source 对象和回调函数来开始监听音频。 37 | def record_into_queue(self, audio_queue): 38 | def record_callback(_, audio:sr.AudioData) -> None: 39 | data = audio.get_raw_data() 40 | audio_queue.put((self.source_name, data, datetime.utcnow())) 41 | 42 | self.recorder.listen_in_background(self.source, record_callback, phrase_time_limit=RECORD_TIMEOUT) 43 | #DefaultMicRecorder 是一个继承自 BaseRecorder 的类。它的初始化方法 __init__ 中,它调用了父类 BaseRecorder 的初始化方法,并传入了一个 sr.Microphone 对象作为音频源,以及源名称 "You"。 44 | #然后,它调用了 adjust_for_noise 方法,用于调整环境噪音,传入的参数分别是设备名称 "Default Mic" 和提示信息 "Please make some noise from the Default Mic..."。 45 | class DefaultMicRecorder(BaseRecorder): 46 | #初始化 DefaultMicRecorder 对象,并设置音频源和源名称。 47 | def __init__(self): 48 | super().__init__(source=sr.Microphone(sample_rate=16000), source_name="You") 49 | self.adjust_for_noise("Default Mic", "Please make some noise from the Default Mic...") 50 | 51 | # DefaultSpeakerRecorder 是一个继承自 BaseRecorder 的类。它的初始化方法 __init__ 中,首先获取默认扬声器设备的信息,如果找不到回放设备则抛出错误。之后,调用父类的初始化方法并调整环境噪音。 52 | class DefaultSpeakerRecorder(BaseRecorder): 53 | def __init__(self): 54 | with pyaudio.PyAudio() as p: 55 | wasapi_info = p.get_host_api_info_by_type(pyaudio.paWASAPI) 56 | default_speakers = p.get_device_info_by_index(wasapi_info["defaultOutputDevice"]) 57 | 58 | if not default_speakers["isLoopbackDevice"]: 59 | for loopback in p.get_loopback_device_info_generator(): 60 | if default_speakers["name"] in loopback["name"]: 61 | default_speakers = loopback 62 | break 63 | else: 64 | print("[ERROR] No loopback device found.") 65 | 66 | source = sr.Microphone(speaker=True, 67 | device_index= default_speakers["index"], 68 | sample_rate=int(default_speakers["defaultSampleRate"]), 69 | chunk_size=pyaudio.get_sample_size(pyaudio.paInt16), 70 | channels=default_speakers["maxInputChannels"]) 71 | super().__init__(source=source, source_name="Speaker") 72 | self.adjust_for_noise("Default Speaker", "Please make or play some noise from the Default Speaker...") -------------------------------------------------------------------------------- /AudioTranscriber.py: -------------------------------------------------------------------------------- 1 | """ 2 | AudioTranscriber.py 3 | ------ 4 | 这个脚本定义了音频转录相关的类,用于从麦克风和扬声器的音频数据中转录文本。包括音频处理、转录、和文本更新等功能。 5 | """ 6 | 7 | import whisper 8 | import torch 9 | import wave 10 | import os 11 | import threading 12 | import tempfile 13 | import custom_speech_recognition as sr 14 | import io 15 | from datetime import timedelta 16 | import pyaudiowpatch as pyaudio 17 | from heapq import merge 18 | 19 | PHRASE_TIMEOUT = 3.05 20 | 21 | MAX_PHRASES = 10 22 | 23 | 24 | # AudioTranscriber 类用于音频转录和管理转录文本。 25 | class AudioTranscriber: 26 | # 初始化 AudioTranscriber 对象,设置音频源和模型。 27 | def __init__(self, mic_source, speaker_source, model): 28 | self.transcript_data = {"You": [], "Speaker": []} 29 | self.transcript_changed_event = threading.Event() 30 | self.audio_model = model 31 | self.audio_sources = { 32 | "You": { 33 | "sample_rate": mic_source.SAMPLE_RATE, 34 | "sample_width": mic_source.SAMPLE_WIDTH, 35 | "channels": mic_source.channels, 36 | "last_sample": bytes(), 37 | "last_spoken": None, 38 | "new_phrase": True, 39 | "process_data_func": self.process_mic_data 40 | }, 41 | "Speaker": { 42 | "sample_rate": 
speaker_source.SAMPLE_RATE, 43 | "sample_width": speaker_source.SAMPLE_WIDTH, 44 | "channels": speaker_source.channels, 45 | "last_sample": bytes(), 46 | "last_spoken": None, 47 | "new_phrase": True, 48 | "process_data_func": self.process_speaker_data 49 | } 50 | } 51 | 52 | # 处理音频队列中的数据,将音频数据转录为文本并更新转录数据。 53 | def transcribe_audio_queue(self, audio_queue): 54 | while True: 55 | who_spoke, data, time_spoken = audio_queue.get() 56 | self.update_last_sample_and_phrase_status(who_spoke, data, time_spoken) 57 | source_info = self.audio_sources[who_spoke] 58 | 59 | text = '' 60 | try: 61 | fd, path = tempfile.mkstemp(suffix=".wav") 62 | os.close(fd) 63 | source_info["process_data_func"](source_info["last_sample"], path) 64 | text = self.audio_model.get_transcription(path) 65 | except Exception as e: 66 | print(e) 67 | finally: 68 | os.unlink(path) 69 | 70 | if text != '' and text.lower() != 'you': 71 | self.update_transcript(who_spoke, text, time_spoken) 72 | self.transcript_changed_event.set() 73 | 74 | # 更新最近的样本数据和短语状态。 75 | def update_last_sample_and_phrase_status(self, who_spoke, data, time_spoken): 76 | source_info = self.audio_sources[who_spoke] 77 | if source_info["last_spoken"] and time_spoken - source_info["last_spoken"] > timedelta(seconds=PHRASE_TIMEOUT): 78 | source_info["last_sample"] = bytes() 79 | source_info["new_phrase"] = True 80 | else: 81 | source_info["new_phrase"] = False 82 | 83 | source_info["last_sample"] += data 84 | source_info["last_spoken"] = time_spoken 85 | 86 | # 处理麦克风数据,将其转换为 WAV 格式。 87 | 88 | def process_mic_data(self, data, temp_file_name): 89 | audio_data = sr.AudioData(data, self.audio_sources["You"]["sample_rate"], self.audio_sources["You"]["sample_width"]) 90 | wav_data = io.BytesIO(audio_data.get_wav_data()) 91 | with open(temp_file_name, 'w+b') as f: 92 | f.write(wav_data.read()) 93 | 94 | # 处理扬声器数据,将其转换为 WAV 格式。 95 | def process_speaker_data(self, data, temp_file_name): 96 | with wave.open(temp_file_name, 'wb') as wf: 97 | wf.setnchannels(self.audio_sources["Speaker"]["channels"]) 98 | p = pyaudio.PyAudio() 99 | wf.setsampwidth(p.get_sample_size(pyaudio.paInt16)) 100 | wf.setframerate(self.audio_sources["Speaker"]["sample_rate"]) 101 | wf.writeframes(data) 102 | 103 | # 更新转录数据,将新的转录文本添加到适当的位置。 104 | def update_transcript(self, who_spoke, text, time_spoken): 105 | source_info = self.audio_sources[who_spoke] 106 | transcript = self.transcript_data[who_spoke] 107 | 108 | if source_info["new_phrase"] or len(transcript) == 0: 109 | if len(transcript) > MAX_PHRASES: 110 | transcript.pop(-1) 111 | transcript.insert(0, (f"{who_spoke}: [{text}]\n\n", time_spoken)) 112 | else: 113 | transcript[0] = (f"{who_spoke}: [{text}]\n\n", time_spoken) 114 | 115 | # 获取组合的转录文本,按时间顺序排列。 116 | def get_transcript(self): 117 | combined_transcript = list(merge( 118 | self.transcript_data["You"], self.transcript_data["Speaker"], 119 | key=lambda x: x[1], reverse=True)) 120 | combined_transcript = combined_transcript[:MAX_PHRASES] 121 | return "".join([t[0] for t in combined_transcript]) 122 | 123 | # 清除转录数据和音频源的状态。 124 | def clear_transcript_data(self): 125 | self.transcript_data["You"].clear() 126 | self.transcript_data["Speaker"].clear() 127 | 128 | self.audio_sources["You"]["last_sample"] = bytes() 129 | self.audio_sources["Speaker"]["last_sample"] = bytes() 130 | 131 | self.audio_sources["You"]["new_phrase"] = True 132 | self.audio_sources["Speaker"]["new_phrase"] = True 133 | -------------------------------------------------------------------------------- /GPTResponder.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | GPTResponder.py 3 | ------ 4 | This script defines the GPTResponder class, which generates responses based on the transcript. It calls OpenAI's GPT model to produce the response and controls the interval between response generations. 5 | """ 6 | 7 | import openai 8 | from keys import OPENAI_API_KEY 9 | from prompts import create_prompt, INITIAL_RESPONSE 10 | import time 11 | 12 | openai.api_key = OPENAI_API_KEY 13 | 14 | 15 | # Generate a response based on the transcript. 16 | def generate_response_from_transcript(transcript): 17 | try: 18 | response = openai.ChatCompletion.create( 19 | model="gpt-3.5-turbo-0301", 20 | messages=[{"role": "system", "content": create_prompt(transcript)}], 21 | temperature=0.0 22 | ) 23 | except Exception as e: 24 | print(e) 25 | return '' 26 | full_response = response.choices[0].message.content 27 | try: 28 | return full_response.split('[')[1].split(']')[0] 29 | except (IndexError, AttributeError): # the model did not wrap its answer in [brackets] 30 | return '' 31 | 32 | 33 | # The GPTResponder class manages GPT response generation and the response interval. 34 | class GPTResponder: 35 | def __init__(self): 36 | self.response = INITIAL_RESPONSE 37 | self.response_interval = 2 38 | 39 | # Respond to the transcriber: fetch the latest transcript and generate a response. 40 | def respond_to_transcriber(self, transcriber): 41 | while True: 42 | if transcriber.transcript_changed_event.is_set(): 43 | start_time = time.time() 44 | 45 | transcriber.transcript_changed_event.clear() 46 | transcript_string = transcriber.get_transcript() 47 | response = generate_response_from_transcript(transcript_string) 48 | 49 | end_time = time.time() # Measure end time 50 | execution_time = end_time - start_time # Calculate the time it took to execute the function 51 | 52 | if response != '': 53 | self.response = response 54 | 55 | remaining_time = self.response_interval - execution_time 56 | if remaining_time > 0: 57 | time.sleep(remaining_time) 58 | else: 59 | time.sleep(0.3) 60 | 61 | # Update the response interval. 62 | def update_response_interval(self, interval): 63 | self.response_interval = interval 64 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 SevaSk 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 开源面试助手-Interview-Helper 2 | 3 | ## 环境 4 | 5 | - Python >=3.8.0 6 | - Windows 操作系统(未在其他系统上进行测试) 7 | - FFmpeg(如果未安装,安装教程:https://blog.csdn.net/m0_47449768/article/details/130102406) 8 | 9 | ## 安装 10 | 1. 安装所需的软件包: 11 | 12 | ``` 13 | pip install -r requirements.txt 14 | ``` 15 | 16 | 2. 要用大模型回答,需要支持OpenAI格式的线上大模型。你可以在目录中创建一个 `keys.py` 文件并添加您的 OpenAI API Key: 17 | 18 | - 选项 1:执行时以变量传入。运行以下命令,将 "API KEY" 替换为实际的 OpenAI API Key: 19 | 20 | ``` 21 | python -c "with open('keys.py', 'w', encoding='utf-8') as f: f.write('OPENAI_API_KEY=\"API KEY\"')" 22 | ``` 23 | 24 | - 选项 2:在根目录下创建 keys.py,内容如下,将 "API KEY" 替换为 OpenAI API Key。 25 | 26 | ``` 27 | OPENAI_API_KEY="API KEY" 28 | ``` 29 | 30 | ## 运行 31 | 32 | 运行(使用本地的whisper模型): 33 | 34 | ``` 35 | python main.py 36 | ``` 37 | 38 | 也可以加上 --api ,将使用 whisper API 进行转录。 39 | 40 | ``` 41 | python main.py --api 42 | ``` 43 | 44 | ### 扬声器测试 45 | 启动时会测试麦克风和扬声器是否配置正确。请说话和播放声音,以通过测试。 46 | ![img.png](pictures/img.png) 47 | 48 | 49 | 50 | 启动后,将开始实时转录您的麦克风输入和扬声器输出,并根据对话询问大模型。 51 | 52 | ![img_1.png](pictures/img_1.png) 53 | 54 | ## 配置本地转录模型 55 | 56 | 该项目使用pt格式的Whipser模型,可以参照下面的"下载其它转录模型的方法",从Whisper.cpp项目下载,放到whisper_models文件夹中。 57 | 58 | ![img_2.png](pictures/img_2.png) 59 | 60 | 然后在TranscriberModels.py的`self.audio_model = whisper.load_model(os.path.join(os.getcwd(), <你下载的本地转录模型>))`中使用。 61 | 62 | ### 下载其它转录模型的方法 63 | 64 | 打开OpenAI的Whipser github: https://github.com/openai/whisper/blob/main/whisper/__init__.py#L17-L30 65 | 66 | _MODELS 变量中是各模型的下载链接,在浏览器中打开,会自动开始下载。 67 | 68 | 2024/06/20的Models下载链接快照: 69 | ``` 70 | _MODELS = { 71 | "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", 72 | "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", 73 | "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", 74 | "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", 75 | "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", 76 | "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", 77 | "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", 78 | "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", 79 | "large-v1": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large-v1.pt", 80 | "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", 81 | "large-v3": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", 82 | "large": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", 83 | 
} 84 | ``` 85 | 86 | #### 可用模型 87 | 88 | | 模型 | 磁盘 | SHA | 89 | | ------------- | ------- | ------------------------------------------ | 90 | | tiny | 75 MiB | `bd577a113a864445d4c299885e0cb97d4ba92b5f` | 91 | | tiny.en | 75 MiB | `c78c86eb1a8faa21b369bcd33207cc90d64ae9df` | 92 | | base | 142 MiB | `465707469ff3a37a2b9b8d8f89f2f99de7299dac` | 93 | | base.en | 142 MiB | `137c40403d78fd54d454da0f9bd998f78703390c` | 94 | | small | 466 MiB | `55356645c2b361a969dfd0ef2c5a50d530afd8d5` | 95 | | small.en | 466 MiB | `db8a495a91d927739e50b3fc1cc4c6b8f6c2d022` | 96 | | small.en-tdrz | 465 MiB | `b6c6e7e89af1a35c08e6de56b66ca6a02a2fdfa1` | 97 | | medium | 1.5 GiB | `fd9727b6e1217c2f614f9b698455c4ffd82463b4` | 98 | | medium.en | 1.5 GiB | `8c30f0e44ce9560643ebd10bbe50cd20eafd3723` | 99 | | large-v1 | 2.9 GiB | `b1caaf735c4cc1429223d5a74f0f4d0b9b59a299` | 100 | | large-v2 | 2.9 GiB | `0f4c8e34f21cf1a914c59d8b3ce882345ad349d6` | 101 | | large-v2-q5_0 | 1.1 GiB | `00e39f2196344e901b3a2bd5814807a769bd1630` | 102 | | large-v3 | 2.9 GiB | `ad82bf6a9043ceed055076d0fd39f5f186ff8062` | 103 | | large-v3-q5_0 | 1.1 GiB | `e6e2ed78495d403bef4b7cff42ef4aaadcfea8de` | 104 | 105 | 除非模型名称包含`.en`,那么模型是英语特化;否则模型是多语言的。以`-q5_0`结尾的模型是经过[量化处理](../README.md#quantization)的。以`-tdrz`结尾的模型支持使用[tinydiarize](https://github.com/akashmjn/tinydiarize)进行本地话者角色标记。关于模型的更多信息,请参阅[上游(openai/whisper)](https://github.com/openai/whisper#available-models-and-languages)。上述列表是由[download-ggml-model.sh](download-ggml-model.sh)脚本支持的模型的一个子集,但在https://huggingface.co/ggerganov/whisper.cpp/tree/main和其他地方还提供了更多模型。 106 | 107 | ### 模型选择 108 | 109 | 项目的默认模型是tiny.pt. 在3080平台上,中文任务、综合实时性和准确性,small表现较好。 110 | 111 | ## 开发计划 112 | 113 | 目前:完成本地转录+OpenAI回答。 114 | 115 | --- 116 | 117 | v2: 支持官方Whipser转录,并直接回答。(2024/06:现在应该用4o了) 118 | v3:支持本地Ollama大模型和线上的大模型。 119 | 120 | ## 加入开发、学习架构 121 | 122 | 我对所有文件和方法都尽可能地加上注释,让它成为程序员学习大模型应用的第一个项目。 123 | 124 | 如果你对面试助手开源感兴趣,或者单纯想要学习,都可以发邮件给我,加入开发群:jarvanzhao@qq.com 125 | 126 | -------------------------------------------------------------------------------- /TranscriberModels.py: -------------------------------------------------------------------------------- 1 | """ 2 | TranscriberModels.py 3 | ------ 4 | 这个脚本定义了获取音频转录模型的函数和两个实现转录功能的类(WhisperTranscriber 和 APIWhisperTranscriber)。根据是否使用API,返回相应的模型对象。 5 | """ 6 | 7 | import openai 8 | import whisper 9 | import os 10 | import torch 11 | 12 | # 根据是否使用API,返回相应的音频转录模型对象。 13 | def get_model(use_api): 14 | if use_api: 15 | return APIWhisperTranscriber() 16 | else: 17 | return WhisperTranscriber() 18 | # WhisperTranscriber 类使用 Whisper 模型进行音频转录。 19 | class WhisperTranscriber: 20 | # 初始化 WhisperTranscriber 对象,加载 Whisper 模型。 21 | def __init__(self): 22 | self.audio_model = whisper.load_model(os.path.join(os.getcwd(), 'whisper_models','tiny.pt')) 23 | print(f"[INFO] Whisper using GPU: " + str(torch.cuda.is_available())) 24 | 25 | # 获取音频文件的转录文本。 26 | def get_transcription(self, wav_file_path): 27 | try: 28 | result = self.audio_model.transcribe(wav_file_path, fp16=torch.cuda.is_available()) 29 | except Exception as e: 30 | print(e) 31 | return '' 32 | return result['text'].strip() 33 | 34 | # APIWhisperTranscriber 类使用 OpenAI 的 Whisper API 进行音频转录。 35 | class APIWhisperTranscriber: 36 | # 获取音频文件的转录文本。 37 | def get_transcription(self, wav_file_path): 38 | try: 39 | with open(wav_file_path, "rb") as audio_file: 40 | result = openai.Audio.transcribe("whisper-1", audio_file) 41 | except Exception as e: 42 | print(e) 43 | return '' 44 | return result['text'].strip() 45 | 
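Below is a minimal sketch (not part of the repository) of how the hard-coded `'tiny.pt'` path in TranscriberModels.py could be made configurable, following the README's local-model instructions above. The `model_name` parameter and the `small.pt` example are illustrative assumptions; `whisper.load_model` and `torch.cuda.is_available` are the same calls used in WhisperTranscriber.

```python
import os
import torch
import whisper

# Sketch only: a WhisperTranscriber variant that takes the checkpoint filename as an
# argument, so switching models does not require editing the load_model() call.
# Assumes the chosen .pt file has already been downloaded into whisper_models/.
class ConfigurableWhisperTranscriber:
    def __init__(self, model_name="tiny.pt"):  # model_name is a hypothetical parameter
        model_path = os.path.join(os.getcwd(), "whisper_models", model_name)
        self.audio_model = whisper.load_model(model_path)
        print("[INFO] Whisper using GPU: " + str(torch.cuda.is_available()))

    def get_transcription(self, wav_file_path):
        try:
            result = self.audio_model.transcribe(wav_file_path, fp16=torch.cuda.is_available())
        except Exception as e:
            print(e)
            return ''
        return result['text'].strip()

# Usage (assumes whisper_models/small.pt has been downloaded as described in the README):
# transcriber = ConfigurableWhisperTranscriber("small.pt")
# print(transcriber.get_transcription("test.wav"))
```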
-------------------------------------------------------------------------------- /ZhipuAiResponder.py: -------------------------------------------------------------------------------- 1 | from zhipuai import ZhipuAI 2 | from keys import ZHIPUAI_API_KEY # 假设你在keys.py中存储了智谱AI的API密钥 3 | from prompts import create_prompt, INITIAL_RESPONSE 4 | import time 5 | 6 | client = ZhipuAI(api_key=ZHIPUAI_API_KEY) # 请填写您自己的APIKey 7 | 8 | 9 | # 生成基于转录文本的响应 10 | def generate_response_from_transcript(transcript): 11 | try: 12 | response = client.chat.completions.create( 13 | model="GLM-4-0520", # 填写需要调用的模型名称 14 | messages=[ 15 | {"role": "user", "content": create_prompt(transcript)}, 16 | ], 17 | stream=True, 18 | ) 19 | full_response = "" 20 | for chunk in response: 21 | full_response += chunk.choices[0].delta.content # 提取并连接 content 属性 22 | 23 | return full_response 24 | except Exception as e: 25 | print(f"API error: {e}") 26 | return '' 27 | 28 | 29 | class ZhipuAiResponder: 30 | def __init__(self): 31 | self.response = INITIAL_RESPONSE 32 | self.response_interval = 2 33 | 34 | # 响应转录者,获取新的转录文本并生成响应 35 | def respond_to_transcriber(self, transcriber): 36 | while True: 37 | if transcriber.transcript_changed_event.is_set(): 38 | start_time = time.time() 39 | 40 | transcriber.transcript_changed_event.clear() 41 | transcript_string = transcriber.get_transcript() 42 | response = generate_response_from_transcript(transcript_string) 43 | 44 | end_time = time.time() # Measure end time 45 | execution_time = end_time - start_time # Calculate the time it took to execute the function 46 | 47 | if response != '': 48 | self.response = response 49 | 50 | remaining_time = self.response_interval - execution_time 51 | if remaining_time > 0: 52 | time.sleep(remaining_time) 53 | else: 54 | time.sleep(0.3) 55 | 56 | # 更新响应时间间隔 57 | def update_response_interval(self, interval): 58 | self.response_interval = interval 59 | -------------------------------------------------------------------------------- /custom_speech_recognition/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | custom_speech_recognition/__init__.py 3 | ------ 4 | 这个脚本是一个自定义的语音识别库,支持多个引擎和API,包括在线和离线模式。它定义了音频源类、异常类、识别器类和一些工具函数。 5 | """ 6 | 7 | #!/usr/bin/env python3 8 | 9 | """Library for performing speech recognition, with support for several engines and APIs, online and offline.""" 10 | 11 | import io 12 | import os 13 | import tempfile 14 | import sys 15 | import subprocess 16 | import wave 17 | import aifc 18 | import math 19 | import audioop 20 | import collections 21 | import json 22 | import base64 23 | import threading 24 | import hashlib 25 | import hmac 26 | import time 27 | import uuid 28 | 29 | try: 30 | import requests 31 | except (ModuleNotFoundError, ImportError): 32 | pass 33 | 34 | __author__ = "Anthony Zhang (Uberi)" 35 | __version__ = "3.10.0" 36 | __license__ = "BSD" 37 | 38 | from urllib.parse import urlencode 39 | from urllib.request import Request, urlopen 40 | from urllib.error import URLError, HTTPError 41 | 42 | from .audio import AudioData, get_flac_converter 43 | from .exceptions import ( 44 | RequestError, 45 | TranscriptionFailed, 46 | TranscriptionNotReady, 47 | UnknownValueError, 48 | WaitTimeoutError, 49 | ) 50 | from .recognizers import whisper 51 | 52 | 53 | class AudioSource(object): 54 | def __init__(self): 55 | raise NotImplementedError("this is an abstract class") 56 | 57 | def __enter__(self): 58 | raise NotImplementedError("this is an abstract class") 59 | 60 | def 
__exit__(self, exc_type, exc_value, traceback): 61 | raise NotImplementedError("this is an abstract class") 62 | 63 | 64 | class Microphone(AudioSource): 65 | """ 66 | 创建一个新的“Microphone”实例,代表计算机上的物理麦克风。是“AudioSource”的子类。 67 | 如果您没有安装PyAudio0.2.11或更高版本,将会引发“AttributeError”。 68 | 如果未指定“device_index”或为“None”,则默认麦克风将用作音频源。否则,“device_index”应该是要用于音频输入的设备索引。 69 | 设备索引是一个介于0和“pyaudio.get_device_count() - 1”(假设我们之前已经使用了“import pyaudio”)之间的整数。它代表音频设备,如麦克风或扬声器。有关更多详细信息,请参阅 70 | `PyAudio文档 < http: // people.csail.mit.edu / hubert / pyaudio / docs / > `__。 71 | 麦克风音频以“chunk_size”个样本一组录制,在每秒“sample_rate”个样本(赫兹)的速度下。如果未指定,“sample_rate”的值将根据系统的麦克风设置自动确定。 72 | 更高的“sample_rate”值会带来更好的音频质量,但也会消耗更多带宽(因此,识别速度较慢)。此外,一些CPU(例如旧版Raspberry Pi模型中的CPU)在此值过高时无法跟上。 73 | 较大的“chunk_size”值有助于避免对快速变化的环境噪音产生触发,但也会使检测变得不够敏感。一般情况下,此值应该保留为默认值。 74 | """ 75 | 76 | def __init__(self, device_index=None, sample_rate=None, chunk_size=1024, speaker=False, channels = 1): 77 | assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer" 78 | assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer" 79 | assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer" 80 | 81 | # set up PyAudio 82 | self.speaker=speaker 83 | self.pyaudio_module = self.get_pyaudio() 84 | audio = self.pyaudio_module.PyAudio() 85 | try: 86 | count = audio.get_device_count() # obtain device count 87 | if device_index is not None: # ensure device index is in range 88 | assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1) 89 | if sample_rate is None: # automatically set the sample rate to the hardware's default sample rate if not specified 90 | device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info() 91 | assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info) 92 | sample_rate = int(device_info["defaultSampleRate"]) 93 | finally: 94 | audio.terminate() 95 | 96 | self.device_index = device_index 97 | self.format = self.pyaudio_module.paInt16 # 16-bit int sampling 98 | self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample 99 | self.SAMPLE_RATE = sample_rate # sampling rate in Hertz 100 | self.CHUNK = chunk_size # number of frames stored in each buffer 101 | self.channels = channels 102 | 103 | self.audio = None 104 | self.stream = None 105 | 106 | @staticmethod 107 | def get_pyaudio(): 108 | """ 109 | Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed 110 | """ 111 | try: 112 | import pyaudiowpatch as pyaudio 113 | except ImportError: 114 | raise AttributeError("Could not find PyAudio; check installation") 115 | from distutils.version import LooseVersion 116 | if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.11"): 117 | raise AttributeError("PyAudio 0.2.11 or later is required (found version {})".format(pyaudio.__version__)) 118 | return pyaudio 119 | 120 | @staticmethod 121 | def list_microphone_names(): 122 | """ 123 | Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead. 
124 | 125 | The index of each microphone's name in the returned list is the same as its device index when creating a ``Microphone`` instance - if you want to use the microphone at index 3 in the returned list, use ``Microphone(device_index=3)``. 126 | """ 127 | audio = Microphone.get_pyaudio().PyAudio() 128 | try: 129 | result = [] 130 | for i in range(audio.get_device_count()): 131 | device_info = audio.get_device_info_by_index(i) 132 | result.append(device_info.get("name")) 133 | finally: 134 | audio.terminate() 135 | return result 136 | 137 | @staticmethod 138 | def list_working_microphones(): 139 | """ 140 | Returns a dictionary mapping device indices to microphone names, for microphones that are currently hearing sounds. When using this function, ensure that your microphone is unmuted and make some noise at it to ensure it will be detected as working. 141 | 142 | Each key in the returned dictionary can be passed to the ``Microphone`` constructor to use that microphone. For example, if the return value is ``{3: "HDA Intel PCH: ALC3232 Analog (hw:1,0)"}``, you can do ``Microphone(device_index=3)`` to use that microphone. 143 | """ 144 | pyaudio_module = Microphone.get_pyaudio() 145 | audio = pyaudio_module.PyAudio() 146 | try: 147 | result = {} 148 | for device_index in range(audio.get_device_count()): 149 | device_info = audio.get_device_info_by_index(device_index) 150 | device_name = device_info.get("name") 151 | assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info) 152 | try: 153 | # read audio 154 | pyaudio_stream = audio.open( 155 | input_device_index=device_index, channels=1, format=pyaudio_module.paInt16, 156 | rate=int(device_info["defaultSampleRate"]), input=True 157 | ) 158 | try: 159 | buffer = pyaudio_stream.read(1024) 160 | if not pyaudio_stream.is_stopped(): pyaudio_stream.stop_stream() 161 | finally: 162 | pyaudio_stream.close() 163 | except Exception: 164 | continue 165 | 166 | # compute RMS of debiased audio 167 | energy = -audioop.rms(buffer, 2) 168 | energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF]) 169 | debiased_energy = audioop.rms(audioop.add(buffer, energy_bytes * (len(buffer) // 2), 2), 2) 170 | 171 | if debiased_energy > 30: # probably actually audio 172 | result[device_index] = device_name 173 | finally: 174 | audio.terminate() 175 | return result 176 | 177 | def __enter__(self): 178 | assert self.stream is None, "This audio source is already inside a context manager" 179 | self.audio = self.pyaudio_module.PyAudio() 180 | 181 | try: 182 | if self.speaker: 183 | p = self.audio 184 | self.stream = Microphone.MicrophoneStream( 185 | p.open( 186 | input_device_index=self.device_index, 187 | channels=self.channels, 188 | format=self.format, 189 | rate=self.SAMPLE_RATE, 190 | frames_per_buffer=self.CHUNK, 191 | input=True 192 | ) 193 | ) 194 | else: 195 | self.stream = Microphone.MicrophoneStream( 196 | self.audio.open( 197 | input_device_index=self.device_index, channels=1, format=self.format, 198 | rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, input=True, 199 | ) 200 | ) 201 | except Exception: 202 | self.audio.terminate() 203 | return self 204 | 205 | def __exit__(self, exc_type, exc_value, traceback): 206 | try: 207 | self.stream.close() 208 | finally: 209 | self.stream = None 210 | self.audio.terminate() 211 | 212 | class MicrophoneStream(object): 213 | def __init__(self, pyaudio_stream): 214 | self.pyaudio_stream = 
pyaudio_stream 215 | 216 | def read(self, size): 217 | return self.pyaudio_stream.read(size, exception_on_overflow=False) 218 | 219 | def close(self): 220 | try: 221 | # sometimes, if the stream isn't stopped, closing the stream throws an exception 222 | if not self.pyaudio_stream.is_stopped(): 223 | self.pyaudio_stream.stop_stream() 224 | finally: 225 | self.pyaudio_stream.close() 226 | 227 | 228 | class AudioFile(AudioSource): 229 | """ 230 | Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``. 231 | 232 | If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar. 233 | 234 | Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context. 235 | 236 | WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour. 237 | 238 | Both AIFF and AIFF-C (compressed AIFF) formats are supported. 239 | 240 | FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour. 241 | """ 242 | 243 | def __init__(self, filename_or_fileobject): 244 | assert isinstance(filename_or_fileobject, (type(""), type(u""))) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object" 245 | self.filename_or_fileobject = filename_or_fileobject 246 | self.stream = None 247 | self.DURATION = None 248 | 249 | self.audio_reader = None 250 | self.little_endian = False 251 | self.SAMPLE_RATE = None 252 | self.CHUNK = None 253 | self.FRAME_COUNT = None 254 | 255 | def __enter__(self): 256 | assert self.stream is None, "This audio source is already inside a context manager" 257 | try: 258 | # attempt to read the file as WAV 259 | self.audio_reader = wave.open(self.filename_or_fileobject, "rb") 260 | self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form) 261 | except (wave.Error, EOFError): 262 | try: 263 | # attempt to read the file as AIFF 264 | self.audio_reader = aifc.open(self.filename_or_fileobject, "rb") 265 | self.little_endian = False # AIFF is a big-endian format 266 | except (aifc.Error, EOFError): 267 | # attempt to read the file as FLAC 268 | if hasattr(self.filename_or_fileobject, "read"): 269 | flac_data = self.filename_or_fileobject.read() 270 | else: 271 | with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read() 272 | 273 | # run the FLAC converter with the FLAC data to get the AIFF data 274 | flac_converter = get_flac_converter() 275 | if os.name == "nt": # on Windows, specify that the process is to be started without showing a console window 276 | startup_info = subprocess.STARTUPINFO() 277 | startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW # specify that the wShowWindow field of `startup_info` contains a value 278 | startup_info.wShowWindow = subprocess.SW_HIDE # specify that the console window should be hidden 
279 | else: 280 | startup_info = None # default startupinfo 281 | process = subprocess.Popen([ 282 | flac_converter, 283 | "--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output 284 | "--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file 285 | "-", # the input FLAC file contents will be given in stdin 286 | ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=startup_info) 287 | aiff_data, _ = process.communicate(flac_data) 288 | aiff_file = io.BytesIO(aiff_data) 289 | try: 290 | self.audio_reader = aifc.open(aiff_file, "rb") 291 | except (aifc.Error, EOFError): 292 | raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another format") 293 | self.little_endian = False # AIFF is a big-endian format 294 | assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo" 295 | self.SAMPLE_WIDTH = self.audio_reader.getsampwidth() 296 | 297 | # 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866) 298 | samples_24_bit_pretending_to_be_32_bit = False 299 | if self.SAMPLE_WIDTH == 3: # 24-bit audio 300 | try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) 301 | except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) 302 | samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit 303 | self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading 304 | 305 | self.SAMPLE_RATE = self.audio_reader.getframerate() 306 | self.CHUNK = 4096 307 | self.FRAME_COUNT = self.audio_reader.getnframes() 308 | self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE) 309 | self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit) 310 | return self 311 | 312 | def __exit__(self, exc_type, exc_value, traceback): 313 | if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path) 314 | self.audio_reader.close() 315 | self.stream = None 316 | self.DURATION = None 317 | 318 | class AudioFileStream(object): 319 | def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit): 320 | self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance) 321 | self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it) 322 | self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly 323 | 324 | def read(self, size=-1): 325 | buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size) 326 | if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608 327 | 328 | sample_width = self.audio_reader.getsampwidth() 329 | if not 
self.little_endian: # big endian format, convert to little endian on the fly 330 | if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality) 331 | buffer = audioop.byteswap(buffer, sample_width) 332 | else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback 333 | buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width)) 334 | 335 | # workaround for https://bugs.python.org/issue12866 336 | if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions 337 | buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample 338 | sample_width = 4 # make sure we thread the buffer as 32-bit audio now, after converting it from 24-bit audio 339 | if self.audio_reader.getnchannels() != 1: # stereo audio 340 | buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono 341 | return buffer 342 | 343 | 344 | class Recognizer(AudioSource): 345 | def __init__(self): 346 | """ 347 | Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality. 348 | """ 349 | self.energy_threshold = 300 # minimum audio energy to consider for recording 350 | self.dynamic_energy_threshold = True 351 | self.dynamic_energy_adjustment_damping = 0.15 352 | self.dynamic_energy_ratio = 1.5 353 | self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete 354 | self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout 355 | 356 | self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops) 357 | self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording 358 | 359 | def record(self, source, duration=None, offset=None): 360 | """ 361 | Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns. 362 | 363 | If ``duration`` is not specified, then it will record until there is no more audio input. 364 | """ 365 | assert isinstance(source, AudioSource), "Source must be an audio source" 366 | assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 
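# The loop below reads the stream one CHUNK at a time, counting CHUNK / SAMPLE_RATE seconds per buffer: buffers read before `offset` seconds are discarded, later buffers are written to `frames`, and reading stops once `duration` seconds have been collected or the stream returns an empty buffer (end of input).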
367 | 368 | frames = io.BytesIO() 369 | seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE 370 | elapsed_time = 0 371 | offset_time = 0 372 | offset_reached = False 373 | while True: # loop for the total number of chunks needed 374 | if offset and not offset_reached: 375 | offset_time += seconds_per_buffer 376 | if offset_time > offset: 377 | offset_reached = True 378 | 379 | buffer = source.stream.read(source.CHUNK) 380 | if len(buffer) == 0: break 381 | 382 | if offset_reached or not offset: 383 | elapsed_time += seconds_per_buffer 384 | if duration and elapsed_time > duration: break 385 | 386 | frames.write(buffer) 387 | 388 | frame_data = frames.getvalue() 389 | frames.close() 390 | return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH) 391 | 392 | def adjust_for_ambient_noise(self, source, duration=1): 393 | """ 394 | Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise. 395 | 396 | Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected. 397 | 398 | The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise. 399 | """ 400 | assert isinstance(source, AudioSource), "Source must be an audio source" 401 | assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 402 | assert self.pause_threshold >= self.non_speaking_duration >= 0 403 | 404 | seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE 405 | elapsed_time = 0 406 | 407 | # adjust energy threshold until a phrase starts 408 | while True: 409 | elapsed_time += seconds_per_buffer 410 | if elapsed_time > duration: break 411 | buffer = source.stream.read(source.CHUNK) 412 | energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal 413 | 414 | # dynamically adjust the energy threshold using asymmetric weighted average 415 | damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates 416 | target_energy = energy * self.dynamic_energy_ratio 417 | self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) 418 | 419 | def snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, source, timeout=None): 420 | # load snowboy library (NOT THREAD SAFE) 421 | sys.path.append(snowboy_location) 422 | import snowboydetect 423 | sys.path.pop() 424 | 425 | detector = snowboydetect.SnowboyDetect( 426 | resource_filename=os.path.join(snowboy_location, "resources", "common.res").encode(), 427 | model_str=",".join(snowboy_hot_word_files).encode() 428 | ) 429 | detector.SetAudioGain(1.0) 430 | detector.SetSensitivity(",".join(["0.4"] * len(snowboy_hot_word_files)).encode()) 431 | snowboy_sample_rate = detector.SampleRate() 432 | 433 | elapsed_time = 0 434 | seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE 435 | resampling_state = None 436 | 437 | # buffers capable of holding 5 seconds of original audio 438 | five_seconds_buffer_count = int(math.ceil(5 / seconds_per_buffer)) 439 | # buffers capable of holding 0.5 seconds of resampled audio 440 | half_second_buffer_count = int(math.ceil(0.5 / seconds_per_buffer)) 441 | frames 
= collections.deque(maxlen=five_seconds_buffer_count) 442 | resampled_frames = collections.deque(maxlen=half_second_buffer_count) 443 | # snowboy check interval 444 | check_interval = 0.05 445 | last_check = time.time() 446 | while True: 447 | elapsed_time += seconds_per_buffer 448 | if timeout and elapsed_time > timeout: 449 | raise WaitTimeoutError("listening timed out while waiting for hotword to be said") 450 | 451 | buffer = source.stream.read(source.CHUNK) 452 | if len(buffer) == 0: break # reached end of the stream 453 | frames.append(buffer) 454 | 455 | # resample audio to the required sample rate 456 | resampled_buffer, resampling_state = audioop.ratecv(buffer, source.SAMPLE_WIDTH, 1, source.SAMPLE_RATE, snowboy_sample_rate, resampling_state) 457 | resampled_frames.append(resampled_buffer) 458 | if time.time() - last_check > check_interval: 459 | # run Snowboy on the resampled audio 460 | snowboy_result = detector.RunDetection(b"".join(resampled_frames)) 461 | assert snowboy_result != -1, "Error initializing streams or reading audio data" 462 | if snowboy_result > 0: break # wake word found 463 | resampled_frames.clear() 464 | last_check = time.time() 465 | 466 | return b"".join(frames), elapsed_time 467 | 468 | def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None): 469 | """ 470 | Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns. 471 | 472 | This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included. 473 | 474 | The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout. 475 | 476 | The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there will be no phrase time limit. 477 | 478 | The ``snowboy_configuration`` parameter allows integration with `Snowboy `__, an offline, high-accuracy, power-efficient hotword recognition engine. When used, this function will pause until Snowboy detects a hotword, after which it will unpause. This parameter should either be ``None`` to turn off Snowboy support, or a tuple of the form ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format). 479 | 480 | This operation will always complete within ``timeout + phrase_timeout`` seconds if both are numbers, either by returning the audio data, or by raising a ``speech_recognition.WaitTimeoutError`` exception. 481 | """ 482 | assert isinstance(source, AudioSource), "Source must be an audio source" 483 | assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 
484 | assert self.pause_threshold >= self.non_speaking_duration >= 0 485 | if snowboy_configuration is not None: 486 | assert os.path.isfile(os.path.join(snowboy_configuration[0], "snowboydetect.py")), "``snowboy_configuration[0]`` must be a Snowboy root directory containing ``snowboydetect.py``" 487 | for hot_word_file in snowboy_configuration[1]: 488 | assert os.path.isfile(hot_word_file), "``snowboy_configuration[1]`` must be a list of Snowboy hot word configuration files" 489 | 490 | seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE 491 | pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete 492 | phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase 493 | non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase 494 | 495 | # read audio input for phrases until there is a phrase that is long enough 496 | elapsed_time = 0 # number of seconds of audio read 497 | buffer = b"" # an empty buffer means that the stream has ended and there is no data left to read 498 | while True: 499 | frames = collections.deque() 500 | 501 | if snowboy_configuration is None: 502 | # store audio input until the phrase starts 503 | while True: 504 | # handle waiting too long for phrase by raising an exception 505 | elapsed_time += seconds_per_buffer 506 | if timeout and elapsed_time > timeout: 507 | raise WaitTimeoutError("listening timed out while waiting for phrase to start") 508 | 509 | buffer = source.stream.read(source.CHUNK) 510 | if len(buffer) == 0: break # reached end of the stream 511 | frames.append(buffer) 512 | if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers 513 | frames.popleft() 514 | 515 | # detect whether speaking has started on audio input 516 | energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal 517 | if energy > self.energy_threshold: break 518 | 519 | # dynamically adjust the energy threshold using asymmetric weighted average 520 | if self.dynamic_energy_threshold: 521 | damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates 522 | target_energy = energy * self.dynamic_energy_ratio 523 | self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) 524 | else: 525 | # read audio input until the hotword is said 526 | snowboy_location, snowboy_hot_word_files = snowboy_configuration 527 | buffer, delta_time = self.snowboy_wait_for_hot_word(snowboy_location, snowboy_hot_word_files, source, timeout) 528 | elapsed_time += delta_time 529 | if len(buffer) == 0: break # reached end of the stream 530 | frames.append(buffer) 531 | 532 | # read audio input until the phrase ends 533 | pause_count, phrase_count = 0, 0 534 | phrase_start_time = elapsed_time 535 | while True: 536 | # handle phrase being too long by cutting off the audio 537 | elapsed_time += seconds_per_buffer 538 | if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit: 539 | break 540 | 541 | buffer = source.stream.read(source.CHUNK) 542 | if len(buffer) == 0: break # reached end of the stream 543 | frames.append(buffer) 544 | phrase_count += 1 545 | 546 | # check if 
speaking has stopped for longer than the pause threshold on the audio input 547 | energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # unit energy of the audio signal within the buffer 548 | if energy > self.energy_threshold: 549 | pause_count = 0 550 | else: 551 | pause_count += 1 552 | if pause_count > pause_buffer_count: # end of the phrase 553 | break 554 | 555 | # check how long the detected phrase is, and retry listening if the phrase is too short 556 | phrase_count -= pause_count # exclude the buffers for the pause before the phrase 557 | if phrase_count >= phrase_buffer_count or len(buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening 558 | 559 | # obtain frame data 560 | for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end 561 | frame_data = b"".join(frames) 562 | 563 | return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH) 564 | 565 | def listen_in_background(self, source, callback, phrase_time_limit=None): 566 | """ 567 | Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase are detected. 568 | 569 | Returns a function object that, when called, requests that the background listener thread stop. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads. The function accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for the background listener to stop before returning, otherwise it will return immediately and the background listener thread might still be running for a second or two afterwards. Additionally, if you are using a truthy value for ``wait_for_stop``, you must call the function from the same thread you originally called ``listen_in_background`` from. 570 | 571 | Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well. 572 | 573 | The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread. 574 | """ 575 | assert isinstance(source, AudioSource), "Source must be an audio source" 576 | running = [True] 577 | 578 | def threaded_listen(): 579 | with source as s: 580 | while running[0]: 581 | try: # listen for 1 second, then check again if the stop function has been called 582 | audio = self.listen(s, 1, phrase_time_limit) 583 | except WaitTimeoutError: # listening timed out, just try again 584 | pass 585 | else: 586 | if running[0]: callback(self, audio) 587 | 588 | def stopper(wait_for_stop=True): 589 | running[0] = False 590 | if wait_for_stop: 591 | listener_thread.join() # block until the background thread is done, which can take around 1 second 592 | 593 | listener_thread = threading.Thread(target=threaded_listen) 594 | listener_thread.daemon = True 595 | listener_thread.start() 596 | return stopper 597 | 598 | def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, grammar=None, show_all=False): 599 | """ 600 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx. 
601 | 602 | The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using `PocketSphinx `__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``. The ``language`` parameter can also be a tuple of filesystem paths, of the form ``(acoustic_parameters_directory, language_model_file, phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models. 603 | 604 | If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for. 605 | 606 | Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects a path to the grammar file. Note that if a JSGF grammar is passed, an FSG grammar will be created at the same location to speed up execution in the next run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored. 607 | 608 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition. 609 | 610 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation. 
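Example (a minimal sketch; assumes an ``en-US`` PCM WAV file at ``speech.wav`` and a working PocketSphinx installation)::

            import custom_speech_recognition as sr

            r = sr.Recognizer()
            with sr.AudioFile("speech.wav") as source:
                audio = r.record(source)
            print(r.recognize_sphinx(audio, language="en-US"))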
611 | """ 612 | assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" 613 | assert isinstance(language, str) or (isinstance(language, tuple) and len(language) == 3), "``language`` must be a string or 3-tuple of Sphinx data file paths of the form ``(acoustic_parameters, language_model, phoneme_dictionary)``" 614 | assert keyword_entries is None or all(isinstance(keyword, (type(""), type(u""))) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1" 615 | 616 | # import the PocketSphinx speech recognition module 617 | try: 618 | from pocketsphinx import pocketsphinx, Jsgf, FsgModel 619 | 620 | except ImportError: 621 | raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.") 622 | except ValueError: 623 | raise RequestError("bad PocketSphinx installation; try reinstalling PocketSphinx version 0.0.9 or better.") 624 | if not hasattr(pocketsphinx, "Decoder") or not hasattr(pocketsphinx.Decoder, "default_config"): 625 | raise RequestError("outdated PocketSphinx installation; ensure you have PocketSphinx version 0.0.9 or better.") 626 | 627 | if isinstance(language, str): # directory containing language data 628 | language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language) 629 | if not os.path.isdir(language_directory): 630 | raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory)) 631 | acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model") 632 | language_model_file = os.path.join(language_directory, "language-model.lm.bin") 633 | phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict") 634 | else: # 3-tuple of Sphinx data file paths 635 | acoustic_parameters_directory, language_model_file, phoneme_dictionary_file = language 636 | if not os.path.isdir(acoustic_parameters_directory): 637 | raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory)) 638 | if not os.path.isfile(language_model_file): 639 | raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file)) 640 | if not os.path.isfile(phoneme_dictionary_file): 641 | raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file)) 642 | 643 | # create decoder object 644 | config = pocketsphinx.Decoder.default_config() 645 | config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files 646 | config.set_string("-lm", language_model_file) 647 | config.set_string("-dict", phoneme_dictionary_file) 648 | config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal) 649 | decoder = pocketsphinx.Decoder(config) 650 | 651 | # obtain audio data 652 | raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format 653 | 654 | # obtain recognition results 655 | if keyword_entries is not None: # explicitly specified set of keywords 656 | with PortableNamedTemporaryFile("w") as f: 657 | # generate a keywords file - Sphinx documentation recommendeds sensitivities between 1e-50 and 1e-5 658 | f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for keyword, sensitivity in 
keyword_entries) 659 | f.flush() 660 | 661 | # perform the speech recognition with the keywords file (this is inside the context manager so the file isn;t deleted until we're done) 662 | decoder.set_kws("keywords", f.name) 663 | decoder.set_search("keywords") 664 | elif grammar is not None: # a path to a FSG or JSGF grammar 665 | if not os.path.exists(grammar): 666 | raise ValueError("Grammar '{0}' does not exist.".format(grammar)) 667 | grammar_path = os.path.abspath(os.path.dirname(grammar)) 668 | grammar_name = os.path.splitext(os.path.basename(grammar))[0] 669 | fsg_path = "{0}/{1}.fsg".format(grammar_path, grammar_name) 670 | if not os.path.exists(fsg_path): # create FSG grammar if not available 671 | jsgf = Jsgf(grammar) 672 | rule = jsgf.get_rule("{0}.{0}".format(grammar_name)) 673 | fsg = jsgf.build_fsg(rule, decoder.get_logmath(), 7.5) 674 | fsg.writefile(fsg_path) 675 | else: 676 | fsg = FsgModel(fsg_path, decoder.get_logmath(), 7.5) 677 | decoder.set_fsg(grammar_name, fsg) 678 | decoder.set_search(grammar_name) 679 | 680 | decoder.start_utt() # begin utterance processing 681 | decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True) 682 | decoder.end_utt() # stop utterance processing 683 | 684 | if show_all: return decoder 685 | 686 | # return results 687 | hypothesis = decoder.hyp() 688 | if hypothesis is not None: return hypothesis.hypstr 689 | raise UnknownValueError() # no transcriptions available 690 | 691 | def recognize_google(self, audio_data, key=None, language="en-US", pfilter=0, show_all=False, with_confidence=False): 692 | """ 693 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API. 694 | 695 | The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**. 696 | 697 | To obtain your own API key, simply following the steps on the `API Keys `__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API". 698 | 699 | The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer `__. 700 | 701 | The profanity filter level can be adjusted with ``pfilter``: 0 - No filter, 1 - Only shows the first character and replaces the rest with asterisks. The default is level 0. 702 | 703 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary. 704 | 705 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
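        A minimal usage sketch, assuming ``"speech.wav"`` is a placeholder path to a recorded WAV file and the built-in default key is acceptable::

            import custom_speech_recognition as sr

            r = sr.Recognizer()
            with sr.AudioFile("speech.wav") as source:   # placeholder path to a PCM WAV file
                audio = r.record(source)                 # read the whole file into an AudioData instance
            try:
                print(r.recognize_google(audio, language="en-US"))
            except sr.UnknownValueError:
                print("speech was unintelligible")
            except sr.RequestError as e:
                print("recognition request failed: {}".format(e))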
706 | """ 707 | assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" 708 | assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string" 709 | assert isinstance(language, str), "``language`` must be a string" 710 | 711 | flac_data = audio_data.get_flac_data( 712 | convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz 713 | convert_width=2 # audio samples must be 16-bit 714 | ) 715 | if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw" 716 | url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({ 717 | "client": "chromium", 718 | "lang": language, 719 | "key": key, 720 | "pFilter": pfilter 721 | })) 722 | request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)}) 723 | 724 | # obtain audio transcription results 725 | try: 726 | response = urlopen(request, timeout=self.operation_timeout) 727 | except HTTPError as e: 728 | raise RequestError("recognition request failed: {}".format(e.reason)) 729 | except URLError as e: 730 | raise RequestError("recognition connection failed: {}".format(e.reason)) 731 | response_text = response.read().decode("utf-8") 732 | 733 | # ignore any blank blocks 734 | actual_result = [] 735 | for line in response_text.split("\n"): 736 | if not line: continue 737 | result = json.loads(line)["result"] 738 | if len(result) != 0: 739 | actual_result = result[0] 740 | break 741 | 742 | # return results 743 | if show_all: 744 | return actual_result 745 | 746 | if not isinstance(actual_result, dict) or len(actual_result.get("alternative", [])) == 0: raise UnknownValueError() 747 | 748 | if "confidence" in actual_result["alternative"]: 749 | # return alternative with highest confidence score 750 | best_hypothesis = max(actual_result["alternative"], key=lambda alternative: alternative["confidence"]) 751 | else: 752 | # when there is no confidence available, we arbitrarily choose the first hypothesis. 753 | best_hypothesis = actual_result["alternative"][0] 754 | if "transcript" not in best_hypothesis: raise UnknownValueError() 755 | # https://cloud.google.com/speech-to-text/docs/basics#confidence-values 756 | # "Your code should not require the confidence field as it is not guaranteed to be accurate, or even set, in any of the results." 757 | confidence = best_hypothesis.get("confidence", 0.5) 758 | if with_confidence: 759 | return best_hypothesis["transcript"], confidence 760 | return best_hypothesis["transcript"] 761 | 762 | def recognize_google_cloud(self, audio_data, credentials_json=None, language="en-US", preferred_phrases=None, show_all=False): 763 | """ 764 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API. 765 | 766 | This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart `__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file `__. 767 | 768 | The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). 
A list of supported language tags can be found in the `Google Cloud Speech API documentation `__. 769 | 770 | If ``preferred_phrases`` is an iterable of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings `__. 771 | 772 | Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary. 773 | 774 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection. 775 | """ 776 | assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" 777 | if credentials_json is None: 778 | assert os.environ.get('GOOGLE_APPLICATION_CREDENTIALS') is not None 779 | assert isinstance(language, str), "``language`` must be a string" 780 | assert preferred_phrases is None or all(isinstance(preferred_phrases, (type(""), type(u""))) for preferred_phrases in preferred_phrases), "``preferred_phrases`` must be a list of strings" 781 | 782 | try: 783 | import socket 784 | from google.cloud import speech 785 | from google.api_core.exceptions import GoogleAPICallError 786 | except ImportError: 787 | raise RequestError('missing google-cloud-speech module: ensure that google-cloud-speech is set up correctly.') 788 | 789 | if credentials_json is not None: 790 | client = speech.SpeechClient.from_service_account_json(credentials_json) 791 | else: 792 | client = speech.SpeechClient() 793 | 794 | flac_data = audio_data.get_flac_data( 795 | convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)), # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range 796 | convert_width=2 # audio samples must be 16-bit 797 | ) 798 | audio = speech.RecognitionAudio(content=flac_data) 799 | 800 | config = { 801 | 'encoding': speech.RecognitionConfig.AudioEncoding.FLAC, 802 | 'sample_rate_hertz': audio_data.sample_rate, 803 | 'language_code': language 804 | } 805 | if preferred_phrases is not None: 806 | config['speechContexts'] = [speech.SpeechContext( 807 | phrases=preferred_phrases 808 | )] 809 | if show_all: 810 | config['enableWordTimeOffsets'] = True # some useful extra options for when we want all the output 811 | 812 | opts = {} 813 | if self.operation_timeout and socket.getdefaulttimeout() is None: 814 | opts['timeout'] = self.operation_timeout 815 | 816 | config = speech.RecognitionConfig(**config) 817 | 818 | try: 819 | response = client.recognize(config=config, audio=audio) 820 | except GoogleAPICallError as e: 821 | raise RequestError(e) 822 | except URLError as e: 823 | raise RequestError("recognition connection failed: {0}".format(e.reason)) 824 | 825 | if show_all: return response 826 | if len(response.results) == 0: raise UnknownValueError() 827 | 828 | transcript = '' 829 | for result in response.results: 830 | transcript += result.alternatives[0].transcript.strip() + ' ' 831 | return transcript 832 | 833 | def recognize_wit(self, audio_data, key, show_all=False): 834 | """ 835 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API. 
836 | 837 | The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter. 838 | 839 | To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings. 840 | 841 | The recognition language is configured in the Wit.ai app settings. 842 | 843 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. 844 | 845 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 846 | """ 847 | assert isinstance(audio_data, AudioData), "Data must be audio data" 848 | assert isinstance(key, str), "``key`` must be a string" 849 | 850 | wav_data = audio_data.get_wav_data( 851 | convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz 852 | convert_width=2 # audio samples should be 16-bit 853 | ) 854 | url = "https://api.wit.ai/speech?v=20170307" 855 | request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"}) 856 | try: 857 | response = urlopen(request, timeout=self.operation_timeout) 858 | except HTTPError as e: 859 | raise RequestError("recognition request failed: {}".format(e.reason)) 860 | except URLError as e: 861 | raise RequestError("recognition connection failed: {}".format(e.reason)) 862 | response_text = response.read().decode("utf-8") 863 | result = json.loads(response_text) 864 | 865 | # return results 866 | if show_all: return result 867 | if "_text" not in result or result["_text"] is None: raise UnknownValueError() 868 | return result["_text"] 869 | 870 | def recognize_azure(self, audio_data, key, language="en-US", profanity="masked", location="westus", show_all=False): 871 | """ 872 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Azure Speech API. 873 | 874 | The Microsoft Azure Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure. 875 | 876 | To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Speech > "Create", and fill in the form to make a "Speech" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Azure Speech API keys are 32-character lowercase hexadecimal strings. 877 | 878 | The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode". 
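        A minimal usage sketch, assuming ``"speech.wav"``, the key, and the region are placeholders; note that by default this method returns a ``(text, confidence)`` pair rather than a bare string::

            import custom_speech_recognition as sr

            r = sr.Recognizer()
            with sr.AudioFile("speech.wav") as source:   # placeholder path to a PCM WAV file
                audio = r.record(source)
            try:
                text, confidence = r.recognize_azure(audio, key="YOUR_AZURE_SPEECH_KEY",
                                                     language="en-US", location="westus")
                print(text, confidence)
            except sr.UnknownValueError:
                print("speech was unintelligible")
            except sr.RequestError as e:
                print("recognition request failed: {}".format(e))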
879 | 880 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. 881 | 882 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 883 | """ 884 | assert isinstance(audio_data, AudioData), "Data must be audio data" 885 | assert isinstance(key, str), "``key`` must be a string" 886 | # assert isinstance(result_format, str), "``format`` must be a string" # simple|detailed 887 | assert isinstance(language, str), "``language`` must be a string" 888 | 889 | result_format = 'detailed' 890 | access_token, expire_time = getattr(self, "azure_cached_access_token", None), getattr(self, "azure_cached_access_token_expiry", None) 891 | allow_caching = True 892 | try: 893 | from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+ 894 | except ImportError: 895 | expire_time = None # monotonic time not available, don't cache access tokens 896 | allow_caching = False # don't allow caching, since monotonic time isn't available 897 | if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired 898 | # get an access token using OAuth 899 | credential_url = "https://" + location + ".api.cognitive.microsoft.com/sts/v1.0/issueToken" 900 | credential_request = Request(credential_url, data=b"", headers={ 901 | "Content-type": "application/x-www-form-urlencoded", 902 | "Content-Length": "0", 903 | "Ocp-Apim-Subscription-Key": key, 904 | }) 905 | 906 | if allow_caching: 907 | start_time = monotonic() 908 | 909 | try: 910 | credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one 911 | except HTTPError as e: 912 | raise RequestError("credential request failed: {}".format(e.reason)) 913 | except URLError as e: 914 | raise RequestError("credential connection failed: {}".format(e.reason)) 915 | access_token = credential_response.read().decode("utf-8") 916 | 917 | if allow_caching: 918 | # save the token for the duration it is valid for 919 | self.azure_cached_access_token = access_token 920 | self.azure_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/Speech-Service/rest-apis#authentication, the token expires in exactly 10 minutes 921 | 922 | wav_data = audio_data.get_wav_data( 923 | convert_rate=16000, # audio samples must be 8kHz or 16 kHz 924 | convert_width=2 # audio samples should be 16-bit 925 | ) 926 | 927 | url = "https://" + location + ".stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?{}".format(urlencode({ 928 | "language": language, 929 | "format": result_format, 930 | "profanity": profanity 931 | })) 932 | 933 | if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible 934 | request = Request(url, data=io.BytesIO(wav_data), headers={ 935 | "Authorization": "Bearer {}".format(access_token), 936 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", 937 | "Transfer-Encoding": "chunked", 938 | }) 939 | else: # fall back on manually formatting the POST body as a chunked 
request 940 | ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8") 941 | chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n" 942 | request = Request(url, data=chunked_transfer_encoding_data, headers={ 943 | "Authorization": "Bearer {}".format(access_token), 944 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", 945 | "Transfer-Encoding": "chunked", 946 | }) 947 | 948 | try: 949 | response = urlopen(request, timeout=self.operation_timeout) 950 | except HTTPError as e: 951 | raise RequestError("recognition request failed: {}".format(e.reason)) 952 | except URLError as e: 953 | raise RequestError("recognition connection failed: {}".format(e.reason)) 954 | response_text = response.read().decode("utf-8") 955 | result = json.loads(response_text) 956 | 957 | # return results 958 | if show_all: 959 | return result 960 | if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "NBest" not in result: 961 | raise UnknownValueError() 962 | return result['NBest'][0]["Display"], result['NBest'][0]["Confidence"] 963 | 964 | def recognize_bing(self, audio_data, key, language="en-US", show_all=False): 965 | """ 966 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Speech API. 967 | 968 | The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure. 969 | 970 | To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Bing Speech API > "Create", and fill in the form to make a "Bing Speech API" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Bing Speech API keys are 32-character lowercase hexadecimal strings. 971 | 972 | The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode". 973 | 974 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. 975 | 976 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
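        A minimal usage sketch, assuming ``"speech.wav"`` and the key are placeholders (error handling as in the ``recognize_google`` sketch above)::

            import custom_speech_recognition as sr

            r = sr.Recognizer()
            with sr.AudioFile("speech.wav") as source:   # placeholder path to a PCM WAV file
                audio = r.record(source)
            print(r.recognize_bing(audio, key="YOUR_BING_SPEECH_KEY", language="en-US"))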
977 | """ 978 | assert isinstance(audio_data, AudioData), "Data must be audio data" 979 | assert isinstance(key, str), "``key`` must be a string" 980 | assert isinstance(language, str), "``language`` must be a string" 981 | 982 | access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None) 983 | allow_caching = True 984 | try: 985 | from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+ 986 | except ImportError: 987 | expire_time = None # monotonic time not available, don't cache access tokens 988 | allow_caching = False # don't allow caching, since monotonic time isn't available 989 | if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired 990 | # get an access token using OAuth 991 | credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken" 992 | credential_request = Request(credential_url, data=b"", headers={ 993 | "Content-type": "application/x-www-form-urlencoded", 994 | "Content-Length": "0", 995 | "Ocp-Apim-Subscription-Key": key, 996 | }) 997 | 998 | if allow_caching: 999 | start_time = monotonic() 1000 | 1001 | try: 1002 | credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one 1003 | except HTTPError as e: 1004 | raise RequestError("credential request failed: {}".format(e.reason)) 1005 | except URLError as e: 1006 | raise RequestError("credential connection failed: {}".format(e.reason)) 1007 | access_token = credential_response.read().decode("utf-8") 1008 | 1009 | if allow_caching: 1010 | # save the token for the duration it is valid for 1011 | self.bing_cached_access_token = access_token 1012 | self.bing_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition, the token expires in exactly 10 minutes 1013 | 1014 | wav_data = audio_data.get_wav_data( 1015 | convert_rate=16000, # audio samples must be 8kHz or 16 kHz 1016 | convert_width=2 # audio samples should be 16-bit 1017 | ) 1018 | 1019 | url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format(urlencode({ 1020 | "language": language, 1021 | "locale": language, 1022 | "requestid": uuid.uuid4(), 1023 | })) 1024 | 1025 | if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible 1026 | request = Request(url, data=io.BytesIO(wav_data), headers={ 1027 | "Authorization": "Bearer {}".format(access_token), 1028 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", 1029 | "Transfer-Encoding": "chunked", 1030 | }) 1031 | else: # fall back on manually formatting the POST body as a chunked request 1032 | ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8") 1033 | chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n" 1034 | request = Request(url, data=chunked_transfer_encoding_data, headers={ 1035 | "Authorization": "Bearer {}".format(access_token), 1036 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", 1037 | "Transfer-Encoding": "chunked", 1038 | }) 1039 | 1040 | try: 1041 | response = urlopen(request, timeout=self.operation_timeout) 1042 | except HTTPError as 
e: 1043 | raise RequestError("recognition request failed: {}".format(e.reason)) 1044 | except URLError as e: 1045 | raise RequestError("recognition connection failed: {}".format(e.reason)) 1046 | response_text = response.read().decode("utf-8") 1047 | result = json.loads(response_text) 1048 | 1049 | # return results 1050 | if show_all: return result 1051 | if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "DisplayText" not in result: raise UnknownValueError() 1052 | return result["DisplayText"] 1053 | 1054 | def recognize_lex(self, audio_data, bot_name, bot_alias, user_id, content_type="audio/l16; rate=16000; channels=1", access_key_id=None, secret_access_key=None, region=None): 1055 | """ 1056 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Amazon Lex API. 1057 | 1058 | If access_key_id or secret_access_key is not set it will go through the list in the link below 1059 | http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials 1060 | """ 1061 | assert isinstance(audio_data, AudioData), "Data must be audio data" 1062 | assert isinstance(bot_name, str), "``bot_name`` must be a string" 1063 | assert isinstance(bot_alias, str), "``bot_alias`` must be a string" 1064 | assert isinstance(user_id, str), "``user_id`` must be a string" 1065 | assert isinstance(content_type, str), "``content_type`` must be a string" 1066 | assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string" 1067 | assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string" 1068 | assert region is None or isinstance(region, str), "``region`` must be a string" 1069 | 1070 | try: 1071 | import boto3 1072 | except ImportError: 1073 | raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.") 1074 | 1075 | client = boto3.client('lex-runtime', aws_access_key_id=access_key_id, 1076 | aws_secret_access_key=secret_access_key, 1077 | region_name=region) 1078 | 1079 | raw_data = audio_data.get_raw_data( 1080 | convert_rate=16000, convert_width=2 1081 | ) 1082 | 1083 | accept = "text/plain; charset=utf-8" 1084 | response = client.post_content(botName=bot_name, botAlias=bot_alias, userId=user_id, contentType=content_type, accept=accept, inputStream=raw_data) 1085 | 1086 | return response["inputTranscript"] 1087 | 1088 | def recognize_houndify(self, audio_data, client_id, client_key, show_all=False): 1089 | """ 1090 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API. 1091 | 1092 | The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account `__. Once logged into the `dashboard `__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue". 1093 | 1094 | To get the client ID and client key for a Houndify client, go to the `dashboard `__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings. 1095 | 1096 | Currently, only English is supported as a recognition language. 1097 | 1098 | Returns the most likely transcription if ``show_all`` is false (the default). 
Otherwise, returns the raw API response as a JSON dictionary. 1099 | 1100 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 1101 | """ 1102 | assert isinstance(audio_data, AudioData), "Data must be audio data" 1103 | assert isinstance(client_id, str), "``client_id`` must be a string" 1104 | assert isinstance(client_key, str), "``client_key`` must be a string" 1105 | 1106 | wav_data = audio_data.get_wav_data( 1107 | convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000, # audio samples must be 8 kHz or 16 kHz 1108 | convert_width=2 # audio samples should be 16-bit 1109 | ) 1110 | url = "https://api.houndify.com/v1/audio" 1111 | user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4()) 1112 | request_time = str(int(time.time())) 1113 | request_signature = base64.urlsafe_b64encode( 1114 | hmac.new( 1115 | base64.urlsafe_b64decode(client_key), 1116 | user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"), 1117 | hashlib.sha256 1118 | ).digest() # get the HMAC digest as bytes 1119 | ).decode("utf-8") 1120 | request = Request(url, data=wav_data, headers={ 1121 | "Content-Type": "application/json", 1122 | "Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}), 1123 | "Hound-Request-Authentication": "{};{}".format(user_id, request_id), 1124 | "Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature) 1125 | }) 1126 | try: 1127 | response = urlopen(request, timeout=self.operation_timeout) 1128 | except HTTPError as e: 1129 | raise RequestError("recognition request failed: {}".format(e.reason)) 1130 | except URLError as e: 1131 | raise RequestError("recognition connection failed: {}".format(e.reason)) 1132 | response_text = response.read().decode("utf-8") 1133 | result = json.loads(response_text) 1134 | 1135 | # return results 1136 | if show_all: return result 1137 | if "Disambiguation" not in result or result["Disambiguation"] is None: 1138 | raise UnknownValueError() 1139 | return result['Disambiguation']['ChoiceData'][0]['Transcription'], result['Disambiguation']['ChoiceData'][0]['ConfidenceScore'] 1140 | 1141 | def recognize_amazon(self, audio_data, bucket_name=None, access_key_id=None, secret_access_key=None, region=None, job_name=None, file_key=None): 1142 | """ 1143 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance) using Amazon Transcribe. 
1144 | https://aws.amazon.com/transcribe/ 1145 | If access_key_id or secret_access_key is not set it will go through the list in the link below 1146 | http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials 1147 | """ 1148 | assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string" 1149 | assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string" 1150 | assert region is None or isinstance(region, str), "``region`` must be a string" 1151 | import traceback 1152 | import uuid 1153 | import multiprocessing 1154 | from botocore.exceptions import ClientError 1155 | proc = multiprocessing.current_process() 1156 | 1157 | check_existing = audio_data is None and job_name 1158 | 1159 | bucket_name = bucket_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid)) 1160 | job_name = job_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid)) 1161 | 1162 | try: 1163 | import boto3 1164 | except ImportError: 1165 | raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.") 1166 | 1167 | transcribe = boto3.client( 1168 | 'transcribe', 1169 | aws_access_key_id=access_key_id, 1170 | aws_secret_access_key=secret_access_key, 1171 | region_name=region) 1172 | 1173 | s3 = boto3.client('s3', 1174 | aws_access_key_id=access_key_id, 1175 | aws_secret_access_key=secret_access_key, 1176 | region_name=region) 1177 | 1178 | session = boto3.Session( 1179 | aws_access_key_id=access_key_id, 1180 | aws_secret_access_key=secret_access_key, 1181 | region_name=region 1182 | ) 1183 | 1184 | # Upload audio data to S3. 1185 | filename = '%s.wav' % job_name 1186 | try: 1187 | # Bucket creation fails surprisingly often, even if the bucket exists. 1188 | # print('Attempting to create bucket %s...' % bucket_name) 1189 | s3.create_bucket(Bucket=bucket_name) 1190 | except ClientError as exc: 1191 | print('Error creating bucket %s: %s' % (bucket_name, exc)) 1192 | s3res = session.resource('s3') 1193 | bucket = s3res.Bucket(bucket_name) 1194 | if audio_data is not None: 1195 | print('Uploading audio data...') 1196 | wav_data = audio_data.get_wav_data() 1197 | s3.put_object(Bucket=bucket_name, Key=filename, Body=wav_data) 1198 | object_acl = s3res.ObjectAcl(bucket_name, filename) 1199 | object_acl.put(ACL='public-read') 1200 | else: 1201 | print('Skipping audio upload.') 1202 | job_uri = 'https://%s.s3.amazonaws.com/%s' % (bucket_name, filename) 1203 | 1204 | if check_existing: 1205 | 1206 | # Wait for job to complete. 1207 | try: 1208 | status = transcribe.get_transcription_job(TranscriptionJobName=job_name) 1209 | except ClientError as exc: 1210 | print('!'*80) 1211 | print('Error getting job:', exc.response) 1212 | if exc.response['Error']['Code'] == 'BadRequestException' and "The requested job couldn't be found" in str(exc): 1213 | # Some error caused the job we recorded to not exist on AWS. 1214 | # Likely we were interrupted right after retrieving and deleting the job but before recording the transcript. 1215 | # Reset and try again later. 1216 | exc = TranscriptionNotReady() 1217 | exc.job_name = None 1218 | exc.file_key = None 1219 | raise exc 1220 | else: 1221 | # Some other error happened, so re-raise. 1222 | raise 1223 | 1224 | job = status['TranscriptionJob'] 1225 | if job['TranscriptionJobStatus'] in ['COMPLETED'] and 'TranscriptFileUri' in job['Transcript']: 1226 | 1227 | # Retrieve transcription JSON containing transcript. 
1228 | transcript_uri = job['Transcript']['TranscriptFileUri'] 1229 | import urllib.request, json 1230 | with urllib.request.urlopen(transcript_uri) as json_data: 1231 | d = json.load(json_data) 1232 | confidences = [] 1233 | for item in d['results']['items']: 1234 | confidences.append(float(item['alternatives'][0]['confidence'])) 1235 | confidence = 0.5 1236 | if confidences: 1237 | confidence = sum(confidences)/float(len(confidences)) 1238 | transcript = d['results']['transcripts'][0]['transcript'] 1239 | 1240 | # Delete job. 1241 | try: 1242 | transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup 1243 | except Exception as exc: 1244 | print('Warning, could not clean up transcription: %s' % exc) 1245 | traceback.print_exc() 1246 | 1247 | # Delete S3 file. 1248 | s3.delete_object(Bucket=bucket_name, Key=filename) 1249 | 1250 | return transcript, confidence 1251 | elif job['TranscriptionJobStatus'] in ['FAILED']: 1252 | 1253 | # Delete job. 1254 | try: 1255 | transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup 1256 | except Exception as exc: 1257 | print('Warning, could not clean up transcription: %s' % exc) 1258 | traceback.print_exc() 1259 | 1260 | # Delete S3 file. 1261 | s3.delete_object(Bucket=bucket_name, Key=filename) 1262 | 1263 | exc = TranscriptionFailed() 1264 | exc.job_name = None 1265 | exc.file_key = None 1266 | raise exc 1267 | else: 1268 | # Keep waiting. 1269 | print('Keep waiting.') 1270 | exc = TranscriptionNotReady() 1271 | exc.job_name = job_name 1272 | exc.file_key = None 1273 | raise exc 1274 | 1275 | else: 1276 | 1277 | # Launch the transcription job. 1278 | # try: 1279 | # transcribe.delete_transcription_job(TranscriptionJobName=job_name) # pre-cleanup 1280 | # except: 1281 | # # It's ok if this fails because the job hopefully doesn't exist yet. 1282 | # pass 1283 | try: 1284 | transcribe.start_transcription_job( 1285 | TranscriptionJobName=job_name, 1286 | Media={'MediaFileUri': job_uri}, 1287 | MediaFormat='wav', 1288 | LanguageCode='en-US' 1289 | ) 1290 | exc = TranscriptionNotReady() 1291 | exc.job_name = job_name 1292 | exc.file_key = None 1293 | raise exc 1294 | except ClientError as exc: 1295 | print('!'*80) 1296 | print('Error starting job:', exc.response) 1297 | if exc.response['Error']['Code'] == 'LimitExceededException': 1298 | # Could not start job. Cancel everything. 1299 | s3.delete_object(Bucket=bucket_name, Key=filename) 1300 | exc = TranscriptionNotReady() 1301 | exc.job_name = None 1302 | exc.file_key = None 1303 | raise exc 1304 | else: 1305 | # Some other error happened, so re-raise. 1306 | raise 1307 | 1308 | def recognize_assemblyai(self, audio_data, api_token, job_name=None, **kwargs): 1309 | """ 1310 | Wraps the AssemblyAI STT service. 1311 | https://www.assemblyai.com/ 1312 | """ 1313 | 1314 | def read_file(filename, chunk_size=5242880): 1315 | with open(filename, 'rb') as _file: 1316 | while True: 1317 | data = _file.read(chunk_size) 1318 | if not data: 1319 | break 1320 | yield data 1321 | 1322 | check_existing = audio_data is None and job_name 1323 | if check_existing: 1324 | # Query status. 1325 | transciption_id = job_name 1326 | endpoint = f"https://api.assemblyai.com/v2/transcript/{transciption_id}" 1327 | headers = { 1328 | "authorization": api_token, 1329 | } 1330 | response = requests.get(endpoint, headers=headers) 1331 | data = response.json() 1332 | status = data['status'] 1333 | 1334 | if status == 'error': 1335 | # Handle error. 
1336 | exc = TranscriptionFailed() 1337 | exc.job_name = None 1338 | exc.file_key = None 1339 | raise exc 1340 | # Handle success. 1341 | elif status == 'completed': 1342 | confidence = data['confidence'] 1343 | text = data['text'] 1344 | return text, confidence 1345 | 1346 | # Otherwise keep waiting. 1347 | print('Keep waiting.') 1348 | exc = TranscriptionNotReady() 1349 | exc.job_name = job_name 1350 | exc.file_key = None 1351 | raise exc 1352 | else: 1353 | # Upload file. 1354 | headers = {'authorization': api_token} 1355 | response = requests.post('https://api.assemblyai.com/v2/upload', 1356 | headers=headers, 1357 | data=read_file(audio_data)) 1358 | upload_url = response.json()['upload_url'] 1359 | 1360 | # Queue file for transcription. 1361 | endpoint = "https://api.assemblyai.com/v2/transcript" 1362 | json = { 1363 | "audio_url": upload_url 1364 | } 1365 | headers = { 1366 | "authorization": api_token, 1367 | "content-type": "application/json" 1368 | } 1369 | response = requests.post(endpoint, json=json, headers=headers) 1370 | data = response.json() 1371 | transciption_id = data['id'] 1372 | exc = TranscriptionNotReady() 1373 | exc.job_name = transciption_id 1374 | exc.file_key = None 1375 | raise exc 1376 | 1377 | def recognize_ibm(self, audio_data, key, language="en-US", show_all=False): 1378 | """ 1379 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API. 1380 | 1381 | The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account `__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance `__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings. 1382 | 1383 | The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation `__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value. 1384 | 1385 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. 1386 | 1387 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
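        A minimal usage sketch, assuming ``"speech.wav"`` and the API key are placeholders; note that this method returns a ``(transcription, confidence)`` pair::

            import custom_speech_recognition as sr

            r = sr.Recognizer()
            with sr.AudioFile("speech.wav") as source:   # placeholder path to a PCM WAV file
                audio = r.record(source)
            transcription, confidence = r.recognize_ibm(audio, key="YOUR_IBM_API_KEY", language="en-US")
            print(transcription, confidence)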
1388 | """ 1389 | assert isinstance(audio_data, AudioData), "Data must be audio data" 1390 | assert isinstance(key, str), "``key`` must be a string" 1391 | 1392 | flac_data = audio_data.get_flac_data( 1393 | convert_rate=None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz 1394 | convert_width=None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit 1395 | ) 1396 | url = "https://gateway-wdc.watsonplatform.net/speech-to-text/api/v1/recognize" 1397 | request = Request(url, data=flac_data, headers={ 1398 | "Content-Type": "audio/x-flac", 1399 | }) 1400 | request.get_method = lambda: 'POST' 1401 | username = 'apikey' 1402 | password = key 1403 | authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8") 1404 | request.add_header("Authorization", "Basic {}".format(authorization_value)) 1405 | try: 1406 | response = urlopen(request, timeout=self.operation_timeout) 1407 | except HTTPError as e: 1408 | raise RequestError("recognition request failed: {}".format(e.reason)) 1409 | except URLError as e: 1410 | raise RequestError("recognition connection failed: {}".format(e.reason)) 1411 | response_text = response.read().decode("utf-8") 1412 | result = json.loads(response_text) 1413 | 1414 | # return results 1415 | if show_all: 1416 | return result 1417 | if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]: 1418 | raise UnknownValueError() 1419 | 1420 | transcription = [] 1421 | confidence = None 1422 | for utterance in result["results"]: 1423 | if "alternatives" not in utterance: raise UnknownValueError() 1424 | for hypothesis in utterance["alternatives"]: 1425 | if "transcript" in hypothesis: 1426 | transcription.append(hypothesis["transcript"]) 1427 | confidence = hypothesis["confidence"] 1428 | break 1429 | return "\n".join(transcription), confidence 1430 | 1431 | lasttfgraph = '' 1432 | tflabels = None 1433 | 1434 | def recognize_tensorflow(self, audio_data, tensor_graph='tensorflow-data/conv_actions_frozen.pb', tensor_label='tensorflow-data/conv_actions_labels.txt'): 1435 | """ 1436 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance). 1437 | 1438 | Path to Tensor loaded from ``tensor_graph``. You can download a model here: http://download.tensorflow.org/models/speech_commands_v0.01.zip 1439 | 1440 | Path to Tensor Labels file loaded from ``tensor_label``. 
1441 | """ 1442 | assert isinstance(audio_data, AudioData), "Data must be audio data" 1443 | assert isinstance(tensor_graph, str), "``tensor_graph`` must be a string" 1444 | assert isinstance(tensor_label, str), "``tensor_label`` must be a string" 1445 | 1446 | try: 1447 | import tensorflow as tf 1448 | except ImportError: 1449 | raise RequestError("missing tensorflow module: ensure that tensorflow is set up correctly.") 1450 | 1451 | if not (tensor_graph == self.lasttfgraph): 1452 | self.lasttfgraph = tensor_graph 1453 | 1454 | # load graph 1455 | with tf.gfile.FastGFile(tensor_graph, 'rb') as f: 1456 | graph_def = tf.GraphDef() 1457 | graph_def.ParseFromString(f.read()) 1458 | tf.import_graph_def(graph_def, name='') 1459 | # load labels 1460 | self.tflabels = [line.rstrip() for line in tf.gfile.GFile(tensor_label)] 1461 | 1462 | wav_data = audio_data.get_wav_data( 1463 | convert_rate=16000, convert_width=2 1464 | ) 1465 | 1466 | with tf.Session() as sess: 1467 | input_layer_name = 'wav_data:0' 1468 | output_layer_name = 'labels_softmax:0' 1469 | softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name) 1470 | predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data}) 1471 | 1472 | # Sort labels in order of confidence 1473 | top_k = predictions.argsort()[-1:][::-1] 1474 | for node_id in top_k: 1475 | human_string = self.tflabels[node_id] 1476 | return human_string 1477 | 1478 | def recognize_whisper(self, audio_data, model="base", show_dict=False, load_options=None, language=None, translate=False, **transcribe_options): 1479 | """ 1480 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using Whisper. 1481 | 1482 | The recognition language is determined by ``language``, an uncapitalized full language name like "english" or "chinese". See the full language list at https://github.com/openai/whisper/blob/main/whisper/tokenizer.py 1483 | 1484 | model can be any of tiny, base, small, medium, large, tiny.en, base.en, small.en, medium.en. See https://github.com/openai/whisper for more details. 1485 | 1486 | If show_dict is true, returns the full dict response from Whisper, including the detected language. Otherwise returns only the transcription. 1487 | 1488 | You can translate the result to english with Whisper by passing translate=True 1489 | 1490 | Other values are passed directly to whisper. 
See https://github.com/openai/whisper/blob/main/whisper/transcribe.py for all options 1491 | """ 1492 | 1493 | assert isinstance(audio_data, AudioData), "Data must be audio data" 1494 | import numpy as np 1495 | import soundfile as sf 1496 | import torch 1497 | import whisper 1498 | 1499 | if load_options or not hasattr(self, "whisper_model") or self.whisper_model.get(model) is None: 1500 | self.whisper_model = getattr(self, "whisper_model", {}) 1501 | self.whisper_model[model] = whisper.load_model(model, **load_options or {}) 1502 | 1503 | # 16 kHz https://github.com/openai/whisper/blob/28769fcfe50755a817ab922a7bc83483159600a9/whisper/audio.py#L98-L99 1504 | wav_bytes = audio_data.get_wav_data(convert_rate=16000) 1505 | wav_stream = io.BytesIO(wav_bytes) 1506 | audio_array, sampling_rate = sf.read(wav_stream) 1507 | audio_array = audio_array.astype(np.float32) 1508 | 1509 | result = self.whisper_model[model].transcribe( 1510 | audio_array, 1511 | language=language, 1512 | task="translate" if translate else None, 1513 | fp16=torch.cuda.is_available(), 1514 | **transcribe_options 1515 | ) 1516 | 1517 | if show_dict: 1518 | return result 1519 | else: 1520 | return result["text"] 1521 | 1522 | recognize_whisper_api = whisper.recognize_whisper_api 1523 | 1524 | def recognize_vosk(self, audio_data, language='en'): 1525 | from vosk import Model, KaldiRecognizer 1526 | 1527 | assert isinstance(audio_data, AudioData), "Data must be audio data" 1528 | 1529 | if not hasattr(self, 'vosk_model'): 1530 | if not os.path.exists("model"): 1531 | return "Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder." 1532 | exit (1) 1533 | self.vosk_model = Model("model") 1534 | 1535 | rec = KaldiRecognizer(self.vosk_model, 16000); 1536 | 1537 | rec.AcceptWaveform(audio_data.get_raw_data(convert_rate=16000, convert_width=2)); 1538 | finalRecognition = rec.FinalResult() 1539 | 1540 | return finalRecognition 1541 | 1542 | 1543 | class PortableNamedTemporaryFile(object): 1544 | """Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently open, even on Windows.""" 1545 | def __init__(self, mode="w+b"): 1546 | self.mode = mode 1547 | 1548 | def __enter__(self): 1549 | # create the temporary file and open it 1550 | file_descriptor, file_path = tempfile.mkstemp() 1551 | self._file = os.fdopen(file_descriptor, self.mode) 1552 | 1553 | # the name property is a public field 1554 | self.name = file_path 1555 | return self 1556 | 1557 | def __exit__(self, exc_type, exc_value, traceback): 1558 | self._file.close() 1559 | os.remove(self.name) 1560 | 1561 | def write(self, *args, **kwargs): 1562 | return self._file.write(*args, **kwargs) 1563 | 1564 | def writelines(self, *args, **kwargs): 1565 | return self._file.writelines(*args, **kwargs) 1566 | 1567 | def flush(self, *args, **kwargs): 1568 | return self._file.flush(*args, **kwargs) 1569 | 1570 | 1571 | # =============================== 1572 | # backwards compatibility shims 1573 | # =============================== 1574 | 1575 | WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1 1576 | 1577 | 1578 | def recognize_api(self, audio_data, client_access_token, language="en", session_id=None, show_all=False): 1579 | wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2) 1580 | url = "https://api.api.ai/v1/query" 1581 | while True: 1582 | boundary = uuid.uuid4().hex 1583 | if 
boundary.encode("utf-8") not in wav_data: break 1584 | if session_id is None: session_id = uuid.uuid4().hex 1585 | data = b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"request\"\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" + b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" + b"Content-Type: audio/wav\r\n" + b"\r\n" + wav_data + b"\r\n" + b"--" + boundary.encode("utf-8") + b"--\r\n" 1586 | request = Request(url, data=data, headers={"Authorization": "Bearer {}".format(client_access_token), "Content-Length": str(len(data)), "Expect": "100-continue", "Content-Type": "multipart/form-data; boundary={}".format(boundary)}) 1587 | try: response = urlopen(request, timeout=10) 1588 | except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) 1589 | except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) 1590 | response_text = response.read().decode("utf-8") 1591 | result = json.loads(response_text) 1592 | if show_all: return result 1593 | if "status" not in result or "errorType" not in result["status"] or result["status"]["errorType"] != "success": 1594 | raise UnknownValueError() 1595 | return result["result"]["resolvedQuery"] 1596 | 1597 | 1598 | Recognizer.recognize_api = classmethod(recognize_api) # API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, and currently is only optionally available for paid plans 1599 | -------------------------------------------------------------------------------- /custom_speech_recognition/__main__.py: -------------------------------------------------------------------------------- 1 | import custom_speech_recognition as sr 2 | 3 | r = sr.Recognizer() 4 | m = sr.Microphone() 5 | 6 | try: 7 | print("A moment of silence, please...") 8 | with m as source: r.adjust_for_ambient_noise(source) 9 | print("Set minimum energy threshold to {}".format(r.energy_threshold)) 10 | while True: 11 | print("Say something!") 12 | with m as source: audio = r.listen(source) 13 | print("Got it! Now to recognize it...") 14 | try: 15 | # recognize speech using Google Speech Recognition 16 | value = r.recognize_google(audio) 17 | 18 | print("You said {}".format(value)) 19 | except sr.UnknownValueError: 20 | print("Oops! Didn't catch that") 21 | except sr.RequestError as e: 22 | print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e)) 23 | except KeyboardInterrupt: 24 | pass 25 | -------------------------------------------------------------------------------- /custom_speech_recognition/audio.py: -------------------------------------------------------------------------------- 1 | import aifc 2 | import audioop 3 | import io 4 | import os 5 | import platform 6 | import stat 7 | import subprocess 8 | import sys 9 | import wave 10 | 11 | 12 | class AudioData(object): 13 | """ 14 | Creates a new ``AudioData`` instance, which represents mono audio data. 15 | 16 | The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format. 17 | 18 | The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample. 
19 | 20 | The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz). 21 | 22 | Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly. 23 | """ 24 | 25 | def __init__(self, frame_data, sample_rate, sample_width): 26 | assert sample_rate > 0, "Sample rate must be a positive integer" 27 | assert ( 28 | sample_width % 1 == 0 and 1 <= sample_width <= 4 29 | ), "Sample width must be between 1 and 4 inclusive" 30 | self.frame_data = frame_data 31 | self.sample_rate = sample_rate 32 | self.sample_width = int(sample_width) 33 | 34 | def get_segment(self, start_ms=None, end_ms=None): 35 | """ 36 | Returns a new ``AudioData`` instance, trimmed to a given time interval. In other words, an ``AudioData`` instance with the same audio data except starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in. 37 | 38 | If not specified, ``start_ms`` defaults to the beginning of the audio, and ``end_ms`` defaults to the end. 39 | """ 40 | assert ( 41 | start_ms is None or start_ms >= 0 42 | ), "``start_ms`` must be a non-negative number" 43 | assert end_ms is None or end_ms >= ( 44 | 0 if start_ms is None else start_ms 45 | ), "``end_ms`` must be a non-negative number greater or equal to ``start_ms``" 46 | if start_ms is None: 47 | start_byte = 0 48 | else: 49 | start_byte = int( 50 | (start_ms * self.sample_rate * self.sample_width) // 1000 51 | ) 52 | if end_ms is None: 53 | end_byte = len(self.frame_data) 54 | else: 55 | end_byte = int( 56 | (end_ms * self.sample_rate * self.sample_width) // 1000 57 | ) 58 | return AudioData( 59 | self.frame_data[start_byte:end_byte], 60 | self.sample_rate, 61 | self.sample_width, 62 | ) 63 | 64 | def get_raw_data(self, convert_rate=None, convert_width=None): 65 | """ 66 | Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance. 67 | 68 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. 69 | 70 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. 71 | 72 | Writing these bytes directly to a file results in a valid `RAW/PCM audio file `__. 
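        A minimal usage sketch, assuming ``audio`` is an ``AudioData`` instance obtained elsewhere (for example from ``recognizer_instance.record``); this converts it to the 16 kHz, 16-bit mono layout that the bundled PocketSphinx models expect::

            pcm = audio.get_raw_data(convert_rate=16000, convert_width=2)
            with open("speech.raw", "wb") as f:          # placeholder output path
                f.write(pcm)                             # raw little-endian PCM, no header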
73 | """ 74 | assert ( 75 | convert_rate is None or convert_rate > 0 76 | ), "Sample rate to convert to must be a positive integer" 77 | assert convert_width is None or ( 78 | convert_width % 1 == 0 and 1 <= convert_width <= 4 79 | ), "Sample width to convert to must be between 1 and 4 inclusive" 80 | 81 | raw_data = self.frame_data 82 | 83 | # make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples) 84 | if self.sample_width == 1: 85 | raw_data = audioop.bias( 86 | raw_data, 1, -128 87 | ) # subtract 128 from every sample to make them act like signed samples 88 | 89 | # resample audio at the desired rate if specified 90 | if convert_rate is not None and self.sample_rate != convert_rate: 91 | raw_data, _ = audioop.ratecv( 92 | raw_data, 93 | self.sample_width, 94 | 1, 95 | self.sample_rate, 96 | convert_rate, 97 | None, 98 | ) 99 | 100 | # convert samples to desired sample width if specified 101 | if convert_width is not None and self.sample_width != convert_width: 102 | if ( 103 | convert_width == 3 104 | ): # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866) 105 | raw_data = audioop.lin2lin( 106 | raw_data, self.sample_width, 4 107 | ) # convert audio into 32-bit first, which is always supported 108 | try: 109 | audioop.bias( 110 | b"", 3, 0 111 | ) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) 112 | except ( 113 | audioop.error 114 | ): # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) 115 | raw_data = b"".join( 116 | raw_data[i + 1 : i + 4] 117 | for i in range(0, len(raw_data), 4) 118 | ) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample 119 | else: # 24-bit audio fully supported, we don't need to shim anything 120 | raw_data = audioop.lin2lin( 121 | raw_data, self.sample_width, convert_width 122 | ) 123 | else: 124 | raw_data = audioop.lin2lin( 125 | raw_data, self.sample_width, convert_width 126 | ) 127 | 128 | # if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again 129 | if convert_width == 1: 130 | raw_data = audioop.bias( 131 | raw_data, 1, 128 132 | ) # add 128 to every sample to make them act like unsigned samples again 133 | 134 | return raw_data 135 | 136 | def get_wav_data(self, convert_rate=None, convert_width=None, nchannels = 1): 137 | """ 138 | Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance. 139 | 140 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. 141 | 142 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. 143 | 144 | Writing these bytes directly to a file results in a valid `WAV file `__. 
145 | """ 146 | raw_data = self.get_raw_data(convert_rate, convert_width) 147 | sample_rate = ( 148 | self.sample_rate if convert_rate is None else convert_rate 149 | ) 150 | sample_width = ( 151 | self.sample_width if convert_width is None else convert_width 152 | ) 153 | 154 | # generate the WAV file contents 155 | with io.BytesIO() as wav_file: 156 | wav_writer = wave.open(wav_file, "wb") 157 | try: # note that we can't use context manager, since that was only added in Python 3.4 158 | wav_writer.setframerate(sample_rate) 159 | wav_writer.setsampwidth(sample_width) 160 | wav_writer.setnchannels(nchannels) 161 | wav_writer.writeframes(raw_data) 162 | wav_data = wav_file.getvalue() 163 | finally: # make sure resources are cleaned up 164 | wav_writer.close() 165 | return wav_data 166 | 167 | def get_aiff_data(self, convert_rate=None, convert_width=None): 168 | """ 169 | Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance. 170 | 171 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. 172 | 173 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. 174 | 175 | Writing these bytes directly to a file results in a valid `AIFF-C file `__. 176 | """ 177 | raw_data = self.get_raw_data(convert_rate, convert_width) 178 | sample_rate = ( 179 | self.sample_rate if convert_rate is None else convert_rate 180 | ) 181 | sample_width = ( 182 | self.sample_width if convert_width is None else convert_width 183 | ) 184 | 185 | # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian 186 | if hasattr( 187 | audioop, "byteswap" 188 | ): # ``audioop.byteswap`` was only added in Python 3.4 189 | raw_data = audioop.byteswap(raw_data, sample_width) 190 | else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback 191 | raw_data = raw_data[sample_width - 1 :: -1] + b"".join( 192 | raw_data[i + sample_width : i : -1] 193 | for i in range(sample_width - 1, len(raw_data), sample_width) 194 | ) 195 | 196 | # generate the AIFF-C file contents 197 | with io.BytesIO() as aiff_file: 198 | aiff_writer = aifc.open(aiff_file, "wb") 199 | try: # note that we can't use context manager, since that was only added in Python 3.4 200 | aiff_writer.setframerate(sample_rate) 201 | aiff_writer.setsampwidth(sample_width) 202 | aiff_writer.setnchannels(1) 203 | aiff_writer.writeframes(raw_data) 204 | aiff_data = aiff_file.getvalue() 205 | finally: # make sure resources are cleaned up 206 | aiff_writer.close() 207 | return aiff_data 208 | 209 | def get_flac_data(self, convert_rate=None, convert_width=None): 210 | """ 211 | Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance. 212 | 213 | Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC. 214 | 215 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. 216 | 217 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. 218 | 219 | Writing these bytes directly to a file results in a valid `FLAC file `__. 
220 | """ 221 | assert convert_width is None or ( 222 | convert_width % 1 == 0 and 1 <= convert_width <= 3 223 | ), "Sample width to convert to must be between 1 and 3 inclusive" 224 | 225 | if ( 226 | self.sample_width > 3 and convert_width is None 227 | ): # resulting WAV data would be 32-bit, which is not convertable to FLAC using our encoder 228 | convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that 229 | 230 | # run the FLAC converter with the WAV data to get the FLAC data 231 | wav_data = self.get_wav_data(convert_rate, convert_width) 232 | flac_converter = get_flac_converter() 233 | if ( 234 | os.name == "nt" 235 | ): # on Windows, specify that the process is to be started without showing a console window 236 | startup_info = subprocess.STARTUPINFO() 237 | startup_info.dwFlags |= ( 238 | subprocess.STARTF_USESHOWWINDOW 239 | ) # specify that the wShowWindow field of `startup_info` contains a value 240 | startup_info.wShowWindow = ( 241 | subprocess.SW_HIDE 242 | ) # specify that the console window should be hidden 243 | else: 244 | startup_info = None # default startupinfo 245 | process = subprocess.Popen( 246 | [ 247 | flac_converter, 248 | "--stdout", 249 | "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output 250 | "--best", # highest level of compression available 251 | "-", # the input FLAC file contents will be given in stdin 252 | ], 253 | stdin=subprocess.PIPE, 254 | stdout=subprocess.PIPE, 255 | startupinfo=startup_info, 256 | ) 257 | flac_data, stderr = process.communicate(wav_data) 258 | return flac_data 259 | 260 | 261 | def get_flac_converter(): 262 | """Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found.""" 263 | flac_converter = shutil_which("flac") # check for installed version first 264 | if flac_converter is None: # flac utility is not installed 265 | base_path = os.path.dirname( 266 | os.path.abspath(__file__) 267 | ) # directory of the current module file, where all the FLAC bundled binaries are stored 268 | system, machine = platform.system(), platform.machine() 269 | if system == "Windows" and machine in { 270 | "i686", 271 | "i786", 272 | "x86", 273 | "x86_64", 274 | "AMD64", 275 | }: 276 | flac_converter = os.path.join(base_path, "flac-win32.exe") 277 | elif system == "Darwin" and machine in { 278 | "i686", 279 | "i786", 280 | "x86", 281 | "x86_64", 282 | "AMD64", 283 | }: 284 | flac_converter = os.path.join(base_path, "flac-mac") 285 | elif system == "Linux" and machine in {"i686", "i786", "x86"}: 286 | flac_converter = os.path.join(base_path, "flac-linux-x86") 287 | elif system == "Linux" and machine in {"x86_64", "AMD64"}: 288 | flac_converter = os.path.join(base_path, "flac-linux-x86_64") 289 | else: # no FLAC converter available 290 | raise OSError( 291 | "FLAC conversion utility not available - consider installing the FLAC command line application by running `apt-get install flac` or your operating system's equivalent" 292 | ) 293 | 294 | # mark FLAC converter as executable if possible 295 | try: 296 | # handle known issue when running on docker: 297 | # run executable right after chmod() may result in OSError "Text file busy" 298 | # fix: flush FS with sync 299 | if not os.access(flac_converter, os.X_OK): 300 | stat_info = os.stat(flac_converter) 301 | os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC) 302 | if "Linux" in platform.system(): 303 | os.sync() if sys.version_info >= (3, 3) 
else os.system("sync") 304 | 305 | except OSError: 306 | pass 307 | 308 | return flac_converter 309 | 310 | 311 | def shutil_which(pgm): 312 | """Python 2 compatibility: backport of ``shutil.which()`` from Python 3""" 313 | path = os.getenv("PATH") 314 | for p in path.split(os.path.pathsep): 315 | p = os.path.join(p, pgm) 316 | if os.path.exists(p) and os.access(p, os.X_OK): 317 | return p 318 | -------------------------------------------------------------------------------- /custom_speech_recognition/exceptions.py: -------------------------------------------------------------------------------- 1 | class SetupError(Exception): 2 | pass 3 | 4 | 5 | class WaitTimeoutError(Exception): 6 | pass 7 | 8 | 9 | class RequestError(Exception): 10 | pass 11 | 12 | 13 | class UnknownValueError(Exception): 14 | pass 15 | 16 | 17 | class TranscriptionNotReady(Exception): 18 | pass 19 | 20 | 21 | class TranscriptionFailed(Exception): 22 | pass 23 | -------------------------------------------------------------------------------- /custom_speech_recognition/flac-linux-x86: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/flac-linux-x86 -------------------------------------------------------------------------------- /custom_speech_recognition/flac-linux-x86_64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/flac-linux-x86_64 -------------------------------------------------------------------------------- /custom_speech_recognition/flac-mac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/flac-mac -------------------------------------------------------------------------------- /custom_speech_recognition/flac-win32.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/flac-win32.exe -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 1999-2015 Carnegie Mellon University. All rights 2 | reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions 6 | are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright 12 | notice, this list of conditions and the following disclaimer in 13 | the documentation and/or other materials provided with the 14 | distribution. 15 | 16 | This work was supported in part by funding from the Defense Advanced 17 | Research Projects Agency and the National Science Foundation of the 18 | United States of America, and the CMU Sphinx Speech Consortium. 
19 | 20 | THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND 21 | ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 | PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY 24 | NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/README: -------------------------------------------------------------------------------- 1 | /* ==================================================================== 2 | * Copyright (c) 2015 Alpha Cephei Inc. All rights 3 | * reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright 13 | * notice, this list of conditions and the following disclaimer in 14 | * the documentation and/or other materials provided with the 15 | * distribution. 16 | * 17 | * THIS SOFTWARE IS PROVIDED BY ALPHA CEPHEI INC. ``AS IS'' AND. 18 | * ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,. 19 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALPHA CEPHEI INC. 21 | * NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT. 23 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,. 24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY. 25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT. 26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE. 27 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | * 29 | * ==================================================================== 30 | * 31 | */ 32 | 33 | This directory contains generic US english acoustic model trained with 34 | latest sphinxtrain. 
35 | -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/feat.params: -------------------------------------------------------------------------------- 1 | -lowerf 130 2 | -upperf 6800 3 | -nfilt 25 4 | -transform dct 5 | -lifter 22 6 | -feat 1s_c_d_dd 7 | -svspec 0-12/13-25/26-38 8 | -agc none 9 | -cmn current 10 | -varnorm no 11 | -model ptm 12 | -cmninit 40,3,-1 13 | -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/mdef: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/mdef -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/means: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/means -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/noisedict: -------------------------------------------------------------------------------- 1 | SIL 2 | SIL 3 | SIL 4 | [NOISE] +NSN+ 5 | [SPEECH] +SPN+ 6 | -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/sendump: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/sendump -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/transition_matrices: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/transition_matrices -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/variances: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/variances -------------------------------------------------------------------------------- /custom_speech_recognition/pocketsphinx-data/en-US/language-model.lm.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/pocketsphinx-data/en-US/language-model.lm.bin -------------------------------------------------------------------------------- /custom_speech_recognition/recognizers/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/custom_speech_recognition/recognizers/__init__.py
--------------------------------------------------------------------------------
/custom_speech_recognition/recognizers/whisper.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | import os
4 | from io import BytesIO
5 | 
6 | from custom_speech_recognition.audio import AudioData
7 | from custom_speech_recognition.exceptions import SetupError
8 | 
9 | 
10 | def recognize_whisper_api(
11 |     recognizer,
12 |     audio_data: "AudioData",
13 |     *,
14 |     model: str = "whisper-1",
15 |     api_key: str | None = None,
16 | ):
17 |     """
18 |     Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the OpenAI Whisper API.
19 | 
20 |     This function requires an OpenAI account; visit https://platform.openai.com/signup, then generate API Key in `User settings `__.
21 | 
22 |     Detail: https://platform.openai.com/docs/guides/speech-to-text
23 | 
24 |     Raises a ``speech_recognition.exceptions.SetupError`` exception if there are any issues with the openai installation, or the environment variable is missing.
25 |     """
26 |     if not isinstance(audio_data, AudioData):
27 |         raise ValueError("``audio_data`` must be an ``AudioData`` instance")
28 |     if api_key is None and os.environ.get("OPENAI_API_KEY") is None:
29 |         raise SetupError("Set environment variable ``OPENAI_API_KEY``")
30 | 
31 |     try:
32 |         import openai
33 |     except ImportError:
34 |         raise SetupError(
35 |             "missing openai module: ensure that openai is set up correctly."
36 |         )
37 | 
38 |     wav_data = BytesIO(audio_data.get_wav_data())
39 |     wav_data.name = "SpeechRecognition_audio.wav"
40 | 
41 |     transcript = openai.Audio.transcribe(model, wav_data, api_key=api_key)
42 |     return transcript["text"]
43 | 
--------------------------------------------------------------------------------
/keys.py:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY = "sk-pqJSMYLLCCKGGosHD9FcE8Ae1a5948C6A10a6288CeBf63B0"
2 | ZHIPUAI_API_KEY = "6b4a40aaccc45ce3ace6d2208af6da20.ZuGScTDHt8JMYc1E"
3 | 
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import threading
2 | from AudioTranscriber import AudioTranscriber
3 | from ZhipuAiResponder import ZhipuAiResponder
4 | from GPTResponder import GPTResponder
5 | import customtkinter as ctk
6 | import AudioRecorder
7 | import queue
8 | import time
9 | import torch
10 | import sys
11 | import TranscriberModels
12 | import subprocess
13 | from tkinter import PhotoImage
14 | 
15 | 
16 | # This method clears the given textbox and inserts the new text into it.
17 | def write_in_textbox(textbox, text):
18 |     textbox.delete("0.0", "end")
19 |     textbox.insert("0.0", text)
20 | 
21 | 
22 | # This method fetches the transcript text from the transcriber and updates the UI textbox with it. Uses the after method to refresh every 300 ms.
23 | def update_transcript_UI(transcriber, textbox):
24 |     transcript_string = transcriber.get_transcript()
25 |     write_in_textbox(textbox, transcript_string)
26 |     textbox.after(300, update_transcript_UI, transcriber, textbox)
27 | 
28 | 
29 | # This method updates the response textbox, throttles how often the response is refreshed, and sets the update interval from the slider. Uses the after method to refresh every 300 ms.
30 | def update_response_UI(responder, textbox, update_interval_slider_label, update_interval_slider, freeze_state):
31 |     if not freeze_state[0]:
32 |         response = responder.response
33 | 
34 |         textbox.configure(state="normal")
35 |         write_in_textbox(textbox, response)
36 |         textbox.configure(state="disabled")
37 | 
38 |         update_interval = int(update_interval_slider.get())
39 |         responder.update_response_interval(update_interval)
40 |         update_interval_slider_label.configure(text=f"询问间隔: {update_interval} 秒")
41 | 
42 |     textbox.after(300, update_response_UI, responder, textbox, update_interval_slider_label, update_interval_slider,
43 |                   freeze_state)
44 | 
45 | 
46 | # This method clears the transcript data and the contents of the audio queue.
47 | def clear_context(transcriber, audio_queue):
48 |     transcriber.clear_transcript_data()
49 |     with audio_queue.mutex:
50 |         audio_queue.queue.clear()
51 | 
52 | 
53 | # This method creates and configures the UI components, including the textboxes, buttons, and slider.
54 | def create_ui_components(root):
55 |     ctk.set_appearance_mode("light")
56 |     ctk.set_default_color_theme("blue")
57 |     root.title("面试助手")
58 |     root.configure(bg='#FFFFFF')
59 |     root.geometry("1000x600")
60 | 
61 |     icon_image = PhotoImage(file="./pictures/RCLogo.png")
62 |     root.iconphoto(False, icon_image)
63 | 
64 |     font_size = 20
65 | 
66 |     transcript_textbox = ctk.CTkTextbox(root, width=300, font=("Arial", font_size), text_color='#000000', wrap="word",
67 |                                         fg_color='#FFFFFF')
68 |     transcript_textbox.grid(row=0, column=0, padx=10, pady=20, sticky="nsew")
69 | 
70 |     response_textbox = ctk.CTkTextbox(root, width=300, font=("Arial", font_size), text_color='#000000', wrap="word",
71 |                                       fg_color='#FFFFFF')
72 |     response_textbox.grid(row=0, column=1, padx=10, pady=20, sticky="nsew")
73 | 
74 |     freeze_button = ctk.CTkButton(root, text="Freeze", fg_color='#E0E0E0', text_color='#000000')
75 |     freeze_button.grid(row=1, column=1, padx=10, pady=3, sticky="nsew")
76 | 
77 |     update_interval_slider_label = ctk.CTkLabel(root, text=f"", font=("Arial", 12), text_color="#000000")
78 |     update_interval_slider_label.grid(row=2, column=1, padx=10, pady=3, sticky="nsew")
79 | 
80 |     update_interval_slider = ctk.CTkSlider(root, from_=1, to=10, width=300, height=20, number_of_steps=9)
81 |     update_interval_slider.set(2)
82 |     update_interval_slider.grid(row=3, column=1, padx=10, pady=10, sticky="nsew")
83 | 
84 |     return transcript_textbox, response_textbox, update_interval_slider, update_interval_slider_label, freeze_button
85 | 
86 | 
87 | def main():
88 |     # FFmpeg check
89 |     try:
90 |         subprocess.run(["ffmpeg", "-version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
91 |     except FileNotFoundError:
92 |         print("ERROR: The ffmpeg library is not installed. Please install ffmpeg and try again.")
93 |         return
94 | 
95 |     # Create the main UI window and its components.
96 |     root = ctk.CTk()
97 |     transcript_textbox, response_textbox, update_interval_slider, update_interval_slider_label, freeze_button = create_ui_components(
98 |         root)
99 | 
100 |     # Set up audio recording for the user and the speaker, and record the audio data into the queue.
101 |     audio_queue = queue.Queue()
102 | 
103 |     user_audio_recorder = AudioRecorder.DefaultMicRecorder()
104 |     user_audio_recorder.record_into_queue(audio_queue)
105 | 
106 |     time.sleep(2)
107 | 
108 |     speaker_audio_recorder = AudioRecorder.DefaultSpeakerRecorder()
109 |     speaker_audio_recorder.record_into_queue(audio_queue)
110 | 
111 |     # Initialize the transcription model and the response generator, and start the corresponding threads.
112 |     model = TranscriberModels.get_model('--api' in sys.argv)
113 | 
114 |     transcriber = AudioTranscriber(user_audio_recorder.source, speaker_audio_recorder.source, model)
115 |     transcribe = threading.Thread(target=transcriber.transcribe_audio_queue, args=(audio_queue,))
116 |     transcribe.daemon = True
117 |     transcribe.start()
118 | 
119 |     # todo The project currently bundles two models, GPT and ZhipuAI; initialize and start the corresponding thread for the one you use (enabling a single model is enough).
120 | 
121 |     # Initialize the GPT response generator and start its thread.
122 |     # responder = GPTResponder()
123 | 
124 |     # Initialize the ZhipuAI response generator and start its thread.
125 |     responder = ZhipuAiResponder()
126 | 
127 |     respond = threading.Thread(target=responder.respond_to_transcriber, args=(transcriber,))
128 |     respond.daemon = True
129 |     respond.start()
130 | 
131 |     # Configure the UI grid layout, wire up the clear-transcript and freeze buttons, set up the UI update events, and start the main loop.
132 |     print("READY")
133 | 
134 |     root.grid_rowconfigure(0, weight=100)
135 |     root.grid_rowconfigure(1, weight=1)
136 |     root.grid_rowconfigure(2, weight=1)
137 |     root.grid_rowconfigure(3, weight=1)
138 |     root.grid_columnconfigure(0, weight=2)
139 |     root.grid_columnconfigure(1, weight=1)
140 | 
141 |     # Add the clear transcript button to the UI
142 |     clear_transcript_button = ctk.CTkButton(root, text="清空转录记录",
143 |                                             command=lambda: clear_context(transcriber, audio_queue, ))
144 |     clear_transcript_button.grid(row=1, column=0, padx=10, pady=3, sticky="nsew")
145 | 
146 |     freeze_state = [False]  # Using list to be able to change its content inside inner functions
147 | 
148 |     def freeze_unfreeze():
149 |         freeze_state[0] = not freeze_state[0]  # Invert the freeze state
150 |         freeze_button.configure(text="解冻" if freeze_state[0] else "冻结")
151 | 
152 |     freeze_button.configure(command=freeze_unfreeze)
153 | 
154 |     update_interval_slider_label.configure(text=f"询问间隔: {update_interval_slider.get()} 秒")
155 | 
156 |     update_transcript_UI(transcriber, transcript_textbox)
157 |     update_response_UI(responder, response_textbox, update_interval_slider_label, update_interval_slider, freeze_state)
158 | 
159 |     root.mainloop()
160 | 
161 | 
162 | if __name__ == "__main__":
163 |     main()
164 | 
--------------------------------------------------------------------------------
/pictures/RCLogo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/pictures/RCLogo.png
--------------------------------------------------------------------------------
/pictures/img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/pictures/img.png
--------------------------------------------------------------------------------
/pictures/img_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/pictures/img_1.png
--------------------------------------------------------------------------------
/pictures/img_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JasonJarvan/interview-helper/5a3eadf734071650d3329207d8b54483dc968a39/pictures/img_2.png
--------------------------------------------------------------------------------
/prompts.py:
--------------------------------------------------------------------------------
1 | """
2 | prompts.py
3 | ------
4 | This script defines the initial response message and a function for generating conversation prompts. The prompt function builds a prompt in a specific format from the transcript text.
5 | """
6 | 
7 | INITIAL_RESPONSE = "欢迎使用面试助手"
8 | def create_prompt(transcript):
9 |     return f"""You are a casual pal, genuinely interested in the conversation at hand. A poor transcription of a conversation is given below.
10 | 
11 | {transcript}.
12 | 
13 | Please respond, in detail, to the conversation. Confidently give a straightforward response to the speaker, even if you don't understand them. Give your response in square brackets. DO NOT ask to repeat, and DO NOT ask for clarification. Just answer the speaker directly."""
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.23
2 | openai-whisper==20230314
3 | Wave==0.0.2
4 | openai==0.27.6
5 | customtkinter==5.1.3
6 | PyAudioWPatch==0.2.12.5
7 | zhipuai
8 | --extra-index-url https://download.pytorch.org/whl/cu117
9 | torch
--------------------------------------------------------------------------------
/whisper_models/tiny.pt:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9
3 | size 75572083
4 | 
--------------------------------------------------------------------------------