├── .gitignore
├── AudioRecorder.py
├── AudioTranscriber.py
├── LICENSE
├── README.md
├── chatbot_utils.py
├── custom_speech_recognition
│   ├── __init__.py
│   ├── __main__.py
│   ├── audio.py
│   ├── exceptions.py
│   ├── flac-linux-x86
│   ├── flac-linux-x86_64
│   ├── flac-mac
│   ├── flac-win32.exe
│   ├── pocketsphinx-data
│   │   └── en-US
│   │       ├── LICENSE.txt
│   │       ├── acoustic-model
│   │       │   ├── README
│   │       │   ├── feat.params
│   │       │   ├── mdef
│   │       │   ├── means
│   │       │   ├── noisedict
│   │       │   ├── sendump
│   │       │   ├── transition_matrices
│   │       │   └── variances
│   │       ├── language-model.lm.bin
│   │       └── pronounciation-dictionary.dict
│   └── recognizers
│       ├── __init__.py
│       └── whisper.py
├── keys.env
├── main.py
├── requirements.txt
├── transcripts_for_vectordb
│   └── Test
│       └── transcript.txt
└── vector_utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | keys.env
3 | .venv/
--------------------------------------------------------------------------------
/AudioRecorder.py:
--------------------------------------------------------------------------------
1 | import custom_speech_recognition as sr
2 | import pyaudiowpatch as pyaudio
3 | from datetime import datetime
4 |
5 | RECORD_TIMEOUT = 3  # max seconds per recorded chunk (passed as phrase_time_limit)
6 | ENERGY_THRESHOLD = 1000  # minimum audio energy to consider for recording
7 | DYNAMIC_ENERGY_THRESHOLD = False  # keep the energy threshold fixed instead of adjusting it dynamically
8 |
9 | class BaseRecorder:
10 | def __init__(self, source, source_name):
11 | self.recorder = sr.Recognizer()
12 | self.recorder.energy_threshold = ENERGY_THRESHOLD
13 | self.recorder.dynamic_energy_threshold = DYNAMIC_ENERGY_THRESHOLD
14 | self.source = source
15 | self.source_name = source_name
16 |
17 | def adjust_for_noise(self, device_name, msg):
18 | print(f"[INFO] Adjusting for ambient noise from {device_name}. " + msg)
19 | with self.source:
20 | self.recorder.adjust_for_ambient_noise(self.source)
21 | print(f"[INFO] Completed ambient noise adjustment for {device_name}.")
22 |
23 | def record_into_queue(self, audio_queue):
24 | def record_callback(_, audio:sr.AudioData) -> None:
25 | data = audio.get_raw_data()
26 | audio_queue.put((self.source_name, data, datetime.utcnow()))
27 |
28 | self.recorder.listen_in_background(self.source, record_callback, phrase_time_limit=RECORD_TIMEOUT)
29 |
30 | class DefaultMicRecorder(BaseRecorder):
31 | def __init__(self):
32 | super().__init__(source=sr.Microphone(sample_rate=16000), source_name="You")
33 | self.adjust_for_noise("Default Mic", "Please make some noise from the Default Mic...")
34 |
35 | class DefaultSpeakerRecorder(BaseRecorder):
36 | def __init__(self):
37 | with pyaudio.PyAudio() as p:
38 | wasapi_info = p.get_host_api_info_by_type(pyaudio.paWASAPI)
39 | default_speakers = p.get_device_info_by_index(wasapi_info["defaultOutputDevice"])
40 |
41 | if not default_speakers["isLoopbackDevice"]:
42 | for loopback in p.get_loopback_device_info_generator():
43 | if default_speakers["name"] in loopback["name"]:
44 | default_speakers = loopback
45 | break
46 | else:
47 | print("[ERROR] No loopback device found.")
48 |
49 | source = sr.Microphone(speaker=True,
50 | device_index= default_speakers["index"],
51 | sample_rate=int(default_speakers["defaultSampleRate"]),
52 | chunk_size=pyaudio.get_sample_size(pyaudio.paInt16),
53 | channels=default_speakers["maxInputChannels"])
54 | super().__init__(source=source, source_name="Speaker")
55 | self.adjust_for_noise("Default Speaker", "Please make or play some noise from the Default Speaker...")
56 |
57 |
--------------------------------------------------------------------------------
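
Usage note for AudioRecorder.py: both recorder classes push `(source_name, raw_audio_bytes, timestamp)` tuples onto a shared queue from a background listener thread. The real wiring lives in `main.py`, which is not included in this excerpt; the sketch below is a minimal, hypothetical illustration based only on this file.

```python
# Hypothetical sketch - the actual orchestration is in main.py.
import queue

from AudioRecorder import DefaultMicRecorder, DefaultSpeakerRecorder

audio_queue = queue.Queue()

mic = DefaultMicRecorder()          # default microphone, labeled "You"
speaker = DefaultSpeakerRecorder()  # WASAPI loopback of the default output device, labeled "Speaker"

# Each call starts a daemon listener thread whose callback enqueues
# (source_name, raw_audio_bytes, utc_timestamp) chunks of at most RECORD_TIMEOUT seconds.
mic.record_into_queue(audio_queue)
speaker.record_into_queue(audio_queue)
```
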
/AudioTranscriber.py:
--------------------------------------------------------------------------------
1 | import whisper
2 | import torch
3 | import wave
4 | import os
5 | import threading
6 | from tempfile import NamedTemporaryFile
7 | import custom_speech_recognition as sr
8 | import io
9 | from datetime import timedelta
10 | import pyaudiowpatch as pyaudio
11 | from heapq import merge
12 |
13 | PHRASE_TIMEOUT = 3.05  # seconds of silence after which incoming audio is treated as a new phrase
14 |
15 | MAX_PHRASES = 30  # cap on phrases kept per source and shown in the combined transcript
16 |
17 | class AudioTranscriber:
18 | def __init__(self, mic_source, speaker_source):
19 | self.transcript_data = {"You": [], "Speaker": []}
20 | self.transcript_changed_event = threading.Event()
21 | self.audio_model = whisper.load_model('base.en')
22 | print(f'Whisper running on device: {self.audio_model.device}')
23 | self.should_continue = True
24 | self.audio_sources = {
25 | "You": {
26 | "sample_rate": mic_source.SAMPLE_RATE,
27 | "sample_width": mic_source.SAMPLE_WIDTH,
28 | "channels": mic_source.channels,
29 | "last_sample": bytes(),
30 | "last_spoken": None,
31 | "new_phrase": True,
32 | "process_data_func": self.process_mic_data
33 | },
34 | "Speaker": {
35 | "sample_rate": speaker_source.SAMPLE_RATE,
36 | "sample_width": speaker_source.SAMPLE_WIDTH,
37 | "channels": speaker_source.channels,
38 | "last_sample": bytes(),
39 | "last_spoken": None,
40 | "new_phrase": True,
41 | "process_data_func": self.process_speaker_data
42 | }
43 | }
44 |
45 | def transcribe_audio_queue(self, audio_queue):
46 | while self.should_continue:
47 | while self.should_continue:
48 | who_spoke, data, time_spoken = audio_queue.get()
49 | self.update_last_sample_and_phrase_status(who_spoke, data, time_spoken)
50 | source_info = self.audio_sources[who_spoke]
51 | temp_file = source_info["process_data_func"](source_info["last_sample"])
52 | text = self.get_transcription(temp_file)
53 |
54 | if text != '' and text.lower() != 'you':
55 | self.update_transcript(who_spoke, text, time_spoken)
56 | self.transcript_changed_event.set()
57 | print('Stopping')
58 |
59 | def update_last_sample_and_phrase_status(self, who_spoke, data, time_spoken):
60 | source_info = self.audio_sources[who_spoke]
61 | if source_info["last_spoken"] and time_spoken - source_info["last_spoken"] > timedelta(seconds=PHRASE_TIMEOUT):
62 | source_info["last_sample"] = bytes()
63 | source_info["new_phrase"] = True
64 | else:
65 | source_info["new_phrase"] = False
66 |
67 | source_info["last_sample"] += data
68 | source_info["last_spoken"] = time_spoken
69 |
70 | def process_mic_data(self, data):
71 | temp_file = NamedTemporaryFile().name
72 | audio_data = sr.AudioData(data, self.audio_sources["You"]["sample_rate"], self.audio_sources["You"]["sample_width"])
73 | wav_data = io.BytesIO(audio_data.get_wav_data())
74 | with open(temp_file, 'w+b') as f:
75 | f.write(wav_data.read())
76 | return temp_file
77 |
78 | def process_speaker_data(self, data):
79 | temp_file = NamedTemporaryFile().name
80 | with wave.open(temp_file, 'wb') as wf:
81 | wf.setnchannels(self.audio_sources["Speaker"]["channels"])
82 | p = pyaudio.PyAudio()
83 | wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
84 | wf.setframerate(self.audio_sources["Speaker"]["sample_rate"])
85 | wf.writeframes(data)
86 | return temp_file
87 |
88 | def get_transcription(self, file_path):
89 | result = self.audio_model.transcribe(file_path, fp16=torch.cuda.is_available())
90 | return result['text'].strip()
91 |
92 | def update_transcript(self, who_spoke, text, time_spoken):
93 | source_info = self.audio_sources[who_spoke]
94 | transcript = self.transcript_data[who_spoke]
95 |
96 | if source_info["new_phrase"] or len(transcript) == 0:
97 | if len(transcript) > MAX_PHRASES:
98 | transcript.pop(-1)
99 | transcript.insert(0, (f"{who_spoke}: [{text}]\n\n", time_spoken))
100 | else:
101 | transcript[0] = (f"{who_spoke}: [{text}]\n\n", time_spoken)
102 |
103 | def get_transcript(self, username="You", speakername="Speaker"):
104 | combined_transcript = list(merge(
105 | self.transcript_data["You"], self.transcript_data["Speaker"],
106 | key=lambda x: x[1], reverse=True))
107 | combined_transcript = combined_transcript[:MAX_PHRASES]
108 | combined_transcript = combined_transcript[::-1]
109 | formatted = "\n\n".join([f'{username if t[0].startswith("You") else speakername}: "{t[0][t[0].index(":")+2:-3]}" ' for t in combined_transcript])
110 | formatted = formatted.replace("[", "")
111 | return formatted
112 |
113 | def get_speaker_transcript(self):
114 | transcript = list(merge(self.transcript_data["Speaker"], key=lambda x: x[1], reverse=True))
115 | text_only = []
116 | for item in transcript:
117 | text = item[0]
118 | extracted_text = text.split("[")[1].split("]")[0]
119 | text_only.append(extracted_text)
120 | text_string = " ".join(text_only)
121 | return text_string
122 |
123 | def clear_transcript_data(self):
124 | self.transcript_data["You"].clear()
125 | self.transcript_data["Speaker"].clear()
126 |
127 | self.audio_sources["You"]["last_sample"] = bytes()
128 | self.audio_sources["Speaker"]["last_sample"] = bytes()
129 |
130 | self.audio_sources["You"]["new_phrase"] = True
131 | self.audio_sources["Speaker"]["new_phrase"] = True
132 |
133 |
134 | def stop(self):
135 | self.should_continue = False
--------------------------------------------------------------------------------
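
Usage note for AudioTranscriber.py: `transcribe_audio_queue` blocks on the queue, so it is meant to run on its own thread, and `transcript_changed_event` signals when `get_transcript` has new content. A minimal, hypothetical driver sketch follows (the real entry point is `main.py`, which is not included in this excerpt):

```python
# Hypothetical driver sketch - main.py is the real entry point.
import queue
import threading

from AudioRecorder import DefaultMicRecorder, DefaultSpeakerRecorder
from AudioTranscriber import AudioTranscriber

audio_queue = queue.Queue()
mic, speaker = DefaultMicRecorder(), DefaultSpeakerRecorder()
mic.record_into_queue(audio_queue)
speaker.record_into_queue(audio_queue)

transcriber = AudioTranscriber(mic.source, speaker.source)
threading.Thread(target=transcriber.transcribe_audio_queue,
                 args=(audio_queue,), daemon=True).start()

transcriber.transcript_changed_event.wait()  # block until at least one phrase is transcribed
print(transcriber.get_transcript())          # merged "You"/"Speaker" view, oldest phrase first
transcriber.stop()                           # signals the transcription loop to stop
```
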
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Ethan
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # WingmanAI
2 |
3 | WingmanAI is a powerful tool for interacting with real-time transcriptions of both system and microphone audio. Powered by ChatGPT, it lets you interact with the transcripts in real time and use saved transcripts as an extensive memory base for the bot, providing a unique communication platform.
4 |
5 | ## Demo
6 |
7 | https://github.com/e-johnstonn/wingmanAI/assets/30129211/6f9f8e09-f43e-47d5-87ae-ac5bc693963d
8 |
9 | As you can see, the bot can answer questions about past conversations when you load the transcripts for a designated person.
10 |
11 | ## Features
12 |
13 | - **Real-time Transcription**: WingmanAI can transcribe both system output and microphone input audio, allowing you to view the live transcription in an easy-to-read format.
14 |
15 | - **ChatGPT Integration**: You can chat with a ChatGPT powered bot that reads your transcripts in real-time.
16 |
17 | - **Efficient Memory Management**: The bot maintains a record of the conversation in a token-efficient manner: only the current chunk of transcript is passed to the bot along with your message.
18 |
19 | - **Save and Load Transcripts**: WingmanAI allows you to save transcripts for future use. You can load them up anytime later, and any query made to the bot will be cross-referenced with a vector database of the saved transcript, providing the bot with a richer context.
20 |
21 | - **Append Conversations**: You can keep appending to the saved transcripts, building a vast database over time for the bot to pull from.
22 |
23 |
24 |
25 |
26 | ## Installation
27 |
28 | 1. Clone the repository.
29 | 2. Install the requirements: ```pip install -r requirements.txt```
30 | 3. If you wish to use CUDA for Whisper (highly recommended), uninstall the default torch build (```pip uninstall torch```) and reinstall it with CUDA support: ```pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117```
31 |
32 | **Note**: This application is currently compatible only with Windows.
33 |
34 | ## Prerequisites
35 | - Ensure you have `ffmpeg` installed on your system.
36 | - Have a working OpenAI API key.
37 | - CUDA is strongly recommended; CPU-only transcription is not real-time.
38 | - The Whisper model currently used is "base.en" - if your hardware can't run it, change it to "tiny". Language is currently set to English.
39 |
40 | ## Getting Started
41 | 1. Add your OpenAI API key to the `keys.env` file.
42 | 2. Run `main.py`.
43 |
44 |
45 | For any queries or issues, feel free to open a new issue in the repository.
46 |
47 | Contributions to improve the project are always welcome.
48 |
49 | ## Acknowledgements
50 |
51 | This project uses a modified version of SevaSk's "Ecoute" project for the transcriptions - check it out [here](https://github.com/SevaSk/ecoute)!
52 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
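
A note on step 1 of Getting Started: the chatbot (`chatbot_utils.GPTChat`) uses langchain's `ChatOpenAI`, which reads the `OPENAI_API_KEY` environment variable. How `keys.env` gets loaded is handled in `main.py`, which is not included in this excerpt; the snippet below is only a hypothetical illustration of one common approach and assumes the `python-dotenv` package is available.

```python
# Hypothetical illustration - check main.py for how keys.env is actually loaded.
import os

from dotenv import load_dotenv  # assumes python-dotenv; not necessarily what the project uses

load_dotenv("keys.env")  # keys.env would contain a line such as: OPENAI_API_KEY=sk-...
assert os.getenv("OPENAI_API_KEY"), "Add your OpenAI API key to keys.env before running main.py"
```
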
/chatbot_utils.py:
--------------------------------------------------------------------------------
1 | from langchain.chat_models import ChatOpenAI
2 | from langchain.schema import SystemMessage, HumanMessage, AIMessage
3 |
4 |
5 |
6 |
7 |
8 | class GPTChat:
9 | def __init__(self):
10 | self.messages = []
11 | self.chat = ChatOpenAI()
12 | self.sys_message = SystemMessage(
13 | content="""
14 | You're ChatGPT, OpenAI's wingman AI built on GPT-3.5.
15 | Your goal is to help the User in their interactions with the speaker.
16 | Using conversation transcripts, you'll help create responses and guide the User (labeled You).
17 | Keep your responses casual, engaging, and sometimes even edgy. Always keep responses related to the conversation.
18 | The transcripts may be fragmented, incomplete, or even incorrect. Do not ask for clarification, do your best to guess what
19 | the transcripts are saying based on context. Act 100% sure of everything you say.
20 | Keep responses concise and to the point.
21 |
22 | """)
23 | self.messages.append(self.sys_message)
24 | self.response = ""
25 |
26 | def message_bot(self, human_message, transcript, context=None):
27 | temp_messages = self.messages.copy()
28 |
29 | if context is None:
30 | human_message_with_transcript = HumanMessage(
31 | content=f'TRANSCRIPTS: {transcript}, ||| USER MESSAGE: {human_message}')
32 |
33 |
34 | else:
35 | human_message_with_transcript = HumanMessage(
36 | content=f'CONTEXT FROM PAST CONVERSATIONS (ALL CONTEXT IS DIRECT QUOTES FROM OTHER PARTY (SPEAKER) {context} |||'
37 | f'TRANSCRIPTS: {transcript}, ||| USER MESSAGE: {human_message}')
38 |
39 | temp_messages.append(human_message_with_transcript)
40 |
41 | human_message = HumanMessage(content=human_message)
42 | self.messages.append(human_message)
43 |
44 | self.response = self.chat(temp_messages)
45 |
46 | ai_message = AIMessage(content=self.response.content)
47 | self.messages.append(ai_message)
48 |
49 | return str(ai_message.content)
50 |
51 |
--------------------------------------------------------------------------------
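
Usage note for chatbot_utils.py: a minimal, hypothetical sketch of calling `GPTChat`, based only on the class above (the real caller is `main.py`, not included in this excerpt). `ChatOpenAI` picks up `OPENAI_API_KEY` from the environment; the transcript and context strings below are made-up examples in the format produced by `AudioTranscriber.get_transcript`.

```python
# Hypothetical usage sketch - main.py is the real caller.
from chatbot_utils import GPTChat

bot = GPTChat()  # builds the system prompt and an empty running message history

transcript = 'You: "How was the demo?"\n\nSpeaker: "Pretty good, a couple of bugs though."'
print(bot.message_bot("Suggest a follow-up question I could ask.", transcript))

# With saved transcripts loaded, direct quotes from the speaker can be passed as `context`:
print(bot.message_bot("What bugs came up last time?", transcript,
                      context='"the login page kept timing out"'))
```
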
/custom_speech_recognition/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """Library for performing speech recognition, with support for several engines and APIs, online and offline."""
4 |
5 | import io
6 | import os
7 | import tempfile
8 | import sys
9 | import subprocess
10 | import wave
11 | import aifc
12 | import math
13 | import audioop
14 | import collections
15 | import json
16 | import base64
17 | import threading
18 | import hashlib
19 | import hmac
20 | import time
21 | import uuid
22 |
23 | try:
24 | import requests
25 | except (ModuleNotFoundError, ImportError):
26 | pass
27 |
28 | __author__ = "Anthony Zhang (Uberi)"
29 | __version__ = "3.10.0"
30 | __license__ = "BSD"
31 |
32 | from urllib.parse import urlencode
33 | from urllib.request import Request, urlopen
34 | from urllib.error import URLError, HTTPError
35 |
36 | from .audio import AudioData, get_flac_converter
37 | from .exceptions import (
38 | RequestError,
39 | TranscriptionFailed,
40 | TranscriptionNotReady,
41 | UnknownValueError,
42 | WaitTimeoutError,
43 | )
44 | from .recognizers import whisper
45 |
46 |
47 | class AudioSource(object):
48 | def __init__(self):
49 | raise NotImplementedError("this is an abstract class")
50 |
51 | def __enter__(self):
52 | raise NotImplementedError("this is an abstract class")
53 |
54 | def __exit__(self, exc_type, exc_value, traceback):
55 | raise NotImplementedError("this is an abstract class")
56 |
57 |
58 | class Microphone(AudioSource):
59 | """
60 | Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.
61 |
62 | This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later installed.
63 |
64 | If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input.
65 |
66 | A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation `__ for more details.
67 |
68 | The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz). If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings.
69 |
70 | Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high.
71 |
72 | Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also makes detection less sensitive. This value, generally, should be left at its default.
73 | """
74 | def __init__(self, device_index=None, sample_rate=None, chunk_size=1024, speaker=False, channels = 1):
75 | assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer"
76 | assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer"
77 | assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer"
78 |
79 | # set up PyAudio
80 | self.speaker=speaker
81 | self.pyaudio_module = self.get_pyaudio()
82 | audio = self.pyaudio_module.PyAudio()
83 | try:
84 | count = audio.get_device_count() # obtain device count
85 | if device_index is not None: # ensure device index is in range
86 | assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1)
87 | if sample_rate is None: # automatically set the sample rate to the hardware's default sample rate if not specified
88 | device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info()
89 | assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info)
90 | sample_rate = int(device_info["defaultSampleRate"])
91 | finally:
92 | audio.terminate()
93 |
94 | self.device_index = device_index
95 | self.format = self.pyaudio_module.paInt16 # 16-bit int sampling
96 | self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample
97 | self.SAMPLE_RATE = sample_rate # sampling rate in Hertz
98 | self.CHUNK = chunk_size # number of frames stored in each buffer
99 | self.channels = channels
100 |
101 | self.audio = None
102 | self.stream = None
103 |
104 | @staticmethod
105 | def get_pyaudio():
106 | """
107 | Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed
108 | """
109 | try:
110 | import pyaudiowpatch as pyaudio
111 | except ImportError:
112 | raise AttributeError("Could not find PyAudio; check installation")
113 | from distutils.version import LooseVersion
114 | if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.11"):
115 | raise AttributeError("PyAudio 0.2.11 or later is required (found version {})".format(pyaudio.__version__))
116 | return pyaudio
117 |
118 | @staticmethod
119 | def list_microphone_names():
120 | """
121 | Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead.
122 |
123 | The index of each microphone's name in the returned list is the same as its device index when creating a ``Microphone`` instance - if you want to use the microphone at index 3 in the returned list, use ``Microphone(device_index=3)``.
124 | """
125 | audio = Microphone.get_pyaudio().PyAudio()
126 | try:
127 | result = []
128 | for i in range(audio.get_device_count()):
129 | device_info = audio.get_device_info_by_index(i)
130 | result.append(device_info.get("name"))
131 | finally:
132 | audio.terminate()
133 | return result
134 |
135 | @staticmethod
136 | def list_working_microphones():
137 | """
138 | Returns a dictionary mapping device indices to microphone names, for microphones that are currently hearing sounds. When using this function, ensure that your microphone is unmuted and make some noise at it to ensure it will be detected as working.
139 |
140 | Each key in the returned dictionary can be passed to the ``Microphone`` constructor to use that microphone. For example, if the return value is ``{3: "HDA Intel PCH: ALC3232 Analog (hw:1,0)"}``, you can do ``Microphone(device_index=3)`` to use that microphone.
141 | """
142 | pyaudio_module = Microphone.get_pyaudio()
143 | audio = pyaudio_module.PyAudio()
144 | try:
145 | result = {}
146 | for device_index in range(audio.get_device_count()):
147 | device_info = audio.get_device_info_by_index(device_index)
148 | device_name = device_info.get("name")
149 | assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info)
150 | try:
151 | # read audio
152 | pyaudio_stream = audio.open(
153 | input_device_index=device_index, channels=1, format=pyaudio_module.paInt16,
154 | rate=int(device_info["defaultSampleRate"]), input=True
155 | )
156 | try:
157 | buffer = pyaudio_stream.read(1024)
158 | if not pyaudio_stream.is_stopped(): pyaudio_stream.stop_stream()
159 | finally:
160 | pyaudio_stream.close()
161 | except Exception:
162 | continue
163 |
164 | # compute RMS of debiased audio
165 | energy = -audioop.rms(buffer, 2)
166 | energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF])
167 | debiased_energy = audioop.rms(audioop.add(buffer, energy_bytes * (len(buffer) // 2), 2), 2)
168 |
169 | if debiased_energy > 30: # probably actually audio
170 | result[device_index] = device_name
171 | finally:
172 | audio.terminate()
173 | return result
174 |
175 | def __enter__(self):
176 | assert self.stream is None, "This audio source is already inside a context manager"
177 | self.audio = self.pyaudio_module.PyAudio()
178 |
179 | try:
180 | if self.speaker:
181 | p = self.audio
182 | self.stream = Microphone.MicrophoneStream(
183 | p.open(
184 | input_device_index=self.device_index,
185 | channels=self.channels,
186 | format=self.format,
187 | rate=self.SAMPLE_RATE,
188 | frames_per_buffer=self.CHUNK,
189 | input=True
190 | )
191 | )
192 | else:
193 | self.stream = Microphone.MicrophoneStream(
194 | self.audio.open(
195 | input_device_index=self.device_index, channels=1, format=self.format,
196 | rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, input=True,
197 | )
198 | )
199 | except Exception:
200 | self.audio.terminate()
201 | return self
202 |
203 | def __exit__(self, exc_type, exc_value, traceback):
204 | try:
205 | self.stream.close()
206 | finally:
207 | self.stream = None
208 | self.audio.terminate()
209 |
210 | class MicrophoneStream(object):
211 | def __init__(self, pyaudio_stream):
212 | self.pyaudio_stream = pyaudio_stream
213 |
214 | def read(self, size):
215 | return self.pyaudio_stream.read(size, exception_on_overflow=False)
216 |
217 | def close(self):
218 | try:
219 | # sometimes, if the stream isn't stopped, closing the stream throws an exception
220 | if not self.pyaudio_stream.is_stopped():
221 | self.pyaudio_stream.stop_stream()
222 | finally:
223 | self.pyaudio_stream.close()
224 |
225 |
226 | class AudioFile(AudioSource):
227 | """
228 | Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``.
229 |
230 | If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.
231 |
232 | Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.
233 |
234 | WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.
235 |
236 | Both AIFF and AIFF-C (compressed AIFF) formats are supported.
237 |
238 | FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.
239 | """
240 |
241 | def __init__(self, filename_or_fileobject):
242 | assert isinstance(filename_or_fileobject, (type(""), type(u""))) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
243 | self.filename_or_fileobject = filename_or_fileobject
244 | self.stream = None
245 | self.DURATION = None
246 |
247 | self.audio_reader = None
248 | self.little_endian = False
249 | self.SAMPLE_RATE = None
250 | self.CHUNK = None
251 | self.FRAME_COUNT = None
252 |
253 | def __enter__(self):
254 | assert self.stream is None, "This audio source is already inside a context manager"
255 | try:
256 | # attempt to read the file as WAV
257 | self.audio_reader = wave.open(self.filename_or_fileobject, "rb")
258 | self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form)
259 | except (wave.Error, EOFError):
260 | try:
261 | # attempt to read the file as AIFF
262 | self.audio_reader = aifc.open(self.filename_or_fileobject, "rb")
263 | self.little_endian = False # AIFF is a big-endian format
264 | except (aifc.Error, EOFError):
265 | # attempt to read the file as FLAC
266 | if hasattr(self.filename_or_fileobject, "read"):
267 | flac_data = self.filename_or_fileobject.read()
268 | else:
269 | with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read()
270 |
271 | # run the FLAC converter with the FLAC data to get the AIFF data
272 | flac_converter = get_flac_converter()
273 | if os.name == "nt": # on Windows, specify that the process is to be started without showing a console window
274 | startup_info = subprocess.STARTUPINFO()
275 | startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW # specify that the wShowWindow field of `startup_info` contains a value
276 | startup_info.wShowWindow = subprocess.SW_HIDE # specify that the console window should be hidden
277 | else:
278 | startup_info = None # default startupinfo
279 | process = subprocess.Popen([
280 | flac_converter,
281 | "--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output
282 | "--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file
283 | "-", # the input FLAC file contents will be given in stdin
284 | ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=startup_info)
285 | aiff_data, _ = process.communicate(flac_data)
286 | aiff_file = io.BytesIO(aiff_data)
287 | try:
288 | self.audio_reader = aifc.open(aiff_file, "rb")
289 | except (aifc.Error, EOFError):
290 | raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another format")
291 | self.little_endian = False # AIFF is a big-endian format
292 | assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo"
293 | self.SAMPLE_WIDTH = self.audio_reader.getsampwidth()
294 |
295 | # 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866)
296 | samples_24_bit_pretending_to_be_32_bit = False
297 | if self.SAMPLE_WIDTH == 3: # 24-bit audio
298 | try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
299 | except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
300 | samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit
301 | self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading
302 |
303 | self.SAMPLE_RATE = self.audio_reader.getframerate()
304 | self.CHUNK = 4096
305 | self.FRAME_COUNT = self.audio_reader.getnframes()
306 | self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE)
307 | self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit)
308 | return self
309 |
310 | def __exit__(self, exc_type, exc_value, traceback):
311 | if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path)
312 | self.audio_reader.close()
313 | self.stream = None
314 | self.DURATION = None
315 |
316 | class AudioFileStream(object):
317 | def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit):
318 | self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance)
319 | self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it)
320 | self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly
321 |
322 | def read(self, size=-1):
323 | buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
324 | if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608
325 |
326 | sample_width = self.audio_reader.getsampwidth()
327 | if not self.little_endian: # big endian format, convert to little endian on the fly
328 | if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
329 | buffer = audioop.byteswap(buffer, sample_width)
330 | else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
331 | buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))
332 |
333 | # workaround for https://bugs.python.org/issue12866
334 | if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
335 | buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
336 |                 sample_width = 4  # make sure we treat the buffer as 32-bit audio now, after converting it from 24-bit audio
337 | if self.audio_reader.getnchannels() != 1: # stereo audio
338 | buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono
339 | return buffer
340 |
341 |
342 | class Recognizer(AudioSource):
343 | def __init__(self):
344 | """
345 | Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
346 | """
347 | self.energy_threshold = 300 # minimum audio energy to consider for recording
348 | self.dynamic_energy_threshold = True
349 | self.dynamic_energy_adjustment_damping = 0.15
350 | self.dynamic_energy_ratio = 1.5
351 | self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete
352 | self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout
353 |
354 | self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
355 | self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording
356 |
357 | def record(self, source, duration=None, offset=None):
358 | """
359 | Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.
360 |
361 | If ``duration`` is not specified, then it will record until there is no more audio input.
362 | """
363 | assert isinstance(source, AudioSource), "Source must be an audio source"
364 | assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
365 |
366 | frames = io.BytesIO()
367 | seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
368 | elapsed_time = 0
369 | offset_time = 0
370 | offset_reached = False
371 | while True: # loop for the total number of chunks needed
372 | if offset and not offset_reached:
373 | offset_time += seconds_per_buffer
374 | if offset_time > offset:
375 | offset_reached = True
376 |
377 | buffer = source.stream.read(source.CHUNK)
378 | if len(buffer) == 0: break
379 |
380 | if offset_reached or not offset:
381 | elapsed_time += seconds_per_buffer
382 | if duration and elapsed_time > duration: break
383 |
384 | frames.write(buffer)
385 |
386 | frame_data = frames.getvalue()
387 | frames.close()
388 | return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
389 |
390 | def adjust_for_ambient_noise(self, source, duration=1):
391 | """
392 | Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.
393 |
394 | Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.
395 |
396 | The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
397 | """
398 | assert isinstance(source, AudioSource), "Source must be an audio source"
399 | assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
400 | assert self.pause_threshold >= self.non_speaking_duration >= 0
401 |
402 | seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
403 | elapsed_time = 0
404 |
405 | # adjust energy threshold until a phrase starts
406 | while True:
407 | elapsed_time += seconds_per_buffer
408 | if elapsed_time > duration: break
409 | buffer = source.stream.read(source.CHUNK)
410 | energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
411 |
412 | # dynamically adjust the energy threshold using asymmetric weighted average
413 | damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
414 | target_energy = energy * self.dynamic_energy_ratio
415 | self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
416 |
417 | def snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, source, timeout=None):
418 | # load snowboy library (NOT THREAD SAFE)
419 | sys.path.append(snowboy_location)
420 | import snowboydetect
421 | sys.path.pop()
422 |
423 | detector = snowboydetect.SnowboyDetect(
424 | resource_filename=os.path.join(snowboy_location, "resources", "common.res").encode(),
425 | model_str=",".join(snowboy_hot_word_files).encode()
426 | )
427 | detector.SetAudioGain(1.0)
428 | detector.SetSensitivity(",".join(["0.4"] * len(snowboy_hot_word_files)).encode())
429 | snowboy_sample_rate = detector.SampleRate()
430 |
431 | elapsed_time = 0
432 | seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
433 | resampling_state = None
434 |
435 | # buffers capable of holding 5 seconds of original audio
436 | five_seconds_buffer_count = int(math.ceil(5 / seconds_per_buffer))
437 | # buffers capable of holding 0.5 seconds of resampled audio
438 | half_second_buffer_count = int(math.ceil(0.5 / seconds_per_buffer))
439 | frames = collections.deque(maxlen=five_seconds_buffer_count)
440 | resampled_frames = collections.deque(maxlen=half_second_buffer_count)
441 | # snowboy check interval
442 | check_interval = 0.05
443 | last_check = time.time()
444 | while True:
445 | elapsed_time += seconds_per_buffer
446 | if timeout and elapsed_time > timeout:
447 | raise WaitTimeoutError("listening timed out while waiting for hotword to be said")
448 |
449 | buffer = source.stream.read(source.CHUNK)
450 | if len(buffer) == 0: break # reached end of the stream
451 | frames.append(buffer)
452 |
453 | # resample audio to the required sample rate
454 | resampled_buffer, resampling_state = audioop.ratecv(buffer, source.SAMPLE_WIDTH, 1, source.SAMPLE_RATE, snowboy_sample_rate, resampling_state)
455 | resampled_frames.append(resampled_buffer)
456 | if time.time() - last_check > check_interval:
457 | # run Snowboy on the resampled audio
458 | snowboy_result = detector.RunDetection(b"".join(resampled_frames))
459 | assert snowboy_result != -1, "Error initializing streams or reading audio data"
460 | if snowboy_result > 0: break # wake word found
461 | resampled_frames.clear()
462 | last_check = time.time()
463 |
464 | return b"".join(frames), elapsed_time
465 |
466 | def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None):
467 | """
468 | Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
469 |
470 | This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.
471 |
472 | The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout.
473 |
474 | The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there will be no phrase time limit.
475 |
476 | The ``snowboy_configuration`` parameter allows integration with `Snowboy `__, an offline, high-accuracy, power-efficient hotword recognition engine. When used, this function will pause until Snowboy detects a hotword, after which it will unpause. This parameter should either be ``None`` to turn off Snowboy support, or a tuple of the form ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format).
477 |
478 | This operation will always complete within ``timeout + phrase_timeout`` seconds if both are numbers, either by returning the audio data, or by raising a ``speech_recognition.WaitTimeoutError`` exception.
479 | """
480 | assert isinstance(source, AudioSource), "Source must be an audio source"
481 | assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
482 | assert self.pause_threshold >= self.non_speaking_duration >= 0
483 | if snowboy_configuration is not None:
484 | assert os.path.isfile(os.path.join(snowboy_configuration[0], "snowboydetect.py")), "``snowboy_configuration[0]`` must be a Snowboy root directory containing ``snowboydetect.py``"
485 | for hot_word_file in snowboy_configuration[1]:
486 | assert os.path.isfile(hot_word_file), "``snowboy_configuration[1]`` must be a list of Snowboy hot word configuration files"
487 |
488 | seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
489 | pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete
490 | phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
491 | non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase
492 |
493 | # read audio input for phrases until there is a phrase that is long enough
494 | elapsed_time = 0 # number of seconds of audio read
495 | buffer = b"" # an empty buffer means that the stream has ended and there is no data left to read
496 | while True:
497 | frames = collections.deque()
498 |
499 | if snowboy_configuration is None:
500 | # store audio input until the phrase starts
501 | while True:
502 | # handle waiting too long for phrase by raising an exception
503 | elapsed_time += seconds_per_buffer
504 | if timeout and elapsed_time > timeout:
505 | raise WaitTimeoutError("listening timed out while waiting for phrase to start")
506 |
507 | buffer = source.stream.read(source.CHUNK)
508 | if len(buffer) == 0: break # reached end of the stream
509 | frames.append(buffer)
510 | if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
511 | frames.popleft()
512 |
513 | # detect whether speaking has started on audio input
514 | energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
515 | if energy > self.energy_threshold: break
516 |
517 | # dynamically adjust the energy threshold using asymmetric weighted average
518 | if self.dynamic_energy_threshold:
519 | damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
520 | target_energy = energy * self.dynamic_energy_ratio
521 | self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
522 | else:
523 | # read audio input until the hotword is said
524 | snowboy_location, snowboy_hot_word_files = snowboy_configuration
525 | buffer, delta_time = self.snowboy_wait_for_hot_word(snowboy_location, snowboy_hot_word_files, source, timeout)
526 | elapsed_time += delta_time
527 | if len(buffer) == 0: break # reached end of the stream
528 | frames.append(buffer)
529 |
530 | # read audio input until the phrase ends
531 | pause_count, phrase_count = 0, 0
532 | phrase_start_time = elapsed_time
533 | while True:
534 | # handle phrase being too long by cutting off the audio
535 | elapsed_time += seconds_per_buffer
536 | if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
537 | break
538 |
539 | buffer = source.stream.read(source.CHUNK)
540 | if len(buffer) == 0: break # reached end of the stream
541 | frames.append(buffer)
542 | phrase_count += 1
543 |
544 | # check if speaking has stopped for longer than the pause threshold on the audio input
545 | energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # unit energy of the audio signal within the buffer
546 | if energy > self.energy_threshold:
547 | pause_count = 0
548 | else:
549 | pause_count += 1
550 | if pause_count > pause_buffer_count: # end of the phrase
551 | break
552 |
553 | # check how long the detected phrase is, and retry listening if the phrase is too short
554 | phrase_count -= pause_count # exclude the buffers for the pause before the phrase
555 | if phrase_count >= phrase_buffer_count or len(buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening
556 |
557 | # obtain frame data
558 | for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
559 | frame_data = b"".join(frames)
560 |
561 | return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
562 |
563 | def listen_in_background(self, source, callback, phrase_time_limit=None):
564 | """
565 |         Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase is detected.
566 |
567 | Returns a function object that, when called, requests that the background listener thread stop. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads. The function accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for the background listener to stop before returning, otherwise it will return immediately and the background listener thread might still be running for a second or two afterwards. Additionally, if you are using a truthy value for ``wait_for_stop``, you must call the function from the same thread you originally called ``listen_in_background`` from.
568 |
569 | Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well.
570 |
571 | The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread.
572 | """
573 | assert isinstance(source, AudioSource), "Source must be an audio source"
574 | running = [True]
575 |
576 | def threaded_listen():
577 | with source as s:
578 | while running[0]:
579 | try: # listen for 1 second, then check again if the stop function has been called
580 | audio = self.listen(s, 1, phrase_time_limit)
581 | except WaitTimeoutError: # listening timed out, just try again
582 | pass
583 | else:
584 | if running[0]: callback(self, audio)
585 |
586 | def stopper(wait_for_stop=True):
587 | running[0] = False
588 | if wait_for_stop:
589 | listener_thread.join() # block until the background thread is done, which can take around 1 second
590 |
591 | listener_thread = threading.Thread(target=threaded_listen)
592 | listener_thread.daemon = True
593 | listener_thread.start()
594 | return stopper
595 |
596 | def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, grammar=None, show_all=False):
597 | """
598 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.
599 |
600 | The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using `PocketSphinx `__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``. The ``language`` parameter can also be a tuple of filesystem paths, of the form ``(acoustic_parameters_directory, language_model_file, phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models.
601 |
602 | If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for.
603 |
604 | Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects a path to the grammar file. Note that if a JSGF grammar is passed, an FSG grammar will be created at the same location to speed up execution in the next run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored.
605 |
606 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.
607 |
608 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.
609 | """
610 | assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
611 | assert isinstance(language, str) or (isinstance(language, tuple) and len(language) == 3), "``language`` must be a string or 3-tuple of Sphinx data file paths of the form ``(acoustic_parameters, language_model, phoneme_dictionary)``"
612 | assert keyword_entries is None or all(isinstance(keyword, (type(""), type(u""))) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1"
613 |
614 | # import the PocketSphinx speech recognition module
615 | try:
616 | from pocketsphinx import pocketsphinx, Jsgf, FsgModel
617 |
618 | except ImportError:
619 | raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.")
620 | except ValueError:
621 | raise RequestError("bad PocketSphinx installation; try reinstalling PocketSphinx version 0.0.9 or better.")
622 | if not hasattr(pocketsphinx, "Decoder") or not hasattr(pocketsphinx.Decoder, "default_config"):
623 | raise RequestError("outdated PocketSphinx installation; ensure you have PocketSphinx version 0.0.9 or better.")
624 |
625 | if isinstance(language, str): # directory containing language data
626 | language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language)
627 | if not os.path.isdir(language_directory):
628 | raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory))
629 | acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model")
630 | language_model_file = os.path.join(language_directory, "language-model.lm.bin")
631 | phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict")
632 | else: # 3-tuple of Sphinx data file paths
633 | acoustic_parameters_directory, language_model_file, phoneme_dictionary_file = language
634 | if not os.path.isdir(acoustic_parameters_directory):
635 | raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory))
636 | if not os.path.isfile(language_model_file):
637 | raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file))
638 | if not os.path.isfile(phoneme_dictionary_file):
639 | raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file))
640 |
641 | # create decoder object
642 | config = pocketsphinx.Decoder.default_config()
643 | config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files
644 | config.set_string("-lm", language_model_file)
645 | config.set_string("-dict", phoneme_dictionary_file)
646 | config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal)
647 | decoder = pocketsphinx.Decoder(config)
648 |
649 | # obtain audio data
650 | raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format
651 |
652 | # obtain recognition results
653 | if keyword_entries is not None: # explicitly specified set of keywords
654 | with PortableNamedTemporaryFile("w") as f:
655 |                 # generate a keywords file - Sphinx documentation recommends sensitivities between 1e-50 and 1e-5
656 | f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for keyword, sensitivity in keyword_entries)
657 | f.flush()
658 |
659 |                 # perform the speech recognition with the keywords file (this is inside the context manager so the file isn't deleted until we're done)
660 | decoder.set_kws("keywords", f.name)
661 | decoder.set_search("keywords")
662 | elif grammar is not None: # a path to a FSG or JSGF grammar
663 | if not os.path.exists(grammar):
664 | raise ValueError("Grammar '{0}' does not exist.".format(grammar))
665 | grammar_path = os.path.abspath(os.path.dirname(grammar))
666 | grammar_name = os.path.splitext(os.path.basename(grammar))[0]
667 | fsg_path = "{0}/{1}.fsg".format(grammar_path, grammar_name)
668 | if not os.path.exists(fsg_path): # create FSG grammar if not available
669 | jsgf = Jsgf(grammar)
670 | rule = jsgf.get_rule("{0}.{0}".format(grammar_name))
671 | fsg = jsgf.build_fsg(rule, decoder.get_logmath(), 7.5)
672 | fsg.writefile(fsg_path)
673 | else:
674 | fsg = FsgModel(fsg_path, decoder.get_logmath(), 7.5)
675 | decoder.set_fsg(grammar_name, fsg)
676 | decoder.set_search(grammar_name)
677 |
678 | decoder.start_utt() # begin utterance processing
679 | decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
680 | decoder.end_utt() # stop utterance processing
681 |
682 | if show_all: return decoder
683 |
684 | # return results
685 | hypothesis = decoder.hyp()
686 | if hypothesis is not None: return hypothesis.hypstr
687 | raise UnknownValueError() # no transcriptions available
688 |
689 | def recognize_google(self, audio_data, key=None, language="en-US", pfilter=0, show_all=False, with_confidence=False):
690 | """
691 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
692 |
693 | The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
694 |
695 | To obtain your own API key, simply following the steps on the `API Keys `__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".
696 |
697 | The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer `__.
698 |
699 | The profanity filter level can be adjusted with ``pfilter``: 0 - No filter, 1 - Only shows the first character and replaces the rest with asterisks. The default is level 0.
700 |
701 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
702 |
703 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
704 | """
705 | assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
706 | assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string"
707 | assert isinstance(language, str), "``language`` must be a string"
708 |
709 | flac_data = audio_data.get_flac_data(
710 | convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
711 | convert_width=2 # audio samples must be 16-bit
712 | )
713 | if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
714 | url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({
715 | "client": "chromium",
716 | "lang": language,
717 | "key": key,
718 | "pFilter": pfilter
719 | }))
720 | request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)})
721 |
722 | # obtain audio transcription results
723 | try:
724 | response = urlopen(request, timeout=self.operation_timeout)
725 | except HTTPError as e:
726 | raise RequestError("recognition request failed: {}".format(e.reason))
727 | except URLError as e:
728 | raise RequestError("recognition connection failed: {}".format(e.reason))
729 | response_text = response.read().decode("utf-8")
730 |
731 | # ignore any blank blocks
732 | actual_result = []
733 | for line in response_text.split("\n"):
734 | if not line: continue
735 | result = json.loads(line)["result"]
736 | if len(result) != 0:
737 | actual_result = result[0]
738 | break
739 |
740 | # return results
741 | if show_all:
742 | return actual_result
743 |
744 | if not isinstance(actual_result, dict) or len(actual_result.get("alternative", [])) == 0: raise UnknownValueError()
745 |
746 | if "confidence" in actual_result["alternative"]:
747 | # return alternative with highest confidence score
748 | best_hypothesis = max(actual_result["alternative"], key=lambda alternative: alternative["confidence"])
749 | else:
750 | # when there is no confidence available, we arbitrarily choose the first hypothesis.
751 | best_hypothesis = actual_result["alternative"][0]
752 | if "transcript" not in best_hypothesis: raise UnknownValueError()
753 | # https://cloud.google.com/speech-to-text/docs/basics#confidence-values
754 | # "Your code should not require the confidence field as it is not guaranteed to be accurate, or even set, in any of the results."
755 | confidence = best_hypothesis.get("confidence", 0.5)
756 | if with_confidence:
757 | return best_hypothesis["transcript"], confidence
758 | return best_hypothesis["transcript"]
759 |
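# Usage sketch for ``recognize_google`` (comments only, so nothing executes on import). A hedged
# example: "hypothetical_clip.wav" is an invented file name and an internet connection is needed.
# With ``with_confidence=True`` the method returns a ``(transcript, confidence)`` tuple, and the
# confidence defaults to 0.5 whenever the API response omits it.
#
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile("hypothetical_clip.wav") as source:
#       audio = r.record(source)
#   try:
#       text, confidence = r.recognize_google(audio, language="en-US", with_confidence=True)
#       print(text, confidence)
#   except sr.UnknownValueError:
#       print("speech was unintelligible")
#   except sr.RequestError as e:
#       print("API request failed: {}".format(e))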
760 | def recognize_google_cloud(self, audio_data, credentials_json=None, language="en-US", preferred_phrases=None, show_all=False):
761 | """
762 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API.
763 |
764 | This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart `__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file `__.
765 |
766 | The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). A list of supported language tags can be found in the `Google Cloud Speech API documentation `__.
767 |
768 | If ``preferred_phrases`` is an iterable of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings `__.
769 |
770 | Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary.
771 |
772 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection.
773 | """
774 | assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
775 | if credentials_json is None:
776 | assert os.environ.get('GOOGLE_APPLICATION_CREDENTIALS') is not None, "``credentials_json`` must be specified or the GOOGLE_APPLICATION_CREDENTIALS environment variable must be set"
777 | assert isinstance(language, str), "``language`` must be a string"
778 | assert preferred_phrases is None or all(isinstance(preferred_phrase, str) for preferred_phrase in preferred_phrases), "``preferred_phrases`` must be a list of strings"
779 |
780 | try:
781 | import socket
782 | from google.cloud import speech
783 | from google.api_core.exceptions import GoogleAPICallError
784 | except ImportError:
785 | raise RequestError('missing google-cloud-speech module: ensure that google-cloud-speech is set up correctly.')
786 |
787 | if credentials_json is not None:
788 | client = speech.SpeechClient.from_service_account_json(credentials_json)
789 | else:
790 | client = speech.SpeechClient()
791 |
792 | flac_data = audio_data.get_flac_data(
793 | convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)), # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range
794 | convert_width=2 # audio samples must be 16-bit
795 | )
796 | audio = speech.RecognitionAudio(content=flac_data)
797 |
798 | config = {
799 | 'encoding': speech.RecognitionConfig.AudioEncoding.FLAC,
800 | 'sample_rate_hertz': audio_data.sample_rate,
801 | 'language_code': language
802 | }
803 | if preferred_phrases is not None:
804 | config['speechContexts'] = [speech.SpeechContext(
805 | phrases=preferred_phrases
806 | )]
807 | if show_all:
808 | config['enableWordTimeOffsets'] = True # some useful extra options for when we want all the output
809 |
810 | opts = {}
811 | if self.operation_timeout and socket.getdefaulttimeout() is None:
812 | opts['timeout'] = self.operation_timeout
813 |
814 | config = speech.RecognitionConfig(**config)
815 |
816 | try:
817 | response = client.recognize(config=config, audio=audio)
818 | except GoogleAPICallError as e:
819 | raise RequestError(e)
820 | except URLError as e:
821 | raise RequestError("recognition connection failed: {0}".format(e.reason))
822 |
823 | if show_all: return response
824 | if len(response.results) == 0: raise UnknownValueError()
825 |
826 | transcript = ''
827 | for result in response.results:
828 | transcript += result.alternatives[0].transcript.strip() + ' '
829 | return transcript
830 |
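# Usage sketch for ``recognize_google_cloud`` (comments only). A hedged example: the file and
# credential names are invented, and the google-cloud-speech package plus a billed GCP project
# are required. Note that ``credentials_json`` is handed to
# ``SpeechClient.from_service_account_json``, which expects a path to the credentials file, and
# ``preferred_phrases`` biases recognition toward the listed strings.
#
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile("hypothetical_meeting.wav") as source:
#       audio = r.record(source)
#   transcript = r.recognize_google_cloud(
#       audio,
#       credentials_json="hypothetical_credentials.json",
#       language="en-US",
#       preferred_phrases=["pyaudio", "pocketsphinx"],
#   )
#   print(transcript)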
831 | def recognize_wit(self, audio_data, key, show_all=False):
832 | """
833 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.
834 |
835 | The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.
836 |
837 | To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.
838 |
839 | The recognition language is configured in the Wit.ai app settings.
840 |
841 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary.
842 |
843 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
844 | """
845 | assert isinstance(audio_data, AudioData), "Data must be audio data"
846 | assert isinstance(key, str), "``key`` must be a string"
847 |
848 | wav_data = audio_data.get_wav_data(
849 | convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
850 | convert_width=2 # audio samples should be 16-bit
851 | )
852 | url = "https://api.wit.ai/speech?v=20170307"
853 | request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"})
854 | try:
855 | response = urlopen(request, timeout=self.operation_timeout)
856 | except HTTPError as e:
857 | raise RequestError("recognition request failed: {}".format(e.reason))
858 | except URLError as e:
859 | raise RequestError("recognition connection failed: {}".format(e.reason))
860 | response_text = response.read().decode("utf-8")
861 | result = json.loads(response_text)
862 |
863 | # return results
864 | if show_all: return result
865 | if "_text" not in result or result["_text"] is None: raise UnknownValueError()
866 | return result["_text"]
867 |
868 | def recognize_azure(self, audio_data, key, language="en-US", profanity="masked", location="westus", show_all=False):
869 | """
870 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Azure Speech API.
871 |
872 | The Microsoft Azure Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure.
873 |
874 | To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Speech" > "Create", and fill in the form to make a "Speech" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Azure Speech API keys are 32-character lowercase hexadecimal strings.
875 |
876 | The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode".
877 |
878 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary.
879 |
880 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
881 | """
882 | assert isinstance(audio_data, AudioData), "Data must be audio data"
883 | assert isinstance(key, str), "``key`` must be a string"
884 | # assert isinstance(result_format, str), "``format`` must be a string" # simple|detailed
885 | assert isinstance(language, str), "``language`` must be a string"
886 |
887 | result_format = 'detailed'
888 | access_token, expire_time = getattr(self, "azure_cached_access_token", None), getattr(self, "azure_cached_access_token_expiry", None)
889 | allow_caching = True
890 | try:
891 | from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
892 | except ImportError:
893 | expire_time = None # monotonic time not available, don't cache access tokens
894 | allow_caching = False # don't allow caching, since monotonic time isn't available
895 | if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
896 | # get an access token using OAuth
897 | credential_url = "https://" + location + ".api.cognitive.microsoft.com/sts/v1.0/issueToken"
898 | credential_request = Request(credential_url, data=b"", headers={
899 | "Content-type": "application/x-www-form-urlencoded",
900 | "Content-Length": "0",
901 | "Ocp-Apim-Subscription-Key": key,
902 | })
903 |
904 | if allow_caching:
905 | start_time = monotonic()
906 |
907 | try:
908 | credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one
909 | except HTTPError as e:
910 | raise RequestError("credential request failed: {}".format(e.reason))
911 | except URLError as e:
912 | raise RequestError("credential connection failed: {}".format(e.reason))
913 | access_token = credential_response.read().decode("utf-8")
914 |
915 | if allow_caching:
916 | # save the token for the duration it is valid for
917 | self.azure_cached_access_token = access_token
918 | self.azure_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/Speech-Service/rest-apis#authentication, the token expires in exactly 10 minutes
919 |
920 | wav_data = audio_data.get_wav_data(
921 | convert_rate=16000, # audio samples must be 8 kHz or 16 kHz
922 | convert_width=2 # audio samples should be 16-bit
923 | )
924 |
925 | url = "https://" + location + ".stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?{}".format(urlencode({
926 | "language": language,
927 | "format": result_format,
928 | "profanity": profanity
929 | }))
930 |
931 | if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible
932 | request = Request(url, data=io.BytesIO(wav_data), headers={
933 | "Authorization": "Bearer {}".format(access_token),
934 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
935 | "Transfer-Encoding": "chunked",
936 | })
937 | else: # fall back on manually formatting the POST body as a chunked request
938 | ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8")
939 | chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n"
940 | request = Request(url, data=chunked_transfer_encoding_data, headers={
941 | "Authorization": "Bearer {}".format(access_token),
942 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
943 | "Transfer-Encoding": "chunked",
944 | })
945 |
946 | try:
947 | response = urlopen(request, timeout=self.operation_timeout)
948 | except HTTPError as e:
949 | raise RequestError("recognition request failed: {}".format(e.reason))
950 | except URLError as e:
951 | raise RequestError("recognition connection failed: {}".format(e.reason))
952 | response_text = response.read().decode("utf-8")
953 | result = json.loads(response_text)
954 |
955 | # return results
956 | if show_all:
957 | return result
958 | if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "NBest" not in result:
959 | raise UnknownValueError()
960 | return result['NBest'][0]["Display"], result['NBest'][0]["Confidence"]
961 |
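# Usage sketch for ``recognize_azure`` (comments only). A hedged example with a placeholder key
# and file name. Unlike most recognizers here, a successful call returns a
# ``(DisplayText, Confidence)`` tuple rather than a bare string, and the OAuth access token is
# cached on the recognizer for roughly ten minutes before being refreshed.
#
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile("hypothetical_dictation.wav") as source:
#       audio = r.record(source)
#   text, confidence = r.recognize_azure(audio, key="<32-character Azure Speech key>",
#                                        location="westus", language="en-US")
#   print(text, confidence)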
962 | def recognize_bing(self, audio_data, key, language="en-US", show_all=False):
963 | """
964 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Speech API.
965 |
966 | The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure.
967 |
968 | To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Bing Speech API" > "Create", and fill in the form to make a "Bing Speech API" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Bing Speech API keys are 32-character lowercase hexadecimal strings.
969 |
970 | The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode".
971 |
972 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary.
973 |
974 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
975 | """
976 | assert isinstance(audio_data, AudioData), "Data must be audio data"
977 | assert isinstance(key, str), "``key`` must be a string"
978 | assert isinstance(language, str), "``language`` must be a string"
979 |
980 | access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
981 | allow_caching = True
982 | try:
983 | from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
984 | except ImportError:
985 | expire_time = None # monotonic time not available, don't cache access tokens
986 | allow_caching = False # don't allow caching, since monotonic time isn't available
987 | if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
988 | # get an access token using OAuth
989 | credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"
990 | credential_request = Request(credential_url, data=b"", headers={
991 | "Content-type": "application/x-www-form-urlencoded",
992 | "Content-Length": "0",
993 | "Ocp-Apim-Subscription-Key": key,
994 | })
995 |
996 | if allow_caching:
997 | start_time = monotonic()
998 |
999 | try:
1000 | credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one
1001 | except HTTPError as e:
1002 | raise RequestError("credential request failed: {}".format(e.reason))
1003 | except URLError as e:
1004 | raise RequestError("credential connection failed: {}".format(e.reason))
1005 | access_token = credential_response.read().decode("utf-8")
1006 |
1007 | if allow_caching:
1008 | # save the token for the duration it is valid for
1009 | self.bing_cached_access_token = access_token
1010 | self.bing_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition, the token expires in exactly 10 minutes
1011 |
1012 | wav_data = audio_data.get_wav_data(
1013 | convert_rate=16000, # audio samples must be 8 kHz or 16 kHz
1014 | convert_width=2 # audio samples should be 16-bit
1015 | )
1016 |
1017 | url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format(urlencode({
1018 | "language": language,
1019 | "locale": language,
1020 | "requestid": uuid.uuid4(),
1021 | }))
1022 |
1023 | if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible
1024 | request = Request(url, data=io.BytesIO(wav_data), headers={
1025 | "Authorization": "Bearer {}".format(access_token),
1026 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
1027 | "Transfer-Encoding": "chunked",
1028 | })
1029 | else: # fall back on manually formatting the POST body as a chunked request
1030 | ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8")
1031 | chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n"
1032 | request = Request(url, data=chunked_transfer_encoding_data, headers={
1033 | "Authorization": "Bearer {}".format(access_token),
1034 | "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
1035 | "Transfer-Encoding": "chunked",
1036 | })
1037 |
1038 | try:
1039 | response = urlopen(request, timeout=self.operation_timeout)
1040 | except HTTPError as e:
1041 | raise RequestError("recognition request failed: {}".format(e.reason))
1042 | except URLError as e:
1043 | raise RequestError("recognition connection failed: {}".format(e.reason))
1044 | response_text = response.read().decode("utf-8")
1045 | result = json.loads(response_text)
1046 |
1047 | # return results
1048 | if show_all: return result
1049 | if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "DisplayText" not in result: raise UnknownValueError()
1050 | return result["DisplayText"]
1051 |
1052 | def recognize_lex(self, audio_data, bot_name, bot_alias, user_id, content_type="audio/l16; rate=16000; channels=1", access_key_id=None, secret_access_key=None, region=None):
1053 | """
1054 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Amazon Lex API.
1055 |
1056 | If ``access_key_id`` or ``secret_access_key`` is not set, boto3 resolves credentials using its standard lookup order, described at
1057 | http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials
1058 | """
1059 | assert isinstance(audio_data, AudioData), "Data must be audio data"
1060 | assert isinstance(bot_name, str), "``bot_name`` must be a string"
1061 | assert isinstance(bot_alias, str), "``bot_alias`` must be a string"
1062 | assert isinstance(user_id, str), "``user_id`` must be a string"
1063 | assert isinstance(content_type, str), "``content_type`` must be a string"
1064 | assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string"
1065 | assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string"
1066 | assert region is None or isinstance(region, str), "``region`` must be a string"
1067 |
1068 | try:
1069 | import boto3
1070 | except ImportError:
1071 | raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.")
1072 |
1073 | client = boto3.client('lex-runtime', aws_access_key_id=access_key_id,
1074 | aws_secret_access_key=secret_access_key,
1075 | region_name=region)
1076 |
1077 | raw_data = audio_data.get_raw_data(
1078 | convert_rate=16000, convert_width=2
1079 | )
1080 |
1081 | accept = "text/plain; charset=utf-8"
1082 | response = client.post_content(botName=bot_name, botAlias=bot_alias, userId=user_id, contentType=content_type, accept=accept, inputStream=raw_data)
1083 |
1084 | return response["inputTranscript"]
1085 |
1086 | def recognize_houndify(self, audio_data, client_id, client_key, show_all=False):
1087 | """
1088 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API.
1089 |
1090 | The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account `__. Once logged into the `dashboard `__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue".
1091 |
1092 | To get the client ID and client key for a Houndify client, go to the `dashboard `__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings.
1093 |
1094 | Currently, only English is supported as a recognition language.
1095 |
1096 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
1097 |
1098 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
1099 | """
1100 | assert isinstance(audio_data, AudioData), "Data must be audio data"
1101 | assert isinstance(client_id, str), "``client_id`` must be a string"
1102 | assert isinstance(client_key, str), "``client_key`` must be a string"
1103 |
1104 | wav_data = audio_data.get_wav_data(
1105 | convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000, # audio samples must be 8 kHz or 16 kHz
1106 | convert_width=2 # audio samples should be 16-bit
1107 | )
1108 | url = "https://api.houndify.com/v1/audio"
1109 | user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4())
1110 | request_time = str(int(time.time()))
1111 | request_signature = base64.urlsafe_b64encode(
1112 | hmac.new(
1113 | base64.urlsafe_b64decode(client_key),
1114 | user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"),
1115 | hashlib.sha256
1116 | ).digest() # get the HMAC digest as bytes
1117 | ).decode("utf-8")
1118 | request = Request(url, data=wav_data, headers={
1119 | "Content-Type": "application/json",
1120 | "Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}),
1121 | "Hound-Request-Authentication": "{};{}".format(user_id, request_id),
1122 | "Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature)
1123 | })
1124 | try:
1125 | response = urlopen(request, timeout=self.operation_timeout)
1126 | except HTTPError as e:
1127 | raise RequestError("recognition request failed: {}".format(e.reason))
1128 | except URLError as e:
1129 | raise RequestError("recognition connection failed: {}".format(e.reason))
1130 | response_text = response.read().decode("utf-8")
1131 | result = json.loads(response_text)
1132 |
1133 | # return results
1134 | if show_all: return result
1135 | if "Disambiguation" not in result or result["Disambiguation"] is None:
1136 | raise UnknownValueError()
1137 | return result['Disambiguation']['ChoiceData'][0]['Transcription'], result['Disambiguation']['ChoiceData'][0]['ConfidenceScore']
1138 |
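# Usage sketch for ``recognize_houndify`` (comments only). A hedged example with placeholder
# credentials. The request is authenticated with an HMAC-SHA256 digest of
# "<user_id>;<request_id><timestamp>", keyed by the Base64-decoded client key, and a successful
# call returns a ``(transcription, confidence_score)`` tuple.
#
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile("hypothetical_question.wav") as source:
#       audio = r.record(source)
#   text, score = r.recognize_houndify(audio, client_id="<Base64 client ID>",
#                                      client_key="<Base64 client key>")
#   print(text, score)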
1139 | def recognize_amazon(self, audio_data, bucket_name=None, access_key_id=None, secret_access_key=None, region=None, job_name=None, file_key=None):
1140 | """
1141 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance) using Amazon Transcribe.
1142 | https://aws.amazon.com/transcribe/
1143 | If ``access_key_id`` or ``secret_access_key`` is not set, boto3 resolves credentials using its standard lookup order, described at
1144 | http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials
1145 | """
1146 | assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string"
1147 | assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string"
1148 | assert region is None or isinstance(region, str), "``region`` must be a string"
1149 | import traceback
1150 | import uuid
1151 | import multiprocessing
1152 | from botocore.exceptions import ClientError
1153 | proc = multiprocessing.current_process()
1154 |
1155 | check_existing = audio_data is None and job_name
1156 |
1157 | bucket_name = bucket_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid))
1158 | job_name = job_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid))
1159 |
1160 | try:
1161 | import boto3
1162 | except ImportError:
1163 | raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.")
1164 |
1165 | transcribe = boto3.client(
1166 | 'transcribe',
1167 | aws_access_key_id=access_key_id,
1168 | aws_secret_access_key=secret_access_key,
1169 | region_name=region)
1170 |
1171 | s3 = boto3.client('s3',
1172 | aws_access_key_id=access_key_id,
1173 | aws_secret_access_key=secret_access_key,
1174 | region_name=region)
1175 |
1176 | session = boto3.Session(
1177 | aws_access_key_id=access_key_id,
1178 | aws_secret_access_key=secret_access_key,
1179 | region_name=region
1180 | )
1181 |
1182 | # Upload audio data to S3.
1183 | filename = '%s.wav' % job_name
1184 | try:
1185 | # Bucket creation fails surprisingly often, even if the bucket exists.
1186 | # print('Attempting to create bucket %s...' % bucket_name)
1187 | s3.create_bucket(Bucket=bucket_name)
1188 | except ClientError as exc:
1189 | print('Error creating bucket %s: %s' % (bucket_name, exc))
1190 | s3res = session.resource('s3')
1191 | bucket = s3res.Bucket(bucket_name)
1192 | if audio_data is not None:
1193 | print('Uploading audio data...')
1194 | wav_data = audio_data.get_wav_data()
1195 | s3.put_object(Bucket=bucket_name, Key=filename, Body=wav_data)
1196 | object_acl = s3res.ObjectAcl(bucket_name, filename)
1197 | object_acl.put(ACL='public-read')
1198 | else:
1199 | print('Skipping audio upload.')
1200 | job_uri = 'https://%s.s3.amazonaws.com/%s' % (bucket_name, filename)
1201 |
1202 | if check_existing:
1203 |
1204 | # Wait for job to complete.
1205 | try:
1206 | status = transcribe.get_transcription_job(TranscriptionJobName=job_name)
1207 | except ClientError as exc:
1208 | print('!'*80)
1209 | print('Error getting job:', exc.response)
1210 | if exc.response['Error']['Code'] == 'BadRequestException' and "The requested job couldn't be found" in str(exc):
1211 | # Some error caused the job we recorded to not exist on AWS.
1212 | # Likely we were interrupted right after retrieving and deleting the job but before recording the transcript.
1213 | # Reset and try again later.
1214 | exc = TranscriptionNotReady()
1215 | exc.job_name = None
1216 | exc.file_key = None
1217 | raise exc
1218 | else:
1219 | # Some other error happened, so re-raise.
1220 | raise
1221 |
1222 | job = status['TranscriptionJob']
1223 | if job['TranscriptionJobStatus'] in ['COMPLETED'] and 'TranscriptFileUri' in job['Transcript']:
1224 |
1225 | # Retrieve transcription JSON containing transcript.
1226 | transcript_uri = job['Transcript']['TranscriptFileUri']
1227 | import urllib.request, json
1228 | with urllib.request.urlopen(transcript_uri) as json_data:
1229 | d = json.load(json_data)
1230 | confidences = []
1231 | for item in d['results']['items']:
1232 | confidences.append(float(item['alternatives'][0]['confidence']))
1233 | confidence = 0.5
1234 | if confidences:
1235 | confidence = sum(confidences)/float(len(confidences))
1236 | transcript = d['results']['transcripts'][0]['transcript']
1237 |
1238 | # Delete job.
1239 | try:
1240 | transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup
1241 | except Exception as exc:
1242 | print('Warning, could not clean up transcription: %s' % exc)
1243 | traceback.print_exc()
1244 |
1245 | # Delete S3 file.
1246 | s3.delete_object(Bucket=bucket_name, Key=filename)
1247 |
1248 | return transcript, confidence
1249 | elif job['TranscriptionJobStatus'] in ['FAILED']:
1250 |
1251 | # Delete job.
1252 | try:
1253 | transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup
1254 | except Exception as exc:
1255 | print('Warning, could not clean up transcription: %s' % exc)
1256 | traceback.print_exc()
1257 |
1258 | # Delete S3 file.
1259 | s3.delete_object(Bucket=bucket_name, Key=filename)
1260 |
1261 | exc = TranscriptionFailed()
1262 | exc.job_name = None
1263 | exc.file_key = None
1264 | raise exc
1265 | else:
1266 | # Keep waiting.
1267 | print('Keep waiting.')
1268 | exc = TranscriptionNotReady()
1269 | exc.job_name = job_name
1270 | exc.file_key = None
1271 | raise exc
1272 |
1273 | else:
1274 |
1275 | # Launch the transcription job.
1276 | # try:
1277 | # transcribe.delete_transcription_job(TranscriptionJobName=job_name) # pre-cleanup
1278 | # except:
1279 | # # It's ok if this fails because the job hopefully doesn't exist yet.
1280 | # pass
1281 | try:
1282 | transcribe.start_transcription_job(
1283 | TranscriptionJobName=job_name,
1284 | Media={'MediaFileUri': job_uri},
1285 | MediaFormat='wav',
1286 | LanguageCode='en-US'
1287 | )
1288 | exc = TranscriptionNotReady()
1289 | exc.job_name = job_name
1290 | exc.file_key = None
1291 | raise exc
1292 | except ClientError as exc:
1293 | print('!'*80)
1294 | print('Error starting job:', exc.response)
1295 | if exc.response['Error']['Code'] == 'LimitExceededException':
1296 | # Could not start job. Cancel everything.
1297 | s3.delete_object(Bucket=bucket_name, Key=filename)
1298 | exc = TranscriptionNotReady()
1299 | exc.job_name = None
1300 | exc.file_key = None
1301 | raise exc
1302 | else:
1303 | # Some other error happened, so re-raise.
1304 | raise
1305 |
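# Usage sketch for ``recognize_amazon`` (comments only). A hedged example: the bucket name, region
# and sleep interval are arbitrary placeholders, and it assumes ``TranscriptionNotReady`` is
# exported by the package as it is used above. Amazon Transcribe is asynchronous: the first call
# uploads the WAV to S3, starts the job, and raises ``TranscriptionNotReady`` carrying the job
# name; callers then poll by calling again with ``audio_data=None`` and that job name until a
# ``(transcript, confidence)`` tuple comes back.
#
#   import time
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile("hypothetical_interview.wav") as source:
#       audio = r.record(source)
#   job_name, audio_arg = None, audio
#   while True:
#       try:
#           text, confidence = r.recognize_amazon(audio_arg, bucket_name="hypothetical-bucket",
#                                                 region="us-east-1", job_name=job_name)
#           print(text, confidence)
#           break
#       except sr.TranscriptionNotReady as exc:
#           job_name, audio_arg = exc.job_name, None
#           time.sleep(15)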
1306 | def recognize_assemblyai(self, audio_data, api_token, job_name=None, **kwargs):
1307 | """
1308 | Wraps the AssemblyAI STT service.
1309 | https://www.assemblyai.com/
1310 | """
1311 |
1312 | def read_file(filename, chunk_size=5242880):
1313 | with open(filename, 'rb') as _file:
1314 | while True:
1315 | data = _file.read(chunk_size)
1316 | if not data:
1317 | break
1318 | yield data
1319 |
1320 | check_existing = audio_data is None and job_name
1321 | if check_existing:
1322 | # Query status.
1323 | transcription_id = job_name
1324 | endpoint = f"https://api.assemblyai.com/v2/transcript/{transcription_id}"
1325 | headers = {
1326 | "authorization": api_token,
1327 | }
1328 | response = requests.get(endpoint, headers=headers)
1329 | data = response.json()
1330 | status = data['status']
1331 |
1332 | if status == 'error':
1333 | # Handle error.
1334 | exc = TranscriptionFailed()
1335 | exc.job_name = None
1336 | exc.file_key = None
1337 | raise exc
1338 | # Handle success.
1339 | elif status == 'completed':
1340 | confidence = data['confidence']
1341 | text = data['text']
1342 | return text, confidence
1343 |
1344 | # Otherwise keep waiting.
1345 | print('Keep waiting.')
1346 | exc = TranscriptionNotReady()
1347 | exc.job_name = job_name
1348 | exc.file_key = None
1349 | raise exc
1350 | else:
1351 | # Upload file.
1352 | headers = {'authorization': api_token}
1353 | response = requests.post('https://api.assemblyai.com/v2/upload',
1354 | headers=headers,
1355 | data=read_file(audio_data))
1356 | upload_url = response.json()['upload_url']
1357 |
1358 | # Queue file for transcription.
1359 | endpoint = "https://api.assemblyai.com/v2/transcript"
1360 | payload = {
1361 | "audio_url": upload_url
1362 | }
1363 | headers = {
1364 | "authorization": api_token,
1365 | "content-type": "application/json"
1366 | }
1367 | response = requests.post(endpoint, json=payload, headers=headers)
1368 | data = response.json()
1369 | transcription_id = data['id']
1370 | exc = TranscriptionNotReady()
1371 | exc.job_name = transcription_id
1372 | exc.file_key = None
1373 | raise exc
1374 |
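# Usage sketch for ``recognize_assemblyai`` (comments only). A hedged example with a placeholder
# token and file path; it assumes ``TranscriptionNotReady`` is exported by the package. Note that
# this recognizer streams a *file path* to AssemblyAI's upload endpoint rather than an
# ``AudioData`` instance, and it follows the same submit-then-poll pattern as ``recognize_amazon``:
# the first call raises ``TranscriptionNotReady`` carrying the transcript ID, and later calls with
# ``audio_data=None`` and that ID either return ``(text, confidence)`` or raise again while the
# job is still processing.
#
#   import time
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   job_name, audio_arg = None, "hypothetical_podcast.mp3"
#   while True:
#       try:
#           text, confidence = r.recognize_assemblyai(audio_arg, api_token="<AssemblyAI token>",
#                                                     job_name=job_name)
#           print(text, confidence)
#           break
#       except sr.TranscriptionNotReady as exc:
#           job_name, audio_arg = exc.job_name, None
#           time.sleep(10)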
1375 | def recognize_ibm(self, audio_data, key, language="en-US", show_all=False):
1376 | """
1377 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API.
1378 |
1379 | The IBM Speech to Text API key is specified by ``key``. Unfortunately, API keys are not available without `signing up for an account `__. Once logged into the IBM Cloud console, follow the instructions for `creating an IBM Watson service instance `__, where the Watson service is "Speech To Text". The key is sent as the password for the reserved username ``apikey`` using HTTP Basic authentication.
1380 |
1381 | The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation `__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value.
1382 |
1383 | Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary.
1384 |
1385 | Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
1386 | """
1387 | assert isinstance(audio_data, AudioData), "Data must be audio data"
1388 | assert isinstance(key, str), "``key`` must be a string"
1389 |
1390 | flac_data = audio_data.get_flac_data(
1391 | convert_rate=None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz
1392 | convert_width=None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit
1393 | )
1394 | url = "https://gateway-wdc.watsonplatform.net/speech-to-text/api/v1/recognize"
1395 | request = Request(url, data=flac_data, headers={
1396 | "Content-Type": "audio/x-flac",
1397 | })
1398 | request.get_method = lambda: 'POST'
1399 | username = 'apikey'
1400 | password = key
1401 | authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8")
1402 | request.add_header("Authorization", "Basic {}".format(authorization_value))
1403 | try:
1404 | response = urlopen(request, timeout=self.operation_timeout)
1405 | except HTTPError as e:
1406 | raise RequestError("recognition request failed: {}".format(e.reason))
1407 | except URLError as e:
1408 | raise RequestError("recognition connection failed: {}".format(e.reason))
1409 | response_text = response.read().decode("utf-8")
1410 | result = json.loads(response_text)
1411 |
1412 | # return results
1413 | if show_all:
1414 | return result
1415 | if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]:
1416 | raise UnknownValueError()
1417 |
1418 | transcription = []
1419 | confidence = None
1420 | for utterance in result["results"]:
1421 | if "alternatives" not in utterance: raise UnknownValueError()
1422 | for hypothesis in utterance["alternatives"]:
1423 | if "transcript" in hypothesis:
1424 | transcription.append(hypothesis["transcript"])
1425 | confidence = hypothesis["confidence"]
1426 | break
1427 | return "\n".join(transcription), confidence
1428 |
1429 | lasttfgraph = ''
1430 | tflabels = None
1431 |
1432 | def recognize_tensorflow(self, audio_data, tensor_graph='tensorflow-data/conv_actions_frozen.pb', tensor_label='tensorflow-data/conv_actions_labels.txt'):
1433 | """
1434 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance).
1435 |
1436 | Path to Tensor loaded from ``tensor_graph``. You can download a model here: http://download.tensorflow.org/models/speech_commands_v0.01.zip
1437 |
1438 | Path to Tensor Labels file loaded from ``tensor_label``.
1439 | """
1440 | assert isinstance(audio_data, AudioData), "Data must be audio data"
1441 | assert isinstance(tensor_graph, str), "``tensor_graph`` must be a string"
1442 | assert isinstance(tensor_label, str), "``tensor_label`` must be a string"
1443 |
1444 | try:
1445 | import tensorflow as tf
1446 | except ImportError:
1447 | raise RequestError("missing tensorflow module: ensure that tensorflow is set up correctly.")
1448 |
1449 | if tensor_graph != self.lasttfgraph:
1450 | self.lasttfgraph = tensor_graph
1451 |
1452 | # load graph
1453 | with tf.gfile.FastGFile(tensor_graph, 'rb') as f:
1454 | graph_def = tf.GraphDef()
1455 | graph_def.ParseFromString(f.read())
1456 | tf.import_graph_def(graph_def, name='')
1457 | # load labels
1458 | self.tflabels = [line.rstrip() for line in tf.gfile.GFile(tensor_label)]
1459 |
1460 | wav_data = audio_data.get_wav_data(
1461 | convert_rate=16000, convert_width=2
1462 | )
1463 |
1464 | with tf.Session() as sess:
1465 | input_layer_name = 'wav_data:0'
1466 | output_layer_name = 'labels_softmax:0'
1467 | softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
1468 | predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})
1469 |
1470 | # Sort labels in order of confidence
1471 | top_k = predictions.argsort()[-1:][::-1]
1472 | for node_id in top_k:
1473 | human_string = self.tflabels[node_id]
1474 | return human_string
1475 |
1476 | def recognize_whisper(self, audio_data, model="base", show_dict=False, load_options=None, language=None, translate=False, **transcribe_options):
1477 | """
1478 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using Whisper.
1479 |
1480 | The recognition language is determined by ``language``, an uncapitalized full language name like "english" or "chinese". See the full language list at https://github.com/openai/whisper/blob/main/whisper/tokenizer.py
1481 |
1482 | model can be any of tiny, base, small, medium, large, tiny.en, base.en, small.en, medium.en. See https://github.com/openai/whisper for more details.
1483 |
1484 | If show_dict is true, returns the full dict response from Whisper, including the detected language. Otherwise returns only the transcription.
1485 |
1486 | You can translate the result to english with Whisper by passing translate=True
1487 |
1488 | Other values are passed directly to whisper. See https://github.com/openai/whisper/blob/main/whisper/transcribe.py for all options
1489 | """
1490 |
1491 | assert isinstance(audio_data, AudioData), "Data must be audio data"
1492 | import numpy as np
1493 | import soundfile as sf
1494 | import torch
1495 | import whisper
1496 |
1497 | if load_options or not hasattr(self, "whisper_model") or self.whisper_model.get(model) is None:
1498 | self.whisper_model = getattr(self, "whisper_model", {})
1499 | self.whisper_model[model] = whisper.load_model(model, **load_options or {})
1500 |
1501 | # 16 kHz https://github.com/openai/whisper/blob/28769fcfe50755a817ab922a7bc83483159600a9/whisper/audio.py#L98-L99
1502 | wav_bytes = audio_data.get_wav_data(convert_rate=16000)
1503 | wav_stream = io.BytesIO(wav_bytes)
1504 | audio_array, sampling_rate = sf.read(wav_stream)
1505 | audio_array = audio_array.astype(np.float32)
1506 |
1507 | result = self.whisper_model[model].transcribe(
1508 | audio_array,
1509 | language=language,
1510 | task="translate" if translate else None,
1511 | fp16=torch.cuda.is_available(),
1512 | **transcribe_options
1513 | )
1514 |
1515 | if show_dict:
1516 | return result
1517 | else:
1518 | return result["text"]
1519 |
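# Usage sketch for ``recognize_whisper`` (comments only). A hedged example: the file name is a
# placeholder and the openai-whisper, soundfile and torch packages must be installed. Loaded
# models are cached per model name on the recognizer, so repeated calls with the same ``model``
# reuse the weights already in memory.
#
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile("hypothetical_lecture.wav") as source:
#       audio = r.record(source)
#   # plain transcription
#   print(r.recognize_whisper(audio, model="base", language="english"))
#   # full result dict, translated to English
#   result = r.recognize_whisper(audio, model="base", translate=True, show_dict=True)
#   print(result["language"], result["text"])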
1520 | recognize_whisper_api = whisper.recognize_whisper_api
1521 |
1522 | def recognize_vosk(self, audio_data, language='en'):
1523 | from vosk import Model, KaldiRecognizer
1524 |
1525 | assert isinstance(audio_data, AudioData), "Data must be audio data"
1526 |
1527 | if not hasattr(self, 'vosk_model'):
1528 | if not os.path.exists("model"):
1529 | return "Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder."
1531 | self.vosk_model = Model("model")
1532 |
1533 | rec = KaldiRecognizer(self.vosk_model, 16000)
1534 |
1535 | rec.AcceptWaveform(audio_data.get_raw_data(convert_rate=16000, convert_width=2))
1536 | final_recognition = rec.FinalResult()
1537 |
1538 | return final_recognition
1539 |
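# Usage sketch for ``recognize_vosk`` (comments only). A hedged example: Vosk must be installed
# and a model unpacked into a local folder named "model". The method returns Vosk's JSON result
# string, so callers typically parse it to pull out the plain text.
#
#   import json
#   import custom_speech_recognition as sr
#   r = sr.Recognizer()
#   with sr.AudioFile("hypothetical_memo.wav") as source:
#       audio = r.record(source)
#   raw = r.recognize_vosk(audio)
#   print(json.loads(raw).get("text", ""))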
1540 |
1541 | class PortableNamedTemporaryFile(object):
1542 | """Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently open, even on Windows."""
1543 | def __init__(self, mode="w+b"):
1544 | self.mode = mode
1545 |
1546 | def __enter__(self):
1547 | # create the temporary file and open it
1548 | file_descriptor, file_path = tempfile.mkstemp()
1549 | self._file = os.fdopen(file_descriptor, self.mode)
1550 |
1551 | # the name property is a public field
1552 | self.name = file_path
1553 | return self
1554 |
1555 | def __exit__(self, exc_type, exc_value, traceback):
1556 | self._file.close()
1557 | os.remove(self.name)
1558 |
1559 | def write(self, *args, **kwargs):
1560 | return self._file.write(*args, **kwargs)
1561 |
1562 | def writelines(self, *args, **kwargs):
1563 | return self._file.writelines(*args, **kwargs)
1564 |
1565 | def flush(self, *args, **kwargs):
1566 | return self._file.flush(*args, **kwargs)
1567 |
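# Usage sketch for ``PortableNamedTemporaryFile`` (comments only). A hedged illustration of why
# it exists: a ``tempfile.NamedTemporaryFile`` cannot be reopened by name on Windows while it is
# still open, whereas this wrapper can, which is what ``recognize_sphinx`` relies on when it hands
# the keywords file path to the Sphinx decoder.
#
#   with PortableNamedTemporaryFile("w") as f:
#       f.write("stop /1e-30/\n")
#       f.flush()
#       with open(f.name) as same_file:  # reopening by name works, even on Windows
#           print(same_file.read())
#   # the temporary file is deleted once the outer ``with`` block exits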
1568 |
1569 | # ===============================
1570 | # backwards compatibility shims
1571 | # ===============================
1572 |
1573 | WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1
1574 |
1575 |
1576 | def recognize_api(self, audio_data, client_access_token, language="en", session_id=None, show_all=False):
1577 | wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2)
1578 | url = "https://api.api.ai/v1/query"
1579 | while True:
1580 | boundary = uuid.uuid4().hex
1581 | if boundary.encode("utf-8") not in wav_data: break
1582 | if session_id is None: session_id = uuid.uuid4().hex
1583 | data = b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"request\"\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" + b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" + b"Content-Type: audio/wav\r\n" + b"\r\n" + wav_data + b"\r\n" + b"--" + boundary.encode("utf-8") + b"--\r\n"
1584 | request = Request(url, data=data, headers={"Authorization": "Bearer {}".format(client_access_token), "Content-Length": str(len(data)), "Expect": "100-continue", "Content-Type": "multipart/form-data; boundary={}".format(boundary)})
1585 | try: response = urlopen(request, timeout=10)
1586 | except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason))
1587 | except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason))
1588 | response_text = response.read().decode("utf-8")
1589 | result = json.loads(response_text)
1590 | if show_all: return result
1591 | if "status" not in result or "errorType" not in result["status"] or result["status"]["errorType"] != "success":
1592 | raise UnknownValueError()
1593 | return result["result"]["resolvedQuery"]
1594 |
1595 |
1596 | Recognizer.recognize_api = classmethod(recognize_api) # API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, and currently is only optionally available for paid plans
1597 |
--------------------------------------------------------------------------------
/custom_speech_recognition/__main__.py:
--------------------------------------------------------------------------------
1 | import custom_speech_recognition as sr
2 |
3 | r = sr.Recognizer()
4 | m = sr.Microphone()
5 |
6 | try:
7 | print("A moment of silence, please...")
8 | with m as source: r.adjust_for_ambient_noise(source)
9 | print("Set minimum energy threshold to {}".format(r.energy_threshold))
10 | while True:
11 | print("Say something!")
12 | with m as source: audio = r.listen(source)
13 | print("Got it! Now to recognize it...")
14 | try:
15 | # recognize speech using Google Speech Recognition
16 | value = r.recognize_google(audio)
17 |
18 | print("You said {}".format(value))
19 | except sr.UnknownValueError:
20 | print("Oops! Didn't catch that")
21 | except sr.RequestError as e:
22 | print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
23 | except KeyboardInterrupt:
24 | pass
25 |
--------------------------------------------------------------------------------
/custom_speech_recognition/audio.py:
--------------------------------------------------------------------------------
1 | import aifc
2 | import audioop
3 | import io
4 | import os
5 | import platform
6 | import stat
7 | import subprocess
8 | import sys
9 | import wave
10 |
11 |
12 | class AudioData(object):
13 | """
14 | Creates a new ``AudioData`` instance, which represents mono audio data.
15 |
16 | The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format.
17 |
18 | The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample.
19 |
20 | The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz).
21 |
22 | Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly.
23 | """
24 |
25 | def __init__(self, frame_data, sample_rate, sample_width):
26 | assert sample_rate > 0, "Sample rate must be a positive integer"
27 | assert (
28 | sample_width % 1 == 0 and 1 <= sample_width <= 4
29 | ), "Sample width must be between 1 and 4 inclusive"
30 | self.frame_data = frame_data
31 | self.sample_rate = sample_rate
32 | self.sample_width = int(sample_width)
33 |
34 | def get_segment(self, start_ms=None, end_ms=None):
35 | """
36 | Returns a new ``AudioData`` instance, trimmed to a given time interval. In other words, an ``AudioData`` instance with the same audio data except starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in.
37 |
38 | If not specified, ``start_ms`` defaults to the beginning of the audio, and ``end_ms`` defaults to the end.
39 | """
40 | assert (
41 | start_ms is None or start_ms >= 0
42 | ), "``start_ms`` must be a non-negative number"
43 | assert end_ms is None or end_ms >= (
44 | 0 if start_ms is None else start_ms
45 | ), "``end_ms`` must be a non-negative number greater or equal to ``start_ms``"
46 | if start_ms is None:
47 | start_byte = 0
48 | else:
49 | start_byte = int(
50 | (start_ms * self.sample_rate * self.sample_width) // 1000
51 | )
52 | if end_ms is None:
53 | end_byte = len(self.frame_data)
54 | else:
55 | end_byte = int(
56 | (end_ms * self.sample_rate * self.sample_width) // 1000
57 | )
58 | return AudioData(
59 | self.frame_data[start_byte:end_byte],
60 | self.sample_rate,
61 | self.sample_width,
62 | )
63 |
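# A hedged sketch of the byte arithmetic above (comments only): for mono PCM data, each
# millisecond spans ``sample_rate * sample_width / 1000`` bytes, so for 16 kHz 16-bit audio a
# 250 ms - 750 ms slice covers bytes 8000 through 24000.
#
#   clip = AudioData(b"\x00\x00" * 16000, sample_rate=16000, sample_width=2)  # 1 second of silence
#   middle = clip.get_segment(start_ms=250, end_ms=750)
#   assert len(middle.frame_data) == 16000  # 0.5 s * 16000 samples/s * 2 bytes/sample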
64 | def get_raw_data(self, convert_rate=None, convert_width=None):
65 | """
66 | Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance.
67 |
68 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
69 |
70 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
71 |
72 | Writing these bytes directly to a file results in a valid `RAW/PCM audio file `__.
73 | """
74 | assert (
75 | convert_rate is None or convert_rate > 0
76 | ), "Sample rate to convert to must be a positive integer"
77 | assert convert_width is None or (
78 | convert_width % 1 == 0 and 1 <= convert_width <= 4
79 | ), "Sample width to convert to must be between 1 and 4 inclusive"
80 |
81 | raw_data = self.frame_data
82 |
83 | # make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples)
84 | if self.sample_width == 1:
85 | raw_data = audioop.bias(
86 | raw_data, 1, -128
87 | ) # subtract 128 from every sample to make them act like signed samples
88 |
89 | # resample audio at the desired rate if specified
90 | if convert_rate is not None and self.sample_rate != convert_rate:
91 | raw_data, _ = audioop.ratecv(
92 | raw_data,
93 | self.sample_width,
94 | 1,
95 | self.sample_rate,
96 | convert_rate,
97 | None,
98 | )
99 |
100 | # convert samples to desired sample width if specified
101 | if convert_width is not None and self.sample_width != convert_width:
102 | if (
103 | convert_width == 3
104 | ): # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866)
105 | raw_data = audioop.lin2lin(
106 | raw_data, self.sample_width, 4
107 | ) # convert audio into 32-bit first, which is always supported
108 | try:
109 | audioop.bias(
110 | b"", 3, 0
111 | ) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
112 | except (
113 | audioop.error
114 | ): # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
115 | raw_data = b"".join(
116 | raw_data[i + 1 : i + 4]
117 | for i in range(0, len(raw_data), 4)
118 | ) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample
119 | else: # 24-bit audio fully supported, we don't need to shim anything
120 | raw_data = audioop.lin2lin(
121 | raw_data, self.sample_width, convert_width
122 | )
123 | else:
124 | raw_data = audioop.lin2lin(
125 | raw_data, self.sample_width, convert_width
126 | )
127 |
128 | # if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again
129 | if convert_width == 1:
130 | raw_data = audioop.bias(
131 | raw_data, 1, 128
132 | ) # add 128 to every sample to make them act like unsigned samples again
133 |
134 | return raw_data
135 |
136 | def get_wav_data(self, convert_rate=None, convert_width=None, nchannels = 1):
137 | """
138 | Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance.
139 |
140 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
141 |
142 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
143 |
144 | Writing these bytes directly to a file results in a valid `WAV file `__.
145 | """
146 | raw_data = self.get_raw_data(convert_rate, convert_width)
147 | sample_rate = (
148 | self.sample_rate if convert_rate is None else convert_rate
149 | )
150 | sample_width = (
151 | self.sample_width if convert_width is None else convert_width
152 | )
153 |
154 | # generate the WAV file contents
155 | with io.BytesIO() as wav_file:
156 | wav_writer = wave.open(wav_file, "wb")
157 | try: # note that we can't use context manager, since that was only added in Python 3.4
158 | wav_writer.setframerate(sample_rate)
159 | wav_writer.setsampwidth(sample_width)
160 | wav_writer.setnchannels(nchannels)
161 | wav_writer.writeframes(raw_data)
162 | wav_data = wav_file.getvalue()
163 | finally: # make sure resources are cleaned up
164 | wav_writer.close()
165 | return wav_data
166 |
167 | def get_aiff_data(self, convert_rate=None, convert_width=None):
168 | """
169 | Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance.
170 |
171 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
172 |
173 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
174 |
175 | Writing these bytes directly to a file results in a valid `AIFF-C file `__.
176 | """
177 | raw_data = self.get_raw_data(convert_rate, convert_width)
178 | sample_rate = (
179 | self.sample_rate if convert_rate is None else convert_rate
180 | )
181 | sample_width = (
182 | self.sample_width if convert_width is None else convert_width
183 | )
184 |
185 | # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian
186 | if hasattr(
187 | audioop, "byteswap"
188 | ): # ``audioop.byteswap`` was only added in Python 3.4
189 | raw_data = audioop.byteswap(raw_data, sample_width)
190 | else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
191 | raw_data = raw_data[sample_width - 1 :: -1] + b"".join(
192 | raw_data[i + sample_width : i : -1]
193 | for i in range(sample_width - 1, len(raw_data), sample_width)
194 | )
195 |
196 | # generate the AIFF-C file contents
197 | with io.BytesIO() as aiff_file:
198 | aiff_writer = aifc.open(aiff_file, "wb")
199 | try: # note that we can't use context manager, since that was only added in Python 3.4
200 | aiff_writer.setframerate(sample_rate)
201 | aiff_writer.setsampwidth(sample_width)
202 | aiff_writer.setnchannels(1)
203 | aiff_writer.writeframes(raw_data)
204 | aiff_data = aiff_file.getvalue()
205 | finally: # make sure resources are cleaned up
206 | aiff_writer.close()
207 | return aiff_data
208 |
209 | def get_flac_data(self, convert_rate=None, convert_width=None):
210 | """
211 | Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance.
212 |
213 | Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC.
214 |
215 | If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
216 |
217 | If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
218 |
219 | Writing these bytes directly to a file results in a valid FLAC file.
220 | """
221 | assert convert_width is None or (
222 | convert_width % 1 == 0 and 1 <= convert_width <= 3
223 | ), "Sample width to convert to must be between 1 and 3 inclusive"
224 |
225 | if (
226 | self.sample_width > 3 and convert_width is None
227 | ): # resulting WAV data would be 32-bit, which is not convertible to FLAC using our encoder
228 | convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that
229 |
230 | # run the FLAC converter with the WAV data to get the FLAC data
231 | wav_data = self.get_wav_data(convert_rate, convert_width)
232 | flac_converter = get_flac_converter()
233 | if (
234 | os.name == "nt"
235 | ): # on Windows, specify that the process is to be started without showing a console window
236 | startup_info = subprocess.STARTUPINFO()
237 | startup_info.dwFlags |= (
238 | subprocess.STARTF_USESHOWWINDOW
239 | ) # specify that the wShowWindow field of `startup_info` contains a value
240 | startup_info.wShowWindow = (
241 | subprocess.SW_HIDE
242 | ) # specify that the console window should be hidden
243 | else:
244 | startup_info = None # default startupinfo
245 | process = subprocess.Popen(
246 | [
247 | flac_converter,
248 | "--stdout",
249 | "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output
250 | "--best", # highest level of compression available
251 | "-", # the input FLAC file contents will be given in stdin
252 | ],
253 | stdin=subprocess.PIPE,
254 | stdout=subprocess.PIPE,
255 | startupinfo=startup_info,
256 | )
257 | flac_data, stderr = process.communicate(wav_data)
258 | return flac_data
259 |
260 |
261 | def get_flac_converter():
262 | """Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found."""
263 | flac_converter = shutil_which("flac") # check for installed version first
264 | if flac_converter is None: # flac utility is not installed
265 | base_path = os.path.dirname(
266 | os.path.abspath(__file__)
267 | ) # directory of the current module file, where all the FLAC bundled binaries are stored
268 | system, machine = platform.system(), platform.machine()
269 | if system == "Windows" and machine in {
270 | "i686",
271 | "i786",
272 | "x86",
273 | "x86_64",
274 | "AMD64",
275 | }:
276 | flac_converter = os.path.join(base_path, "flac-win32.exe")
277 | elif system == "Darwin" and machine in {
278 | "i686",
279 | "i786",
280 | "x86",
281 | "x86_64",
282 | "AMD64",
283 | }:
284 | flac_converter = os.path.join(base_path, "flac-mac")
285 | elif system == "Linux" and machine in {"i686", "i786", "x86"}:
286 | flac_converter = os.path.join(base_path, "flac-linux-x86")
287 | elif system == "Linux" and machine in {"x86_64", "AMD64"}:
288 | flac_converter = os.path.join(base_path, "flac-linux-x86_64")
289 | else: # no FLAC converter available
290 | raise OSError(
291 | "FLAC conversion utility not available - consider installing the FLAC command line application by running `apt-get install flac` or your operating system's equivalent"
292 | )
293 |
294 | # mark FLAC converter as executable if possible
295 | try:
296 | # handle known issue when running on docker:
297 | # run executable right after chmod() may result in OSError "Text file busy"
298 | # fix: flush FS with sync
299 | if not os.access(flac_converter, os.X_OK):
300 | stat_info = os.stat(flac_converter)
301 | os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
302 | if "Linux" in platform.system():
303 | os.sync() if sys.version_info >= (3, 3) else os.system("sync")
304 |
305 | except OSError:
306 | pass
307 |
308 | return flac_converter
309 |
310 |
311 | def shutil_which(pgm):
312 | """Python 2 compatibility: backport of ``shutil.which()`` from Python 3"""
313 | path = os.getenv("PATH")
314 | for p in path.split(os.path.pathsep):
315 | p = os.path.join(p, pgm)
316 | if os.path.exists(p) and os.access(p, os.X_OK):
317 | return p
318 |
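
To make the export helpers above concrete, here is a minimal usage sketch. It is not code from this repository: the output file names and the 16 kHz capture settings are illustrative assumptions, and the ``AudioData`` instance could equally come from the queue filled by AudioRecorder.

import custom_speech_recognition as sr

# Capture one short utterance (parameters are illustrative)
recognizer = sr.Recognizer()
with sr.Microphone(sample_rate=16000) as source:
    audio = recognizer.listen(source, phrase_time_limit=3)

# get_wav_data() / get_flac_data() return raw bytes, so writing them out is enough
with open("example_audio.wav", "wb") as f:       # hypothetical output path
    f.write(audio.get_wav_data(convert_width=2)) # force 16-bit samples

with open("example_audio.flac", "wb") as f:      # uses the bundled FLAC binary via get_flac_converter()
    f.write(audio.get_flac_data())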
--------------------------------------------------------------------------------
/custom_speech_recognition/exceptions.py:
--------------------------------------------------------------------------------
1 | class SetupError(Exception):
2 | pass
3 |
4 |
5 | class WaitTimeoutError(Exception):
6 | pass
7 |
8 |
9 | class RequestError(Exception):
10 | pass
11 |
12 |
13 | class UnknownValueError(Exception):
14 | pass
15 |
16 |
17 | class TranscriptionNotReady(Exception):
18 | pass
19 |
20 |
21 | class TranscriptionFailed(Exception):
22 | pass
23 |
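
These exception classes are intentionally bare. The sketch below is an illustrative pattern, not code from this repository, showing how a caller of the Whisper recognizer defined later in this package might handle ``SetupError``:

from custom_speech_recognition.exceptions import SetupError
from custom_speech_recognition.recognizers.whisper import recognize_whisper_api

def transcribe_or_empty(recognizer, audio):
    # Return the transcript, or an empty string if the recognizer is not set up.
    try:
        return recognize_whisper_api(recognizer, audio)
    except SetupError as e:
        # raised by recognize_whisper_api when openai is missing or OPENAI_API_KEY is unset
        print(f"[ERROR] Whisper API setup problem: {e}")
        return ""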
--------------------------------------------------------------------------------
/custom_speech_recognition/flac-linux-x86:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/flac-linux-x86
--------------------------------------------------------------------------------
/custom_speech_recognition/flac-linux-x86_64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/flac-linux-x86_64
--------------------------------------------------------------------------------
/custom_speech_recognition/flac-mac:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/flac-mac
--------------------------------------------------------------------------------
/custom_speech_recognition/flac-win32.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/flac-win32.exe
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 1999-2015 Carnegie Mellon University. All rights
2 | reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions
6 | are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright
12 | notice, this list of conditions and the following disclaimer in
13 | the documentation and/or other materials provided with the
14 | distribution.
15 |
16 | This work was supported in part by funding from the Defense Advanced
17 | Research Projects Agency and the National Science Foundation of the
18 | United States of America, and the CMU Sphinx Speech Consortium.
19 |
20 | THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
21 | ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 | PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
24 | NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 |
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/README:
--------------------------------------------------------------------------------
1 | /* ====================================================================
2 | * Copyright (c) 2015 Alpha Cephei Inc. All rights
3 | * reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | *
12 | * 2. Redistributions in binary form must reproduce the above copyright
13 | * notice, this list of conditions and the following disclaimer in
14 | * the documentation and/or other materials provided with the
15 | * distribution.
16 | *
17 | * THIS SOFTWARE IS PROVIDED BY ALPHA CEPHEI INC. ``AS IS'' AND.
18 | * ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,.
19 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALPHA CEPHEI INC.
21 | * NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT.
23 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,.
24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY.
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT.
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE.
27 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 | *
29 | * ====================================================================
30 | *
31 | */
32 |
33 | This directory contains generic US english acoustic model trained with
34 | latest sphinxtrain.
35 |
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/feat.params:
--------------------------------------------------------------------------------
1 | -lowerf 130
2 | -upperf 6800
3 | -nfilt 25
4 | -transform dct
5 | -lifter 22
6 | -feat 1s_c_d_dd
7 | -svspec 0-12/13-25/26-38
8 | -agc none
9 | -cmn current
10 | -varnorm no
11 | -model ptm
12 | -cmninit 40,3,-1
13 |
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/mdef:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/mdef
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/means:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/means
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/noisedict:
--------------------------------------------------------------------------------
1 | SIL
2 | SIL
3 | SIL
4 | [NOISE] +NSN+
5 | [SPEECH] +SPN+
6 |
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/sendump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/sendump
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/transition_matrices:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/transition_matrices
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/variances:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/pocketsphinx-data/en-US/acoustic-model/variances
--------------------------------------------------------------------------------
/custom_speech_recognition/pocketsphinx-data/en-US/language-model.lm.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/pocketsphinx-data/en-US/language-model.lm.bin
--------------------------------------------------------------------------------
/custom_speech_recognition/recognizers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/custom_speech_recognition/recognizers/__init__.py
--------------------------------------------------------------------------------
/custom_speech_recognition/recognizers/whisper.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | from io import BytesIO
5 |
6 | from custom_speech_recognition.audio import AudioData
7 | from custom_speech_recognition.exceptions import SetupError
8 |
9 |
10 | def recognize_whisper_api(
11 | recognizer,
12 | audio_data: "AudioData",
13 | *,
14 | model: str = "whisper-1",
15 | api_key: str | None = None,
16 | ):
17 | """
18 | Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the OpenAI Whisper API.
19 |
20 | This function requires an OpenAI account; visit https://platform.openai.com/signup, then generate an API key in your user settings.
21 |
22 | Detail: https://platform.openai.com/docs/guides/speech-to-text
23 |
24 | Raises a ``speech_recognition.exceptions.SetupError`` exception if there are any issues with the openai installation, or the environment variable is missing.
25 | """
26 | if not isinstance(audio_data, AudioData):
27 | raise ValueError("``audio_data`` must be an ``AudioData`` instance")
28 | if api_key is None and os.environ.get("OPENAI_API_KEY") is None:
29 | raise SetupError("Set environment variable ``OPENAI_API_KEY``")
30 |
31 | try:
32 | import openai
33 | except ImportError:
34 | raise SetupError(
35 | "missing openai module: ensure that openai is set up correctly."
36 | )
37 |
38 | wav_data = BytesIO(audio_data.get_wav_data())
39 | wav_data.name = "SpeechRecognition_audio.wav"
40 |
41 | transcript = openai.Audio.transcribe(model, wav_data, api_key=api_key)
42 | return transcript["text"]
43 |
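
A minimal end-to-end sketch of calling this recognizer, for orientation only: the key loading mirrors keys.env/main.py, while the capture parameters and phrase limit are illustrative assumptions.

import custom_speech_recognition as sr
from custom_speech_recognition.recognizers.whisper import recognize_whisper_api
from dotenv import load_dotenv

load_dotenv("keys.env")  # supplies OPENAI_API_KEY, as main.py does

recognizer = sr.Recognizer()
with sr.Microphone(sample_rate=16000) as source:  # illustrative capture settings
    audio = recognizer.listen(source, phrase_time_limit=5)

# api_key is optional here because OPENAI_API_KEY was loaded into the environment
print(recognize_whisper_api(recognizer, audio))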
--------------------------------------------------------------------------------
/keys.env:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY = 'your key here'
2 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import queue
3 | import threading
4 | import time
5 |
6 | from PyQt5.QtCore import pyqtSlot, QTimer
7 | from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton, QTextEdit, QLineEdit, QLabel, \
8 | QTabWidget, QComboBox, QMessageBox
9 | from PyQt5.QtGui import QFont, QTextCursor
10 |
11 | import AudioRecorder
12 | from AudioTranscriber import AudioTranscriber
13 | from chatbot_utils import GPTChat
14 |
15 | from vector_utils import *
16 |
17 | from dotenv import load_dotenv
18 |
19 | load_dotenv('keys.env')
20 |
21 | audio_queue = queue.Queue()
22 |
23 | user_audio_recorder = AudioRecorder.DefaultMicRecorder()
24 | user_audio_recorder.record_into_queue(audio_queue)
25 |
26 | time.sleep(1)
27 |
28 | speaker_audio_recorder = AudioRecorder.DefaultSpeakerRecorder()
29 | speaker_audio_recorder.record_into_queue(audio_queue)
30 |
31 | global_transcriber = AudioTranscriber(user_audio_recorder.source, speaker_audio_recorder.source)
32 | transcribe = threading.Thread(target=global_transcriber.transcribe_audio_queue, args=(audio_queue,))
33 | transcribe.daemon = True
34 | transcribe.start()
35 |
36 |
37 |
38 | class CustomLineEdit(QLineEdit):
39 | def focusInEvent(self, e):
40 | super().focusInEvent(e)
41 | if self.text() == "Send a message...":
42 | self.clear()
43 |
44 | def focusOutEvent(self, e):
45 | super().focusOutEvent(e)
46 | if self.text().strip() == "":
47 | self.setText("Send a message...")
48 |
49 |
50 | class SetupWindow(QWidget):
51 | def __init__(self):
52 | super().__init__()
53 |
54 | self.setWindowTitle("Setup")
55 | self.setStyleSheet("""
56 | background-color: #424242;
57 | color: #F5F5F5;
58 | selection-background-color: #64B5F6;
59 | font-family: 'Roboto', sans-serif;
60 | """)
61 | self.resize(500, 300)
62 |
63 | self.layout = QVBoxLayout(self)
64 |
65 | self.tabs = QTabWidget()
66 | self.tabs.setStyleSheet("QTabBar::tab { color: black; }")
67 |
68 | self.tab1 = QWidget()
69 | self.tab2 = QWidget()
70 |
71 | self.tabs.addTab(self.tab1, "New")
72 | self.tabs.addTab(self.tab2, "Load")
73 |
74 | self.tab1_layout = QVBoxLayout()
75 | self.tab2_layout = QVBoxLayout()
76 |
77 | self.tab1.setLayout(self.tab1_layout)
78 | self.tab2.setLayout(self.tab2_layout)
79 |
80 | self.welcome_message = QLabel("""
81 | Welcome!
82 | Enter the name of the person you will be speaking to and click start.
83 | """)
84 |
85 | self.speaker_name_label = QLabel("Speaker's Name:")
86 |
87 | self.speaker_name_input = QLineEdit()
88 | self.speaker_name_input.setStyleSheet("""
89 | font-size: 14pt;
90 | color: #F5F5F5;
91 | background-color: #616161;
92 | border: none;
93 | padding: 5px;
94 | """)
95 |
96 | self.start_button = QPushButton("Start")
97 | self.start_button.clicked.connect(self.start_chat)
98 | self.start_button.setStyleSheet("""
99 | QPushButton {
100 | font-size: 14pt;
101 | color: #F5F5F5;
102 | background-color: #616161;
103 | border: none;
104 | padding: 10px;
105 | }
106 | QPushButton:hover {
107 | background-color: #757575;
108 | }
109 | QPushButton:pressed {
110 | background-color: #484848;
111 | }
112 | """)
113 |
114 | self.file_dropdown = QComboBox()
115 | self.file_dropdown.setStyleSheet(self.speaker_name_input.styleSheet())
116 |
117 | self.load_file_button = QPushButton("Load this file")
118 | self.load_file_button.clicked.connect(self.load_file)
119 | self.load_file_button.setStyleSheet(self.start_button.styleSheet())
120 |
121 | self.nofiles_label = QLabel()
122 |
123 | self.tab1_layout.addWidget(self.welcome_message)
124 | self.tab1_layout.addWidget(self.speaker_name_label)
125 | self.tab1_layout.addWidget(self.speaker_name_input)
126 | self.tab1_layout.addWidget(self.start_button)
127 |
128 | self.tab2_layout.addWidget(self.file_dropdown)
129 | self.tab2_layout.addWidget(self.load_file_button)
130 | self.tab2_layout.addWidget(self.nofiles_label)
131 |
132 | self.layout.addWidget(self.tabs)
133 |
134 | self.db = Database("transcripts_for_vectordb")
135 | self.load_files_into_dropdown()
136 |
137 | def start_chat(self):
138 | self.speaker_name = self.speaker_name_input.text()
139 | if self.speaker_name:
140 | self.chat_app = ChatApp(self.speaker_name)
141 | self.chat_app.show()
142 | self.close()
143 |
144 | def load_file(self):
145 | selected_file = self.file_dropdown.currentText()
146 | if selected_file:
147 | name = str(selected_file)
148 | loaded_db = self.db.load_db(name)
149 | self.chat_app = ChatApp(name, loaded_db=loaded_db)
150 | self.chat_app.show()
151 | self.close()
152 |
153 | def load_files_into_dropdown(self):
154 | files = self.db.list_files()
155 |
156 | if files is None:
157 | self.nofiles_label.setText("No files found!")
158 | self.nofiles_label.setFont(QFont("Roboto", 16))
159 | return None
160 | else:
161 | self.file_dropdown.addItems(files)
162 |
163 |
164 |
165 |
166 |
167 |
168 | class ChatApp(QWidget):
169 | def __init__(self, speaker_name, loaded_db=None):
170 | super().__init__()
171 | self.create_widgets()
172 | self.chat = GPTChat()
173 |
174 | self.speaker_name = speaker_name
175 |
176 | self.timer = QTimer()
177 | self.timer.timeout.connect(self.update_transcript)
178 | self.timer.start(3000)
179 |
180 | self.response_timer = QTimer()
181 | self.response_timer.timeout.connect(self.update_placeholder)
182 | self.placeholder_text = ''
183 |
184 | self.db = loaded_db
185 |
186 |
187 |
188 | def create_widgets(self):
189 | self.setWindowTitle("Wingman AI")
190 |
191 | self.setStyleSheet("background-color: #424242;"
192 | "color: #F5F5F5;"
193 | "selection-background-color: #64B5F6")
194 |
195 | self.resize(800, 600)
196 |
197 | self.tab_widget = QTabWidget()
198 | self.tab_widget.setStyleSheet("QTabBar::tab { color: black; }")
199 |
200 |
201 | #Transcript tab
202 |
203 | transcript_tab = QWidget()
204 |
205 | self.transcript_box = QTextEdit()
206 | self.transcript_box.setFont(QFont("Roboto", 10))
207 | self.transcript_box.setReadOnly(True)
208 |
209 | self.recording_label = QLabel()
210 | self.recording_label.setText("Recording.")
211 |
212 | self.recording_timer = QTimer()
213 | self.recording_timer.timeout.connect(self.update_recording_label)
214 | self.recording_timer.start(1000)
215 |
216 | self.save_quit_button = QPushButton("Save and quit")
217 | self.save_quit_button.setFont(QFont("Roboto", 12))
218 | self.save_quit_button.setStyleSheet("""
219 | QPushButton {
220 | font-size: 14pt;
221 | color: #F5F5F5;
222 | background-color: #616161;
223 | border: none;
224 | padding: 10px;
225 | }
226 | QPushButton:hover {
227 | background-color: #757575;
228 | }
229 | QPushButton:pressed {
230 | background-color: #484848;
231 | }
232 | """)
233 | self.save_quit_button.clicked.connect(self.save_and_quit)
234 |
235 | transcript_layout = QVBoxLayout(transcript_tab)
236 | transcript_layout.addWidget(self.transcript_box)
237 | transcript_layout.addWidget(self.recording_label)
238 | transcript_layout.addWidget(self.save_quit_button)
239 |
240 | self.tab_widget.addTab(transcript_tab, "Transcript")
241 |
242 | #Chat tab
243 |
244 | chat_tab = QWidget()
245 |
246 | self.chat_history_box = QTextEdit()
247 | self.chat_history_box.setReadOnly(True)
248 | self.chat_history_box.append(
249 | "Wingman: Hello! I'm Wingman, your personal conversation assistant. I am now monitoring your conversation. How can I help you?")
250 |
251 | self.input_box = CustomLineEdit("Send a message...")
252 | self.input_box.setFont(QFont("Robotica", 12))
253 | self.input_box.setStyleSheet("color: darkgray;")
254 | self.input_box.setMinimumSize(400, 40)
255 |
256 |
257 | self.send_button = QPushButton("Send message")
258 | self.send_button.setFont(QFont("Robotica", 12))
259 | self.send_button.setStyleSheet("""
260 | QPushButton {
261 | font-size: 14pt;
262 | color: #F5F5F5;
263 | background-color: #616161;
264 | border: none;
265 | padding: 10px;
266 | }
267 | QPushButton:hover {
268 | background-color: #757575;
269 | }
270 | QPushButton:pressed {
271 | background-color: #484848;
272 | }
273 | """)
274 | self.send_button.clicked.connect(self.on_send)
275 |
276 | self.response_label = QLabel()
277 |
278 | chat_layout = QVBoxLayout(chat_tab)
279 | chat_layout.addWidget(self.chat_history_box)
280 | chat_layout.addWidget(self.input_box)
281 | chat_layout.addWidget(self.send_button)
282 | chat_layout.addWidget(self.response_label)
283 |
284 | self.tab_widget.addTab(chat_tab, "Chat")
285 |
286 | layout = QVBoxLayout(self)
287 | layout.addWidget(self.tab_widget)
288 |
289 | @pyqtSlot()
290 | def update_transcript(self):
291 | scrollbar = self.transcript_box.verticalScrollBar()
292 | at_bottom = scrollbar.value() == scrollbar.maximum()
293 |
294 | transcript = global_transcriber.get_transcript(speakername=self.speaker_name)
295 | self.transcript_box.setPlainText(transcript)
296 |
297 | if at_bottom:
298 | scrollbar.setValue(scrollbar.maximum())
299 |
300 | def update_recording_label(self):
301 | current_text = self.recording_label.text()
302 | if len(current_text) < 12:
303 | current_text += '.'
304 | else:
305 | current_text = "Recording."
306 | self.recording_label.setText(current_text)
307 |
308 | @pyqtSlot()
309 | def on_send(self):
310 | user_message = self.input_box.text()
311 | if user_message:
312 | self.input_box.clear()
313 | self.chat_history_box.append(
314 | "You: " + user_message)
315 | self.chat_history_box.moveCursor(QTextCursor.End)
316 |
317 | self.placeholder_text = "."
318 | self.response_timer.start(500)
319 |
320 | threading.Thread(target=self.get_response, args=(user_message,)).start()
321 |
322 |
323 | def update_placeholder(self):
324 | if len(self.placeholder_text) < 3:
325 | self.placeholder_text += "."
326 | else:
327 | self.placeholder_text = "."
328 | self.response_label.setText("Getting response" + self.placeholder_text)
329 |
330 | def get_response(self, user_message):
331 | transcript = global_transcriber.get_transcript(speakername=self.speaker_name)
332 |
333 | if self.db:
334 | context = self.db.similarity_search(user_message, k=2)
335 | response = self.chat.message_bot(user_message, transcript, context)
336 | else:
337 | response = self.chat.message_bot(user_message, transcript)
338 |
339 | self.response_timer.stop()
340 | self.response_label.clear()
341 | QApplication.processEvents()
342 | self.chat_history_box.append("Wingman: " + response)
343 | self.chat_history_box.update()
344 | self.chat_history_box.moveCursor(QTextCursor.End)
345 |
346 | def save_transcript(self):
347 | try:
348 | speaker_transcript = global_transcriber.get_speaker_transcript()
349 | db_lock = threading.Lock()
350 | with db_lock:
351 | d = Database('transcripts_for_vectordb')
352 | d.save_or_add_to_transcripts(self.speaker_name, speaker_transcript)
353 | except Exception as e:
354 | print(e)
355 |
356 | def save_and_quit(self):
357 | try:
358 | self.save_transcript()
359 | msgbox = QMessageBox()
360 | msgbox.setIcon(QMessageBox.Information)
361 | msgbox.setText("Transcript saved successfully!")
362 | msgbox.setWindowTitle("Success")
363 | msgbox.setStandardButtons(QMessageBox.Ok)
364 | msgbox.exec_()
365 | QApplication.quit()
366 |
367 | except Exception as e:
368 | print(e)
369 |
370 |
371 |
372 |
373 | if __name__ == "__main__":
374 | app = QApplication(sys.argv)
375 |
376 | setup_window = SetupWindow()
377 | setup_window.show()
378 |
379 | sys.exit(app.exec_())
380 |
381 |
382 |
383 |
384 |
385 |
386 |
387 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/e-johnstonn/wingmanAI/7eda7c145a9a74c44ac791f9a3fc5e0b7b16717d/requirements.txt
--------------------------------------------------------------------------------
/transcripts_for_vectordb/Test/transcript.txt:
--------------------------------------------------------------------------------
1 | Actually, I have to mentally prepare myself to start coding because... I always thought programming was meant to be difficult and boring.
2 |
3 |
--------------------------------------------------------------------------------
/vector_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from langchain.embeddings import OpenAIEmbeddings
4 | from langchain.text_splitter import RecursiveCharacterTextSplitter
5 | from langchain.vectorstores import Chroma
6 |
7 |
8 |
9 |
10 | class Database:
11 | def __init__(self, directory):
12 | self.embeddings = OpenAIEmbeddings()
13 | self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
14 | self.directory = directory
15 | self.files = os.listdir(self.directory)
16 |
17 | def list_files(self):
18 | if len(self.files) == 0:
19 | return None
20 | return self.files
21 |
22 | def save_or_add_to_transcripts(self, name, transcript):
23 | persist_directory = os.path.join(self.directory, name)
24 | if not os.path.exists(persist_directory):
25 | os.makedirs(persist_directory)
26 | transcript_file = os.path.join(persist_directory, "transcript.txt")
27 | with open(transcript_file, 'a') as f:
28 | f.write(transcript + "\n\n")
29 |
30 |
31 | def load_db(self, name):
32 | persist_directory = os.path.join(self.directory, name)
33 | transcript_file = os.path.join(persist_directory, "transcript.txt")
34 | with open(transcript_file, 'r') as f:
35 | transcript = f.read()
36 | split_docs = self.text_splitter.split_text(transcript)
37 | db = Chroma.from_texts(texts=split_docs, embedding=self.embeddings)
38 | return db
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
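
To tie this together, a short usage sketch of ``Database`` as SetupWindow and ChatApp use it. The speaker name "Test" matches the bundled example folder, but the sample transcript line and query string are made up for illustration, and OpenAIEmbeddings still needs OPENAI_API_KEY in the environment.

from dotenv import load_dotenv
from vector_utils import Database

load_dotenv("keys.env")  # OpenAIEmbeddings reads OPENAI_API_KEY from the environment

db = Database("transcripts_for_vectordb")
print(db.list_files())  # e.g. ['Test'], or None when the directory is empty

# Append a speaker's transcript to transcripts_for_vectordb/<name>/transcript.txt
db.save_or_add_to_transcripts("Test", "We talked about getting started with programming.")

# Rebuild an in-memory Chroma index from the saved transcript and query it,
# mirroring ChatApp.get_response(), which calls similarity_search(user_message, k=2)
chroma_db = db.load_db("Test")
for doc in chroma_db.similarity_search("What did we discuss?", k=2):
    print(doc.page_content)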
--------------------------------------------------------------------------------