├── .github
│   └── FUNDING.yml
├── README.md
└── alexa_picovoice_trigger.py

/.github/FUNDING.yml:
--------------------------------------------------------------------------------
# These are supported funding model platforms

github: [shivasiddharth]

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# avs-device-sdk-pi
## Scripts to enable Alexa voice activation using Picovoice Porcupine

*******************************************************************************************************************************
### **If you like this work, find it useful, and would like to get me a :coffee: :smile:** [![paypal](https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif)](https://paypal.me/sidsclass?country.x=IE&locale.x=en_US)

*******************************************************************************************************************************
1. Follow Amazon's official setup guide [here](https://developer.amazon.com/en-US/docs/alexa/avs-device-sdk/raspberry-pi.html) for the installation procedure.
2. After the Alexa installation is complete, install the prerequisites for Picovoice Porcupine using:
```
sudo apt-get update
sudo apt-get install python3-pip wmctrl xdotool
pip3 install pvporcupine
pip3 install pvrecorder
```
3. Download the **alexa_picovoice_trigger.py** file from this git.
4. Create an AccessKey in the Picovoice Console and download the keyword file from the [Picovoice Porcupine git](https://github.com/Picovoice/porcupine/blob/master/resources/keyword_files/raspberry-pi/alexa_raspberry-pi.ppn).
5. For voice activation, open a terminal and enter the following:
```
wmctrl -l
```
Note the window id of the terminal, shown in the leftmost column.
6. Start Alexa's Startsample.sh script from the same terminal.
7. Open the alexa_picovoice_trigger.py script and replace the window id value given [here](https://github.com/shivasiddharth/avs-device-sdk-pi/blob/65858c8e879a08615ee4177f6b0f0288c53c1592/alexa_picovoice_trigger.py#L115) with the id value noted (see the example after these steps).
8. Start the Picovoice Porcupine trigger using the following syntax:
```
python3 /home/pi/alexa_picovoice_trigger.py --access_key ${ACCESS_KEY} --keyword_paths ${KEYWORD_PATH_ONE}
```
9. Now, Alexa can be triggered with the Picovoice Porcupine wake-word engine.
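
To verify the window id and the key injection before starting the trigger, the commands below can be run by hand. This is only a sketch: `0x01a00003` is the placeholder id used in the script and must be replaced with the id that `wmctrl -l` reports for the terminal running Startsample.sh.
```
# List open windows; the window id is in the leftmost column.
wmctrl -l

# Placeholder id: substitute the id of the terminal running Startsample.sh.
# This focuses that terminal and types 't' followed by Return, mirroring what the
# trigger script does each time the wake word is detected.
wmctrl -i -a 0x01a00003 && xdotool key t && xdotool key Return
```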
--------------------------------------------------------------------------------
/alexa_picovoice_trigger.py:
--------------------------------------------------------------------------------
#
# Copyright 2018-2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#

import argparse
import os
import struct
import wave
import subprocess
from datetime import datetime
from threading import Thread

import pvporcupine
from pvrecorder import PvRecorder


class PorcupineDemo(Thread):
    """
    Microphone demo for the Porcupine wake word engine. It creates an input audio stream from a microphone, monitors
    it, and upon detecting the specified wake word(s) prints the detection time and wake word on the console. On
    detection it also brings the Alexa sample app terminal to the foreground and injects a key press to trigger
    listening (see the README). It optionally saves the recorded audio into a file for further debugging.
    """

    def __init__(
            self,
            access_key,
            library_path,
            model_path,
            keyword_paths,
            sensitivities,
            input_device_index=None,
            output_path=None):

        """
        Constructor.

        :param access_key: AccessKey obtained from Picovoice Console (https://picovoice.ai/console/).
        :param library_path: Absolute path to Porcupine's dynamic library.
        :param model_path: Absolute path to the file containing model parameters.
        :param keyword_paths: Absolute paths to keyword model files.
        :param sensitivities: Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A
            higher sensitivity results in fewer misses at the cost of increasing the false alarm rate. If not set, 0.5
            will be used.
        :param input_device_index: Optional argument. If provided, audio is recorded from this input device. Otherwise,
            the default audio input device is used.
        :param output_path: If provided, recorded audio will be stored in this location at the end of the run.
        """

        super(PorcupineDemo, self).__init__()

        self._access_key = access_key
        self._library_path = library_path
        self._model_path = model_path
        self._keyword_paths = keyword_paths
        self._sensitivities = sensitivities
        self._input_device_index = input_device_index

        self._output_path = output_path

    def run(self):
        """
        Opens an input audio stream, creates a Porcupine instance, and monitors the stream for occurrences of the
        wake word(s). It prints the time of detection for each occurrence and the wake word.
        """

        keywords = list()
        for x in self._keyword_paths:
            keyword_phrase_part = os.path.basename(x).replace('.ppn', '').split('_')
            if len(keyword_phrase_part) > 6:
                keywords.append(' '.join(keyword_phrase_part[0:-6]))
            else:
                keywords.append(keyword_phrase_part[0])

        porcupine = None
        recorder = None
        wav_file = None
        try:
            porcupine = pvporcupine.create(
                access_key=self._access_key,
                library_path=self._library_path,
                model_path=self._model_path,
                keyword_paths=self._keyword_paths,
                sensitivities=self._sensitivities)

            recorder = PvRecorder(device_index=self._input_device_index, frame_length=porcupine.frame_length)
            recorder.start()

            if self._output_path is not None:
                wav_file = wave.open(self._output_path, "w")
                wav_file.setparams((1, 2, 16000, 512, "NONE", "NONE"))

            print(f'Using device: {recorder.selected_device}')

            print('Listening {')
            for keyword, sensitivity in zip(keywords, self._sensitivities):
                print('  %s (%.2f)' % (keyword, sensitivity))
            print('}')

            while True:
                pcm = recorder.read()

                if wav_file is not None:
                    wav_file.writeframes(struct.pack("h" * len(pcm), *pcm))

                result = porcupine.process(pcm)
                if result >= 0:
                    print('[%s] Detected %s' % (str(datetime.now()), keywords[result]))
                    # Focus the terminal running the Alexa sample app (replace 0x01a00003 with the
                    # window id reported by `wmctrl -l`, see the README) and send 't' followed by
                    # Return to trigger listening.
                    subprocess.call("wmctrl -i -a 0x01a00003 && xdotool keydown t && xdotool keyup t && xdotool keydown Return && xdotool keyup Return", shell=True)

        except KeyboardInterrupt:
            print('Stopping ...')
        finally:
            if porcupine is not None:
                porcupine.delete()

            if recorder is not None:
                recorder.delete()

            if wav_file is not None:
                wav_file.close()

    @classmethod
    def show_audio_devices(cls):
        devices = PvRecorder.get_audio_devices()

        for i in range(len(devices)):
            print(f'index: {i}, device name: {devices[i]}')


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--access_key',
                        help='AccessKey obtained from Picovoice Console (https://picovoice.ai/console/)')

    parser.add_argument(
        '--keywords',
        nargs='+',
        help='List of default keywords for detection. Available keywords: %s' % ', '.join(sorted(pvporcupine.KEYWORDS)),
        choices=sorted(pvporcupine.KEYWORDS),
        metavar='')

    parser.add_argument(
        '--keyword_paths',
        nargs='+',
        help="Absolute paths to keyword model files. If not set it will be populated from `--keywords` argument")

    parser.add_argument('--library_path', help='Absolute path to dynamic library.', default=pvporcupine.LIBRARY_PATH)

    parser.add_argument(
        '--model_path',
        help='Absolute path to the file containing model parameters.',
        default=pvporcupine.MODEL_PATH)

    parser.add_argument(
        '--sensitivities',
        nargs='+',
        help="Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A higher " +
             "sensitivity results in fewer misses at the cost of increasing the false alarm rate. If not set, 0.5 " +
             "will be used.",
        type=float,
        default=None)

    parser.add_argument('--audio_device_index', help='Index of input audio device.', type=int, default=-1)

    parser.add_argument('--output_path', help='Absolute path to recorded audio for debugging.', default=None)

    parser.add_argument('--show_audio_devices', action='store_true')

    args = parser.parse_args()

    if args.show_audio_devices:
        PorcupineDemo.show_audio_devices()
    else:
        if args.access_key is None:
            raise ValueError("AccessKey (--access_key) is required")
        if args.keyword_paths is None:
            if args.keywords is None:
                raise ValueError("Either `--keywords` or `--keyword_paths` must be set.")

            keyword_paths = [pvporcupine.KEYWORD_PATHS[x] for x in args.keywords]
        else:
            keyword_paths = args.keyword_paths

        if args.sensitivities is None:
            args.sensitivities = [0.5] * len(keyword_paths)

        if len(keyword_paths) != len(args.sensitivities):
            raise ValueError('Number of keywords does not match the number of sensitivities.')

        PorcupineDemo(
            access_key=args.access_key,
            library_path=args.library_path,
            model_path=args.model_path,
            keyword_paths=keyword_paths,
            sensitivities=args.sensitivities,
            output_path=args.output_path,
            input_device_index=args.audio_device_index).run()


if __name__ == '__main__':
    main()

--------------------------------------------------------------------------------
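
If the wrong microphone is picked up, the script's own `--show_audio_devices` and `--audio_device_index` options can be used to select the capture device explicitly. The invocation below is only a sketch: the AccessKey value, keyword path, and device index are placeholders, assuming the keyword file from step 4 of the README was saved under /home/pi.
```
# List the available capture devices and their indices.
python3 /home/pi/alexa_picovoice_trigger.py --show_audio_devices

# Placeholder values: substitute your own AccessKey, keyword path and device index.
python3 /home/pi/alexa_picovoice_trigger.py \
    --access_key "YOUR_PICOVOICE_ACCESS_KEY" \
    --keyword_paths /home/pi/alexa_raspberry-pi.ppn \
    --audio_device_index 1
```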