├── .envrc ├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── Makefile ├── PRIVACY.md ├── README.md ├── TERMS.md ├── dev.sh ├── lambda ├── .python-version ├── config.example.json ├── lambda_function.py ├── llm_intent │ ├── __init__.py │ ├── llm_client.py │ └── utils.py ├── requirements-dev.txt ├── requirements.txt └── tests │ ├── __init__.py │ └── test_utils.py ├── pyproject.toml └── skill-package ├── assets └── images │ └── en-US_largeIcon.png └── interactionModels └── custom └── en-US.json /.envrc: -------------------------------------------------------------------------------- 1 | export VIRTUAL_ENV=."venv" 2 | layout python 3 | 4 | [[ -f .envrc.private ]] && source_env .envrc.private 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/lambda/" 5 | schedule: 6 | interval: daily 7 | - package-ecosystem: "github-actions" 8 | directory: "/" 9 | schedule: 10 | interval: daily 11 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [main] 7 | 8 | jobs: 9 | pre-commit: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: actions/setup-python@v5 14 | with: 15 | python-version-file: lambda/.python-version 16 | - uses: pre-commit/action@v3.0.1 17 | tests: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: actions/setup-python@v5 22 | with: 23 | python-version-file: lambda/.python-version 24 | - name: Install dependencies 25 | run: pip install -r lambda/requirements-dev.txt 26 | - name: Run tests 27 | run: pytest 28 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pipenv 85 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 86 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 87 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 88 | # install all needed dependencies. 89 | #Pipfile.lock 90 | 91 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 92 | __pypackages__/ 93 | 94 | # Celery stuff 95 | celerybeat-schedule 96 | celerybeat.pid 97 | 98 | # SageMath parsed files 99 | *.sage.py 100 | 101 | # Environments 102 | .env 103 | .venv 104 | env/ 105 | venv/ 106 | ENV/ 107 | env.bak/ 108 | venv.bak/ 109 | 110 | # Spyder project settings 111 | .spyderproject 112 | .spyproject 113 | 114 | # Rope project settings 115 | .ropeproject 116 | 117 | # mkdocs documentation 118 | /site 119 | 120 | # mypy 121 | .mypy_cache/ 122 | .dmypy.json 123 | dmypy.json 124 | 125 | # Pyre type checker 126 | .pyre/ 127 | 128 | config.json 129 | 130 | /.idea/ 131 | !/.envrc 132 | /.envrc.private 133 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-yaml 8 | - id: check-toml 9 | - id: check-added-large-files 10 | - repo: https://github.com/pre-commit/mirrors-isort 11 | rev: v5.10.1 12 | hooks: 13 | - id: isort 14 | additional_dependencies: [toml] 15 | - repo: https://github.com/astral-sh/ruff-pre-commit 16 | rev: 'v0.9.1' 17 | hooks: 18 | - id: ruff 19 | args: 20 | - '--fix' 21 | - id: ruff-format 22 | - repo: https://github.com/pre-commit/mirrors-mypy 23 | rev: v1.14.1 24 | hooks: 25 | - id: mypy 26 | additional_dependencies: [types-requests] 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Paulo Truta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without 
limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := release 2 | .ONESHELL: 3 | 4 | BUILD_DIR=build 5 | 6 | BUILD_REPO_DIR=build/hosted 7 | BUILD_PACKAGE_DIR=build/package 8 | 9 | 10 | package: clean 11 | mkdir -p $(BUILD_DIR) 12 | mkdir -p $(BUILD_PACKAGE_DIR) 13 | zip -r $(BUILD_PACKAGE_DIR)/alexa-skill-llm-intent-release.zip lambda -x lambda/\config.example.json -x lambda/\.venv/\* -x "**/__pycache__/**" 14 | 15 | clean: 16 | rm -rf $(BUILD_PACKAGE_DIR) 17 | 18 | dev: clean 19 | python -m venv .venv 20 | . .venv/bin/activate 21 | pip install -r lambda/requirements-dev.txt 22 | 23 | # Hosted skill targets 24 | 25 | list: 26 | @echo "---" 27 | @echo "🎯 Listing available hosted skill targets" 28 | @echo "---" 29 | 30 | @./dev.sh list 31 | 32 | @echo "---" 33 | 34 | new: 35 | @echo "---" 36 | @echo "🎯 Creating a new hosted skill target" 37 | @echo "---" 38 | 39 | @./dev.sh new 40 | 41 | @echo "---" 42 | @echo "✅ Hosted skill created. 
To push repo code, run 'make update'" 43 | @echo "---" 44 | 45 | import: 46 | @echo "---" 47 | @echo "🎯 Initializing hosted skill target with id $(id)" 48 | @echo "---" 49 | 50 | @./dev.sh init $(id) 51 | 52 | @echo "---" 53 | @echo "✅ Hosted skill initialized. To push repo code, run 'make update'" 54 | @echo "---" 55 | 56 | update: 57 | @echo "---" 58 | @echo "🎯 Updating hosted skill target $(skill)" 59 | @echo "---" 60 | 61 | @./dev.sh update $(skill) 62 | 63 | @echo "---" 64 | @echo "✅ Hosted skill $(skill) deployed. Check completion status in the Alexa Developer Console" 65 | @echo "---" 66 | config: 67 | @echo "---" 68 | @echo "🎯 Setting config file and invocation name for hosted skill target $(skill)" 69 | @echo "---" 70 | 71 | @./dev.sh config ${skill} $(file) ${invocation} 72 | 73 | @echo "---" 74 | @echo "✅ Config file and invocation name set for hosted skill target $(skill)" 75 | @echo "---" 76 | 77 | dialog: 78 | @echo "---" 79 | @echo "🎯 Starting dialog for hosted skill target $(skill)" 80 | @echo "---" 81 | 82 | @./dev.sh dialog $(skill) 83 | 84 | @echo "---" 85 | @echo "✅ Dialog Session for hosted skill target $(skill) terminated" 86 | @echo "---" 87 | 88 | debug: 89 | @echo "---" 90 | @echo "🎯 Debugging hosted skill target $(skill)" 91 | @echo "---" 92 | 93 | @./dev.sh debug $(skill) 94 | 95 | @echo "---" 96 | @echo "✅ Debugging session for hosted skill target $(skill) terminated" 97 | @echo "---" 98 | -------------------------------------------------------------------------------- /PRIVACY.md: -------------------------------------------------------------------------------- 1 | # Privacy Policy 2 | 3 | We take your privacy seriously. This Privacy Policy explains how we might collect, use, and share information about you when you use our Alexa skill. Even if we don't gather the data described here, we still want to be forthcoming about what the technology in our skill might have access to. 
4 | 5 | ## Information We Can Collect 6 | 7 | We might collect information about you when you interact with our skill, such as when you make requests or use the features of our skill. This information may include: 8 | 9 | - Your voice inputs and other audio data when you interact with our skill, including when you make requests or use the features of our skill. This may include audio data of your voice, as well as any background noise or conversation that is captured when you interact with our skill. 10 | - Metadata about your interactions with our skill, such as the time and date of your interactions, the features and functions you use, and the content of your requests. 11 | - Your device information, such as the type of device you are using, your device's unique identifier, and other technical information about your device. 12 | 13 | ## How Can We Use Information We Collect 14 | 15 | We might use the information we collect to provide, improve, and develop our skill, as well as for other purposes, such as: 16 | 17 | - To respond to your requests and provide the features and functions of our skill. 18 | - To improve the accuracy and effectiveness of our skill, such as by training our speech recognition algorithms and improving the performance of our skill. 19 | - To analyze and understand how our skill is being used, such as by analyzing usage patterns and user preferences. 20 | 21 | We do not share your personal information with third parties for their own marketing purposes without your consent. We may share your information in the following limited circumstances: 22 | 23 | - With service providers who assist us in providing our skill, such as companies that help us with speech recognition or other technical services. These service providers are contractually obligated to protect your information and may only use your information to provide services to us. 
24 | - With law enforcement or government agencies, if required by law, or in response to a valid legal request. 25 | In the event of a merger, acquisition, or other similar event, we may share your information with a third party, subject to their privacy practices. 26 | 27 | ## Your Choices and Rights 28 | 29 | You have the following choices and rights with respect to your information: 30 | 31 | - You can choose not to provide certain information to us. However, if you do not provide certain information, some of the features and functions of our skill might not work properly. 32 | - You can choose to stop using our skill at any time. If you delete our skill from your device, your information will be deleted from our skill. 33 | - You have the right to access, rectify, erase, restrict, or object to the processing of your information, as well as the right to data portability. You can exercise these rights by contacting us at [insert contact information]. 34 | - You have the right to lodge a complaint with a data protection authority about our collection and use of your information. 35 | 36 | ## Changes to This Privacy Policy 37 | 38 | We may update this Privacy Policy from time to time. We will post any changes on this page and, if the changes are significant, we will provide a more prominent notice (including, for certain services, email notification of privacy policy changes). We encourage you to review this Privacy Policy regularly to stay informed about our collection, use, and sharing of your information. 39 | 40 | ## Contact Us 41 | 42 | If you have any questions or concerns about this Privacy Policy or our collection, use, and sharing of your information, please contact us at via the discussions board in this repository. 43 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 | 5 | # alexa-skill-llm-intent 6 | 7 | [![ci](https://github.com/paulotruta/alexa-skill-llm-intent/actions/workflows/ci.yml/badge.svg)](https://github.com/paulotruta/alexa-skill-llm-intent/actions/workflows/ci.yml) 8 | 9 | An Alexa Skill template that gives you a ready to use skill to start a turn conversation with an AI. Ask a question and get answered with Alexa's soothing voice, powered by ChatGPT or other llm. 10 | 11 | - [alexa-skill-llm-intent](#alexa-skill-llm-intent) 12 | - [Configuration](#configuration) 13 | - [Requirements](#requirements) 14 | - [Setting up Environment variables](#setting-up-environment-variables) 15 | - [Creating an Alexa Skill](#creating-an-alexa-skill) 16 | - [Automated - Using the Makefile (Alexa Hosted Skills Management)](#automated---using-the-makefile-alexa-hosted-skills-management) 17 | - [Create a new Alexa Skill](#create-a-new-alexa-skill) 18 | - [Importing an existing Alexa Skill](#importing-an-existing-alexa-skill) 19 | - [List existing Alexa Hosted Skill targets](#list-existing-alexa-hosted-skill-targets) 20 | - [Setting the Skill configuration file and invocation words](#setting-the-skill-configuration-file-and-invocation-words) 21 | - [Updating the Skill](#updating-the-skill) 22 | - [Debugging Dialog Model](#debugging-dialog-model) 23 | - [Debugging Lambda Function](#debugging-lambda-function) 24 | - [Manual - Using the Alexa Developer Console](#manual---using-the-alexa-developer-console) 25 | - [Advanced - Using the Ask CLI](#advanced---using-the-ask-cli) 26 | - [Usage](#usage) 27 | - [Commands](#commands) 28 | - [Development](#development) 29 | - [Local Development](#local-development) 30 | - [Skill Package](#skill-package) 31 | - [Skill Lambda Function](#skill-lambda-function) 32 | - [Contributing](#contributing) 33 | - [Disclaimer](#disclaimer) 34 | 35 | 36 | ## Configuration 37 | 38 | ### Requirements 39 | 40 | - [Alexa Developer Account](https://developer.amazon.com/alexa) 41 | - [ASK 
CLI](https://developer.amazon.com/en-US/docs/alexa/smapi/quick-start-alexa-skills-kit-command-line-interface.html) 42 | - [OpenAI API schema](https://github.com/openai/openai-openapi) compatible llm provider API url, key, and model name: [Open AI](https://platform.openai.com) / [Anthropic](https://www.anthropic.com/) / [OpenRouter](https://openrouter.ai/) 43 | - Python 3.8 (optional for local development) 44 | - [AWS Account](https://aws.amazon.com/) (optional for advanced deployment) 45 | 46 | ### Setting up Environment variables 47 | 48 | You should setup your configuration file by copying `config.example.json` to `config.json` and filling the required fields: 49 | 50 | - **`invocation_name`** -> The invocation name for the skill 51 | - *example: `gemini flash`* 52 | - **`llm_url` ->** OpenAI OpenAPI Schema Compatible LLM API provider url 53 | - *example: `https://openrouter.ai/api/v1/chat/completions`* 54 | - **`llm_model` ->** Model name/version to use with the provider API 55 | - *example: `google/gemini-2.0-flash-exp:free`* 56 | - **`llm_key`->** Provider API key 57 | - *example: `sk-or-v1-`* 58 | 59 | 60 | 61 | >*ℹ️ Set `llm_model` to `webhook` to proxy the alexa request as a POST call to `llm_api_url`, sending `llm_key` as the `token` key of the json body, together with useful alexa request context.* 62 | 63 | >*⚠️ Note that the invocation name configuration value is only automatically set on deployment using the `(Automated) Makefile` method, and only for the `en-US` locale. 
If you are using the `(Manual) Alexa Developer Console` method, or trying to support multiple locales, you should instead set the `invocationName` value manually in the `skill-package/interactionModels/custom/<locale>.json` files.*
Choose a modeling stack for your skill:** `Interaction Model` 95 | - **? Choose the programming language you will use to code your skill:** `Python` 96 | - **? Choose a method to host your skill's backend resources:** `Alexa Hosted` 97 | 98 | >*⚠️ If you don't choose the specified options on the New Skill Wizard, the process could fail as this template is made to run an Interaction Model skill in Python, while the Makefile method currently only supports Alexa Hosted skills.* 99 | 100 | The skill will start being created: 101 | ``` 102 | 🎯 Creating a new hosted skill target 103 | 104 | Please follow the wizard to start your Alexa skill project -> 105 | ? Choose a modeling stack for your skill: Interaction Model 106 | The Interaction Model stack enables you to define the user interactions with a combination of utterances, intents, and slots. 107 | ? Choose the programming language you will use to code your skill: Python 108 | ? Choose a method to host your skill's backend resources: Alexa-hosted skills 109 | Host your skill code by Alexa (free). 110 | ? Choose the default region for your skill: eu-west-1 111 | ? Please type in your skill name: gemini flash 112 | ? Please type in your folder name for the skill project (alphanumeric): geminiflash 113 | ⠧ Creating your Alexa hosted skill. It will take about a minute. 114 | 115 | (...) 116 | 117 | Lambda code for gemini flash created at 118 | ./lambda 119 | 120 | Skill schema and interactionModels for gemini flash created at 121 | ./skill-package 122 | 123 | The skill has been enabled. 124 | 125 | Hosted skill provisioning finished. Skill-Id: amzn1.ask.skill.b9198cd2-7e05-4119-bc9b-fe264d2b7fe0 126 | Please follow the instructions at https://developer.amazon.com/en-US/docs/alexa/hosted-skills/alexa-hosted-skills-ask-cli.html to learn more about the usage of "git" for Hosted skill. 127 | 🔗 Finished. Current targets: 128 | geminiflash perplexitysearch testapplication 129 | 130 | ✅ Hosted skill created. 
To push repo code, run 'make update' 131 | ``` 132 | 133 | A new Alexa Hosted Skill target will show up in your Alexa Developer account with the provided name, but its code and configuration is from a blank "hello world" project. But it is now ready to be updated with the template code (check the `make update` command below). 134 | 135 | >*⚠️ Due to instabilities on Amazon's infrastrucure side, sometimes this process can hang while the skill is being created. This can result in you seeing the skill in the developer console but not on your machine. Give it an hour, delete the skill and creating a new one again.* 136 | 137 | 138 | #### Importing an existing Alexa Skill 139 | 140 | If you already have an existing Alexa Skill and want to import this template to it (overriding any previous code, model interactions, and actions), you can run: 141 | 142 | ```bash 143 | make init id= 144 | ``` 145 | 146 | This will import your skill as a Alexa Hosted Skill target, which you can then use to update the skill to use this template. 
147 | 148 | >*⚠️ Be aware that if your imported Alexa-Hosted skill contains any custom code or configurations, they will be fully overriten once you run the `make update` command after importing your skill as a target.* 149 | 150 | #### List existing Alexa Hosted Skill targets 151 | 152 | You can list all the existing Alexa Hosted Skill targets being managed by this project by running: 153 | 154 | ```bash 155 | make list 156 | ``` 157 | 158 | This will return a list of `` and the date they were created or imported, for example: 159 | 160 | ``` 161 | perplexitysearch -> Created on Jan 13 02:12 162 | testapplication -> Created on Jan 13 02:45 163 | ``` 164 | 165 | >*ℹ️ These are available in the `build/hosted` folder, and are the target hosted repositories, that can individually be managed by navigating to the respective folder and using the `ask` CLI.* 166 | 167 | #### Setting the Skill configuration file and invocation words 168 | 169 | When your skill was created or imported, it automatically use the `config.json` in the `lambda` directory as its configuration. But you might want to set a different configuration per target hosted skill. Use the following command to set a target configuration file: 170 | 171 | ```bash 172 | make config skill= file= 173 | ``` 174 | 175 | This will make a copy of this file into `/build/hosted/_config.json`, which will be used by the skill when it is updated. The invocation words for the skill are set at update time using the `invokation_name` value in the `config.json` file. 176 | 177 | >*⚠️ The config files in `/build/hosted/_config.json` can also be changed manually before running `make update`.* 178 | 179 | #### Updating the Skill 180 | 181 | After creating a new skill or importing an existing one, you can update the skill to use this template. 
182 | 183 | You can do this by running: 184 | 185 | ```bash 186 | make update skill= 187 | ``` 188 | 189 | This will deploy the code to the Alexa Developer Console and trigger a Model and lambda function build. Once the deployment finishes, it will be ready to use. 190 | 191 | You should also run this every time you make changes to the skill package or the lambda function code, to update the skill in the Alexa Developer Console. 192 | 193 | >*⚠️ Currently this project only allows sync in one direction, from the local repository to the Alexa Developer Console. Any changes made in the Alexa Developer Console will be overwritten by the local repository when you run the update command.* 194 | 195 | #### Debugging Dialog Model 196 | 197 | You can debug the dialog model (using `ask dialog`) for a skill target project by running: 198 | 199 | ```bash 200 | make dialog skill= locale= 201 | ``` 202 | 203 | #### Debugging Lambda Function 204 | 205 | You can debug the lambda function (using `ask run`) for a skill target project by running: 206 | 207 | ```bash 208 | make debug skill= 209 | ``` 210 | 211 | >*❌ This command is not fully tested and might not work properly at the moment. Contributions are welcome 😉* 212 | 213 | >*⚠️ Because of Alexa hosted skills limitations, debugging using `make debug skill=` (or the `ask run` CLI command) is currently only available to customers in the NA region. You will only be able to use the debugger this way if your skill is hosted in one of the US regions.* 214 | 215 | ### Manual - Using the Alexa Developer Console 216 | 217 | >*ℹ️ This method is recommended for beginners, as it requires less configuration and manual steps. Follow this method if you are not familiar with the ASK CLI and want to use the Alexa Developer Console directly.* 218 | 219 | 1. Make sure you the `config.json` file and `invocation_name` value in `skill-package/interactionModels/custom/en-US.json` is setup correctly. 220 | 2. 
Build the upload package by running `make package` (to later import it in the Alexa Developer Console). 221 | 3. Create a new Alexa Skill in the Alexa Developer Console. 222 | 4. Go in the Code tab of the Alexa Developer Console and click "Import Code". 223 | 5. Select the zip file located in the `./build/package/` directory. 224 | 6. Click "Save" and "Build Model". The skill should be ready to use. 225 | 226 | For more information, check the documentation here: [Importing a Skill into the Alexa Developer Console](https://developer.amazon.com/en-US/docs/alexa/hosted-skills/alexa-hosted-skills-create.html#create-console). 227 | 228 | ### Advanced - Using the Ask CLI 229 | 230 | >*ℹ️ This method is not recommended for beginners, as it requires more manual steps and configuration and requires using an AWS account you own to host the lambda function. Only follow this method if you know what you're doing and have previous experience with Alexa Skills development using AWS.* 231 | 232 | Choose a location for your new skill project (not this repository, as it will be cloned). Run the following command in your terminal (at your chosen location) to start a new skill project using this template: 233 | 234 | ```bash 235 | ask new --template-url https://github.com/paulotruta/alexa-skill-llm-intent.git 236 | ``` 237 | 238 | This will use the contents of this repository to create a new Alexa Skill project in your account. Fill the required information in the wizard, and the project will be created. 
239 | 240 | After the project is created, you can deploy it to your Alexa Developer Console by running: 241 | 242 | ```bash 243 | cd llm-intent 244 | ask deploy 245 | ``` 246 | >*⚠️ Before running deploy, make sure you modify the `config.json` file and `invokation_name` value in `skill-package/modelInteractions/custom/en-US.json` with the required configuration for the skill to work.* 247 | 248 | Full Documentation on the Ask CLI can be found [here](https://developer.amazon.com/en-US/docs/alexa/hosted-skills/alexa-hosted-skills-ask-cli.html). 249 | 250 | ## Usage 251 | 252 | Once the skill is created, you can test it in the [Alexa Developer Console](https://developer.amazon.com/alexa/console/ask) or via your Alexa device directly! 253 | 254 | ### Commands 255 | 256 | Once your skill is deployed, you can interact with it using the following commands (from the Test tab in the Alexa Developer Console or your account connected Alexa devices): 257 | 258 | - `Alexa, I want to ask a question` 259 | - `Alexa, ask about our solar system` 260 | - `Alexa, ask to explain the NP theorem` 261 | - `Alexa, open ` 262 | 263 | ## Development 264 | 265 | ### Local Development 266 | 267 | To develop the skill locally, you should activate the virtual environment and install the required dependencies. You can do this by running: 268 | 269 | ```bash 270 | make dev 271 | ``` 272 | 273 | ### Skill Package 274 | 275 | You can modify the skill package by changing the `skill-package/interactionModels/custom/en-US.json` file. This file contains the intents, slots and utterances that the skill will use to interact with the user. 276 | 277 | `skill-package/skill.json` contains the skill metadata, such as the name, description, and invocation name. This is not required to be changed to only run the skill in development mode, but will be necessary if you ever want to use this in a live environment as a published skill. 
278 | 279 | For more information about the `skill-package` structure, check the [Skill Package Format documentation](https://developer.amazon.com/en-US/docs/alexa/smapi/skill-package-api-reference.html#skill-package-format). 280 | 281 | >*When using the `(Automated) Makefile` method to manage Alexa Hosted Skill targets, you can debug their dialog model by using the `make dialog skill=` command, which will open the dialog model test CLI for that specific skill.* 282 | 283 | ### Skill Lambda Function 284 | 285 | The skill code is a python lambda function and is located in the `lambda/` folder. The main file is `lambda_function.py`, which contains the Lambda handlerfor the supported intents, and is the entrypoint for the rest of the code. 286 | 287 | >*ℹ️ When using the `(Automated) Makefile` method to manage Alexa Hosted Skill targets, you can debug the lambda function by using the `make debug skill=` command, which enables you to test your skill code locally against your skill invocations by routing requests to your developer machine. This enables you to verify changes quickly to skill code as you can test without needing to deploy skill code to Lambda.* 288 | 289 | >*⚠️ Because of Alexa hosted skills limitations, debugging using `make debug skill=` (or the `ask run` CLI command) is currently only available to customers in the NA region. You will only be able to use the debugger this way if your skill is hosted in one of the US regions.* 290 | 291 | # Contributing 292 | 293 | Feel free to contribute to this project by opening issues or pull requests. I'm open to suggestions to improve the code, especially to fix any bugs. A good place to start is checking if there are any issues with the label `good first issue` or `help wanted`. 294 | 295 | # Disclaimer 296 | 297 | Use at your own risk. This is a template and should be used as a starting point for your own Alexa Skill. 
The code is provided as is and I am not responsible for any misuse or damages caused by this code. 298 | -------------------------------------------------------------------------------- /TERMS.md: -------------------------------------------------------------------------------- 1 | # Terms of Use 2 | 3 | These Terms of Use ("Terms") apply to your use of our Alexa skill ("Skill"). 4 | By using our Skill, you agree to be bound by these Terms and our Privacy Policy. 5 | If you do not agree to these Terms, do not use our Skill. 6 | 7 | ## Changes to These Terms 8 | 9 | We may update these Terms from time to time. If we make changes to these Terms, we will post the updated terms on this page and update the "Effective Date" below. If the changes are significant, we may provide a more prominent notice (including, for certain services, email notification of changes to the Terms). We encourage you to review these Terms regularly to stay informed about our practices and your legal rights and obligations. 10 | 11 | ## Your Use of Our Skill 12 | 13 | You may use our Skill for personal, non-commercial purposes only. You may not use our Skill for any illegal or unauthorized purpose, or in any way that violates these Terms or any applicable laws. 14 | 15 | ## Intellectual Property 16 | 17 | The content and materials on our Skill, including the text, graphics, logos, and other media, are the property of us or our licensors and are protected by copyright and other intellectual property laws. You may not use any content or materials on our Skill for any commercial purpose without our express written consent. 18 | 19 | ## Disclaimer of Warranties 20 | 21 | Our Skill and all content and materials on it are provided on an "as is" and "as available" basis. We make no representations or warranties of any kind, express or implied, as to the operation of our Skill, the accuracy or completeness of the content and materials on it, or the reliability of any information on it. 
We disclaim all warranties, express or implied, including but not limited to implied warranties of merchantability, fitness for a particular purpose, and non-infringement. 22 | 23 | ## Limitation of Liability 24 | 25 | In no event shall we be liable for any damages arising out of or in connection with your use of our Skill or any content or materials on it. This includes, without limitation, direct, indirect, incidental, consequential, and punitive damages, as well as damages for loss of profits, data, or other intangible losses. 26 | 27 | ## Indemnification 28 | 29 | You agree to indemnify and hold us and our affiliates, officers, agents, and employees harmless from any and all claims, liabilities, damages, and expenses, including reasonable attorneys' fees, arising out of or in connection with your use of our Skill or any breach of these Terms by you. 30 | 31 | ## Miscellaneous 32 | 33 | If any provision of these Terms is found to be invalid or unenforceable, that provision shall be enforced to the maximum extent possible, and the remaining provisions shall remain in full force and effect. These Terms, together with our Privacy Policy, constitute the entire agreement between you and us with respect to your use of our Skill. 34 | 35 | Effective Date: December 08, 2022. 36 | -------------------------------------------------------------------------------- /dev.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # A script to aid in the development of the project while integrating with Alexa Hosted Skills 4 | # Usage: ./dev.sh 5 | 6 | # Exit on error 7 | set -e 8 | 9 | # Check if the command is provided 10 | if [ -z "$1" ]; then 11 | echo "Usage: ./dev.sh " 12 | exit 1 13 | fi 14 | 15 | # Check if the ask-cli is installed 16 | if ! command -v ask &> /dev/null 17 | then 18 | echo "❌ ask-cli could not be found.
Please install ask-cli before running this script" 19 | exit 20 | fi 21 | 22 | update_hosted_skill_repo(){ 23 | rsync -av --delete ../../../lambda ./ 24 | rsync -av --delete ../../../skill-package/interactionModels ./skill-package 25 | rsync -av --delete ../../../skill-package/assets ./skill-package 26 | } 27 | 28 | resync_hosted_skill_repo_environments(){ 29 | git checkout dev 30 | git pull --rebase 31 | git merge master 32 | # If everything is managed via the script, then we can just push (no conflicts) 33 | git push --no-verify 34 | git checkout master 35 | } 36 | 37 | # Check if the command is valid, commands can be: 38 | # -> new: Create a new blank target skill project as an Alexa hosted skill (requires configured ask-cli) 39 | # -> init : Initialize a target skill project with an existing skill id (Must be a custom alexa hosted skill) 40 | # -> list: List all the available target skill projects 41 | # -> update : Update the skill hosted repo with the code from the local repo (deploy) 42 | # -> config : Configure the target skill to be deployed using a copy of the provided config file 43 | # -> dialog : Debug the dialog model for the target skill 44 | # -> debug : Debug the code for the skill hosted repo 45 | 46 | COMMAND=$1 47 | 48 | case $COMMAND in 49 | new) 50 | 51 | mkdir -p build 52 | mkdir -p build/hosted 53 | cd build/hosted 54 | 55 | ask new 56 | 57 | echo "🔗 Finished. Current targets:" 58 | ls 59 | ;; 60 | 61 | init) 62 | 63 | if [ -z "$2" ]; then 64 | echo "Usage: ./dev.sh init " 65 | exit 1 66 | fi 67 | SKILL_ID=$2 68 | 69 | mkdir -p build 70 | mkdir -p build/hosted 71 | cd build/hosted 72 | pwd 73 | ask init --hosted-skill-id $SKILL_ID 74 | 75 | echo "🔗 Finished. 
Current targets:" 76 | ls 77 | ;; 78 | 79 | update) 80 | cd build/hosted 81 | # Hosted build directory can be given as an argument, otherwise its $(ls -d */ | grep -v build | head -n 1) 82 | HOSTED_BUILD_DIR=${2:-$(ls -d */ | grep -v build | head -n 1)} 83 | cd $HOSTED_BUILD_DIR 84 | echo "Updating hosted skill target repo: $HOSTED_BUILD_DIR" 85 | 86 | update_hosted_skill_repo > /dev/null 2>&1 87 | 88 | # Copy over the config file if it exists in ../<$HOSTED_BUILD_DIR>_config.json 89 | if [ -f "../${HOSTED_BUILD_DIR}_config.json" ]; then 90 | cp "../${HOSTED_BUILD_DIR}_config.json" "./lambda/config.json" 91 | else 92 | # Create one from the default config 93 | cp "./lambda/config.json" "../${HOSTED_BUILD_DIR}_config.json" 94 | fi 95 | 96 | # Copy over the invocation name if it exists as "invocation_name" in ../<$HOSTED_BUILD_DIR>_config.json 97 | INVOCATION_NAME=$(cat "../${HOSTED_BUILD_DIR}_config.json" | jq -r '.invocation_name') 98 | if [ -z "$INVOCATION_NAME" ]; then 99 | echo "Invocation name not found in the config file. Skipping invocation name update" 100 | else 101 | echo "Updating invocation name to $INVOCATION_NAME" 102 | if [ "$(uname)" = "Darwin" ]; then 103 | sed -i '' "s/\"invocationName\": \"[^\"]*\"/\"invocationName\": \"$INVOCATION_NAME\"/" skill-package/interactionModels/custom/en-US.json 104 | else 105 | sed -i "s/\"invocationName\": \"[^\"]*\"/\"invocationName\": \"$INVOCATION_NAME\"/" skill-package/interactionModels/custom/en-US.json 106 | fi 107 | fi 108 | 109 | git add . 110 | git commit -a -m "Trigger update from alexa-skill-llm-intent" --no-verify && git push 111 | 112 | resync_hosted_skill_repo_environments 113 | echo "🔗 Finished updating $HOSTED_BUILD_DIR. 
" 114 | ;; 115 | 116 | list) 117 | cd build/hosted 118 | ls -ld */ | awk '{sub(/\/$/, "", $9); print $9 " -> Created on " $6 " " $7 " " $8}' 119 | ;; 120 | 121 | config) 122 | echo "🔗 Setting config file and invocation name for hosted skill" 123 | SKILL_SLUG=${2} 124 | CONFIG_FILE=${3} 125 | 126 | # Check if the config file exists in the provided path 127 | if [ -f "$CONFIG_FILE" ]; then 128 | cp "$CONFIG_FILE" "./build/hosted/${SKILL_SLUG}_config.json" 129 | else 130 | echo "Config file not found. Please provide the config file path" 131 | exit 1 132 | fi 133 | 134 | echo "🔗 Finished setting config file and invocation name for $SKILL_SLUG. Run 'make update skill=$SKILL_SLUG' to apply changes." 135 | ;; 136 | 137 | dialog) 138 | echo "Debugging the dialog model from the skill hosted repo" 139 | HOSTED_BUILD_DIRNAME=${2} 140 | DIALOG_LOCALE=${3:-en-US} 141 | cd build/hosted/$HOSTED_BUILD_DIRNAME 142 | ask dialog --locale $DIALOG_LOCALE 143 | ;; 144 | 145 | debug) 146 | echo "Debugging the code from the skill hosted repo" 147 | HOSTED_BUILD_DIRNAME=${2} 148 | echo $HOSTED_BUILD_DIRNAME 149 | cd build/hosted/$HOSTED_BUILD_DIRNAME 150 | ask run 151 | ;; 152 | 153 | *) 154 | echo "Invalid command: $COMMAND" 155 | echo "Usage: ./dev.sh " 156 | echo "Commands can be: new, init, update, deploy" 157 | exit 1 158 | ;; 159 | esac 160 | -------------------------------------------------------------------------------- /lambda/.python-version: -------------------------------------------------------------------------------- 1 | 3.8 2 | -------------------------------------------------------------------------------- /lambda/config.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "llm_url": "https://openrouter.ai/api/v1/chat/completions", 3 | "llm_model": "google/gemini-2.0-flash-exp:free", 4 | "llm_key": "", 5 | "invocation_name": "gemini flash" 6 | } 7 | 
-------------------------------------------------------------------------------- /lambda/lambda_function.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Alexa skill that uses a QuestionIntentHandler to proxy a request to a 4 | # LLM API or Webhook, and provide the answer. 5 | # Developed by @paulotruta and @inverse 6 | # as an exploration of voice powered LLM during the early days. 7 | 8 | # Uses Alexa Skills Kit SDK for Python. 9 | # Please visit https://alexa.design/cookbook for additional examples on 10 | # implementing Alexa features! 11 | 12 | import logging 13 | 14 | import requests # noqa: E402 15 | from ask_sdk_core import utils as ask_utils 16 | from ask_sdk_core.dispatch_components import ( 17 | AbstractExceptionHandler, 18 | AbstractRequestHandler, 19 | ) 20 | from ask_sdk_core.handler_input import HandlerInput 21 | from ask_sdk_core.skill_builder import SkillBuilder 22 | from ask_sdk_model import Response 23 | from llm_intent.llm_client import LLMClient 24 | from llm_intent.utils import CannedResponse, load_config 25 | 26 | logger = logging.getLogger(__name__) 27 | logger.setLevel(logging.INFO) 28 | 29 | config = load_config() 30 | canned_response = CannedResponse("en-US") 31 | 32 | LLM_URL = config["llm_url"] 33 | LLM_KEY = config["llm_key"] 34 | LLM_MODEL = config["llm_model"] 35 | LLM_SYSTEM_PROMPT = config.get( 36 | "llm_system_prompt", 37 | """ 38 | You are a helpful AI assistant that responds by voice. 39 | Your answers should be simple and quick. 40 | Don't speak back for more than a couple of sentences. 41 | If you need to say more things, say that you're happy to continue, 42 | and wait for the user to ask you to continue. 43 | Remember, your objective is to reply as if your are having a natural 44 | conversation, so be relatively brief, and keep that in mind when replying. 45 | You were created by jpt.land as part of a personal exploration project. 
46 | Paulo Truta is a software engineer that worked hard to make you easy! 47 | If the user asks about you, tell him you are the Alexa AI Skill. 48 | You're an helpful and funny artificial powered assistant, 49 | ready to answer any questions a person may have, right on Amazon Alexa. 50 | """, 51 | ) 52 | 53 | 54 | class LLMQuestionProxy: 55 | """Handler to communicate with an LLM via API or Webhook. 56 | Ask a question and it shall provide an answer.""" 57 | 58 | def __init__(self, llm_client: LLMClient): 59 | self.llm_client = llm_client 60 | 61 | def api_request(self, question: str) -> dict: 62 | """Send a request to the LLM API and return the response.""" 63 | logger.info( 64 | "API Request - " + self.llm_client.url + " - " + self.llm_client.model 65 | ) 66 | 67 | try: 68 | response = self.llm_client.api_request(LLM_SYSTEM_PROMPT, question) 69 | 70 | logger.info(response) 71 | 72 | return {"message": response["choices"][0]["message"]["content"]} 73 | except requests.exceptions.RequestException as e: 74 | logger.error(f"HTTP Request failed: {e}") 75 | # Return an error message, but only say part of the error message 76 | return { 77 | "message": f"Sorry, I encountered an error thinking about your request: {str(e)[:100]}" 78 | } 79 | 80 | def webhook_request(self, question: str, context: dict) -> dict: 81 | """Send a request to the LLM API and return the response.""" 82 | try: 83 | response = self.llm_client.webhook_request(question, context) 84 | 85 | return response 86 | except requests.exceptions.RequestException as e: 87 | logger.error(f"HTTP Request failed: {e}") 88 | # Return an error message, but only say part of the error message 89 | return { 90 | "message": f"Sorry, I encountered an error processing your \ 91 | request: {str(e)[:100]}" 92 | } 93 | 94 | def ask(self, question: str, context: dict = {}) -> dict: 95 | """Ask a question and get a response.""" 96 | if LLM_MODEL != "webhook": 97 | logger.info("Using API request") 98 | return 
self.api_request(question) 99 | else: 100 | logger.info("Using Webhook request") 101 | return self.webhook_request(question, context) 102 | 103 | 104 | class BaseRequestHandler(AbstractRequestHandler): 105 | """Base class for request handlers.""" 106 | 107 | question = LLMQuestionProxy(LLMClient(LLM_URL, LLM_KEY, LLM_MODEL)) 108 | 109 | def can_handle(self, handler_input: HandlerInput) -> bool: 110 | return True 111 | 112 | def handle(self, handler_input: HandlerInput) -> Response: 113 | raise NotImplementedError 114 | 115 | 116 | class LaunchRequestHandler(BaseRequestHandler): 117 | """ 118 | Handler for Skill Launch. 119 | This is the first handler that is called when the skill is invoked 120 | directly. Will only be invoked if the intent does not have 121 | a LaunchRequest handling in its config. 122 | """ 123 | 124 | def can_handle(self, handler_input: HandlerInput) -> bool: 125 | return ask_utils.is_request_type("LaunchRequest")(handler_input) 126 | 127 | def handle(self, handler_input: HandlerInput) -> Response: 128 | # TODO: Implement something a bit more dynamic (randomized from a list) 129 | speak_output = canned_response.get_launch_handler_phrase() 130 | 131 | return ( 132 | handler_input.response_builder.speak(speak_output) 133 | .ask(speak_output) 134 | .response 135 | ) 136 | 137 | 138 | class QuestionIntentHandler(BaseRequestHandler): 139 | """ 140 | Main Handler for turn chat question/answer flow. Receive a question and provides an answer. 
141 | """ 142 | 143 | def can_handle(self, handler_input: HandlerInput) -> bool: 144 | return ask_utils.is_intent_name("QuestionIntent")(handler_input) 145 | 146 | def handle(self, handler_input: HandlerInput) -> Response: 147 | # Get the question from the user 148 | slots = handler_input.request_envelope.request.intent.slots 149 | 150 | voice_prompt = slots["searchQuery"].value 151 | 152 | logger.info(handler_input.request_envelope) 153 | logger.info("User requests: " + voice_prompt) 154 | 155 | context_data = { 156 | "user_id": handler_input.request_envelope.session.user.user_id, 157 | "device_id": handler_input.request_envelope.context.system.device.device_id, 158 | "application_id": handler_input.request_envelope.context.system.application.application_id, 159 | "api_access_token": handler_input.request_envelope.context.system.api_access_token, 160 | "api_endpoint": handler_input.request_envelope.context.system.api_endpoint, 161 | "locale": handler_input.request_envelope.request.locale, 162 | "intent": handler_input.request_envelope.request.intent.name, 163 | } 164 | 165 | logger.info(context_data) 166 | 167 | # Ask the LLM for a response 168 | response = self.question.ask(voice_prompt, context_data) 169 | 170 | logger.info(response) 171 | logger.info("LLM Response: " + response["message"]) 172 | 173 | # Speak the response or fallback message 174 | # TODO: Implement something a bit more dynamic (randomized from a list) 175 | 176 | speak_output = response.get("message", canned_response.get_no_message_phrase()) 177 | return ( 178 | handler_input.response_builder.speak(speak_output) 179 | .ask(canned_response.get_reprompt_phrase()) 180 | .response 181 | ) 182 | 183 | 184 | class HelpIntentHandler(BaseRequestHandler): 185 | """Handler for Help Intent.""" 186 | 187 | def can_handle(self, handler_input: HandlerInput) -> bool: 188 | return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input) 189 | 190 | def handle(self, handler_input: HandlerInput) -> Response: 
191 | speak_output = canned_response.get_help_phrase() 192 | 193 | return ( 194 | handler_input.response_builder.speak(speak_output) 195 | .ask(canned_response.get_reprompt_phrase()) 196 | .response 197 | ) 198 | 199 | 200 | class CancelOrStopIntentHandler(BaseRequestHandler): 201 | """Single handler for Cancel and Stop Intent.""" 202 | 203 | def can_handle(self, handler_input: HandlerInput) -> bool: 204 | return ask_utils.is_intent_name("AMAZON.CancelIntent")( 205 | handler_input 206 | ) or ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input) 207 | 208 | def handle(self, handler_input: HandlerInput) -> Response: 209 | # TODO: Implement something a bit more dynamic (randomized from a list) 210 | speak_output = canned_response.get_goodbye_phrase() 211 | return handler_input.response_builder.speak(speak_output).response 212 | 213 | 214 | class FallbackIntentHandler(BaseRequestHandler): 215 | """Single handler for Fallback Intent.""" 216 | 217 | def can_handle(self, handler_input: HandlerInput) -> bool: 218 | return ask_utils.is_intent_name("AMAZON.FallbackIntent")(handler_input) 219 | 220 | def handle(self, handler_input: HandlerInput) -> Response: 221 | logger.info("In FallbackIntentHandler") 222 | 223 | # TODO: Find a way to get the last question asked 224 | # (utterance that triggered this). 225 | # Due to the way the fallbackintenthandler is structured, 226 | # this does not seem possible atm. 
227 | voice_prompt = canned_response.get_fallback_handler_phrase() 228 | logger.info("Response: " + voice_prompt) 229 | 230 | speech = voice_prompt 231 | reprompt = canned_response.get_reprompt_phrase() 232 | 233 | return handler_input.response_builder.speak(speech).ask(reprompt).response 234 | 235 | 236 | class SessionEndedRequestHandler(BaseRequestHandler): 237 | """Handler for Session End.""" 238 | 239 | def can_handle(self, handler_input: HandlerInput) -> bool: 240 | return ask_utils.is_request_type("SessionEndedRequest")(handler_input) 241 | 242 | def handle(self, handler_input: HandlerInput) -> Response: 243 | return handler_input.response_builder.response 244 | 245 | 246 | class IntentReflectorHandler(BaseRequestHandler): 247 | """ 248 | The intent reflector is used for interaction 249 | model testing and debugging. 250 | 251 | It will simply repeat the intent the user said. 252 | You can create custom handlers for your intents by defining them above, 253 | then also adding them to the request 254 | handler chain below. 255 | """ 256 | 257 | def can_handle(self, handler_input: HandlerInput) -> bool: 258 | return ask_utils.is_request_type("IntentRequest")(handler_input) 259 | 260 | def handle(self, handler_input: HandlerInput) -> Response: 261 | intent_name = ask_utils.get_intent_name(handler_input) 262 | speak_output = "You just triggered " + intent_name + "." 263 | 264 | return ( 265 | handler_input.response_builder.speak(speak_output) 266 | .ask(canned_response.get_reprompt_phrase()) 267 | .response 268 | ) 269 | 270 | 271 | class CatchAllExceptionHandler(AbstractExceptionHandler): 272 | """Generic error handling to capture any syntax or routing errors. 273 | 274 | If you receive an error stating the request handler chain is not found, 275 | you have not implemented a handler for the intent being invoked or included 276 | it in the skill builder below. 
277 | """ 278 | 279 | def can_handle(self, handler_input: HandlerInput, exception: Exception) -> bool: 280 | return True 281 | 282 | def handle(self, handler_input: HandlerInput, exception: Exception) -> Response: 283 | logger.error(exception, exc_info=True) 284 | 285 | speak_output = canned_response.get_fallback_handler_phrase() 286 | 287 | return handler_input.response_builder.speak(speak_output).response 288 | 289 | 290 | # The SkillBuilder object acts as the entry point for your skill 291 | # It is basically the router for request / responses 292 | # Declaration order matters - they're processed top to bottom. 293 | 294 | sb = SkillBuilder() 295 | 296 | # first add all the request handlers 297 | 298 | sb.add_request_handler(LaunchRequestHandler()) 299 | sb.add_request_handler(QuestionIntentHandler()) 300 | sb.add_request_handler(HelpIntentHandler()) 301 | sb.add_request_handler(CancelOrStopIntentHandler()) 302 | sb.add_request_handler(FallbackIntentHandler()) 303 | sb.add_request_handler(SessionEndedRequestHandler()) 304 | 305 | # making sure IntentReflectorHandler is last 306 | # (doesn't override your custom handlers) 307 | 308 | sb.add_request_handler(IntentReflectorHandler()) 309 | 310 | # finally add the exception handler 311 | sb.add_exception_handler(CatchAllExceptionHandler()) 312 | 313 | lambda_handler = sb.lambda_handler() 314 | -------------------------------------------------------------------------------- /lambda/llm_intent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulotruta/alexa-skill-llm-intent/99638d84fc12130e05cfd426b0b0b266234835a1/lambda/llm_intent/__init__.py -------------------------------------------------------------------------------- /lambda/llm_intent/llm_client.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | 4 | class LLMClient: 5 | def __init__(self, url: str, api_key: str, model:
str): 6 | self.url = url 7 | self.api_key = api_key 8 | self.model = model 9 | 10 | def api_request(self, prompt: str, question: str) -> dict: 11 | payload = { 12 | "model": self.model, 13 | "messages": [ 14 | { 15 | "role": "system", 16 | "content": [{"type": "text", "text": prompt}], 17 | }, 18 | { 19 | "role": "user", 20 | "content": [{"type": "text", "text": question}], 21 | }, 22 | ], 23 | } 24 | 25 | response = requests.post( 26 | url=self.url, 27 | headers=self._api_headers(), 28 | json=payload, 29 | ) 30 | 31 | response.raise_for_status() 32 | 33 | return response.json() 34 | 35 | def webhook_request(self, question: str, context: dict) -> dict: 36 | local_payload = { 37 | "token": self.api_key, 38 | "question": question, 39 | } 40 | 41 | payload = {**context, **local_payload} 42 | response = requests.post(self.url, json=payload) 43 | response.raise_for_status() 44 | 45 | return response.json() 46 | 47 | def _api_headers(self) -> dict: 48 | return { 49 | "Authorization": f"Bearer {self.api_key}", 50 | "Content-Type": "application/json", 51 | "HTTP_Referer": "wordpress.jpt.land/ai", 52 | "X-Title": "jpt.land AI", 53 | } 54 | -------------------------------------------------------------------------------- /lambda/llm_intent/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import random 3 | from os.path import exists 4 | 5 | CONFIG_FILE = "config.json" 6 | 7 | 8 | def load_config() -> dict: 9 | if not exists(CONFIG_FILE): 10 | raise ValueError("Config file does not exist") 11 | 12 | with open(CONFIG_FILE) as f: 13 | return json.load(f) 14 | 15 | 16 | class CannedResponse: 17 | RESPONSES = { 18 | "en-US": { 19 | "helpPhrases": [ 20 | "I'm a friendly and powerful AI assistant tool an I can answer any questions you have in a pertinent way! How can I help?", 21 | "I'm jpt.land AI, here to help you with any questions you have! 
What can I do for you?", 22 | ], 23 | "noMessagePhrases": [ 24 | "Hum, I'm not sure what to say.", 25 | "Looks like I have no answer for that.", 26 | ], 27 | "goodbyePhrases": [ 28 | "Goodbye! Have a great day!", 29 | "Goodbye! I hope I was able to help you!", 30 | "Goodbye! I'm here if you need me!", 31 | "Goodbye! I'm always here to help you!", 32 | "Goodbye! Was a pleasure to help you!", 33 | ], 34 | "repromptPhrases": [ 35 | "Anything else I can help you with?", 36 | "What else can I help you with?", 37 | "What else would you like to know?", 38 | "Is there anything else you need help with?", 39 | "What else can I assist you with?", 40 | "If you wanna know more, just ask!", 41 | "Anything else you need help with?", 42 | "Can I help you any further?", 43 | "What else can I do for you?", 44 | ], 45 | "launchHandlerPhrases": [ 46 | "Sure. What's your question?", 47 | "Sure. What can I help you with?", 48 | "Sure. What do you need help with?", 49 | "Sure. What can I assist you with?", 50 | "Sure. What's your query?", 51 | "Sure. What's your request?", 52 | "Sure. How can I be helpful?", 53 | ], 54 | "fallbackHandlerPhrases": [ 55 | "I'm sorry, I didn't understand that. Let's try again? Just say 'yes' to continue.", 56 | "I'm not sure what you're asking. Let's try again? Just say 'yes' to continue.", 57 | "I'm sorry, I didn't catch that. Let's try again? 
Just say 'yes' to continue.", 58 | ], 59 | } 60 | } 61 | 62 | def __init__(self, locale: str): 63 | if locale not in self.RESPONSES: 64 | locale = "en-US" 65 | 66 | self.locale = locale 67 | self.data = self.RESPONSES[locale] 68 | 69 | def get_random_data_item(self, key: str) -> str: 70 | return random.choice(self.data[key]) 71 | 72 | def get_help_phrase(self) -> str: 73 | return self.get_random_data_item("helpPhrases") 74 | 75 | def get_no_message_phrase(self) -> str: 76 | return self.get_random_data_item("noMessagePhrases") 77 | 78 | def get_goodbye_phrase(self) -> str: 79 | return self.get_random_data_item("goodbyePhrases") 80 | 81 | def get_reprompt_phrase(self) -> str: 82 | return self.get_random_data_item("repromptPhrases") 83 | 84 | def get_launch_handler_phrase(self) -> str: 85 | return self.get_random_data_item("launchHandlerPhrases") 86 | 87 | def get_fallback_handler_phrase(self) -> str: 88 | return self.get_random_data_item("fallbackHandlerPhrases") 89 | 90 | def get_response(self, key: str) -> str: 91 | return self.get_random_data_item(key) 92 | 93 | def get_response_list(self, key: str) -> list: 94 | return self.data.get(key, []) 95 | -------------------------------------------------------------------------------- /lambda/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | pre-commit==3.5.0 3 | pytest==8.3.4 4 | ask-sdk-local-debug==1.1.0 5 | -------------------------------------------------------------------------------- /lambda/requirements.txt: -------------------------------------------------------------------------------- 1 | urllib3==1.26.15 2 | ask-sdk-core==1.19.0 3 | -------------------------------------------------------------------------------- /lambda/tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/paulotruta/alexa-skill-llm-intent/99638d84fc12130e05cfd426b0b0b266234835a1/lambda/tests/__init__.py -------------------------------------------------------------------------------- /lambda/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from unittest.mock import mock_open, patch 3 | 4 | from llm_intent.utils import CONFIG_FILE, CannedResponse, load_config 5 | 6 | 7 | class TestLoadConfig(TestCase): 8 | @patch("builtins.open", new_callable=mock_open, read_data='{"key": "value"}') 9 | @patch("llm_intent.utils.exists", return_value=True) 10 | def test_load_config_success(self, mock_exists, mock_open_file): 11 | """Test loading configuration successfully.""" 12 | expected_config = {"key": "value"} 13 | config = load_config() 14 | self.assertEqual(config, expected_config) 15 | mock_exists.assert_called_once_with(CONFIG_FILE) 16 | mock_open_file.assert_called_once_with(CONFIG_FILE) 17 | 18 | @patch("llm_intent.utils.exists", return_value=False) 19 | def test_load_config_file_not_found(self, mock_exists): 20 | """Test loading configuration when file does not exist.""" 21 | with self.assertRaises(ValueError) as context: 22 | load_config() 23 | self.assertEqual(str(context.exception), "Config file does not exist") 24 | mock_exists.assert_called_once_with(CONFIG_FILE) 25 | 26 | 27 | class TestCannedResponse(TestCase): 28 | def test_invalid_locale(self): 29 | canned_response = CannedResponse("invalid_locale") 30 | self.assertEqual(canned_response.locale, "en-US") 31 | 32 | def test_get_help_phrase(self): 33 | canned_response = CannedResponse("en-US") 34 | assert isinstance(canned_response.get_help_phrase(), str) 35 | 36 | def test_get_no_message_phrase(self): 37 | canned_response = CannedResponse("en-US") 38 | assert isinstance(canned_response.get_no_message_phrase(), str) 39 | 40 | def test_get_goodbye_phrase(self): 41 | canned_response = 
CannedResponse("en-US") 42 | assert isinstance(canned_response.get_goodbye_phrase(), str) 43 | 44 | def test_get_reprompt_phrase(self): 45 | canned_response = CannedResponse("en-US") 46 | assert isinstance(canned_response.get_fallback_handler_phrase(), str) 47 | 48 | def test_get_launch_handler_phrase(self): 49 | canned_response = CannedResponse("en-US") 50 | assert isinstance(canned_response.get_launch_handler_phrase(), str) 51 | 52 | def test_get_fallback_handler_phrasee(self): 53 | canned_response = CannedResponse("en-US") 54 | assert isinstance(canned_response.get_fallback_handler_phrase(), str) 55 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.isort] 2 | profile = "black" 3 | 4 | [tool.ruff] 5 | lint.ignore = ["E501"] 6 | -------------------------------------------------------------------------------- /skill-package/assets/images/en-US_largeIcon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulotruta/alexa-skill-llm-intent/99638d84fc12130e05cfd426b0b0b266234835a1/skill-package/assets/images/en-US_largeIcon.png -------------------------------------------------------------------------------- /skill-package/interactionModels/custom/en-US.json: -------------------------------------------------------------------------------- 1 | { 2 | "interactionModel": { 3 | "languageModel": { 4 | "invocationName": "artificial intelligence", 5 | "modelConfiguration": { 6 | "fallbackIntentSensitivity": { 7 | "level": "LOW" 8 | } 9 | }, 10 | "intents": [ 11 | { 12 | "name": "AMAZON.CancelIntent", 13 | "samples": [] 14 | }, 15 | { 16 | "name": "AMAZON.HelpIntent", 17 | "samples": [] 18 | }, 19 | { 20 | "name": "AMAZON.StopIntent", 21 | "samples": [] 22 | }, 23 | { 24 | "name": "QuestionIntent", 25 | "slots": [ 26 | { 27 | "name": "searchQuery", 28 | "type": 
"AMAZON.SearchQuery", 29 | "samples": [ 30 | "{searchQuery}" 31 | ] 32 | } 33 | ], 34 | "samples": [ 35 | "yes", 36 | "question", 37 | "maybe {searchQuery}", 38 | "no {searchQuery}", 39 | "yes {searchQuery}", 40 | "could {searchQuery}", 41 | "tell me about {searchQuery}", 42 | "if {searchQuery}", 43 | "for {searchQuery}", 44 | "when {searchQuery}", 45 | "who {searchQuery}", 46 | "how {searchQuery}", 47 | "what {searchQuery}", 48 | "the question {searchQuery}", 49 | "about {searchQuery}", 50 | "answer a question", 51 | "to help me with something else", 52 | "to help me with a question", 53 | "to help me with something", 54 | "a question", 55 | "for a question", 56 | "ask openAI a question", 57 | "answer me a question" 58 | ] 59 | }, 60 | { 61 | "name": "AMAZON.NavigateHomeIntent", 62 | "samples": [] 63 | }, 64 | { 65 | "name": "AMAZON.FallbackIntent", 66 | "samples": [] 67 | } 68 | ], 69 | "types": [] 70 | }, 71 | "dialog": { 72 | "intents": [ 73 | { 74 | "name": "QuestionIntent", 75 | "confirmationRequired": false, 76 | "prompts": {}, 77 | "slots": [ 78 | { 79 | "name": "searchQuery", 80 | "type": "AMAZON.SearchQuery", 81 | "confirmationRequired": false, 82 | "elicitationRequired": true, 83 | "prompts": { 84 | "elicitation": "Elicit.Slot.29691672240.1575270597350" 85 | } 86 | } 87 | ] 88 | } 89 | ], 90 | "delegationStrategy": "ALWAYS" 91 | }, 92 | "prompts": [ 93 | { 94 | "id": "Elicit.Slot.604331776974.404371559008", 95 | "variations": [ 96 | { 97 | "type": "PlainText", 98 | "value": "What's on your mind?" 99 | }, 100 | { 101 | "type": "PlainText", 102 | "value": "Tell me what you want to know" 103 | }, 104 | { 105 | "type": "PlainText", 106 | "value": "What is your question?" 107 | } 108 | ] 109 | }, 110 | { 111 | "id": "Elicit.Slot.29691672240.1575270597350", 112 | "variations": [ 113 | { 114 | "type": "PlainText", 115 | "value": "What's up?" 116 | }, 117 | { 118 | "type": "PlainText", 119 | "value": "How can AI help you today?" 
120 | }, 121 | { 122 | "type": "PlainText", 123 | "value": "What is your question?" 124 | }, 125 | { 126 | "type": "PlainText", 127 | "value": "Absolutely. What is your question?" 128 | }, 129 | { 130 | "type": "PlainText", 131 | "value": "Sure. What's on your mind?" 132 | } 133 | ] 134 | } 135 | ] 136 | } 137 | } 138 | --------------------------------------------------------------------------------