├── .github
│   ├── FUNDING.yaml
│   └── workflows
│       ├── validate.yaml
│       └── release.yaml
├── hacs.json
├── .licenserc.yaml
├── custom_components
│   └── salutespeech
│       ├── manifest.json
│       ├── translations
│       │   ├── ru.json
│       │   └── en.json
│       ├── api
│       │   ├── grpc
│       │   │   ├── synthesis.proto
│       │   │   ├── synthesis_pb2_grpc.py
│       │   │   ├── recognition_pb2_grpc.py
│       │   │   ├── synthesis_pb2.py
│       │   │   ├── recognition.proto
│       │   │   └── recognition_pb2.py
│       │   ├── certs
│       │   │   └── russian_trusted_root_ca.cer
│       │   └── rest
│       │       └── salutespeech_auth.py
│       ├── const.py
│       ├── config_flow.py
│       ├── __init__.py
│       ├── tts.py
│       └── stt.py
├── CONTRIBUTING.md
├── .gitignore
├── README.md
└── LICENSE
/.github/FUNDING.yaml:
--------------------------------------------------------------------------------
1 | custom:
2 | - https://mansmarthome.info/donate/?utm_source=github&utm_medium=referral&utm_campaign=salutespeech
3 |
--------------------------------------------------------------------------------
/hacs.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "SaluteSpeech",
3 | "render_readme": true,
4 | "zip_release": true,
5 | "filename": "salutespeech.zip"
6 | }
7 |
--------------------------------------------------------------------------------
/.licenserc.yaml:
--------------------------------------------------------------------------------
1 | header:
2 | license:
3 | spdx-id: MPL-2.0
4 |
5 | paths:
6 | - 'custom_components/**/*.py'
7 |
8 | paths-ignore:
9 | - 'custom_components/salutespeech/api/**/*_pb2{,_grpc}.py'
10 |
11 | comment: on-failure
12 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "domain": "salutespeech",
3 | "name": "SaluteSpeech",
4 | "codeowners": ["@black-roland"],
5 | "config_flow": true,
6 | "documentation": "https://github.com/black-roland/homeassistant-salutespeech#README",
7 | "integration_type": "service",
8 | "iot_class": "cloud_push",
9 | "issue_tracker": "https://github.com/black-roland/homeassistant-salutespeech/issues",
10 | "loggers": ["salutespeech"],
11 | "requirements": [],
12 | "version": "1.1.0"
13 | }
14 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/translations/ru.json:
--------------------------------------------------------------------------------
1 | {
2 | "config": {
3 | "step": {
4 | "user": {
5 | "description": "По умолчанию используется сертификат НУЦ Минцифры, поставляемый вместе с интеграцией. Это необходимо, если сертификат не установлен как [доверенный на уровне системы](https://developers.sber.ru/docs/ru/salutespeech/certificates).",
6 | "data": {
7 | "auth_key": "Ключ авторизации",
8 | "use_bundled_root_certificates": "Использовать встроенный корневой сертификат"
9 | }
10 | }
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/translations/en.json:
--------------------------------------------------------------------------------
1 | {
2 | "config": {
3 | "step": {
4 | "user": {
5 |         "description": "By default, the bundled root certificate from the Russian Ministry of Digital Development is used. This is needed if the certificate is not installed as [system-wide trusted](https://developers.sber.ru/docs/ru/salutespeech/certificates).",
6 | "data": {
7 | "auth_key": "Authorization key",
8 | "use_bundled_root_certificates": "Use bundled root certificate"
9 | }
10 | }
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/.github/workflows/validate.yaml:
--------------------------------------------------------------------------------
1 | name: "Validate"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | hassfest:
11 | name: "With hassfest"
12 | runs-on: "ubuntu-latest"
13 | steps:
14 | - uses: "actions/checkout@v3"
15 | - uses: home-assistant/actions/hassfest@master
16 |
17 | hacs:
18 | name: "With HACS validation"
19 | runs-on: "ubuntu-latest"
20 | steps:
21 | - uses: "hacs/action@main"
22 | with:
23 | category: "integration"
24 | ignore: brands
25 |
26 | license:
27 | name: "With License Eye Header"
28 | runs-on: "ubuntu-latest"
29 |     steps:
30 |       - uses: "actions/checkout@v4"
31 |       - uses: apache/skywalking-eyes@v0.7.0
32 |         with:
33 |           mode: check
34 | 
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/grpc/synthesis.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package smartspeech.synthesis.v1;
4 |
5 | import "google/protobuf/duration.proto";
6 |
7 | option go_package = "./;protocol";
8 | option java_package = "TODO";
9 |
10 | service SmartSpeech {
11 | rpc Synthesize (SynthesisRequest) returns (stream SynthesisResponse);
12 | }
13 |
14 | message SynthesisRequest {
15 | enum AudioEncoding {
16 | AUDIO_ENCODING_UNSPECIFIED = 0;
17 | PCM_S16LE = 1; // 16-bit signed little-endian (Linear PCM)
18 | OPUS = 2; // mime audio/ogg; codecs=opus
19 | WAV = 3; // mime audio/x-wav with 16-bit signed little-endian (Linear PCM)
20 | }
21 |
22 | enum ContentType {
23 | TEXT = 0;
24 | SSML = 1;
25 | }
26 |
27 | string text = 1;
28 | AudioEncoding audio_encoding = 2;
29 | string language = 3; // Language code in RFC-3066 format, i.e.: ru-RU
30 | ContentType content_type = 4;
31 | string voice = 5;
32 | bool rebuild_cache = 6;
33 | }
34 |
35 | message SynthesisResponse {
36 | bytes data = 1; // chunk of audio data
37 | google.protobuf.Duration audio_duration = 2; // time from start of audio so far
38 | }
39 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/const.py:
--------------------------------------------------------------------------------
1 | """Constants for the SaluteSpeech integration."""
2 |
3 | # This Source Code Form is subject to the terms of the Mozilla Public
4 | # License, v. 2.0. If a copy of the MPL was not distributed with this
5 | # file, You can obtain one at https://mozilla.org/MPL/2.0/.
6 |
7 | import logging
8 |
9 | from .api.grpc import synthesis_pb2
10 |
11 | DOMAIN = "salutespeech"
12 | LOGGER = logging.getLogger(__package__)
13 |
14 | CONF_AUTH_KEY = "auth_key"
15 | CONF_USE_BUNDLED_ROOT_CERTIFICATES = "use_bundled_root_certificates"
16 |
17 | DATA_AUTH_HELPER = "auth_helper"
18 | DATA_ROOT_CERTIFICATES = "root_certificates"
19 |
20 | SUPPORTED_LANGUAGES = ["ru-RU", "en-US", "kk-KZ"]
21 |
22 | TTS_VOICES = {
23 | "en-US": [
24 | ("Kira", "Kin"),
25 | ],
26 | "kk-KZ": [
27 | ("Наталья", "Nec"),
28 | ("Борис", "Bys"),
29 | ("Марфа", "May"),
30 | ("Тарас", "Tur"),
31 | ("Александра", "Ost"),
32 | ("Сергей", "Pon"),
33 | ],
34 | "ru-RU": [
35 | ("Наталья", "Nec"),
36 | ("Борис", "Bys"),
37 | ("Марфа", "May"),
38 | ("Тарас", "Tur"),
39 | ("Александра", "Ost"),
40 | ("Сергей", "Pon"),
41 | ],
42 | }
43 |
44 | TTS_OUTPUT_CONTAINERS = {
45 | "wav": synthesis_pb2.SynthesisRequest.WAV,
46 | "opus": synthesis_pb2.SynthesisRequest.OPUS,
47 | }
48 |
49 | DEFAULT_LANG = "ru-RU"
50 | DEFAULT_VOICE = "Nec"
51 | DEFAULT_OUTPUT_CONTAINER = "wav"
52 |
53 | EVENT_SALUTESPEECH_STT_EMOTIONS = f"{DOMAIN}_stt_emotions"
54 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 | 
3 | Thank you for your interest in the development of the SaluteSpeech integration for Home Assistant!
4 | 
5 | If you would like to propose fixes or new features, here are a few guidelines.
6 | 
7 | ## Discussing changes
8 | 
9 | Before starting work on large changes or new features, **please discuss them first**:
10 | 1. **Search the open [issues](https://github.com/black-roland/homeassistant-salutespeech/issues)**: your idea or problem may already be under discussion.
11 | 2. **Create a [discussion](https://github.com/black-roland/homeassistant-salutespeech/discussions) or an [issue](https://github.com/black-roland/homeassistant-salutespeech/issues)** and describe what you want to do or change. This helps avoid a situation where you do work that I cannot accept for some reason.
12 | 
13 | ## Bug reports and feature requests
14 | 
15 | If you have found a bug or have an idea for a new feature, create an [issue](https://github.com/black-roland/homeassistant-salutespeech/issues). Before doing so, check whether an issue on a similar topic already exists.
16 | 
17 | The more detail you provide about the problem or proposal (steps to reproduce, logs, HA versions), the faster I can respond.
18 | 
19 | ## Pull requests
20 | 
21 | PRs with fixes and improvements are always welcome! The process is simple:
22 | 
23 | 1. Make sure your PR solves one specific problem or adds one clearly defined feature.
24 | 2. Describe in the PR what exactly it changes and why.
25 | 3. Be prepared that I may ask for revisions.
26 | 
27 | Thank you for your contribution!
28 | 
--------------------------------------------------------------------------------
/custom_components/salutespeech/config_flow.py:
--------------------------------------------------------------------------------
1 | """Config flow for SaluteSpeech integration."""
2 |
3 | # This Source Code Form is subject to the terms of the Mozilla Public
4 | # License, v. 2.0. If a copy of the MPL was not distributed with this
5 | # file, You can obtain one at https://mozilla.org/MPL/2.0/.
6 |
7 | from typing import Any
8 |
9 | import voluptuous as vol
10 | from homeassistant.config_entries import ConfigFlow, ConfigFlowResult
11 |
12 | from .const import CONF_AUTH_KEY, CONF_USE_BUNDLED_ROOT_CERTIFICATES, DOMAIN
13 |
14 |
15 | class SaluteSpeechConfigFlow(ConfigFlow, domain=DOMAIN):
16 | """Handle a config flow for SaluteSpeech."""
17 |
18 | VERSION = 1
19 | MINOR_VERSION = 1
20 |
21 | async def async_step_user(
22 | self, user_input: dict[str, Any] | None = None
23 | ) -> ConfigFlowResult:
24 | """Handle the initial step."""
25 |
26 | if user_input is not None:
27 | # TODO: Validate input
28 | return self.async_create_entry(
29 | title="SaluteSpeech",
30 | data=user_input,
31 | )
32 |
33 | advanced_schema = vol.Schema(
34 | {
35 | vol.Required(CONF_USE_BUNDLED_ROOT_CERTIFICATES, default=True): bool,
36 | }
37 | )
38 |
39 | return self.async_show_form(
40 | step_id="user",
41 | data_schema=vol.Schema(
42 | {
43 | vol.Required(CONF_AUTH_KEY): str,
44 | **(advanced_schema.schema if self.show_advanced_options else {}),
45 | }
46 | ),
47 | )
48 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/__init__.py:
--------------------------------------------------------------------------------
1 | """The SaluteSpeech integration."""
2 |
3 | # This Source Code Form is subject to the terms of the Mozilla Public
4 | # License, v. 2.0. If a copy of the MPL was not distributed with this
5 | # file, You can obtain one at https://mozilla.org/MPL/2.0/.
6 |
7 | import os
8 |
9 | from homeassistant.config_entries import ConfigEntry
10 | from homeassistant.const import Platform
11 | from homeassistant.core import HomeAssistant
12 |
13 | from .const import (
14 |     CONF_AUTH_KEY,
15 |     CONF_USE_BUNDLED_ROOT_CERTIFICATES,
16 |     DATA_AUTH_HELPER,
17 |     DATA_ROOT_CERTIFICATES,
18 | )
19 |
20 | from .api.rest.salutespeech_auth import SaluteSpeechAuth
21 |
22 | PLATFORMS = [Platform.STT, Platform.TTS]
23 |
24 |
25 | async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
26 | """Set up a config entry."""
27 |
28 | def read_root_certificates():
29 | file_name = os.path.join(
30 | os.path.dirname(__file__),
31 | "api",
32 | "certs",
33 | "russian_trusted_root_ca.cer",
34 | )
35 | return open(file_name, "rb").read()
36 |
37 | root_certificates = (
38 | await hass.async_add_executor_job(read_root_certificates)
39 | if entry.data.get(CONF_USE_BUNDLED_ROOT_CERTIFICATES, True)
40 | else None
41 | )
42 |
43 | entry.runtime_data = {}
44 | entry.runtime_data[DATA_ROOT_CERTIFICATES] = root_certificates
45 | entry.runtime_data[DATA_AUTH_HELPER] = SaluteSpeechAuth(
46 | hass, entry.data[CONF_AUTH_KEY], root_certificates
47 | )
48 |
49 | await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
50 |
51 | return True
52 |
53 |
54 | async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
55 | """Unload config entry."""
56 | return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
57 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: "Release"
2 |
3 | on:
4 | push:
5 | tags:
6 | - "v*.*.*"
7 |
8 | jobs:
9 | release:
10 | name: "Release"
11 | runs-on: "ubuntu-latest"
12 | permissions:
13 | contents: write
14 | steps:
15 | - name: "Checkout"
16 | uses: "actions/checkout@v4"
17 | - name: "Verify the release version"
18 | uses: technote-space/package-version-check-action@v1
19 | with:
20 | COMMIT_DISABLED: true
21 | PACKAGE_DIR: ${{ github.workspace }}/custom_components/salutespeech
22 | PACKAGE_NAME: manifest.json
23 | - name: "Compress"
24 | shell: "bash"
25 | run: |
26 | cd "${{ github.workspace }}/custom_components/salutespeech"
27 | zip -r "${{ runner.temp }}/salutespeech.zip" ./
28 | - name: "Publish"
29 | uses: softprops/action-gh-release@v2
30 | with:
31 | draft: true
32 | files: ${{ runner.temp }}/salutespeech.zip
33 | body: >
34 | [](https://github.com/black-roland/homeassistant-salutespeech/releases)
35 | [](https://boosty.to/mansmarthome/about?utm_source=github&utm_medium=referral&utm_campaign=salutespeech)
36 | [](https://mansmarthome.info/donate/?utm_source=github&utm_medium=referral&utm_campaign=salutespeech#%D1%81%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0-%D0%B1%D1%8B%D1%81%D1%82%D1%80%D1%8B%D1%85-%D0%BF%D0%BB%D0%B0%D1%82%D0%B5%D0%B6%D0%B5%D0%B9)
37 | generate_release_notes: true
38 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/certs/russian_trusted_root_ca.cer:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx
3 | PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu
4 | ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg
5 | Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS
6 | VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg
7 | YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v
8 | dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n
9 | qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q
10 | XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U
11 | zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX
12 | YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y
13 | Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD
14 | U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD
15 | 4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9
16 | G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH
17 | BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX
18 | ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa
19 | OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf
20 | BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS
21 | BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF
22 | AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH
23 | tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq
24 | W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+
25 | /3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS
26 | AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj
27 | C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV
28 | 4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d
29 | WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ
30 | D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC
31 | EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq
32 | 391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4=
33 | -----END CERTIFICATE-----
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/grpc/synthesis_pb2_grpc.py:
--------------------------------------------------------------------------------
1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 | """Client and server classes corresponding to protobuf-defined services."""
3 | import grpc
4 |
5 | from custom_components.salutespeech.api.grpc import synthesis_pb2 as custom__components_dot_salutespeech_dot_api_dot_grpc_dot_synthesis__pb2
6 |
7 |
8 | class SmartSpeechStub(object):
9 | """Missing associated documentation comment in .proto file."""
10 |
11 | def __init__(self, channel):
12 | """Constructor.
13 |
14 | Args:
15 | channel: A grpc.Channel.
16 | """
17 | self.Synthesize = channel.unary_stream(
18 | '/smartspeech.synthesis.v1.SmartSpeech/Synthesize',
19 | request_serializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_synthesis__pb2.SynthesisRequest.SerializeToString,
20 | response_deserializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_synthesis__pb2.SynthesisResponse.FromString,
21 | )
22 |
23 |
24 | class SmartSpeechServicer(object):
25 | """Missing associated documentation comment in .proto file."""
26 |
27 | def Synthesize(self, request, context):
28 | """Missing associated documentation comment in .proto file."""
29 | context.set_code(grpc.StatusCode.UNIMPLEMENTED)
30 | context.set_details('Method not implemented!')
31 | raise NotImplementedError('Method not implemented!')
32 |
33 |
34 | def add_SmartSpeechServicer_to_server(servicer, server):
35 | rpc_method_handlers = {
36 | 'Synthesize': grpc.unary_stream_rpc_method_handler(
37 | servicer.Synthesize,
38 | request_deserializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_synthesis__pb2.SynthesisRequest.FromString,
39 | response_serializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_synthesis__pb2.SynthesisResponse.SerializeToString,
40 | ),
41 | }
42 | generic_handler = grpc.method_handlers_generic_handler(
43 | 'smartspeech.synthesis.v1.SmartSpeech', rpc_method_handlers)
44 | server.add_generic_rpc_handlers((generic_handler,))
45 |
46 |
47 | # This class is part of an EXPERIMENTAL API.
48 | class SmartSpeech(object):
49 | """Missing associated documentation comment in .proto file."""
50 |
51 | @staticmethod
52 | def Synthesize(request,
53 | target,
54 | options=(),
55 | channel_credentials=None,
56 | call_credentials=None,
57 | insecure=False,
58 | compression=None,
59 | wait_for_ready=None,
60 | timeout=None,
61 | metadata=None):
62 | return grpc.experimental.unary_stream(request, target, '/smartspeech.synthesis.v1.SmartSpeech/Synthesize',
63 | custom__components_dot_salutespeech_dot_api_dot_grpc_dot_synthesis__pb2.SynthesisRequest.SerializeToString,
64 | custom__components_dot_salutespeech_dot_api_dot_grpc_dot_synthesis__pb2.SynthesisResponse.FromString,
65 | options, channel_credentials,
66 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
67 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/grpc/recognition_pb2_grpc.py:
--------------------------------------------------------------------------------
1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 | """Client and server classes corresponding to protobuf-defined services."""
3 | import grpc
4 |
5 | from custom_components.salutespeech.api.grpc import recognition_pb2 as custom__components_dot_salutespeech_dot_api_dot_grpc_dot_recognition__pb2
6 |
7 |
8 | class SmartSpeechStub(object):
9 | """Missing associated documentation comment in .proto file."""
10 |
11 | def __init__(self, channel):
12 | """Constructor.
13 |
14 | Args:
15 | channel: A grpc.Channel.
16 | """
17 | self.Recognize = channel.stream_stream(
18 | '/smartspeech.recognition.v2.SmartSpeech/Recognize',
19 | request_serializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_recognition__pb2.RecognitionRequest.SerializeToString,
20 | response_deserializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_recognition__pb2.RecognitionResponse.FromString,
21 | )
22 |
23 |
24 | class SmartSpeechServicer(object):
25 | """Missing associated documentation comment in .proto file."""
26 |
27 | def Recognize(self, request_iterator, context):
28 | """Missing associated documentation comment in .proto file."""
29 | context.set_code(grpc.StatusCode.UNIMPLEMENTED)
30 | context.set_details('Method not implemented!')
31 | raise NotImplementedError('Method not implemented!')
32 |
33 |
34 | def add_SmartSpeechServicer_to_server(servicer, server):
35 | rpc_method_handlers = {
36 | 'Recognize': grpc.stream_stream_rpc_method_handler(
37 | servicer.Recognize,
38 | request_deserializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_recognition__pb2.RecognitionRequest.FromString,
39 | response_serializer=custom__components_dot_salutespeech_dot_api_dot_grpc_dot_recognition__pb2.RecognitionResponse.SerializeToString,
40 | ),
41 | }
42 | generic_handler = grpc.method_handlers_generic_handler(
43 | 'smartspeech.recognition.v2.SmartSpeech', rpc_method_handlers)
44 | server.add_generic_rpc_handlers((generic_handler,))
45 |
46 |
47 | # This class is part of an EXPERIMENTAL API.
48 | class SmartSpeech(object):
49 | """Missing associated documentation comment in .proto file."""
50 |
51 | @staticmethod
52 | def Recognize(request_iterator,
53 | target,
54 | options=(),
55 | channel_credentials=None,
56 | call_credentials=None,
57 | insecure=False,
58 | compression=None,
59 | wait_for_ready=None,
60 | timeout=None,
61 | metadata=None):
62 | return grpc.experimental.stream_stream(request_iterator, target, '/smartspeech.recognition.v2.SmartSpeech/Recognize',
63 | custom__components_dot_salutespeech_dot_api_dot_grpc_dot_recognition__pb2.RecognitionRequest.SerializeToString,
64 | custom__components_dot_salutespeech_dot_api_dot_grpc_dot_recognition__pb2.RecognitionResponse.FromString,
65 | options, channel_credentials,
66 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
67 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/rest/salutespeech_auth.py:
--------------------------------------------------------------------------------
1 | """SaluteSpeech authorization helper."""
2 |
3 | # This Source Code Form is subject to the terms of the Mozilla Public
4 | # License, v. 2.0. If a copy of the MPL was not distributed with this
5 | # file, You can obtain one at https://mozilla.org/MPL/2.0/.
6 |
7 | import ssl
8 | import uuid
9 | from datetime import datetime, timedelta
10 | from typing import Optional
11 |
12 | from aiohttp import ClientSession
13 | from homeassistant.core import HomeAssistant
14 | from homeassistant.helpers.aiohttp_client import async_get_clientsession
15 |
16 |
17 | class SaluteSpeechAuth:
18 | def __init__(
19 | self,
20 | hass: HomeAssistant,
21 | auth_key: str,
22 |         root_certificates: Optional[bytes] = None,
23 | scope: str = "SALUTE_SPEECH_PERS",
24 | ) -> None:
25 | """SaluteSpeech authorization helper."""
26 | self._hass: HomeAssistant = hass
27 | self._auth_key: str = auth_key
28 |         self._root_certificates: Optional[bytes] = root_certificates
29 | self._scope: str = scope
30 |
31 | self._session: ClientSession = async_get_clientsession(hass)
32 | self._token_url: str = "https://ngw.devices.sberbank.ru:9443/api/v2/oauth"
33 |
34 | self._access_token: Optional[str] = None
35 | self._expires_at: Optional[datetime] = None
36 | self._ssl_context: Optional[ssl.SSLContext] = None
37 |
38 | def _generate_rquid(self) -> str:
39 | """Get unique request ID."""
40 | return str(uuid.uuid4())
41 |
42 |     async def _get_ssl_context(self) -> Optional[ssl.SSLContext]:
43 | """Get SSL context."""
44 | if self._ssl_context:
45 | return self._ssl_context
46 |
47 | if self._root_certificates is None:
48 | return None
49 |
50 | self._ssl_context = await self._hass.async_add_executor_job(
51 | lambda: ssl.create_default_context(
52 | cadata=self._root_certificates.decode("utf-8")
53 | )
54 | )
55 | return self._ssl_context
56 |
57 | async def get_access_token(self) -> Optional[str]:
58 |         """Return the access token. If it exists and is valid, return it; otherwise, fetch a new one."""
59 | if self._access_token and datetime.now() < self._expires_at:
60 | return self._access_token
61 |
62 | headers = {
63 | "Content-Type": "application/x-www-form-urlencoded",
64 | "Accept": "application/json",
65 | "RqUID": self._generate_rquid(),
66 | "Authorization": f"Basic {self._auth_key}",
67 | }
68 |
69 | ssl_context = await self._get_ssl_context()
70 |
71 | try:
72 | async with self._session.post(
73 | self._token_url,
74 | headers=headers,
75 | ssl=ssl_context,
76 | data={"scope": self._scope},
77 | ) as response:
78 | if response.status != 200:
79 | error_text = await response.text()
80 | raise Exception(
81 | f"Failed to get access token: {response.status} {error_text}"
82 | )
83 |
84 | data = await response.json()
85 | self._access_token = data.get("access_token")
86 | expires_in = 1800 # 30 minutes
87 | self._expires_at = datetime.now() + timedelta(seconds=expires_in - 59)
88 | return self._access_token
89 |
90 | except Exception as e:
91 | raise Exception(f"Error during token request: {e}")
92 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/grpc/synthesis_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: custom_components/salutespeech/api/grpc/synthesis.proto
4 | """Generated protocol buffer code."""
5 | from google.protobuf import descriptor as _descriptor
6 | from google.protobuf import descriptor_pool as _descriptor_pool
7 | from google.protobuf import message as _message
8 | from google.protobuf import reflection as _reflection
9 | from google.protobuf import symbol_database as _symbol_database
10 | # @@protoc_insertion_point(imports)
11 |
12 | _sym_db = _symbol_database.Default()
13 |
14 |
15 | from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
16 |
17 |
18 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n7custom_components/salutespeech/api/grpc/synthesis.proto\x12\x18smartspeech.synthesis.v1\x1a\x1egoogle/protobuf/duration.proto\"\xee\x02\n\x10SynthesisRequest\x12\x0c\n\x04text\x18\x01 \x01(\t\x12P\n\x0e\x61udio_encoding\x18\x02 \x01(\x0e\x32\x38.smartspeech.synthesis.v1.SynthesisRequest.AudioEncoding\x12\x10\n\x08language\x18\x03 \x01(\t\x12L\n\x0c\x63ontent_type\x18\x04 \x01(\x0e\x32\x36.smartspeech.synthesis.v1.SynthesisRequest.ContentType\x12\r\n\x05voice\x18\x05 \x01(\t\x12\x15\n\rrebuild_cache\x18\x06 \x01(\x08\"Q\n\rAudioEncoding\x12\x1e\n\x1a\x41UDIO_ENCODING_UNSPECIFIED\x10\x00\x12\r\n\tPCM_S16LE\x10\x01\x12\x08\n\x04OPUS\x10\x02\x12\x07\n\x03WAV\x10\x03\"!\n\x0b\x43ontentType\x12\x08\n\x04TEXT\x10\x00\x12\x08\n\x04SSML\x10\x01\"T\n\x11SynthesisResponse\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x31\n\x0e\x61udio_duration\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration2v\n\x0bSmartSpeech\x12g\n\nSynthesize\x12*.smartspeech.synthesis.v1.SynthesisRequest\x1a+.smartspeech.synthesis.v1.SynthesisResponse0\x01\x42\x13\n\x04TODOZ\x0b./;protocolb\x06proto3')
19 |
20 |
21 |
22 | _SYNTHESISREQUEST = DESCRIPTOR.message_types_by_name['SynthesisRequest']
23 | _SYNTHESISRESPONSE = DESCRIPTOR.message_types_by_name['SynthesisResponse']
24 | _SYNTHESISREQUEST_AUDIOENCODING = _SYNTHESISREQUEST.enum_types_by_name['AudioEncoding']
25 | _SYNTHESISREQUEST_CONTENTTYPE = _SYNTHESISREQUEST.enum_types_by_name['ContentType']
26 | SynthesisRequest = _reflection.GeneratedProtocolMessageType('SynthesisRequest', (_message.Message,), {
27 | 'DESCRIPTOR' : _SYNTHESISREQUEST,
28 | '__module__' : 'custom_components.salutespeech.api.grpc.synthesis_pb2'
29 | # @@protoc_insertion_point(class_scope:smartspeech.synthesis.v1.SynthesisRequest)
30 | })
31 | _sym_db.RegisterMessage(SynthesisRequest)
32 |
33 | SynthesisResponse = _reflection.GeneratedProtocolMessageType('SynthesisResponse', (_message.Message,), {
34 | 'DESCRIPTOR' : _SYNTHESISRESPONSE,
35 | '__module__' : 'custom_components.salutespeech.api.grpc.synthesis_pb2'
36 | # @@protoc_insertion_point(class_scope:smartspeech.synthesis.v1.SynthesisResponse)
37 | })
38 | _sym_db.RegisterMessage(SynthesisResponse)
39 |
40 | _SMARTSPEECH = DESCRIPTOR.services_by_name['SmartSpeech']
41 | if _descriptor._USE_C_DESCRIPTORS == False:
42 |
43 | DESCRIPTOR._options = None
44 | DESCRIPTOR._serialized_options = b'\n\004TODOZ\013./;protocol'
45 | _SYNTHESISREQUEST._serialized_start=118
46 | _SYNTHESISREQUEST._serialized_end=484
47 | _SYNTHESISREQUEST_AUDIOENCODING._serialized_start=368
48 | _SYNTHESISREQUEST_AUDIOENCODING._serialized_end=449
49 | _SYNTHESISREQUEST_CONTENTTYPE._serialized_start=451
50 | _SYNTHESISREQUEST_CONTENTTYPE._serialized_end=484
51 | _SYNTHESISRESPONSE._serialized_start=486
52 | _SYNTHESISRESPONSE._serialized_end=570
53 | _SMARTSPEECH._serialized_start=572
54 | _SMARTSPEECH._serialized_end=690
55 | # @@protoc_insertion_point(module_scope)
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # UV
98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | #uv.lock
102 |
103 | # poetry
104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105 | # This is especially recommended for binary packages to ensure reproducibility, and is more
106 | # commonly ignored for libraries.
107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108 | #poetry.lock
109 |
110 | # pdm
111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112 | #pdm.lock
113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114 | # in version control.
115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116 | .pdm.toml
117 | .pdm-python
118 | .pdm-build/
119 |
120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121 | __pypackages__/
122 |
123 | # Celery stuff
124 | celerybeat-schedule
125 | celerybeat.pid
126 |
127 | # SageMath parsed files
128 | *.sage.py
129 |
130 | # Environments
131 | .env
132 | .venv
133 | env/
134 | venv/
135 | ENV/
136 | env.bak/
137 | venv.bak/
138 |
139 | # Spyder project settings
140 | .spyderproject
141 | .spyproject
142 |
143 | # Rope project settings
144 | .ropeproject
145 |
146 | # mkdocs documentation
147 | /site
148 |
149 | # mypy
150 | .mypy_cache/
151 | .dmypy.json
152 | dmypy.json
153 |
154 | # Pyre type checker
155 | .pyre/
156 |
157 | # pytype static type analyzer
158 | .pytype/
159 |
160 | # Cython debug symbols
161 | cython_debug/
162 |
163 | # PyCharm
164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166 | # and can be added to the global gitignore or merged into this file. For a more nuclear
167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168 | #.idea/
169 |
170 | # PyPI configuration file
171 | .pypirc
172 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/grpc/recognition.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package smartspeech.recognition.v2;
4 |
5 | import "google/protobuf/duration.proto";
6 |
7 | option go_package = "./;protocol";
8 | option java_package = "TODO";
9 |
10 | service SmartSpeech {
11 | rpc Recognize (stream RecognitionRequest) returns (stream RecognitionResponse);
12 | }
13 |
14 | message RecognitionRequest {
15 | oneof request {
16 | RecognitionOptions options = 1;
17 | bytes audio_chunk = 2;
18 | }
19 | }
20 |
21 | enum EouReason {
22 | UNSPECIFIED = 0; // Not an EOU
23 | ORGANIC = 1; // Proper EOU
24 | NO_SPEECH_TIMEOUT = 2; // EOU because there was no speech for at least no_speech_timeout seconds
25 | MAX_SPEECH_TIMEOUT = 3; // EOU because there was continuous speech with no breaks for at least max_speech_timeout seconds
26 | }
27 |
28 | message RecognitionResponse {
29 | oneof response {
30 | Transcription transcription = 1;
31 | BackendInfo backend_info = 2;
32 | InsightResult insight = 3;
33 | VADResult vad = 4;
34 | }
35 | }
36 |
37 | message Transcription {
38 | int32 channel = 1;
39 | repeated Hypothesis results = 2;
40 | bool eou = 3; // marks final result for this utterance
41 | EouReason eou_reason = 4; // what caused the end of this utterance (eou must be true)
42 | google.protobuf.Duration processed_audio_start = 5; // starting position of processed audio
43 | google.protobuf.Duration processed_audio_end = 6; // ending position of processed audio
44 | Emotions emotions_result = 7; // may be set on end of utterance
45 | SpeakerInfo speaker_info = 8;
46 | }
47 |
48 | message InsightResult {
49 | string insight_result = 10;
50 | }
51 |
52 | message VADResult {
53 | int32 channel = 1;
54 | google.protobuf.Duration processed_audio_time = 2; // VAD time mark in audio
55 | google.protobuf.Duration utterance_detection_time = 3; // VAD time since utterance start
56 | }
57 |
58 | message OptionalBool {
59 | bool enable = 1;
60 | }
61 |
62 | message RecognitionOptions {
63 | enum AudioEncoding {
64 | AUDIO_ENCODING_UNSPECIFIED = 0;
65 | PCM_S16LE = 1; // 16-bit signed little-endian (Linear PCM)
66 | OPUS = 2; // mime audio/ogg; codecs=opus
67 | MP3 = 3; // MPEG-1/2 Layer-3
68 | FLAC = 4;
69 | ALAW = 5;
70 | MULAW = 6;
71 | G729 = 7;
72 | }
73 |
74 | AudioEncoding audio_encoding = 1;
75 | int32 sample_rate = 2; // For PCM_16LE, ALAW, MULAW audio encodings
76 | int32 channels_count = 3;
77 | string language = 4; // Language code in RFC-3066 format, i.e.: ru-RU
78 | string model = 5;
79 | OptionalBool enable_multi_utterance = 6;
80 | OptionalBool enable_partial_results = 7;
81 | int32 hypotheses_count = 8;
82 | google.protobuf.Duration no_speech_timeout = 9;
83 | google.protobuf.Duration max_speech_timeout = 10;
84 | Hints hints = 11;
85 | SpeakerSeparationOptions speaker_separation_options = 12;
86 |
87 | NormalizationOptions normalization_options = 13;
88 | repeated string insight_models = 14;
89 | OptionalBool enable_vad = 15;
90 | OptionalBool custom_ws_flow_control = 16;
91 | OptionalBool enable_long_utterances = 17;
92 | }
93 |
94 | message NormalizationOptions {
95 | OptionalBool enable = 1;
96 | OptionalBool profanity_filter = 2;
97 | OptionalBool punctuation = 3;
98 | OptionalBool capitalization = 4;
99 | OptionalBool question = 5;
100 | OptionalBool force_cyrillic = 6;
101 | }
102 |
103 | message Hints {
104 | repeated string words = 1;
105 | bool enable_letters = 2;
106 | google.protobuf.Duration eou_timeout = 3;
107 | }
108 |
109 | message SpeakerSeparationOptions {
110 | bool enable = 1;
111 | bool enable_only_main_speaker = 2; // return only main speaker
112 | int32 count = 3; // number of expected speakers
113 | }
114 |
115 | message Hypothesis {
116 | message WordAlignment {
117 | string word = 1; // single word
118 | google.protobuf.Duration start = 2; // starting position of the word in audio stream
119 | google.protobuf.Duration end = 3; // ending position of the word in audio stream
120 | }
121 |
122 | string text = 1; // non-normalized text result
123 | string normalized_text = 2; // normalized text result
124 | google.protobuf.Duration start = 3; // hypothesis starting position from current utterance start
125 | google.protobuf.Duration end = 4; // hypothesis final position from current utterance start
126 | repeated WordAlignment word_alignments = 5; // alignments of single words in audio stream
127 | }
128 |
129 | message Emotions {
130 | float positive = 1; // confidence [0.0 - 1.0]
131 | float neutral = 2;
132 | float negative = 3;
133 | float positive_a = 4;
134 | float neutral_a = 5;
135 | float negative_a = 6;
136 | float positive_t = 7;
137 | float neutral_t = 8;
138 | float negative_t = 9;
139 | }
140 |
141 | message BackendInfo {
142 | string model_name = 1;
143 | string model_version = 2;
144 | string server_version = 3;
145 | }
146 |
147 | message SpeakerInfo {
148 | int32 speaker_id = 1;
149 | float main_speaker_confidence = 2; // main speaker feature [0.0 - 1.0]
150 | }
151 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | See [description in English below](#salutespeech-integration-for-home-assistant) 👇
2 |
3 |
4 |
5 | # Интеграция SaluteSpeech для Home Assistant
6 |
7 | [](https://my.home-assistant.io/redirect/hacs_repository/?owner=black-roland&repository=homeassistant-salutespeech&category=integration) [](https://my.home-assistant.io/redirect/config_flow_start/?domain=salutespeech)
8 |
9 | Интеграция SaluteSpeech для Home Assistant предоставляет функциональность распознавания (STT) и синтеза речи (TTS), используя передовые технологии обработки естественного языка от Sber.
10 |
11 | SaluteSpeech является облачным сервисом.
12 |
13 | ## Возможности
14 |
15 | - **Преобразование речи в текст:**
16 | - Определение конца высказывания.
17 | - [Выявление эмоций в диалоге](https://yaml.mansmarthome.info/roland/efff1cbd469c4a1d8e25e721ab388aea).
18 | - Расстановка знаков препинания и буквы «ё».
19 |
20 | - **Преобразование текста в речь:**
21 | - Генерация речи на русском, английском и казахском языках.
22 | - Озвучивание текста любой сложности и стиля.
23 | - Уникальные ML-модели для расстановки ударений и произношения.
24 | - Нормализация текста для корректного озвучивания адресов, имен, цифр и других сложных слов.
25 |
26 | ## Подготовка
27 |
28 | 1. **Регистрация в SaluteSpeech:**
29 | - Зарегистрируйтесь [в личном кабинете](https://developers.sber.ru/docs/ru/salutespeech/integration-individuals).
30 | - Создайте проект SaluteSpeech API и сгенерируйте ключ авторизации.
31 |
32 | 2. **Access Token:**
33 |    - Для доступа к сервису используется Access Token; интеграция получает его автоматически по ключу авторизации. Для физических лиц используется scope `SALUTE_SPEECH_PERS`.
34 |
35 | ## Установка
36 |
37 | 1. Добавьте репозиторий в HACS: `https://github.com/black-roland/homeassistant-salutespeech` или воспользуйтесь голубой кнопкой выше.
38 | 2. Установите пользовательский компонент через HACS.
39 | 3. Перезапустите Home Assistant, чтобы завершить установку.
40 |
41 | ## Настройка
42 |
43 | - [Добавьте интеграцию в настройках](https://my.home-assistant.io/redirect/config_flow_start/?domain=salutespeech).
44 | - Введите ваш ключ авторизации и сохраните конфигурацию.
45 | - Настройте SaluteSpeech в качестве движка распознавания (STT) и синтеза речи (TTS) для вашего голосового помощника.
46 |
47 | ## Поддержка автора
48 |
49 | Если эта интеграция была вам полезна, подумайте о том, чтобы [угостить автора чашечкой кофе](https://mansmarthome.info/donate/?utm_source=github&utm_medium=referral&utm_campaign=salutespeech#%D1%81%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0-%D0%B1%D1%8B%D1%81%D1%82%D1%80%D1%8B%D1%85-%D0%BF%D0%BB%D0%B0%D1%82%D0%B5%D0%B6%D0%B5%D0%B9)! Ваша благодарность ценится!
50 |
51 | #### Благодарности
52 |
53 | Огромное спасибо всем, кто поддерживает этот проект! Ваш вклад имеет большое значение.
54 |
55 | 
56 |
57 | ---
58 |
59 | # SaluteSpeech Integration for Home Assistant
60 |
61 | [](https://my.home-assistant.io/redirect/hacs_repository/?owner=black-roland&repository=homeassistant-salutespeech&category=integration) [](https://my.home-assistant.io/redirect/config_flow_start/?domain=salutespeech)
62 |
63 | The SaluteSpeech integration for Home Assistant provides speech recognition (STT) and text-to-speech (TTS) functionality using advanced natural language processing technology from Sber.
64 |
65 | SaluteSpeech is a cloud service.
66 |
67 | ## Features
68 |
69 | - **Speech-to-Text:**
70 | - End-of-speech detection.
71 | - Emotion detection in dialogue.
72 |
73 | - **Text-to-Speech (TTS):**
74 | - Speech generation in Russian, English, and Kazakh.
75 | - Pronunciation of texts of any complexity and style.
76 | - Unique ML models for stress and pronunciation placement.
77 | - Text normalization for correct pronunciation of addresses, names, numbers, and other complex words.
78 |
79 | ## Prerequisites
80 |
81 | 1. **Register in SaluteSpeech:**
82 | - Register a personal account in the [Studio](https://developers.sber.ru/docs/ru/salutespeech/integration-individuals).
83 | - Create a SaluteSpeech API project and generate an authorization key.
84 |
85 | 2. **Access Token:**
86 |    - An Access Token is used to access the service; the integration obtains it automatically using your authorization key. For individuals, the `SALUTE_SPEECH_PERS` scope is available.
87 |
88 | ## Installation
89 |
90 | 1. Add the repository to HACS (Home Assistant Community Store): `https://github.com/black-roland/homeassistant-salutespeech` or use the blue button above.
91 | 2. Install the custom component using HACS.
92 | 3. Restart Home Assistant to complete the installation.
93 |
94 | ## Configuration
95 |
96 | - [Set up the integration in settings](https://my.home-assistant.io/redirect/config_flow_start/?domain=salutespeech).
97 | - Enter your authorization key and save the configuration.
98 | - Configure SaluteSpeech as an STT and TTS engine for your Voice assistant.
99 |
100 |
101 | ## Donations
102 |
103 | If this integration has been useful to you, consider [buying the author a coffee](https://dalink.to/mansmarthome)! Your gratitude is appreciated!
104 |
105 | #### Thank you
106 |
107 | 
108 |
--------------------------------------------------------------------------------
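The README's configuration section stops at selecting SaluteSpeech as the STT/TTS engine. As a minimal usage sketch, the TTS entity can also be called directly from a script or automation via the standard `tts.speak` service; the entity id `tts.salutespeech` and the media player id are assumptions that depend on your installation, while the `voice` and `audio_output` values map to `TTS_VOICES` and `TTS_OUTPUT_CONTAINERS` in `const.py`.

```yaml
# Hypothetical script action; replace the entity ids with the ones from your setup.
- service: tts.speak
  target:
    entity_id: tts.salutespeech          # assumed entity id created by this integration
  data:
    media_player_entity_id: media_player.living_room  # assumed media player
    message: "Добрый день! Это проверка синтеза речи."
    language: ru-RU
    options:
      voice: Nec          # voice id from TTS_VOICES in const.py
      audio_output: wav   # or opus, see TTS_OUTPUT_CONTAINERS
```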
/custom_components/salutespeech/tts.py:
--------------------------------------------------------------------------------
1 | """Support for the SaluteSpeech text-to-speech service."""
2 |
3 | # This Source Code Form is subject to the terms of the Mozilla Public
4 | # License, v. 2.0. If a copy of the MPL was not distributed with this
5 | # file, You can obtain one at https://mozilla.org/MPL/2.0/.
6 |
7 | from __future__ import annotations
8 |
9 | import io
10 | from typing import Any
11 |
12 | import grpc
13 | from grpc import aio
14 | from homeassistant.components.tts import (
15 | ATTR_AUDIO_OUTPUT,
16 | ATTR_VOICE,
17 | TextToSpeechEntity,
18 | TtsAudioType,
19 | Voice,
20 | )
21 | from homeassistant.config_entries import ConfigEntry
22 | from homeassistant.core import HomeAssistant, callback
23 | from homeassistant.helpers import device_registry as dr
24 | from homeassistant.helpers.entity_platform import AddEntitiesCallback
25 |
26 | from .api.grpc import synthesis_pb2, synthesis_pb2_grpc
27 | from .const import (
28 | DATA_AUTH_HELPER,
29 | DATA_ROOT_CERTIFICATES,
30 | DEFAULT_LANG,
31 | DEFAULT_OUTPUT_CONTAINER,
32 | DEFAULT_VOICE,
33 | DOMAIN,
34 | LOGGER,
35 | SUPPORTED_LANGUAGES,
36 | TTS_OUTPUT_CONTAINERS,
37 | TTS_VOICES,
38 | )
39 |
40 |
41 | async def async_setup_entry(
42 | hass: HomeAssistant,
43 | config_entry: ConfigEntry,
44 | async_add_entities: AddEntitiesCallback,
45 | ) -> None:
46 | """Set up SaluteSpeech text-to-speech."""
47 | entities: list[TextToSpeechEntity] = [SaluteSpeechTTSEntity(config_entry)]
48 | async_add_entities(entities)
49 |
50 |
51 | class SaluteSpeechTTSEntity(TextToSpeechEntity):
52 | """The SaluteSpeech TTS entity."""
53 |
54 | def __init__(self, config_entry: ConfigEntry) -> None:
55 | """Initialize the entity."""
56 | self._attr_unique_id = config_entry.entry_id
57 | self._attr_name = config_entry.title
58 | self._attr_device_info = dr.DeviceInfo(
59 | identifiers={(DOMAIN, config_entry.entry_id)},
60 | manufacturer="Sber",
61 | model="SaluteSpeech",
62 | entry_type=dr.DeviceEntryType.SERVICE,
63 | )
64 |
65 | self._config_entry = config_entry
66 |
67 | @property
68 | def supported_languages(self) -> list[str]:
69 | """Return a list of supported languages."""
70 | return SUPPORTED_LANGUAGES
71 |
72 | @property
73 | def default_language(self) -> str:
74 | """Return the default language."""
75 | return DEFAULT_LANG
76 |
77 | @property
78 | def supported_options(self) -> list[str]:
79 | """Return list of supported options like voice, audio output."""
80 | return [ATTR_VOICE, ATTR_AUDIO_OUTPUT]
81 |
82 | @property
83 | def default_options(self) -> dict[str, Any]:
84 | """Return a dict including default options."""
85 | return {
86 | ATTR_VOICE: DEFAULT_VOICE,
87 | ATTR_AUDIO_OUTPUT: DEFAULT_OUTPUT_CONTAINER,
88 | }
89 |
90 | @callback
91 | def async_get_supported_voices(self, language: str) -> list[Voice] | None:
92 | """Return a list of supported voices for a language."""
93 | if not (voices := TTS_VOICES.get(language)):
94 | return None
95 | return [Voice(voice_id, name) for name, voice_id in voices]
96 |
97 | async def async_get_tts_audio(
98 | self, message: str, language: str, options: dict[str, Any]
99 | ) -> TtsAudioType:
100 | """Get TTS audio from SaluteSpeech."""
101 | LOGGER.debug("Starting TTS synthesis for message: %s", message)
102 |
103 | output_container = options[ATTR_AUDIO_OUTPUT]
104 | container_audio_type = TTS_OUTPUT_CONTAINERS.get(
105 | output_container, TTS_OUTPUT_CONTAINERS[DEFAULT_OUTPUT_CONTAINER]
106 | )
107 | voice = options[ATTR_VOICE]
108 |
109 | root_certificates = self._config_entry.runtime_data[DATA_ROOT_CERTIFICATES]
110 | auth_helper = self._config_entry.runtime_data[DATA_AUTH_HELPER]
111 |
112 | ssl_cred = grpc.ssl_channel_credentials(root_certificates=root_certificates)
113 | token = await auth_helper.get_access_token()
114 | token_cred = grpc.access_token_call_credentials(token)
115 |
116 | async with aio.secure_channel(
117 | "smartspeech.sber.ru:443",
118 | grpc.composite_channel_credentials(ssl_cred, token_cred),
119 | ) as channel:
120 | stub = synthesis_pb2_grpc.SmartSpeechStub(channel)
121 |
122 | try:
123 | request = self._create_synthesis_request(
124 | message, container_audio_type, voice, language
125 | )
126 | audio = await self._fetch_audio_data(stub, request)
127 | if audio:
128 | LOGGER.debug("TTS synthesis completed successfully")
129 | return (output_container, audio)
130 | else:
131 | LOGGER.error("No audio data received from SaluteSpeech")
132 | return (None, None)
133 | except grpc.RpcError as err:
134 | LOGGER.error("Error occurred during SaluteSpeech TTS call: %s", err)
135 | return (None, None)
136 |
137 | def _create_synthesis_request(
138 | self,
139 | message: str,
140 | container_audio_type: synthesis_pb2.SynthesisRequest.AudioEncoding,
141 | voice: str,
142 | language: str = DEFAULT_LANG,
143 | ) -> synthesis_pb2.SynthesisRequest:
144 | """Create a TTS request."""
145 | synthesis_options = synthesis_pb2.SynthesisRequest()
146 |
147 | synthesis_options.text = message
148 | synthesis_options.audio_encoding = container_audio_type
149 | synthesis_options.voice = f"{voice}_24000"
150 | synthesis_options.language = language
151 |
152 | return synthesis_options
153 |
154 | async def _fetch_audio_data(
155 | self, stub, request: synthesis_pb2.SynthesisRequest
156 | ) -> bytes | None:
157 | """Fetch audio data from SaluteSpeech."""
158 | connection = stub.Synthesize(request, timeout=10)
159 |
160 | audio = io.BytesIO()
161 | async for response in connection:
162 | if response.data:
163 | audio.write(response.data)
164 | else:
165 | LOGGER.warning("Empty audio chunk received from SaluteSpeech")
166 |
167 | audio.seek(0)
168 | return audio.read()
169 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/stt.py:
--------------------------------------------------------------------------------
1 | """Speech to text using SaluteSpeech."""
2 |
3 | # This Source Code Form is subject to the terms of the Mozilla Public
4 | # License, v. 2.0. If a copy of the MPL was not distributed with this
5 | # file, You can obtain one at https://mozilla.org/MPL/2.0/.
6 |
7 | from typing import AsyncGenerator, AsyncIterable, List
8 |
9 | import grpc
10 | from grpc import aio
11 | from homeassistant.components.stt import (
12 | AudioBitRates,
13 | AudioChannels,
14 | AudioCodecs,
15 | AudioFormats,
16 | AudioSampleRates,
17 | SpeechMetadata,
18 | SpeechResult,
19 | SpeechResultState,
20 | SpeechToTextEntity,
21 | )
22 | from homeassistant.config_entries import ConfigEntry
23 | from homeassistant.core import HomeAssistant
24 | from homeassistant.helpers import device_registry as dr
25 | from homeassistant.helpers.entity_platform import AddEntitiesCallback
26 |
27 | from .api.grpc import recognition_pb2, recognition_pb2_grpc
28 | from .const import (
29 | DATA_AUTH_HELPER,
30 | DATA_ROOT_CERTIFICATES,
31 | DOMAIN,
32 | EVENT_SALUTESPEECH_STT_EMOTIONS,
33 | LOGGER,
34 | SUPPORTED_LANGUAGES,
35 | )
36 |
37 |
38 | async def async_setup_entry(
39 | hass: HomeAssistant,
40 | config_entry: ConfigEntry,
41 | async_add_entities: AddEntitiesCallback,
42 | ) -> None:
43 | """Set up SaluteSpeech via config entry."""
44 | async_add_entities([SaluteSpeechSTTEntity(config_entry)])
45 |
46 |
47 | class SaluteSpeechSTTEntity(SpeechToTextEntity):
48 | """SaluteSpeech STT entity."""
49 |
50 | def __init__(self, config_entry: ConfigEntry) -> None:
51 | """Initialize the entity."""
52 | self._attr_unique_id = f"{config_entry.entry_id}"
53 | self._attr_name = config_entry.title
54 | self._attr_device_info = dr.DeviceInfo(
55 | identifiers={(DOMAIN, config_entry.entry_id)},
56 | manufacturer="Sber",
57 | model="SaluteSpeech",
58 | entry_type=dr.DeviceEntryType.SERVICE,
59 | )
60 | self._config_entry = config_entry
61 |
62 | @property
63 | def supported_languages(self) -> List[str]:
64 | """Return a list of supported languages."""
65 | return SUPPORTED_LANGUAGES
66 |
67 | @property
68 | def supported_formats(self) -> List[AudioFormats]:
69 | """Return a list of supported formats."""
70 | return [AudioFormats.WAV, AudioFormats.OGG]
71 |
72 | @property
73 | def supported_codecs(self) -> List[AudioCodecs]:
74 | """Return a list of supported codecs."""
75 | return [AudioCodecs.PCM, AudioCodecs.OPUS]
76 |
77 | @property
78 | def supported_bit_rates(self) -> List[AudioBitRates]:
79 | """Return a list of supported bitrates."""
80 | return [AudioBitRates.BITRATE_16]
81 |
82 | @property
83 | def supported_sample_rates(self) -> List[AudioSampleRates]:
84 | """Return a list of supported samplerates."""
85 | return [
86 | AudioSampleRates.SAMPLERATE_8000,
87 | AudioSampleRates.SAMPLERATE_16000,
88 | AudioSampleRates.SAMPLERATE_44100,
89 | AudioSampleRates.SAMPLERATE_48000,
90 | ]
91 |
92 | @property
93 | def supported_channels(self) -> List[AudioChannels]:
94 | """Return a list of supported channels."""
95 | return [AudioChannels.CHANNEL_MONO]
96 |
97 | async def async_process_audio_stream(
98 | self, metadata: SpeechMetadata, stream: AsyncIterable[bytes]
99 | ) -> SpeechResult:
100 | """Process an audio stream to STT service."""
101 |
102 | async def request_generator() -> (
103 | AsyncGenerator[recognition_pb2.RecognitionRequest, None]
104 | ):
105 | LOGGER.debug("Sending the message with recognition params...")
106 | recognition_options = self._get_recognition_options(metadata)
107 | yield recognition_pb2.RecognitionRequest(options=recognition_options)
108 |
109 | async for audio_bytes in stream:
110 | yield recognition_pb2.RecognitionRequest(audio_chunk=audio_bytes)
111 |
112 | async def recognize_stream(
113 | stub: recognition_pb2_grpc.SmartSpeechStub,
114 |         ) -> str | None:
115 | """Recognize speech from the audio stream."""
116 | connection = stub.Recognize(request_generator(), metadata=())
117 |
118 | text = None
119 | async for response in connection:
120 | if response.HasField("backend_info"):
121 | backend_info = response.backend_info
122 | LOGGER.debug("Backend info: %s", backend_info)
123 |
124 | if response.HasField("transcription"):
125 | transcription = response.transcription
126 |
127 | if not transcription.eou:
128 | continue
129 |
130 | text = " ".join([hyp.normalized_text for hyp in transcription.results])
131 |
132 | if transcription.HasField("emotions_result"):
133 | self.hass.bus.async_fire(
134 | EVENT_SALUTESPEECH_STT_EMOTIONS,
135 | {
136 | "positive": transcription.emotions_result.positive,
137 | "neutral": transcription.emotions_result.neutral,
138 | "negative": transcription.emotions_result.negative,
139 | },
140 | )
141 |
142 | return text
143 |
144 | root_certificates = self._config_entry.runtime_data[DATA_ROOT_CERTIFICATES]
145 | auth_helper = self._config_entry.runtime_data[DATA_AUTH_HELPER]
146 |
147 | ssl_cred = grpc.ssl_channel_credentials(root_certificates=root_certificates)
148 | token = await auth_helper.get_access_token()
149 | token_cred = grpc.access_token_call_credentials(token)
150 | async with aio.secure_channel(
151 | "smartspeech.sber.ru:443",
152 | grpc.composite_channel_credentials(ssl_cred, token_cred),
153 | ) as channel:
154 | stub = recognition_pb2_grpc.SmartSpeechStub(channel)
155 | try:
156 | transcription = await recognize_stream(stub)
157 | if not transcription:
158 | return SpeechResult(None, SpeechResultState.ERROR)
159 | return SpeechResult(transcription, SpeechResultState.SUCCESS)
160 | except grpc.RpcError as err:
161 | LOGGER.error("Error occurred during speech recognition: %s", err)
162 | return SpeechResult(None, SpeechResultState.ERROR)
163 |
164 | def _get_recognition_options(
165 | self, metadata: SpeechMetadata
166 | ) -> recognition_pb2.RecognitionOptions:
167 | """Get recognition options based on metadata."""
168 | options = recognition_pb2.RecognitionOptions()
169 | options.audio_encoding = recognition_pb2.RecognitionOptions.PCM_S16LE
170 | options.sample_rate = metadata.sample_rate
171 | options.channels_count = 1
172 | # options.language = metadata.language
173 | return options
174 |
--------------------------------------------------------------------------------
/custom_components/salutespeech/api/grpc/recognition_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: custom_components/salutespeech/api/grpc/recognition.proto
4 | """Generated protocol buffer code."""
5 | from google.protobuf.internal import enum_type_wrapper
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import descriptor_pool as _descriptor_pool
8 | from google.protobuf import message as _message
9 | from google.protobuf import reflection as _reflection
10 | from google.protobuf import symbol_database as _symbol_database
11 | # @@protoc_insertion_point(imports)
12 |
13 | _sym_db = _symbol_database.Default()
14 |
15 |
16 | from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
17 |
18 |
19 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n9custom_components/salutespeech/api/grpc/recognition.proto\x12\x1asmartspeech.recognition.v2\x1a\x1egoogle/protobuf/duration.proto\"y\n\x12RecognitionRequest\x12\x41\n\x07options\x18\x01 \x01(\x0b\x32..smartspeech.recognition.v2.RecognitionOptionsH\x00\x12\x15\n\x0b\x61udio_chunk\x18\x02 \x01(\x0cH\x00\x42\t\n\x07request\"\x9a\x02\n\x13RecognitionResponse\x12\x42\n\rtranscription\x18\x01 \x01(\x0b\x32).smartspeech.recognition.v2.TranscriptionH\x00\x12?\n\x0c\x62\x61\x63kend_info\x18\x02 \x01(\x0b\x32\'.smartspeech.recognition.v2.BackendInfoH\x00\x12<\n\x07insight\x18\x03 \x01(\x0b\x32).smartspeech.recognition.v2.InsightResultH\x00\x12\x34\n\x03vad\x18\x04 \x01(\x0b\x32%.smartspeech.recognition.v2.VADResultH\x00\x42\n\n\x08response\"\x91\x03\n\rTranscription\x12\x0f\n\x07\x63hannel\x18\x01 \x01(\x05\x12\x37\n\x07results\x18\x02 \x03(\x0b\x32&.smartspeech.recognition.v2.Hypothesis\x12\x0b\n\x03\x65ou\x18\x03 \x01(\x08\x12\x39\n\neou_reason\x18\x04 \x01(\x0e\x32%.smartspeech.recognition.v2.EouReason\x12\x38\n\x15processed_audio_start\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x36\n\x13processed_audio_end\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12=\n\x0f\x65motions_result\x18\x07 \x01(\x0b\x32$.smartspeech.recognition.v2.Emotions\x12=\n\x0cspeaker_info\x18\x08 \x01(\x0b\x32\'.smartspeech.recognition.v2.SpeakerInfo\"\'\n\rInsightResult\x12\x16\n\x0einsight_result\x18\n \x01(\t\"\x92\x01\n\tVADResult\x12\x0f\n\x07\x63hannel\x18\x01 \x01(\x05\x12\x37\n\x14processed_audio_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x18utterance_detection_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x1e\n\x0cOptionalBool\x12\x0e\n\x06\x65nable\x18\x01 \x01(\x08\"\x96\x08\n\x12RecognitionOptions\x12T\n\x0e\x61udio_encoding\x18\x01 \x01(\x0e\x32<.smartspeech.recognition.v2.RecognitionOptions.AudioEncoding\x12\x13\n\x0bsample_rate\x18\x02 \x01(\x05\x12\x16\n\x0e\x63hannels_count\x18\x03 \x01(\x05\x12\x10\n\x08language\x18\x04 \x01(\t\x12\r\n\x05model\x18\x05 \x01(\t\x12H\n\x16\x65nable_multi_utterance\x18\x06 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12H\n\x16\x65nable_partial_results\x18\x07 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12\x18\n\x10hypotheses_count\x18\x08 \x01(\x05\x12\x34\n\x11no_speech_timeout\x18\t \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x35\n\x12max_speech_timeout\x18\n \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x30\n\x05hints\x18\x0b \x01(\x0b\x32!.smartspeech.recognition.v2.Hints\x12X\n\x1aspeaker_separation_options\x18\x0c \x01(\x0b\x32\x34.smartspeech.recognition.v2.SpeakerSeparationOptions\x12O\n\x15normalization_options\x18\r \x01(\x0b\x32\x30.smartspeech.recognition.v2.NormalizationOptions\x12\x16\n\x0einsight_models\x18\x0e \x03(\t\x12<\n\nenable_vad\x18\x0f \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12H\n\x16\x63ustom_ws_flow_control\x18\x10 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12H\n\x16\x65nable_long_utterances\x18\x11 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\"z\n\rAudioEncoding\x12\x1e\n\x1a\x41UDIO_ENCODING_UNSPECIFIED\x10\x00\x12\r\n\tPCM_S16LE\x10\x01\x12\x08\n\x04OPUS\x10\x02\x12\x07\n\x03MP3\x10\x03\x12\x08\n\x04\x46LAC\x10\x04\x12\x08\n\x04\x41LAW\x10\x05\x12\t\n\x05MULAW\x10\x06\x12\x08\n\x04G729\x10\x07\"\x93\x03\n\x14NormalizationOptions\x12\x38\n\x06\x65nable\x18\x01 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12\x42\n\x10profanity_filter\x18\x02 
\x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12=\n\x0bpunctuation\x18\x03 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12@\n\x0e\x63\x61pitalization\x18\x04 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12:\n\x08question\x18\x05 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\x12@\n\x0e\x66orce_cyrillic\x18\x06 \x01(\x0b\x32(.smartspeech.recognition.v2.OptionalBool\"^\n\x05Hints\x12\r\n\x05words\x18\x01 \x03(\t\x12\x16\n\x0e\x65nable_letters\x18\x02 \x01(\x08\x12.\n\x0b\x65ou_timeout\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\"[\n\x18SpeakerSeparationOptions\x12\x0e\n\x06\x65nable\x18\x01 \x01(\x08\x12 \n\x18\x65nable_only_main_speaker\x18\x02 \x01(\x08\x12\r\n\x05\x63ount\x18\x03 \x01(\x05\"\xc5\x02\n\nHypothesis\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x17\n\x0fnormalized_text\x18\x02 \x01(\t\x12(\n\x05start\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12&\n\x03\x65nd\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x0fword_alignments\x18\x05 \x03(\x0b\x32\x34.smartspeech.recognition.v2.Hypothesis.WordAlignment\x1ao\n\rWordAlignment\x12\x0c\n\x04word\x18\x01 \x01(\t\x12(\n\x05start\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12&\n\x03\x65nd\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xb5\x01\n\x08\x45motions\x12\x10\n\x08positive\x18\x01 \x01(\x02\x12\x0f\n\x07neutral\x18\x02 \x01(\x02\x12\x10\n\x08negative\x18\x03 \x01(\x02\x12\x12\n\npositive_a\x18\x04 \x01(\x02\x12\x11\n\tneutral_a\x18\x05 \x01(\x02\x12\x12\n\nnegative_a\x18\x06 \x01(\x02\x12\x12\n\npositive_t\x18\x07 \x01(\x02\x12\x11\n\tneutral_t\x18\x08 \x01(\x02\x12\x12\n\nnegative_t\x18\t \x01(\x02\"P\n\x0b\x42\x61\x63kendInfo\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x15\n\rmodel_version\x18\x02 \x01(\t\x12\x16\n\x0eserver_version\x18\x03 \x01(\t\"B\n\x0bSpeakerInfo\x12\x12\n\nspeaker_id\x18\x01 \x01(\x05\x12\x1f\n\x17main_speaker_confidence\x18\x02 \x01(\x02*X\n\tEouReason\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07ORGANIC\x10\x01\x12\x15\n\x11NO_SPEECH_TIMEOUT\x10\x02\x12\x16\n\x12MAX_SPEECH_TIMEOUT\x10\x03\x32\x7f\n\x0bSmartSpeech\x12p\n\tRecognize\x12..smartspeech.recognition.v2.RecognitionRequest\x1a/.smartspeech.recognition.v2.RecognitionResponse(\x01\x30\x01\x42\x13\n\x04TODOZ\x0b./;protocolb\x06proto3')
20 |
21 | _EOUREASON = DESCRIPTOR.enum_types_by_name['EouReason']
22 | EouReason = enum_type_wrapper.EnumTypeWrapper(_EOUREASON)
23 | UNSPECIFIED = 0
24 | ORGANIC = 1
25 | NO_SPEECH_TIMEOUT = 2
26 | MAX_SPEECH_TIMEOUT = 3
27 |
28 |
29 | _RECOGNITIONREQUEST = DESCRIPTOR.message_types_by_name['RecognitionRequest']
30 | _RECOGNITIONRESPONSE = DESCRIPTOR.message_types_by_name['RecognitionResponse']
31 | _TRANSCRIPTION = DESCRIPTOR.message_types_by_name['Transcription']
32 | _INSIGHTRESULT = DESCRIPTOR.message_types_by_name['InsightResult']
33 | _VADRESULT = DESCRIPTOR.message_types_by_name['VADResult']
34 | _OPTIONALBOOL = DESCRIPTOR.message_types_by_name['OptionalBool']
35 | _RECOGNITIONOPTIONS = DESCRIPTOR.message_types_by_name['RecognitionOptions']
36 | _NORMALIZATIONOPTIONS = DESCRIPTOR.message_types_by_name['NormalizationOptions']
37 | _HINTS = DESCRIPTOR.message_types_by_name['Hints']
38 | _SPEAKERSEPARATIONOPTIONS = DESCRIPTOR.message_types_by_name['SpeakerSeparationOptions']
39 | _HYPOTHESIS = DESCRIPTOR.message_types_by_name['Hypothesis']
40 | _HYPOTHESIS_WORDALIGNMENT = _HYPOTHESIS.nested_types_by_name['WordAlignment']
41 | _EMOTIONS = DESCRIPTOR.message_types_by_name['Emotions']
42 | _BACKENDINFO = DESCRIPTOR.message_types_by_name['BackendInfo']
43 | _SPEAKERINFO = DESCRIPTOR.message_types_by_name['SpeakerInfo']
44 | _RECOGNITIONOPTIONS_AUDIOENCODING = _RECOGNITIONOPTIONS.enum_types_by_name['AudioEncoding']
45 | RecognitionRequest = _reflection.GeneratedProtocolMessageType('RecognitionRequest', (_message.Message,), {
46 | 'DESCRIPTOR' : _RECOGNITIONREQUEST,
47 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
48 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.RecognitionRequest)
49 | })
50 | _sym_db.RegisterMessage(RecognitionRequest)
51 |
52 | RecognitionResponse = _reflection.GeneratedProtocolMessageType('RecognitionResponse', (_message.Message,), {
53 | 'DESCRIPTOR' : _RECOGNITIONRESPONSE,
54 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
55 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.RecognitionResponse)
56 | })
57 | _sym_db.RegisterMessage(RecognitionResponse)
58 |
59 | Transcription = _reflection.GeneratedProtocolMessageType('Transcription', (_message.Message,), {
60 | 'DESCRIPTOR' : _TRANSCRIPTION,
61 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
62 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.Transcription)
63 | })
64 | _sym_db.RegisterMessage(Transcription)
65 |
66 | InsightResult = _reflection.GeneratedProtocolMessageType('InsightResult', (_message.Message,), {
67 | 'DESCRIPTOR' : _INSIGHTRESULT,
68 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
69 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.InsightResult)
70 | })
71 | _sym_db.RegisterMessage(InsightResult)
72 |
73 | VADResult = _reflection.GeneratedProtocolMessageType('VADResult', (_message.Message,), {
74 | 'DESCRIPTOR' : _VADRESULT,
75 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
76 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.VADResult)
77 | })
78 | _sym_db.RegisterMessage(VADResult)
79 |
80 | OptionalBool = _reflection.GeneratedProtocolMessageType('OptionalBool', (_message.Message,), {
81 | 'DESCRIPTOR' : _OPTIONALBOOL,
82 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
83 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.OptionalBool)
84 | })
85 | _sym_db.RegisterMessage(OptionalBool)
86 |
87 | RecognitionOptions = _reflection.GeneratedProtocolMessageType('RecognitionOptions', (_message.Message,), {
88 | 'DESCRIPTOR' : _RECOGNITIONOPTIONS,
89 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
90 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.RecognitionOptions)
91 | })
92 | _sym_db.RegisterMessage(RecognitionOptions)
93 |
94 | NormalizationOptions = _reflection.GeneratedProtocolMessageType('NormalizationOptions', (_message.Message,), {
95 | 'DESCRIPTOR' : _NORMALIZATIONOPTIONS,
96 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
97 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.NormalizationOptions)
98 | })
99 | _sym_db.RegisterMessage(NormalizationOptions)
100 |
101 | Hints = _reflection.GeneratedProtocolMessageType('Hints', (_message.Message,), {
102 | 'DESCRIPTOR' : _HINTS,
103 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
104 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.Hints)
105 | })
106 | _sym_db.RegisterMessage(Hints)
107 |
108 | SpeakerSeparationOptions = _reflection.GeneratedProtocolMessageType('SpeakerSeparationOptions', (_message.Message,), {
109 | 'DESCRIPTOR' : _SPEAKERSEPARATIONOPTIONS,
110 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
111 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.SpeakerSeparationOptions)
112 | })
113 | _sym_db.RegisterMessage(SpeakerSeparationOptions)
114 |
115 | Hypothesis = _reflection.GeneratedProtocolMessageType('Hypothesis', (_message.Message,), {
116 |
117 | 'WordAlignment' : _reflection.GeneratedProtocolMessageType('WordAlignment', (_message.Message,), {
118 | 'DESCRIPTOR' : _HYPOTHESIS_WORDALIGNMENT,
119 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
120 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.Hypothesis.WordAlignment)
121 | })
122 | ,
123 | 'DESCRIPTOR' : _HYPOTHESIS,
124 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
125 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.Hypothesis)
126 | })
127 | _sym_db.RegisterMessage(Hypothesis)
128 | _sym_db.RegisterMessage(Hypothesis.WordAlignment)
129 |
130 | Emotions = _reflection.GeneratedProtocolMessageType('Emotions', (_message.Message,), {
131 | 'DESCRIPTOR' : _EMOTIONS,
132 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
133 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.Emotions)
134 | })
135 | _sym_db.RegisterMessage(Emotions)
136 |
137 | BackendInfo = _reflection.GeneratedProtocolMessageType('BackendInfo', (_message.Message,), {
138 | 'DESCRIPTOR' : _BACKENDINFO,
139 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
140 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.BackendInfo)
141 | })
142 | _sym_db.RegisterMessage(BackendInfo)
143 |
144 | SpeakerInfo = _reflection.GeneratedProtocolMessageType('SpeakerInfo', (_message.Message,), {
145 | 'DESCRIPTOR' : _SPEAKERINFO,
146 | '__module__' : 'custom_components.salutespeech.api.grpc.recognition_pb2'
147 | # @@protoc_insertion_point(class_scope:smartspeech.recognition.v2.SpeakerInfo)
148 | })
149 | _sym_db.RegisterMessage(SpeakerInfo)
150 |
151 | _SMARTSPEECH = DESCRIPTOR.services_by_name['SmartSpeech']
152 | if _descriptor._USE_C_DESCRIPTORS == False:
153 |
154 | DESCRIPTOR._options = None
155 | DESCRIPTOR._serialized_options = b'\n\004TODOZ\013./;protocol'
156 | _EOUREASON._serialized_start=3461
157 | _EOUREASON._serialized_end=3549
158 | _RECOGNITIONREQUEST._serialized_start=121
159 | _RECOGNITIONREQUEST._serialized_end=242
160 | _RECOGNITIONRESPONSE._serialized_start=245
161 | _RECOGNITIONRESPONSE._serialized_end=527
162 | _TRANSCRIPTION._serialized_start=530
163 | _TRANSCRIPTION._serialized_end=931
164 | _INSIGHTRESULT._serialized_start=933
165 | _INSIGHTRESULT._serialized_end=972
166 | _VADRESULT._serialized_start=975
167 | _VADRESULT._serialized_end=1121
168 | _OPTIONALBOOL._serialized_start=1123
169 | _OPTIONALBOOL._serialized_end=1153
170 | _RECOGNITIONOPTIONS._serialized_start=1156
171 | _RECOGNITIONOPTIONS._serialized_end=2202
172 | _RECOGNITIONOPTIONS_AUDIOENCODING._serialized_start=2080
173 | _RECOGNITIONOPTIONS_AUDIOENCODING._serialized_end=2202
174 | _NORMALIZATIONOPTIONS._serialized_start=2205
175 | _NORMALIZATIONOPTIONS._serialized_end=2608
176 | _HINTS._serialized_start=2610
177 | _HINTS._serialized_end=2704
178 | _SPEAKERSEPARATIONOPTIONS._serialized_start=2706
179 | _SPEAKERSEPARATIONOPTIONS._serialized_end=2797
180 | _HYPOTHESIS._serialized_start=2800
181 | _HYPOTHESIS._serialized_end=3125
182 | _HYPOTHESIS_WORDALIGNMENT._serialized_start=3014
183 | _HYPOTHESIS_WORDALIGNMENT._serialized_end=3125
184 | _EMOTIONS._serialized_start=3128
185 | _EMOTIONS._serialized_end=3309
186 | _BACKENDINFO._serialized_start=3311
187 | _BACKENDINFO._serialized_end=3391
188 | _SPEAKERINFO._serialized_start=3393
189 | _SPEAKERINFO._serialized_end=3459
190 | _SMARTSPEECH._serialized_start=3551
191 | _SMARTSPEECH._serialized_end=3678
192 | # @@protoc_insertion_point(module_scope)
193 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Mozilla Public License Version 2.0
2 | ==================================
3 |
4 | 1. Definitions
5 | --------------
6 |
7 | 1.1. "Contributor"
8 | means each individual or legal entity that creates, contributes to
9 | the creation of, or owns Covered Software.
10 |
11 | 1.2. "Contributor Version"
12 | means the combination of the Contributions of others (if any) used
13 | by a Contributor and that particular Contributor's Contribution.
14 |
15 | 1.3. "Contribution"
16 | means Covered Software of a particular Contributor.
17 |
18 | 1.4. "Covered Software"
19 | means Source Code Form to which the initial Contributor has attached
20 | the notice in Exhibit A, the Executable Form of such Source Code
21 | Form, and Modifications of such Source Code Form, in each case
22 | including portions thereof.
23 |
24 | 1.5. "Incompatible With Secondary Licenses"
25 | means
26 |
27 | (a) that the initial Contributor has attached the notice described
28 | in Exhibit B to the Covered Software; or
29 |
30 | (b) that the Covered Software was made available under the terms of
31 | version 1.1 or earlier of the License, but not also under the
32 | terms of a Secondary License.
33 |
34 | 1.6. "Executable Form"
35 | means any form of the work other than Source Code Form.
36 |
37 | 1.7. "Larger Work"
38 | means a work that combines Covered Software with other material, in
39 | a separate file or files, that is not Covered Software.
40 |
41 | 1.8. "License"
42 | means this document.
43 |
44 | 1.9. "Licensable"
45 | means having the right to grant, to the maximum extent possible,
46 | whether at the time of the initial grant or subsequently, any and
47 | all of the rights conveyed by this License.
48 |
49 | 1.10. "Modifications"
50 | means any of the following:
51 |
52 | (a) any file in Source Code Form that results from an addition to,
53 | deletion from, or modification of the contents of Covered
54 | Software; or
55 |
56 | (b) any new file in Source Code Form that contains any Covered
57 | Software.
58 |
59 | 1.11. "Patent Claims" of a Contributor
60 | means any patent claim(s), including without limitation, method,
61 | process, and apparatus claims, in any patent Licensable by such
62 | Contributor that would be infringed, but for the grant of the
63 | License, by the making, using, selling, offering for sale, having
64 | made, import, or transfer of either its Contributions or its
65 | Contributor Version.
66 |
67 | 1.12. "Secondary License"
68 | means either the GNU General Public License, Version 2.0, the GNU
69 | Lesser General Public License, Version 2.1, the GNU Affero General
70 | Public License, Version 3.0, or any later versions of those
71 | licenses.
72 |
73 | 1.13. "Source Code Form"
74 | means the form of the work preferred for making modifications.
75 |
76 | 1.14. "You" (or "Your")
77 | means an individual or a legal entity exercising rights under this
78 | License. For legal entities, "You" includes any entity that
79 | controls, is controlled by, or is under common control with You. For
80 | purposes of this definition, "control" means (a) the power, direct
81 | or indirect, to cause the direction or management of such entity,
82 | whether by contract or otherwise, or (b) ownership of more than
83 | fifty percent (50%) of the outstanding shares or beneficial
84 | ownership of such entity.
85 |
86 | 2. License Grants and Conditions
87 | --------------------------------
88 |
89 | 2.1. Grants
90 |
91 | Each Contributor hereby grants You a world-wide, royalty-free,
92 | non-exclusive license:
93 |
94 | (a) under intellectual property rights (other than patent or trademark)
95 | Licensable by such Contributor to use, reproduce, make available,
96 | modify, display, perform, distribute, and otherwise exploit its
97 | Contributions, either on an unmodified basis, with Modifications, or
98 | as part of a Larger Work; and
99 |
100 | (b) under Patent Claims of such Contributor to make, use, sell, offer
101 | for sale, have made, import, and otherwise transfer either its
102 | Contributions or its Contributor Version.
103 |
104 | 2.2. Effective Date
105 |
106 | The licenses granted in Section 2.1 with respect to any Contribution
107 | become effective for each Contribution on the date the Contributor first
108 | distributes such Contribution.
109 |
110 | 2.3. Limitations on Grant Scope
111 |
112 | The licenses granted in this Section 2 are the only rights granted under
113 | this License. No additional rights or licenses will be implied from the
114 | distribution or licensing of Covered Software under this License.
115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a
116 | Contributor:
117 |
118 | (a) for any code that a Contributor has removed from Covered Software;
119 | or
120 |
121 | (b) for infringements caused by: (i) Your and any other third party's
122 | modifications of Covered Software, or (ii) the combination of its
123 | Contributions with other software (except as part of its Contributor
124 | Version); or
125 |
126 | (c) under Patent Claims infringed by Covered Software in the absence of
127 | its Contributions.
128 |
129 | This License does not grant any rights in the trademarks, service marks,
130 | or logos of any Contributor (except as may be necessary to comply with
131 | the notice requirements in Section 3.4).
132 |
133 | 2.4. Subsequent Licenses
134 |
135 | No Contributor makes additional grants as a result of Your choice to
136 | distribute the Covered Software under a subsequent version of this
137 | License (see Section 10.2) or under the terms of a Secondary License (if
138 | permitted under the terms of Section 3.3).
139 |
140 | 2.5. Representation
141 |
142 | Each Contributor represents that the Contributor believes its
143 | Contributions are its original creation(s) or it has sufficient rights
144 | to grant the rights to its Contributions conveyed by this License.
145 |
146 | 2.6. Fair Use
147 |
148 | This License is not intended to limit any rights You have under
149 | applicable copyright doctrines of fair use, fair dealing, or other
150 | equivalents.
151 |
152 | 2.7. Conditions
153 |
154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
155 | in Section 2.1.
156 |
157 | 3. Responsibilities
158 | -------------------
159 |
160 | 3.1. Distribution of Source Form
161 |
162 | All distribution of Covered Software in Source Code Form, including any
163 | Modifications that You create or to which You contribute, must be under
164 | the terms of this License. You must inform recipients that the Source
165 | Code Form of the Covered Software is governed by the terms of this
166 | License, and how they can obtain a copy of this License. You may not
167 | attempt to alter or restrict the recipients' rights in the Source Code
168 | Form.
169 |
170 | 3.2. Distribution of Executable Form
171 |
172 | If You distribute Covered Software in Executable Form then:
173 |
174 | (a) such Covered Software must also be made available in Source Code
175 | Form, as described in Section 3.1, and You must inform recipients of
176 | the Executable Form how they can obtain a copy of such Source Code
177 | Form by reasonable means in a timely manner, at a charge no more
178 | than the cost of distribution to the recipient; and
179 |
180 | (b) You may distribute such Executable Form under the terms of this
181 | License, or sublicense it under different terms, provided that the
182 | license for the Executable Form does not attempt to limit or alter
183 | the recipients' rights in the Source Code Form under this License.
184 |
185 | 3.3. Distribution of a Larger Work
186 |
187 | You may create and distribute a Larger Work under terms of Your choice,
188 | provided that You also comply with the requirements of this License for
189 | the Covered Software. If the Larger Work is a combination of Covered
190 | Software with a work governed by one or more Secondary Licenses, and the
191 | Covered Software is not Incompatible With Secondary Licenses, this
192 | License permits You to additionally distribute such Covered Software
193 | under the terms of such Secondary License(s), so that the recipient of
194 | the Larger Work may, at their option, further distribute the Covered
195 | Software under the terms of either this License or such Secondary
196 | License(s).
197 |
198 | 3.4. Notices
199 |
200 | You may not remove or alter the substance of any license notices
201 | (including copyright notices, patent notices, disclaimers of warranty,
202 | or limitations of liability) contained within the Source Code Form of
203 | the Covered Software, except that You may alter any license notices to
204 | the extent required to remedy known factual inaccuracies.
205 |
206 | 3.5. Application of Additional Terms
207 |
208 | You may choose to offer, and to charge a fee for, warranty, support,
209 | indemnity or liability obligations to one or more recipients of Covered
210 | Software. However, You may do so only on Your own behalf, and not on
211 | behalf of any Contributor. You must make it absolutely clear that any
212 | such warranty, support, indemnity, or liability obligation is offered by
213 | You alone, and You hereby agree to indemnify every Contributor for any
214 | liability incurred by such Contributor as a result of warranty, support,
215 | indemnity or liability terms You offer. You may include additional
216 | disclaimers of warranty and limitations of liability specific to any
217 | jurisdiction.
218 |
219 | 4. Inability to Comply Due to Statute or Regulation
220 | ---------------------------------------------------
221 |
222 | If it is impossible for You to comply with any of the terms of this
223 | License with respect to some or all of the Covered Software due to
224 | statute, judicial order, or regulation then You must: (a) comply with
225 | the terms of this License to the maximum extent possible; and (b)
226 | describe the limitations and the code they affect. Such description must
227 | be placed in a text file included with all distributions of the Covered
228 | Software under this License. Except to the extent prohibited by statute
229 | or regulation, such description must be sufficiently detailed for a
230 | recipient of ordinary skill to be able to understand it.
231 |
232 | 5. Termination
233 | --------------
234 |
235 | 5.1. The rights granted under this License will terminate automatically
236 | if You fail to comply with any of its terms. However, if You become
237 | compliant, then the rights granted under this License from a particular
238 | Contributor are reinstated (a) provisionally, unless and until such
239 | Contributor explicitly and finally terminates Your grants, and (b) on an
240 | ongoing basis, if such Contributor fails to notify You of the
241 | non-compliance by some reasonable means prior to 60 days after You have
242 | come back into compliance. Moreover, Your grants from a particular
243 | Contributor are reinstated on an ongoing basis if such Contributor
244 | notifies You of the non-compliance by some reasonable means, this is the
245 | first time You have received notice of non-compliance with this License
246 | from such Contributor, and You become compliant prior to 30 days after
247 | Your receipt of the notice.
248 |
249 | 5.2. If You initiate litigation against any entity by asserting a patent
250 | infringement claim (excluding declaratory judgment actions,
251 | counter-claims, and cross-claims) alleging that a Contributor Version
252 | directly or indirectly infringes any patent, then the rights granted to
253 | You by any and all Contributors for the Covered Software under Section
254 | 2.1 of this License shall terminate.
255 |
256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all
257 | end user license agreements (excluding distributors and resellers) which
258 | have been validly granted by You or Your distributors under this License
259 | prior to termination shall survive termination.
260 |
261 | ************************************************************************
262 | * *
263 | * 6. Disclaimer of Warranty *
264 | * ------------------------- *
265 | * *
266 | * Covered Software is provided under this License on an "as is" *
267 | * basis, without warranty of any kind, either expressed, implied, or *
268 | * statutory, including, without limitation, warranties that the *
269 | * Covered Software is free of defects, merchantable, fit for a *
270 | * particular purpose or non-infringing. The entire risk as to the *
271 | * quality and performance of the Covered Software is with You. *
272 | * Should any Covered Software prove defective in any respect, You *
273 | * (not any Contributor) assume the cost of any necessary servicing, *
274 | * repair, or correction. This disclaimer of warranty constitutes an *
275 | * essential part of this License. No use of any Covered Software is *
276 | * authorized under this License except under this disclaimer. *
277 | * *
278 | ************************************************************************
279 |
280 | ************************************************************************
281 | * *
282 | * 7. Limitation of Liability *
283 | * -------------------------- *
284 | * *
285 | * Under no circumstances and under no legal theory, whether tort *
286 | * (including negligence), contract, or otherwise, shall any *
287 | * Contributor, or anyone who distributes Covered Software as *
288 | * permitted above, be liable to You for any direct, indirect, *
289 | * special, incidental, or consequential damages of any character *
290 | * including, without limitation, damages for lost profits, loss of *
291 | * goodwill, work stoppage, computer failure or malfunction, or any *
292 | * and all other commercial damages or losses, even if such party *
293 | * shall have been informed of the possibility of such damages. This *
294 | * limitation of liability shall not apply to liability for death or *
295 | * personal injury resulting from such party's negligence to the *
296 | * extent applicable law prohibits such limitation. Some *
297 | * jurisdictions do not allow the exclusion or limitation of *
298 | * incidental or consequential damages, so this exclusion and *
299 | * limitation may not apply to You. *
300 | * *
301 | ************************************************************************
302 |
303 | 8. Litigation
304 | -------------
305 |
306 | Any litigation relating to this License may be brought only in the
307 | courts of a jurisdiction where the defendant maintains its principal
308 | place of business and such litigation shall be governed by laws of that
309 | jurisdiction, without reference to its conflict-of-law provisions.
310 | Nothing in this Section shall prevent a party's ability to bring
311 | cross-claims or counter-claims.
312 |
313 | 9. Miscellaneous
314 | ----------------
315 |
316 | This License represents the complete agreement concerning the subject
317 | matter hereof. If any provision of this License is held to be
318 | unenforceable, such provision shall be reformed only to the extent
319 | necessary to make it enforceable. Any law or regulation which provides
320 | that the language of a contract shall be construed against the drafter
321 | shall not be used to construe this License against a Contributor.
322 |
323 | 10. Versions of the License
324 | ---------------------------
325 |
326 | 10.1. New Versions
327 |
328 | Mozilla Foundation is the license steward. Except as provided in Section
329 | 10.3, no one other than the license steward has the right to modify or
330 | publish new versions of this License. Each version will be given a
331 | distinguishing version number.
332 |
333 | 10.2. Effect of New Versions
334 |
335 | You may distribute the Covered Software under the terms of the version
336 | of the License under which You originally received the Covered Software,
337 | or under the terms of any subsequent version published by the license
338 | steward.
339 |
340 | 10.3. Modified Versions
341 |
342 | If you create software not governed by this License, and you want to
343 | create a new license for such software, you may create and use a
344 | modified version of this License if you rename the license and remove
345 | any references to the name of the license steward (except to note that
346 | such modified license differs from this License).
347 |
348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary
349 | Licenses
350 |
351 | If You choose to distribute Source Code Form that is Incompatible With
352 | Secondary Licenses under the terms of this version of the License, the
353 | notice described in Exhibit B of this License must be attached.
354 |
355 | Exhibit A - Source Code Form License Notice
356 | -------------------------------------------
357 |
358 | This Source Code Form is subject to the terms of the Mozilla Public
359 | License, v. 2.0. If a copy of the MPL was not distributed with this
360 | file, You can obtain one at http://mozilla.org/MPL/2.0/.
361 |
362 | If it is not possible or desirable to put the notice in a particular
363 | file, then You may include the notice in a location (such as a LICENSE
364 | file in a relevant directory) where a recipient would be likely to look
365 | for such a notice.
366 |
367 | You may add additional accurate notices of copyright ownership.
368 |
369 | Exhibit B - "Incompatible With Secondary Licenses" Notice
370 | ---------------------------------------------------------
371 |
372 | This Source Code Form is "Incompatible With Secondary Licenses", as
373 | defined by the Mozilla Public License, v. 2.0.
374 |
--------------------------------------------------------------------------------