├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── README.md ├── deepaffects ├── __init__.py ├── api_client.py ├── apis │ ├── __init__.py │ ├── denoise_api.py │ ├── diarize_api.py │ ├── diarize_api_v2.py │ ├── ellipsis_api.py │ ├── emotion_api.py │ └── featurize_api.py ├── configuration.py ├── models │ ├── __init__.py │ ├── async_response.py │ ├── audio.py │ ├── audio_features.py │ ├── diarize_audio.py │ ├── diarize_segment.py │ └── emotion_score.py ├── realtime │ ├── __init__.py │ ├── api.py │ ├── deepaffects_realtime_pb2.py │ ├── deepaffects_realtime_pb2_grpc.py │ ├── types.py │ └── util.py └── rest.py ├── docs ├── AsyncResponse.md ├── Audio.md ├── DenoiseApi.md ├── DiarizeApi.md ├── DiarizeApiV2.md ├── DiarizeAudio.md ├── EllipsisApi.md ├── EmotionApi.md ├── EmotionScore.md └── FeaturizeApi.md ├── examples ├── diarize_emotion_example.py ├── emotion_identify_example.py ├── playlist_chunk_generator.py ├── speaker_identify_example.py └── ticker_based_earnings_call_identification.py ├── requirements.txt ├── setup.py ├── test-requirements.txt └── test ├── __init__.py ├── data ├── clean.wav ├── happy.mp3 ├── noisy.wav └── reconstructed.wav ├── test_async_response.py ├── test_audio.py ├── test_base_setup.py ├── test_denoise_api.py ├── test_diarize_api.py ├── test_diarize_api_v2.py ├── test_diarize_audio.py ├── test_emotion_api.py ├── test_emotion_score.py └── test_featurize_api.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | # IntelliJ project settings 92 | .idea/ 93 | 94 | #vscode 95 | .vscode 96 | 97 | .DS_Store 98 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # ref: https://docs.travis-ci.com/user/languages/python 2 | language: python 3 | python: 4 | - "2.7" 5 | - "3.4" 6 | - "3.5" 7 | - "3.6" 8 | #- "3.5-dev" # 3.5 development branch 9 | #- "nightly" # points to the latest development branch e.g. 
3.6-dev 10 | before_install: 11 | - sudo apt-get install mediainfo 12 | # command to install dependencies 13 | install: "pip install -r requirements.txt" 14 | # command to run tests 15 | script: nosetests 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 SeerNet 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | include docs/*.md 4 | include test/data/*.wav 5 | include test/data/*.mp3 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # deepaffects-python 2 | 3 | [![Build Status](https://travis-ci.org/SEERNET/deepaffects-python.svg?branch=master)](https://travis-ci.org/SEERNET/deepaffects-python) 4 | [![PyPI version](https://badge.fury.io/py/deepaffects.svg)](https://badge.fury.io/py/deepaffects) 5 | 6 | Python client library for DeepAffects APIs 7 | 8 | ## Requirements. 9 | 10 | Python 2.7 and 3.3+ 11 | 12 | pymediainfo >= 2.1.9, this is a wrapper library around [mediainfo](https://mediaarea.net/en/MediaInfo), which we use to 13 | extract the sampling rate and codec information from audio files. 14 | 15 | ## Installation 16 | 17 | ### pip install 18 | 19 | The python package can be installed directly from pip using: 20 | 21 | ```bash 22 | pip install deepaffects 23 | 24 | ``` 25 | ### pip install from github 26 | 27 | The python package is hosted on Github, you can install directly from Github 28 | 29 | ```sh 30 | pip install git+https://github.com/SEERNET/deepaffects-python.git 31 | ``` 32 | (you may need to run `pip` with root permission: `sudo pip install git+https://github.com/SEERNET/deepaffects-python.git`) 33 | 34 | Then import the package: 35 | ```python 36 | import deepaffects 37 | ``` 38 | 39 | ### Setuptools 40 | 41 | Install via [Setuptools](http://pypi.python.org/pypi/setuptools). 
42 | 43 | ```sh 44 | python setup.py install --user 45 | ``` 46 | (or `sudo python setup.py install` to install the package for all users) 47 | 48 | Then import the package: 49 | ```python 50 | import deepaffects 51 | ``` 52 | 53 | ## Documentation for Authorization 54 | 55 | DeepAffects API authenticates all the api requests via API Key. 56 | 57 | For API key registration and setup, checkout our [quickstart guide](https://developers.deepaffects.com/docs/#quickstart-guide) 58 | 59 | ### UserSecurity 60 | 61 | - **Type**: API key 62 | - **API key parameter name**: apikey 63 | - **Location**: URL query string 64 | 65 | 66 | ## Getting Started 67 | 68 | Please follow the [installation](#installation) instruction and execute the following python code: 69 | 70 | 71 | ```python 72 | from __future__ import print_function 73 | import time 74 | import deepaffects 75 | from deepaffects.rest import ApiException 76 | from pprint import pprint 77 | 78 | # Configure API key authorization: UserSecurity 79 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 80 | # create an instance of the API class 81 | api_instance = deepaffects.DenoiseApi() 82 | body = deepaffects.Audio.from_file('/path/to/file') # Audio | Audio object that needs to be denoised. 83 | webhook = 'webhook_example' # str | The webhook url where result from async resource is posted 84 | request_id = 'request_id_example' # str | Unique identifier for the request (optional) 85 | 86 | try: 87 | # Denoise an audio file 88 | api_response = api_instance.async_denoise_audio(body, webhook, request_id=request_id) 89 | pprint(api_response) 90 | except ApiException as e: 91 | print("Exception when calling DenoiseApi->async_denoise_audio: %s\n" % e) 92 | 93 | ``` 94 | 95 | ## Documentation for API Endpoints 96 | 97 | All URIs are relative to *https://localhost* 98 | 99 | Class | Method | HTTP request | Description 100 | ----------------- | --------------------------------------- | ------------- | ------------- 101 | *DenoiseApi* | [async_denoise_audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/DenoiseApi.md#async_denoise_audio) | **POST** /api/v1/async/denoise | Denoise an audio file 102 | *DenoiseApi* | [sync_denoise_audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/DenoiseApi.md#sync_denoise_audio) | **POST** /api/v1/sync/denoise | Denoise an audio file 103 | *DiarizeApiV2* | [async_diarize_audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/DiarizeApiV2.md#async_diarize_audio) | **POST** /api/v2/async/diarize | Diarize an audio file 104 | *DiarizeApi* | [async_diarize_audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/DiarizeApi.md#async_diarize_audio) | **POST** /api/v1/async/diarize | Diarize an audio file (Legacy) 105 | *DiarizeApi* | [sync_diarize_audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/DiarizeApi.md#sync_diarize_audio) | **POST** /api/v1/sync/diarize | Diarize an audio file (Legacy) 106 | *EmotionApi* | [async_recognise_emotion](https://github.com/SEERNET/deepaffects-python/blob/master/docs/EmotionApi.md#async_recognise_emotion) | **POST** /api/v1/async/recognise_emotion | Find emotion in an audio file 107 | *EmotionApi* | [sync_recognise_emotion](https://github.com/SEERNET/deepaffects-python/blob/master/docs/EmotionApi.md#sync_recognise_emotion) | **POST** /api/v1/sync/recognise_emotion | Find emotion in an audio file 108 | *FeaturizeApi* | 
[async_featurize_audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/FeaturizeApi.md#async_featurize_audio) | **POST** /api/v1/async/featurize | featurize an audio file 109 | *FeaturizeApi* | [sync_featurize_audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/FeaturizeApi.md#sync_featurize_audio) | **POST** /api/v1/sync/featurize | featurize an audio file 110 | 111 | 112 | ## Documentation For Models 113 | 114 | - [AsyncResponse](https://github.com/SEERNET/deepaffects-python/blob/master/docs/AsyncResponse.md) 115 | - [Audio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/Audio.md) 116 | - [DiarizeAudio](https://github.com/SEERNET/deepaffects-python/blob/master/docs/DiarizeAudio.md) 117 | - [EmotionScore](https://github.com/SEERNET/deepaffects-python/blob/master/docs/EmotionScore.md) 118 | 119 | 120 | 121 | 122 | ## UserSecurity 123 | 124 | - **Type**: API key 125 | - **API key parameter name**: apikey 126 | - **Location**: URL query string 127 | 128 | 129 | ## About 130 | [DeepAffects](https://www.deepaffects.com/dashboard) is an emotional intelligence analysis engine that measures the effect emotional intelligence 131 | has on team dynamics, and provides emotional analytics that serve as the basis of insights to improve 132 | project management, performance and satisfaction across organizations, projects, and teams. To watch DeepAffects in action: check out DeepAffects [Atlassian JIRA addon](https://marketplace.atlassian.com/plugins/com.deepaffects.teams.jira/cloud/overview) and our [Github addon](https://teams.deepaffects.com/). 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /deepaffects/__init__.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: 0.1.0 9 | 10 | Generated by: https://github.com/swagger-api/swagger-codegen.git 11 | """ 12 | 13 | 14 | from __future__ import absolute_import 15 | 16 | # import models into sdk package 17 | from .models.async_response import AsyncResponse 18 | from .models.audio import Audio 19 | from .models.diarize_audio import DiarizeAudio 20 | from .models.emotion_score import EmotionScore 21 | from .models.diarize_segment import DiarizeSegment 22 | 23 | # import apis into sdk package 24 | from .apis.denoise_api import DenoiseApi 25 | from .apis.diarize_api import DiarizeApi 26 | from .apis.emotion_api import EmotionApi 27 | from .apis.featurize_api import FeaturizeApi 28 | from .apis.ellipsis_api import EllipsisAPI 29 | from .apis.diarize_api_v2 import DiarizeApiV2 30 | 31 | # import ApiClient 32 | from .api_client import ApiClient 33 | 34 | from .configuration import Configuration 35 | 36 | configuration = Configuration() 37 | -------------------------------------------------------------------------------- /deepaffects/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | # import apis into api package 4 | from .denoise_api import DenoiseApi 5 | from .diarize_api import DiarizeApi 6 | from .emotion_api import EmotionApi 7 | from .featurize_api import FeaturizeApi 8 | from .ellipsis_api import EllipsisAPI 9 | from .diarize_api_v2 import DiarizeApiV2 10 | -------------------------------------------------------------------------------- /deepaffects/apis/denoise_api.py: 
-------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | # python 2 and python 3 compatibility library 13 | from six import iteritems 14 | 15 | from ..api_client import ApiClient 16 | from ..configuration import Configuration 17 | 18 | 19 | class DenoiseApi(object): 20 | def __init__(self, api_client=None): 21 | config = Configuration() 22 | if api_client: 23 | self.api_client = api_client 24 | else: 25 | if not config.api_client: 26 | config.api_client = ApiClient() 27 | self.api_client = config.api_client 28 | 29 | def async_denoise_audio(self, body, webhook, **kwargs): 30 | """ 31 | Denoise an audio file asynchronously 32 | This method makes a synchronous HTTP request by default. To make an 33 | asynchronous HTTP request, please define a `callback` function 34 | to be invoked when receiving the response. 35 | >>> def callback_function(response): 36 | >>> pprint(response) 37 | >>> 38 | >>> thread = api.async_denoise_audio(body, webhook, callback=callback_function) 39 | 40 | :param callback function: The callback function 41 | for asynchronous request. (optional) 42 | :param Audio body: Audio object that needs to be denoised. (required) 43 | :param str webhook: The webhook url where result from async resource is posted (required) 44 | :param str request_id: Unique identifier for the request 45 | :return: AsyncResponse 46 | If the method is called asynchronously, 47 | returns the request thread. 48 | """ 49 | kwargs['_return_http_data_only'] = True 50 | if kwargs.get('callback'): 51 | return self.async_denoise_audio_with_http_info(body, webhook, **kwargs) 52 | else: 53 | (data) = self.async_denoise_audio_with_http_info(body, webhook, **kwargs) 54 | return data 55 | 56 | def async_denoise_audio_with_http_info(self, body, webhook, **kwargs): 57 | """ 58 | Denoise an audio file asynchronously 59 | This method makes a synchronous HTTP request by default. To make an 60 | asynchronous HTTP request, please define a `callback` function 61 | to be invoked when receiving the response. 62 | >>> def callback_function(response): 63 | >>> pprint(response) 64 | >>> 65 | >>> thread = api.async_denoise_audio_with_http_info(body, webhook, callback=callback_function) 66 | 67 | :param callback function: The callback function 68 | for asynchronous request. (optional) 69 | :param Audio body: Audio object that needs to be denoised. (required) 70 | :param str webhook: The webhook url where result from async resource is posted (required) 71 | :param str request_id: Unique identifier for the request 72 | :return: AsyncResponse 73 | If the method is called asynchronously, 74 | returns the request thread. 
75 | """ 76 | 77 | all_params = ['body', 'webhook', 'request_id'] 78 | all_params.append('callback') 79 | all_params.append('_return_http_data_only') 80 | all_params.append('_preload_content') 81 | all_params.append('_request_timeout') 82 | 83 | params = locals() 84 | for key, val in iteritems(params['kwargs']): 85 | if key not in all_params: 86 | raise TypeError( 87 | "Got an unexpected keyword argument '%s'" 88 | " to method async_denoise_audio" % key 89 | ) 90 | params[key] = val 91 | del params['kwargs'] 92 | # verify the required parameter 'body' is set 93 | if ('body' not in params) or (params['body'] is None): 94 | raise ValueError("Missing the required parameter `body` when calling `async_denoise_audio`") 95 | # verify the required parameter 'webhook' is set 96 | if ('webhook' not in params) or (params['webhook'] is None): 97 | raise ValueError("Missing the required parameter `webhook` when calling `async_denoise_audio`") 98 | 99 | 100 | collection_formats = {} 101 | 102 | resource_path = '/audio/generic/api/v1/async/denoise'.replace('{format}', 'json') 103 | path_params = {} 104 | 105 | query_params = {} 106 | if 'webhook' in params: 107 | query_params['webhook'] = params['webhook'] 108 | if 'request_id' in params: 109 | query_params['request_id'] = params['request_id'] 110 | 111 | header_params = {} 112 | 113 | form_params = [] 114 | local_var_files = {} 115 | 116 | body_params = None 117 | if 'body' in params: 118 | body_params = params['body'] 119 | # HTTP header `Accept` 120 | header_params['Accept'] = self.api_client.\ 121 | select_header_accept(['application/json']) 122 | 123 | # HTTP header `Content-Type` 124 | header_params['Content-Type'] = self.api_client.\ 125 | select_header_content_type(['application/json']) 126 | 127 | # Authentication setting 128 | auth_settings = ['UserSecurity'] 129 | 130 | return self.api_client.call_api(resource_path, 'POST', 131 | path_params, 132 | query_params, 133 | header_params, 134 | body=body_params, 135 | post_params=form_params, 136 | files=local_var_files, 137 | response_type='AsyncResponse', 138 | auth_settings=auth_settings, 139 | callback=params.get('callback'), 140 | _return_http_data_only=params.get('_return_http_data_only'), 141 | _preload_content=params.get('_preload_content', True), 142 | _request_timeout=params.get('_request_timeout'), 143 | collection_formats=collection_formats) 144 | 145 | def sync_denoise_audio(self, body, **kwargs): 146 | """ 147 | Denoise an audio file synchronously. 148 | This method makes a synchronous HTTP request by default. To make an 149 | asynchronous HTTP request, please define a `callback` function 150 | to be invoked when receiving the response. 151 | >>> def callback_function(response): 152 | >>> pprint(response) 153 | >>> 154 | >>> thread = api.sync_denoise_audio(body, callback=callback_function) 155 | 156 | :param callback function: The callback function 157 | for asynchronous request. (optional) 158 | :param Audio body: Audio object that needs to be denoised. (required) 159 | :return: Audio 160 | If the method is called asynchronously, 161 | returns the request thread. 162 | """ 163 | kwargs['_return_http_data_only'] = True 164 | if kwargs.get('callback'): 165 | return self.sync_denoise_audio_with_http_info(body, **kwargs) 166 | else: 167 | (data) = self.sync_denoise_audio_with_http_info(body, **kwargs) 168 | return data 169 | 170 | def sync_denoise_audio_with_http_info(self, body, **kwargs): 171 | """ 172 | Denoise an audio file synchronously. 
173 | This method makes a synchronous HTTP request by default. To make an 174 | asynchronous HTTP request, please define a `callback` function 175 | to be invoked when receiving the response. 176 | >>> def callback_function(response): 177 | >>> pprint(response) 178 | >>> 179 | >>> thread = api.sync_denoise_audio_with_http_info(body, callback=callback_function) 180 | 181 | :param callback function: The callback function 182 | for asynchronous request. (optional) 183 | :param Audio body: Audio object that needs to be denoised. (required) 184 | :return: Audio 185 | If the method is called asynchronously, 186 | returns the request thread. 187 | """ 188 | 189 | all_params = ['body'] 190 | all_params.append('callback') 191 | all_params.append('_return_http_data_only') 192 | all_params.append('_preload_content') 193 | all_params.append('_request_timeout') 194 | 195 | params = locals() 196 | for key, val in iteritems(params['kwargs']): 197 | if key not in all_params: 198 | raise TypeError( 199 | "Got an unexpected keyword argument '%s'" 200 | " to method sync_denoise_audio" % key 201 | ) 202 | params[key] = val 203 | del params['kwargs'] 204 | # verify the required parameter 'body' is set 205 | if ('body' not in params) or (params['body'] is None): 206 | raise ValueError("Missing the required parameter `body` when calling `sync_denoise_audio`") 207 | 208 | 209 | collection_formats = {} 210 | 211 | resource_path = '/audio/generic/api/v1/sync/denoise'.replace('{format}', 'json') 212 | path_params = {} 213 | 214 | query_params = [] 215 | 216 | header_params = {} 217 | 218 | form_params = [] 219 | local_var_files = {} 220 | 221 | body_params = None 222 | if 'body' in params: 223 | body_params = params['body'] 224 | # HTTP header `Accept` 225 | header_params['Accept'] = self.api_client.\ 226 | select_header_accept(['application/json']) 227 | 228 | # HTTP header `Content-Type` 229 | header_params['Content-Type'] = self.api_client.\ 230 | select_header_content_type(['application/json']) 231 | 232 | # Authentication setting 233 | auth_settings = ['UserSecurity'] 234 | 235 | return self.api_client.call_api(resource_path, 'POST', 236 | path_params, 237 | query_params, 238 | header_params, 239 | body=body_params, 240 | post_params=form_params, 241 | files=local_var_files, 242 | response_type='Audio', 243 | auth_settings=auth_settings, 244 | callback=params.get('callback'), 245 | _return_http_data_only=params.get('_return_http_data_only'), 246 | _preload_content=params.get('_preload_content', True), 247 | _request_timeout=params.get('_request_timeout'), 248 | collection_formats=collection_formats) 249 | -------------------------------------------------------------------------------- /deepaffects/apis/diarize_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | # python 2 and python 3 compatibility library 13 | from six import iteritems 14 | 15 | from ..api_client import ApiClient 16 | from ..configuration import Configuration 17 | 18 | 19 | class DiarizeApi(object): 20 | 21 | def __init__(self, api_client=None): 22 | config = Configuration() 23 | if api_client: 24 | self.api_client = api_client 25 | else: 26 | if not config.api_client: 27 | config.api_client = ApiClient() 28 | self.api_client = config.api_client 29 | 30 | def async_diarize_audio(self, body, webhook, **kwargs): 31 | """ 32 | Diarize an audio file 
asynchronously. 33 | This method makes a synchronous HTTP request by default. To make an 34 | asynchronous HTTP request, please define a `callback` function 35 | to be invoked when receiving the response. 36 | >>> def callback_function(response): 37 | >>> pprint(response) 38 | >>> 39 | >>> thread = api.async_diarize_audio(body, webhook, callback=callback_function) 40 | 41 | :param callback function: The callback function 42 | for asynchronous request. (optional) 43 | :param DiarizeAudio body: Audio object that needs to be diarized. (required) 44 | :param str webhook: The webhook url where result from async resource is posted (required) 45 | :param str request_id: Unique identifier for the request 46 | :return: AsyncResponse 47 | If the method is called asynchronously, 48 | returns the request thread. 49 | """ 50 | kwargs['_return_http_data_only'] = True 51 | if kwargs.get('callback'): 52 | return self.async_diarize_audio_with_http_info(body, webhook, **kwargs) 53 | else: 54 | (data) = self.async_diarize_audio_with_http_info(body, webhook, **kwargs) 55 | return data 56 | 57 | def async_diarize_audio_with_http_info(self, body, webhook, **kwargs): 58 | """ 59 | Diarize an audio file asynchronously. 60 | This method makes a synchronous HTTP request by default. To make an 61 | asynchronous HTTP request, please define a `callback` function 62 | to be invoked when receiving the response. 63 | >>> def callback_function(response): 64 | >>> pprint(response) 65 | >>> 66 | >>> thread = api.async_diarize_audio_with_http_info(body, webhook, callback=callback_function) 67 | 68 | :param callback function: The callback function 69 | for asynchronous request. (optional) 70 | :param DiarizeAudio body: Audio object that needs to be diarized. (required) 71 | :param str webhook: The webhook url where result from async resource is posted (required) 72 | :param str request_id: Unique identifier for the request 73 | :return: AsyncResponse 74 | If the method is called asynchronously, 75 | returns the request thread. 
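        A minimal usage sketch (illustrative only): `body` is assumed to be a
        prebuilt DiarizeAudio object (see docs/DiarizeAudio.md), and the API key
        and webhook URL below are placeholders.

        >>> import deepaffects
        >>> deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY'
        >>> diarize_api = deepaffects.DiarizeApi()
        >>> async_response = diarize_api.async_diarize_audio(body, 'https://example.com/webhook')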
76 | """ 77 | 78 | all_params = ['body', 'webhook', 'request_id'] 79 | all_params.append('callback') 80 | all_params.append('_return_http_data_only') 81 | all_params.append('_preload_content') 82 | all_params.append('_request_timeout') 83 | 84 | params = locals() 85 | for key, val in iteritems(params['kwargs']): 86 | if key not in all_params: 87 | raise TypeError( 88 | "Got an unexpected keyword argument '%s'" 89 | " to method async_diarize_audio" % key 90 | ) 91 | params[key] = val 92 | del params['kwargs'] 93 | # verify the required parameter 'body' is set 94 | if ('body' not in params) or (params['body'] is None): 95 | raise ValueError("Missing the required parameter `body` when calling `async_diarize_audio`") 96 | # verify the required parameter 'webhook' is set 97 | if ('webhook' not in params) or (params['webhook'] is None): 98 | raise ValueError("Missing the required parameter `webhook` when calling `async_diarize_audio`") 99 | 100 | 101 | collection_formats = {} 102 | 103 | resource_path = '/audio/generic/api/v1/async/diarize'.replace('{format}', 'json') 104 | path_params = {} 105 | 106 | query_params = {} 107 | if 'webhook' in params: 108 | query_params['webhook'] = params['webhook'] 109 | if 'request_id' in params: 110 | query_params['request_id'] = params['request_id'] 111 | 112 | header_params = {} 113 | 114 | form_params = [] 115 | local_var_files = {} 116 | 117 | body_params = None 118 | if 'body' in params: 119 | body_params = params['body'] 120 | # HTTP header `Accept` 121 | header_params['Accept'] = self.api_client.\ 122 | select_header_accept(['application/json']) 123 | 124 | # HTTP header `Content-Type` 125 | header_params['Content-Type'] = self.api_client.\ 126 | select_header_content_type(['application/json']) 127 | 128 | # Authentication setting 129 | auth_settings = ['UserSecurity'] 130 | 131 | return self.api_client.call_api(resource_path, 'POST', 132 | path_params, 133 | query_params, 134 | header_params, 135 | body=body_params, 136 | post_params=form_params, 137 | files=local_var_files, 138 | response_type='AsyncResponse', 139 | auth_settings=auth_settings, 140 | callback=params.get('callback'), 141 | _return_http_data_only=params.get('_return_http_data_only'), 142 | _preload_content=params.get('_preload_content', True), 143 | _request_timeout=params.get('_request_timeout'), 144 | collection_formats=collection_formats) 145 | 146 | def sync_diarize_audio(self, body, **kwargs): 147 | """ 148 | Diarize an audio file synchronously. 149 | This method makes a synchronous HTTP request by default. To make an 150 | asynchronous HTTP request, please define a `callback` function 151 | to be invoked when receiving the response. 152 | >>> def callback_function(response): 153 | >>> pprint(response) 154 | >>> 155 | >>> thread = api.sync_diarize_audio(body, callback=callback_function) 156 | 157 | :param callback function: The callback function 158 | for asynchronous request. (optional) 159 | :param DiarizeAudio body: Audio object that needs to be diarized. (required) 160 | :return: list[Audio] 161 | If the method is called asynchronously, 162 | returns the request thread. 163 | """ 164 | kwargs['_return_http_data_only'] = True 165 | if kwargs.get('callback'): 166 | return self.sync_diarize_audio_with_http_info(body, **kwargs) 167 | else: 168 | (data) = self.sync_diarize_audio_with_http_info(body, **kwargs) 169 | return data 170 | 171 | def sync_diarize_audio_with_http_info(self, body, **kwargs): 172 | """ 173 | Diarize an audio file synchronously. 
174 | This method makes a synchronous HTTP request by default. To make an 175 | asynchronous HTTP request, please define a `callback` function 176 | to be invoked when receiving the response. 177 | >>> def callback_function(response): 178 | >>> pprint(response) 179 | >>> 180 | >>> thread = api.sync_diarize_audio_with_http_info(body, callback=callback_function) 181 | 182 | :param callback function: The callback function 183 | for asynchronous request. (optional) 184 | :param DiarizeAudio body: Audio object that needs to be diarized. (required) 185 | :return: list[Audio] 186 | If the method is called asynchronously, 187 | returns the request thread. 188 | """ 189 | 190 | all_params = ['body'] 191 | all_params.append('callback') 192 | all_params.append('_return_http_data_only') 193 | all_params.append('_preload_content') 194 | all_params.append('_request_timeout') 195 | 196 | params = locals() 197 | for key, val in iteritems(params['kwargs']): 198 | if key not in all_params: 199 | raise TypeError( 200 | "Got an unexpected keyword argument '%s'" 201 | " to method sync_diarize_audio" % key 202 | ) 203 | params[key] = val 204 | del params['kwargs'] 205 | # verify the required parameter 'body' is set 206 | if ('body' not in params) or (params['body'] is None): 207 | raise ValueError("Missing the required parameter `body` when calling `sync_diarize_audio`") 208 | 209 | 210 | collection_formats = {} 211 | 212 | resource_path = '/audio/generic/api/v1/sync/diarize'.replace('{format}', 'json') 213 | path_params = {} 214 | 215 | query_params = [] 216 | 217 | header_params = {} 218 | 219 | form_params = [] 220 | local_var_files = {} 221 | 222 | body_params = None 223 | if 'body' in params: 224 | body_params = params['body'] 225 | # HTTP header `Accept` 226 | header_params['Accept'] = self.api_client.\ 227 | select_header_accept(['application/json']) 228 | 229 | # HTTP header `Content-Type` 230 | header_params['Content-Type'] = self.api_client.\ 231 | select_header_content_type(['application/json']) 232 | 233 | # Authentication setting 234 | auth_settings = ['UserSecurity'] 235 | 236 | return self.api_client.call_api(resource_path, 'POST', 237 | path_params, 238 | query_params, 239 | header_params, 240 | body=body_params, 241 | post_params=form_params, 242 | files=local_var_files, 243 | response_type='list[Audio]', 244 | auth_settings=auth_settings, 245 | callback=params.get('callback'), 246 | _return_http_data_only=params.get('_return_http_data_only'), 247 | _preload_content=params.get('_preload_content', True), 248 | _request_timeout=params.get('_request_timeout'), 249 | collection_formats=collection_formats) 250 | -------------------------------------------------------------------------------- /deepaffects/apis/diarize_api_v2.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | # python 2 and python 3 compatibility library 13 | from six import iteritems 14 | 15 | from ..api_client import ApiClient 16 | from ..configuration import Configuration 17 | 18 | 19 | class DiarizeApiV2(object): 20 | 21 | def __init__(self, api_client=None): 22 | config = Configuration() 23 | if api_client: 24 | self.api_client = api_client 25 | else: 26 | if not config.api_client: 27 | config.api_client = ApiClient() 28 | self.api_client = config.api_client 29 | 30 | def async_diarize_audio(self, body, webhook, **kwargs): 31 | """ 32 | Diarize an 
audio file asynchronously. 33 | This method makes a synchronous HTTP request by default. To make an 34 | asynchronous HTTP request, please define a `callback` function 35 | to be invoked when receiving the response. 36 | >>> def callback_function(response): 37 | >>> pprint(response) 38 | >>> 39 | >>> thread = api.async_diarize_audio(body, webhook, callback=callback_function) 40 | 41 | :param callback function: The callback function 42 | for asynchronous request. (optional) 43 | :param DiarizeAudio body: Audio object that needs to be diarized. (required) 44 | :param str webhook: The webhook url where result from async resource is posted (required) 45 | :param str request_id: Unique identifier for the request 46 | :return: AsyncResponse 47 | If the method is called asynchronously, 48 | returns the request thread. 49 | """ 50 | kwargs['_return_http_data_only'] = True 51 | if kwargs.get('callback'): 52 | return self.async_diarize_audio_with_http_info(body, webhook, **kwargs) 53 | else: 54 | (data) = self.async_diarize_audio_with_http_info(body, webhook, **kwargs) 55 | return data 56 | 57 | def async_diarize_audio_with_http_info(self, body, webhook, **kwargs): 58 | """ 59 | Diarize an audio file asynchronously. 60 | This method makes a synchronous HTTP request by default. To make an 61 | asynchronous HTTP request, please define a `callback` function 62 | to be invoked when receiving the response. 63 | >>> def callback_function(response): 64 | >>> pprint(response) 65 | >>> 66 | >>> thread = api.async_diarize_audio_with_http_info(body, webhook, callback=callback_function) 67 | 68 | :param callback function: The callback function 69 | for asynchronous request. (optional) 70 | :param DiarizeAudio body: Audio object that needs to be diarized. (required) 71 | :param str webhook: The webhook url where result from async resource is posted (required) 72 | :param str request_id: Unique identifier for the request 73 | :return: AsyncResponse 74 | If the method is called asynchronously, 75 | returns the request thread. 
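        The call pattern is the same as DiarizeApi.async_diarize_audio, but the
        request is posted to the v2 diarize endpoint. A brief sketch
        (illustrative only; `body` is a prebuilt DiarizeAudio object and the
        webhook and request_id values are placeholders):

        >>> api_v2 = deepaffects.DiarizeApiV2()
        >>> async_response = api_v2.async_diarize_audio(body, 'https://example.com/webhook', request_id='my-request-1')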
76 | """ 77 | 78 | all_params = ['body', 'webhook', 'request_id'] 79 | all_params.append('callback') 80 | all_params.append('_return_http_data_only') 81 | all_params.append('_preload_content') 82 | all_params.append('_request_timeout') 83 | 84 | params = locals() 85 | for key, val in iteritems(params['kwargs']): 86 | if key not in all_params: 87 | raise TypeError( 88 | "Got an unexpected keyword argument '%s'" 89 | " to method async_diarize_audio" % key 90 | ) 91 | params[key] = val 92 | del params['kwargs'] 93 | # verify the required parameter 'body' is set 94 | if ('body' not in params) or (params['body'] is None): 95 | raise ValueError("Missing the required parameter `body` when calling `async_diarize_audio`") 96 | # verify the required parameter 'webhook' is set 97 | if ('webhook' not in params) or (params['webhook'] is None): 98 | raise ValueError("Missing the required parameter `webhook` when calling `async_diarize_audio`") 99 | 100 | 101 | collection_formats = {} 102 | 103 | resource_path = '/audio/generic/api/v2/async/diarize'.replace('{format}', 'json') 104 | path_params = {} 105 | 106 | query_params = {} 107 | if 'webhook' in params: 108 | query_params['webhook'] = params['webhook'] 109 | if 'request_id' in params: 110 | query_params['request_id'] = params['request_id'] 111 | 112 | header_params = {} 113 | 114 | form_params = [] 115 | local_var_files = {} 116 | 117 | body_params = None 118 | if 'body' in params: 119 | body_params = params['body'] 120 | # HTTP header `Accept` 121 | header_params['Accept'] = self.api_client.\ 122 | select_header_accept(['application/json']) 123 | 124 | # HTTP header `Content-Type` 125 | header_params['Content-Type'] = self.api_client.\ 126 | select_header_content_type(['application/json']) 127 | 128 | # Authentication setting 129 | auth_settings = ['UserSecurity'] 130 | 131 | return self.api_client.call_api(resource_path, 'POST', 132 | path_params, 133 | query_params, 134 | header_params, 135 | body=body_params, 136 | post_params=form_params, 137 | files=local_var_files, 138 | response_type='AsyncResponse', 139 | auth_settings=auth_settings, 140 | callback=params.get('callback'), 141 | _return_http_data_only=params.get('_return_http_data_only'), 142 | _preload_content=params.get('_preload_content', True), 143 | _request_timeout=params.get('_request_timeout'), 144 | collection_formats=collection_formats) 145 | 146 | -------------------------------------------------------------------------------- /deepaffects/apis/ellipsis_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | # python 2 and python 3 compatibility library 13 | from six import iteritems 14 | 15 | from ..api_client import ApiClient 16 | from ..configuration import Configuration 17 | 18 | 19 | class EllipsisAPI(object): 20 | 21 | def __init__(self, api_client=None): 22 | config = Configuration() 23 | if api_client: 24 | self.api_client = api_client 25 | else: 26 | if not config.api_client: 27 | config.api_client = ApiClient() 28 | self.api_client = config.api_client 29 | 30 | def async_is_depressed(self, body, webhook, **kwargs): 31 | """ 32 | Detect whether the person in audio clip is depressed 33 | This method makes a synchronous HTTP request by default. To make an 34 | asynchronous HTTP request, please define a `callback` function 35 | to be invoked when receiving the response. 
36 | >>> def callback_function(response): 37 | >>> pprint(response) 38 | >>> 39 | >>> thread = api.async_is_depressed(body, webhook, callback=callback_function) 40 | 41 | :param callback function: The callback function 42 | for asynchronous request. (optional) 43 | :param Audio body: Audio object to predict depression. (required) 44 | :param str webhook: The webhook url where result from async resource is posted (required) 45 | :param str request_id: Unique identifier for the request 46 | :return: AsyncResponse 47 | If the method is called asynchronously, 48 | returns the request thread. 49 | """ 50 | kwargs['_return_http_data_only'] = True 51 | if kwargs.get('callback'): 52 | return self.async_is_depressed_with_http_info(body, webhook, **kwargs) 53 | else: 54 | (data) = self.async_is_depressed_with_http_info(body, webhook, **kwargs) 55 | return data 56 | 57 | def async_is_depressed_with_http_info(self, body, webhook, **kwargs): 58 | """ 59 | Detect whether the person in audio clip is depressed 60 | This method makes a synchronous HTTP request by default. To make an 61 | asynchronous HTTP request, please define a `callback` function 62 | to be invoked when receiving the response. 63 | >>> def callback_function(response): 64 | >>> pprint(response) 65 | >>> 66 | >>> thread = api.async_is_depressed_with_http_info(body, webhook, callback=callback_function) 67 | 68 | :param callback function: The callback function 69 | for asynchronous request. (optional) 70 | :param Audio body: Audio object to predict depression. (required) 71 | :param str webhook: The webhook url where result from async resource is posted (required) 72 | :param str request_id: Unique identifier for the request 73 | :return: AsyncResponse 74 | If the method is called asynchronously, 75 | returns the request thread. 
76 | """ 77 | 78 | all_params = ['body', 'webhook', 'request_id'] 79 | all_params.append('callback') 80 | all_params.append('_return_http_data_only') 81 | all_params.append('_preload_content') 82 | all_params.append('_request_timeout') 83 | 84 | params = locals() 85 | for key, val in iteritems(params['kwargs']): 86 | if key not in all_params: 87 | raise TypeError( 88 | "Got an unexpected keyword argument '%s'" 89 | " to method async_is_depressed" % key 90 | ) 91 | params[key] = val 92 | del params['kwargs'] 93 | # verify the required parameter 'body' is set 94 | if ('body' not in params) or (params['body'] is None): 95 | raise ValueError("Missing the required parameter `body` when calling `async_is_depressed`") 96 | # verify the required parameter 'webhook' is set 97 | if ('webhook' not in params) or (params['webhook'] is None): 98 | raise ValueError("Missing the required parameter `webhook` when calling `async_is_depressed`") 99 | 100 | 101 | collection_formats = {} 102 | 103 | resource_path = '/audio/custom/ellipsis/api/v1/async/is_depressed'.replace('{format}', 'json') 104 | path_params = {} 105 | 106 | query_params = {} 107 | if 'webhook' in params: 108 | query_params['webhook'] = params['webhook'] 109 | if 'request_id' in params: 110 | query_params['request_id'] = params['request_id'] 111 | 112 | header_params = {} 113 | 114 | form_params = [] 115 | local_var_files = {} 116 | 117 | body_params = None 118 | if 'body' in params: 119 | body_params = params['body'] 120 | # HTTP header `Accept` 121 | header_params['Accept'] = self.api_client.\ 122 | select_header_accept(['application/json']) 123 | 124 | # HTTP header `Content-Type` 125 | header_params['Content-Type'] = self.api_client.\ 126 | select_header_content_type(['application/json']) 127 | 128 | # Authentication setting 129 | auth_settings = ['UserSecurity'] 130 | 131 | return self.api_client.call_api(resource_path, 'POST', 132 | path_params, 133 | query_params, 134 | header_params, 135 | body=body_params, 136 | post_params=form_params, 137 | files=local_var_files, 138 | response_type='AsyncResponse', 139 | auth_settings=auth_settings, 140 | callback=params.get('callback'), 141 | _return_http_data_only=params.get('_return_http_data_only'), 142 | _preload_content=params.get('_preload_content', True), 143 | _request_timeout=params.get('_request_timeout'), 144 | collection_formats=collection_formats) 145 | 146 | def sync_is_depressed(self, body, **kwargs): 147 | """ 148 | Detect whether the person in audio clip is depressed 149 | This method makes a synchronous HTTP request by default. To make an 150 | asynchronous HTTP request, please define a `callback` function 151 | to be invoked when receiving the response. 152 | >>> def callback_function(response): 153 | >>> pprint(response) 154 | >>> 155 | >>> thread = api.sync_is_depressed(body, callback=callback_function) 156 | 157 | :param callback function: The callback function 158 | for asynchronous request. (optional) 159 | :param Audio body: Audio object to predict depression. (required) 160 | :return: boolean 161 | If the method is called asynchronously, 162 | returns the request thread. 
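        A brief sketch (illustrative only; the API key and file path are
        placeholders, and Audio.from_file is the helper shown in the README):

        >>> import deepaffects
        >>> deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY'
        >>> ellipsis_api = deepaffects.EllipsisAPI()
        >>> audio = deepaffects.Audio.from_file('/path/to/file')
        >>> is_depressed = ellipsis_api.sync_is_depressed(audio)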
163 | """ 164 | kwargs['_return_http_data_only'] = True 165 | if kwargs.get('callback'): 166 | return self.sync_is_depressed_with_http_info(body, **kwargs) 167 | else: 168 | (data) = self.sync_is_depressed_with_http_info(body, **kwargs) 169 | return data 170 | 171 | def sync_is_depressed_with_http_info(self, body, **kwargs): 172 | """ 173 | Detect whether the person in audio clip is depressed 174 | This method makes a synchronous HTTP request by default. To make an 175 | asynchronous HTTP request, please define a `callback` function 176 | to be invoked when receiving the response. 177 | >>> def callback_function(response): 178 | >>> pprint(response) 179 | >>> 180 | >>> thread = api.sync_is_depressed_with_http_info(body, callback=callback_function) 181 | 182 | :param callback function: The callback function 183 | for asynchronous request. (optional) 184 | :param Audio body: Audio object to predict depression. (required) 185 | :return: boolean 186 | If the method is called asynchronously, 187 | returns the request thread. 188 | """ 189 | 190 | all_params = ['body'] 191 | all_params.append('callback') 192 | all_params.append('_return_http_data_only') 193 | all_params.append('_preload_content') 194 | all_params.append('_request_timeout') 195 | 196 | params = locals() 197 | for key, val in iteritems(params['kwargs']): 198 | if key not in all_params: 199 | raise TypeError( 200 | "Got an unexpected keyword argument '%s'" 201 | " to method sync_is_depressed" % key 202 | ) 203 | params[key] = val 204 | del params['kwargs'] 205 | # verify the required parameter 'body' is set 206 | if ('body' not in params) or (params['body'] is None): 207 | raise ValueError("Missing the required parameter `body` when calling `sync_is_depressed`") 208 | 209 | 210 | collection_formats = {} 211 | 212 | resource_path = '/audio/custom/ellipsis/api/v1/sync/is_depressed'.replace('{format}', 'json') 213 | path_params = {} 214 | 215 | query_params = [] 216 | 217 | header_params = {} 218 | 219 | form_params = [] 220 | local_var_files = {} 221 | 222 | body_params = None 223 | if 'body' in params: 224 | body_params = params['body'] 225 | # HTTP header `Accept` 226 | header_params['Accept'] = self.api_client.\ 227 | select_header_accept(['application/json']) 228 | 229 | # HTTP header `Content-Type` 230 | header_params['Content-Type'] = self.api_client.\ 231 | select_header_content_type(['application/json']) 232 | 233 | # Authentication setting 234 | auth_settings = ['UserSecurity'] 235 | 236 | return self.api_client.call_api(resource_path, 'POST', 237 | path_params, 238 | query_params, 239 | header_params, 240 | body=body_params, 241 | post_params=form_params, 242 | files=local_var_files, 243 | response_type='bool', 244 | auth_settings=auth_settings, 245 | callback=params.get('callback'), 246 | _return_http_data_only=params.get('_return_http_data_only'), 247 | _preload_content=params.get('_preload_content', True), 248 | _request_timeout=params.get('_request_timeout'), 249 | collection_formats=collection_formats) 250 | -------------------------------------------------------------------------------- /deepaffects/apis/emotion_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | # python 2 and python 3 compatibility library 13 | from six import iteritems 14 | 15 | from ..api_client import ApiClient 16 | from ..configuration import Configuration 17 | 
18 | 19 | class EmotionApi(object): 20 | 21 | def __init__(self, api_client=None): 22 | config = Configuration() 23 | if api_client: 24 | self.api_client = api_client 25 | else: 26 | if not config.api_client: 27 | config.api_client = ApiClient() 28 | self.api_client = config.api_client 29 | 30 | def async_recognise_emotion(self, body, webhook, **kwargs): 31 | """ 32 | Find emotion in an audio file 33 | Extract emotion from an audio file asynchronously. 34 | This method makes a synchronous HTTP request by default. To make an 35 | asynchronous HTTP request, please define a `callback` function 36 | to be invoked when receiving the response. 37 | >>> def callback_function(response): 38 | >>> pprint(response) 39 | >>> 40 | >>> thread = api.async_recognise_emotion(body, webhook, callback=callback_function) 41 | 42 | :param callback function: The callback function 43 | for asynchronous request. (optional) 44 | :param Audio body: Audio object that needs to be featurized. (required) 45 | :param str webhook: The webhook url where result from async resource is posted (required) 46 | :param str request_id: Unique identifier for the request 47 | :return: AsyncResponse 48 | If the method is called asynchronously, 49 | returns the request thread. 50 | """ 51 | kwargs['_return_http_data_only'] = True 52 | if kwargs.get('callback'): 53 | return self.async_recognise_emotion_with_http_info(body, webhook, **kwargs) 54 | else: 55 | (data) = self.async_recognise_emotion_with_http_info(body, webhook, **kwargs) 56 | return data 57 | 58 | def async_recognise_emotion_with_http_info(self, body, webhook, **kwargs): 59 | """ 60 | Find emotion in an audio file 61 | Extract emotion from an audio file. 62 | This method makes a synchronous HTTP request by default. To make an 63 | asynchronous HTTP request, please define a `callback` function 64 | to be invoked when receiving the response. 65 | >>> def callback_function(response): 66 | >>> pprint(response) 67 | >>> 68 | >>> thread = api.async_recognise_emotion_with_http_info(body, webhook, callback=callback_function) 69 | 70 | :param callback function: The callback function 71 | for asynchronous request. (optional) 72 | :param Audio body: Audio object that needs to be featurized. (required) 73 | :param str webhook: The webhook url where result from async resource is posted (required) 74 | :param str request_id: Unique identifier for the request 75 | :return: AsyncResponse 76 | If the method is called asynchronously, 77 | returns the request thread. 
78 | """ 79 | 80 | all_params = ['body', 'webhook', 'request_id'] 81 | all_params.append('callback') 82 | all_params.append('_return_http_data_only') 83 | all_params.append('_preload_content') 84 | all_params.append('_request_timeout') 85 | 86 | params = locals() 87 | for key, val in iteritems(params['kwargs']): 88 | if key not in all_params: 89 | raise TypeError( 90 | "Got an unexpected keyword argument '%s'" 91 | " to method async_recognise_emotion" % key 92 | ) 93 | params[key] = val 94 | del params['kwargs'] 95 | # verify the required parameter 'body' is set 96 | if ('body' not in params) or (params['body'] is None): 97 | raise ValueError("Missing the required parameter `body` when calling `async_recognise_emotion`") 98 | # verify the required parameter 'webhook' is set 99 | if ('webhook' not in params) or (params['webhook'] is None): 100 | raise ValueError("Missing the required parameter `webhook` when calling `async_recognise_emotion`") 101 | 102 | 103 | collection_formats = {} 104 | 105 | resource_path = '/audio/generic/api/v1/async/recognise_emotion'.replace('{format}', 'json') 106 | path_params = {} 107 | 108 | query_params = {} 109 | if 'webhook' in params: 110 | query_params['webhook'] = params['webhook'] 111 | if 'request_id' in params: 112 | query_params['request_id'] = params['request_id'] 113 | 114 | header_params = {} 115 | 116 | form_params = [] 117 | local_var_files = {} 118 | 119 | body_params = None 120 | if 'body' in params: 121 | body_params = params['body'] 122 | # HTTP header `Accept` 123 | header_params['Accept'] = self.api_client.\ 124 | select_header_accept(['application/json']) 125 | 126 | # HTTP header `Content-Type` 127 | header_params['Content-Type'] = self.api_client.\ 128 | select_header_content_type(['application/json']) 129 | 130 | # Authentication setting 131 | auth_settings = ['UserSecurity'] 132 | 133 | return self.api_client.call_api(resource_path, 'POST', 134 | path_params, 135 | query_params, 136 | header_params, 137 | body=body_params, 138 | post_params=form_params, 139 | files=local_var_files, 140 | response_type='AsyncResponse', 141 | auth_settings=auth_settings, 142 | callback=params.get('callback'), 143 | _return_http_data_only=params.get('_return_http_data_only'), 144 | _preload_content=params.get('_preload_content', True), 145 | _request_timeout=params.get('_request_timeout'), 146 | collection_formats=collection_formats) 147 | 148 | def sync_recognise_emotion(self, body, **kwargs): 149 | """ 150 | Find emotion in an audio file 151 | Extract emotion from an audio file. 152 | This method makes a synchronous HTTP request by default. To make an 153 | asynchronous HTTP request, please define a `callback` function 154 | to be invoked when receiving the response. 155 | >>> def callback_function(response): 156 | >>> pprint(response) 157 | >>> 158 | >>> thread = api.sync_recognise_emotion(body, callback=callback_function) 159 | 160 | :param callback function: The callback function 161 | for asynchronous request. (optional) 162 | :param Audio body: Audio object that needs to be featurized. (required) 163 | :return: list[EmotionScore] 164 | If the method is called asynchronously, 165 | returns the request thread. 
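        A brief sketch (illustrative only; assumes the API key has been
        configured as shown in the README and that the file path points to a
        real audio file):

        >>> emotion_api = deepaffects.EmotionApi()
        >>> audio = deepaffects.Audio.from_file('/path/to/file')
        >>> scores = emotion_api.sync_recognise_emotion(audio)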
166 | """ 167 | kwargs['_return_http_data_only'] = True 168 | if kwargs.get('callback'): 169 | return self.sync_recognise_emotion_with_http_info(body, **kwargs) 170 | else: 171 | (data) = self.sync_recognise_emotion_with_http_info(body, **kwargs) 172 | return data 173 | 174 | def sync_recognise_emotion_with_http_info(self, body, **kwargs): 175 | """ 176 | Find emotion in an audio file 177 | Extract emotion from an audio file. 178 | This method makes a synchronous HTTP request by default. To make an 179 | asynchronous HTTP request, please define a `callback` function 180 | to be invoked when receiving the response. 181 | >>> def callback_function(response): 182 | >>> pprint(response) 183 | >>> 184 | >>> thread = api.sync_recognise_emotion_with_http_info(body, callback=callback_function) 185 | 186 | :param callback function: The callback function 187 | for asynchronous request. (optional) 188 | :param Audio body: Audio object that needs to be featurized. (required) 189 | :return: list[EmotionScore] 190 | If the method is called asynchronously, 191 | returns the request thread. 192 | """ 193 | 194 | all_params = ['body'] 195 | all_params.append('callback') 196 | all_params.append('_return_http_data_only') 197 | all_params.append('_preload_content') 198 | all_params.append('_request_timeout') 199 | 200 | params = locals() 201 | for key, val in iteritems(params['kwargs']): 202 | if key not in all_params: 203 | raise TypeError( 204 | "Got an unexpected keyword argument '%s'" 205 | " to method sync_recognise_emotion" % key 206 | ) 207 | params[key] = val 208 | del params['kwargs'] 209 | # verify the required parameter 'body' is set 210 | if ('body' not in params) or (params['body'] is None): 211 | raise ValueError("Missing the required parameter `body` when calling `sync_recognise_emotion`") 212 | 213 | 214 | collection_formats = {} 215 | 216 | resource_path = '/audio/generic/api/v1/sync/recognise_emotion'.replace('{format}', 'json') 217 | path_params = {} 218 | 219 | query_params = [] 220 | 221 | header_params = {} 222 | 223 | form_params = [] 224 | local_var_files = {} 225 | 226 | body_params = None 227 | if 'body' in params: 228 | body_params = params['body'] 229 | # HTTP header `Accept` 230 | header_params['Accept'] = self.api_client.\ 231 | select_header_accept(['application/json']) 232 | 233 | # HTTP header `Content-Type` 234 | header_params['Content-Type'] = self.api_client.\ 235 | select_header_content_type(['application/json']) 236 | 237 | # Authentication setting 238 | auth_settings = ['UserSecurity'] 239 | 240 | return self.api_client.call_api(resource_path, 'POST', 241 | path_params, 242 | query_params, 243 | header_params, 244 | body=body_params, 245 | post_params=form_params, 246 | files=local_var_files, 247 | response_type='list[EmotionScore]', 248 | auth_settings=auth_settings, 249 | callback=params.get('callback'), 250 | _return_http_data_only=params.get('_return_http_data_only'), 251 | _preload_content=params.get('_preload_content', True), 252 | _request_timeout=params.get('_request_timeout'), 253 | collection_formats=collection_formats) 254 | -------------------------------------------------------------------------------- /deepaffects/apis/featurize_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | # python 2 and python 3 compatibility library 13 | from six import iteritems 14 | 15 | from 
..api_client import ApiClient 16 | from ..configuration import Configuration 17 | 18 | 19 | class FeaturizeApi(object): 20 | 21 | def __init__(self, api_client=None): 22 | config = Configuration() 23 | if api_client: 24 | self.api_client = api_client 25 | else: 26 | if not config.api_client: 27 | config.api_client = ApiClient() 28 | self.api_client = config.api_client 29 | 30 | def async_featurize_audio(self, body, webhook, **kwargs): 31 | """ 32 | Extract paralinguistic feature from an audio file asynchronously. 33 | This method makes a synchronous HTTP request by default. To make an 34 | asynchronous HTTP request, please define a `callback` function 35 | to be invoked when receiving the response. 36 | >>> def callback_function(response): 37 | >>> pprint(response) 38 | >>> 39 | >>> thread = api.async_featurize_audio(body, webhook, callback=callback_function) 40 | 41 | :param callback function: The callback function 42 | for asynchronous request. (optional) 43 | :param Audio body: Audio object that needs to be featurized. (required) 44 | :param str webhook: The webhook url where result from async resource is posted (required) 45 | :param str request_id: Unique identifier for the request 46 | :return: AsyncResponse 47 | If the method is called asynchronously, 48 | returns the request thread. 49 | """ 50 | kwargs['_return_http_data_only'] = True 51 | if kwargs.get('callback'): 52 | return self.async_featurize_audio_with_http_info(body, webhook, **kwargs) 53 | else: 54 | (data) = self.async_featurize_audio_with_http_info(body, webhook, **kwargs) 55 | return data 56 | 57 | def async_featurize_audio_with_http_info(self, body, webhook, **kwargs): 58 | """ 59 | featurize an audio file 60 | Extract paralinguistic feature from an audio file. 61 | This method makes a synchronous HTTP request by default. To make an 62 | asynchronous HTTP request, please define a `callback` function 63 | to be invoked when receiving the response. 64 | >>> def callback_function(response): 65 | >>> pprint(response) 66 | >>> 67 | >>> thread = api.async_featurize_audio_with_http_info(body, webhook, callback=callback_function) 68 | 69 | :param callback function: The callback function 70 | for asynchronous request. (optional) 71 | :param Audio body: Audio object that needs to be featurized. (required) 72 | :param str webhook: The webhook url where result from async resource is posted (required) 73 | :param str request_id: Unique identifier for the request 74 | :return: AsyncResponse 75 | If the method is called asynchronously, 76 | returns the request thread. 
77 | """ 78 | 79 | all_params = ['body', 'webhook', 'request_id'] 80 | all_params.append('callback') 81 | all_params.append('_return_http_data_only') 82 | all_params.append('_preload_content') 83 | all_params.append('_request_timeout') 84 | 85 | params = locals() 86 | for key, val in iteritems(params['kwargs']): 87 | if key not in all_params: 88 | raise TypeError( 89 | "Got an unexpected keyword argument '%s'" 90 | " to method async_featurize_audio" % key 91 | ) 92 | params[key] = val 93 | del params['kwargs'] 94 | # verify the required parameter 'body' is set 95 | if ('body' not in params) or (params['body'] is None): 96 | raise ValueError("Missing the required parameter `body` when calling `async_featurize_audio`") 97 | # verify the required parameter 'webhook' is set 98 | if ('webhook' not in params) or (params['webhook'] is None): 99 | raise ValueError("Missing the required parameter `webhook` when calling `async_featurize_audio`") 100 | 101 | 102 | collection_formats = {} 103 | 104 | resource_path = '/audio/generic/api/v1/async/featurize'.replace('{format}', 'json') 105 | path_params = {} 106 | 107 | query_params = {} 108 | if 'webhook' in params: 109 | query_params['webhook'] = params['webhook'] 110 | if 'request_id' in params: 111 | query_params['request_id'] = params['request_id'] 112 | 113 | header_params = {} 114 | 115 | form_params = [] 116 | local_var_files = {} 117 | 118 | body_params = None 119 | if 'body' in params: 120 | body_params = params['body'] 121 | # HTTP header `Accept` 122 | header_params['Accept'] = self.api_client.\ 123 | select_header_accept(['application/json']) 124 | 125 | # HTTP header `Content-Type` 126 | header_params['Content-Type'] = self.api_client.\ 127 | select_header_content_type(['application/json']) 128 | 129 | # Authentication setting 130 | auth_settings = ['UserSecurity'] 131 | 132 | return self.api_client.call_api(resource_path, 'POST', 133 | path_params, 134 | query_params, 135 | header_params, 136 | body=body_params, 137 | post_params=form_params, 138 | files=local_var_files, 139 | response_type='AsyncResponse', 140 | auth_settings=auth_settings, 141 | callback=params.get('callback'), 142 | _return_http_data_only=params.get('_return_http_data_only'), 143 | _preload_content=params.get('_preload_content', True), 144 | _request_timeout=params.get('_request_timeout'), 145 | collection_formats=collection_formats) 146 | 147 | def sync_featurize_audio(self, body, **kwargs): 148 | """ 149 | Extract paralinguistic feature from an audio file synchronously. 150 | This method makes a synchronous HTTP request by default. To make an 151 | asynchronous HTTP request, please define a `callback` function 152 | to be invoked when receiving the response. 153 | >>> def callback_function(response): 154 | >>> pprint(response) 155 | >>> 156 | >>> thread = api.sync_featurize_audio(body, callback=callback_function) 157 | 158 | :param callback function: The callback function 159 | for asynchronous request. (optional) 160 | :param Audio body: Audio object that needs to be featurized. (required) 161 | :return: AudioFeatures 162 | If the method is called asynchronously, 163 | returns the request thread. 
164 | """ 165 | kwargs['_return_http_data_only'] = True 166 | if kwargs.get('callback'): 167 | return self.sync_featurize_audio_with_http_info(body, **kwargs) 168 | else: 169 | (data) = self.sync_featurize_audio_with_http_info(body, **kwargs) 170 | return data 171 | 172 | def sync_featurize_audio_with_http_info(self, body, **kwargs): 173 | """ 174 | Extract paralinguistic feature from an audio file synchronously. 175 | This method makes a synchronous HTTP request by default. To make an 176 | asynchronous HTTP request, please define a `callback` function 177 | to be invoked when receiving the response. 178 | >>> def callback_function(response): 179 | >>> pprint(response) 180 | >>> 181 | >>> thread = api.sync_featurize_audio_with_http_info(body, callback=callback_function) 182 | 183 | :param callback function: The callback function 184 | for asynchronous request. (optional) 185 | :param Audio body: Audio object that needs to be featurized. (required) 186 | :return: AudioFeatures 187 | If the method is called asynchronously, 188 | returns the request thread. 189 | """ 190 | 191 | all_params = ['body'] 192 | all_params.append('callback') 193 | all_params.append('_return_http_data_only') 194 | all_params.append('_preload_content') 195 | all_params.append('_request_timeout') 196 | 197 | params = locals() 198 | for key, val in iteritems(params['kwargs']): 199 | if key not in all_params: 200 | raise TypeError( 201 | "Got an unexpected keyword argument '%s'" 202 | " to method sync_featurize_audio" % key 203 | ) 204 | params[key] = val 205 | del params['kwargs'] 206 | # verify the required parameter 'body' is set 207 | if ('body' not in params) or (params['body'] is None): 208 | raise ValueError("Missing the required parameter `body` when calling `sync_featurize_audio`") 209 | 210 | 211 | collection_formats = {} 212 | 213 | resource_path = '/audio/generic/api/v1/sync/featurize'.replace('{format}', 'json') 214 | path_params = {} 215 | 216 | query_params = [] 217 | 218 | header_params = {} 219 | 220 | form_params = [] 221 | local_var_files = {} 222 | 223 | body_params = None 224 | if 'body' in params: 225 | body_params = params['body'] 226 | # HTTP header `Accept` 227 | header_params['Accept'] = self.api_client.\ 228 | select_header_accept(['application/json']) 229 | 230 | # HTTP header `Content-Type` 231 | header_params['Content-Type'] = self.api_client.\ 232 | select_header_content_type(['application/json']) 233 | 234 | # Authentication setting 235 | auth_settings = ['UserSecurity'] 236 | 237 | return self.api_client.call_api(resource_path, 'POST', 238 | path_params, 239 | query_params, 240 | header_params, 241 | body=body_params, 242 | post_params=form_params, 243 | files=local_var_files, 244 | response_type='AudioFeatures', 245 | auth_settings=auth_settings, 246 | callback=params.get('callback'), 247 | _return_http_data_only=params.get('_return_http_data_only'), 248 | _preload_content=params.get('_preload_content', True), 249 | _request_timeout=params.get('_request_timeout'), 250 | collection_formats=collection_formats) 251 | -------------------------------------------------------------------------------- /deepaffects/configuration.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: 0.1.0 9 | 10 | Generated by: https://github.com/swagger-api/swagger-codegen.git 11 | """ 12 | 13 | 14 | from __future__ import absolute_import 15 | 16 | import 
urllib3 17 | 18 | import sys 19 | import logging 20 | 21 | from six import iteritems 22 | from six.moves import http_client as httplib 23 | 24 | 25 | def singleton(cls, *args, **kw): 26 | instances = {} 27 | 28 | def _singleton(): 29 | if cls not in instances: 30 | instances[cls] = cls(*args, **kw) 31 | return instances[cls] 32 | return _singleton 33 | 34 | 35 | @singleton 36 | class Configuration(object): 37 | def __init__(self): 38 | """ 39 | Constructor 40 | """ 41 | # Default Base url 42 | self.host = "https://proxy.api.deepaffects.com" 43 | # Default api client 44 | self.api_client = None 45 | # Temp file folder for downloading files 46 | self.temp_folder_path = None 47 | 48 | # Authentication Settings 49 | # dict to store API key(s) 50 | self.api_key = {} 51 | # dict to store API prefix (e.g. Bearer) 52 | self.api_key_prefix = {} 53 | # Username for HTTP basic authentication 54 | self.username = "" 55 | # Password for HTTP basic authentication 56 | self.password = "" 57 | 58 | # Logging Settings 59 | self.logger = {} 60 | self.logger["package_logger"] = logging.getLogger("swagger_client") 61 | self.logger["urllib3_logger"] = logging.getLogger("urllib3") 62 | # Log format 63 | self.logger_format = '%(asctime)s %(levelname)s %(message)s' 64 | # Log stream handler 65 | self.logger_stream_handler = None 66 | # Log file handler 67 | self.logger_file_handler = None 68 | # Debug file location 69 | self.logger_file = None 70 | # Debug switch 71 | self.debug = False 72 | 73 | # SSL/TLS verification 74 | # Set this to false to skip verifying SSL certificate when calling API from https server. 75 | self.verify_ssl = True 76 | # Set this to customize the certificate file to verify the peer. 77 | self.ssl_ca_cert = None 78 | # client certificate file 79 | self.cert_file = None 80 | # client key file 81 | self.key_file = None 82 | 83 | @property 84 | def logger_file(self): 85 | """ 86 | Gets the logger_file. 87 | """ 88 | return self.__logger_file 89 | 90 | @logger_file.setter 91 | def logger_file(self, value): 92 | """ 93 | Sets the logger_file. 94 | 95 | If the logger_file is None, then add stream handler and remove file handler. 96 | Otherwise, add file handler and remove stream handler. 97 | 98 | :param value: The logger_file path. 99 | :type: str 100 | """ 101 | self.__logger_file = value 102 | if self.__logger_file: 103 | # If set logging file, 104 | # then add file handler and remove stream handler. 105 | self.logger_file_handler = logging.FileHandler(self.__logger_file) 106 | self.logger_file_handler.setFormatter(self.logger_formatter) 107 | for _, logger in iteritems(self.logger): 108 | logger.addHandler(self.logger_file_handler) 109 | if self.logger_stream_handler: 110 | logger.removeHandler(self.logger_stream_handler) 111 | else: 112 | # If not set logging file, 113 | # then add stream handler and remove file handler. 114 | self.logger_stream_handler = logging.StreamHandler() 115 | self.logger_stream_handler.setFormatter(self.logger_formatter) 116 | for _, logger in iteritems(self.logger): 117 | logger.addHandler(self.logger_stream_handler) 118 | if self.logger_file_handler: 119 | logger.removeHandler(self.logger_file_handler) 120 | 121 | @property 122 | def debug(self): 123 | """ 124 | Gets the debug status. 125 | """ 126 | return self.__debug 127 | 128 | @debug.setter 129 | def debug(self, value): 130 | """ 131 | Sets the debug status. 132 | 133 | :param value: The debug status, True or False. 
134 | :type: bool 135 | """ 136 | self.__debug = value 137 | if self.__debug: 138 | # if debug status is True, turn on debug logging 139 | for _, logger in iteritems(self.logger): 140 | logger.setLevel(logging.DEBUG) 141 | # turn on httplib debug 142 | httplib.HTTPConnection.debuglevel = 1 143 | else: 144 | # if debug status is False, turn off debug logging, 145 | # setting log level to default `logging.WARNING` 146 | for _, logger in iteritems(self.logger): 147 | logger.setLevel(logging.WARNING) 148 | # turn off httplib debug 149 | httplib.HTTPConnection.debuglevel = 0 150 | 151 | @property 152 | def logger_format(self): 153 | """ 154 | Gets the logger_format. 155 | """ 156 | return self.__logger_format 157 | 158 | @logger_format.setter 159 | def logger_format(self, value): 160 | """ 161 | Sets the logger_format. 162 | 163 | The logger_formatter will be updated when sets logger_format. 164 | 165 | :param value: The format string. 166 | :type: str 167 | """ 168 | self.__logger_format = value 169 | self.logger_formatter = logging.Formatter(self.__logger_format) 170 | 171 | def get_api_key_with_prefix(self, identifier): 172 | """ 173 | Gets API key (with prefix if set). 174 | 175 | :param identifier: The identifier of apiKey. 176 | :return: The token for api key authentication. 177 | """ 178 | if self.api_key.get(identifier) and self.api_key_prefix.get(identifier): 179 | return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] 180 | elif self.api_key.get(identifier): 181 | return self.api_key[identifier] 182 | 183 | def get_basic_auth_token(self): 184 | """ 185 | Gets HTTP basic authentication header (string). 186 | 187 | :return: The token for basic HTTP authentication. 188 | """ 189 | return urllib3.util.make_headers(basic_auth=self.username + ':' + self.password)\ 190 | .get('authorization') 191 | 192 | def auth_settings(self): 193 | """ 194 | Gets Auth Settings dict for api client. 195 | 196 | :return: The Auth Settings information dict. 197 | """ 198 | return { 199 | 'UserSecurity': 200 | { 201 | 'type': 'api_key', 202 | 'in': 'query', 203 | 'key': 'apikey', 204 | 'value': self.get_api_key_with_prefix('apikey') 205 | }, 206 | 207 | } 208 | 209 | def to_debug_report(self): 210 | """ 211 | Gets the essential information for debugging. 212 | 213 | :return: The report for debugging. 
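        Illustrative usage (the environment details in the output are elided, not
        captured from a real run):
        >>> from deepaffects.configuration import Configuration
        >>> print(Configuration().to_debug_report())
        Python SDK Debug Report:
        OS: ...
        Python Version: ...
        Version of the API: 0.1.0
        SDK Package Version: 1.0.0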
214 | """ 215 | return "Python SDK Debug Report:\n"\ 216 | "OS: {env}\n"\ 217 | "Python Version: {pyversion}\n"\ 218 | "Version of the API: 0.1.0\n"\ 219 | "SDK Package Version: 1.0.0".\ 220 | format(env=sys.platform, pyversion=sys.version) 221 | -------------------------------------------------------------------------------- /deepaffects/models/__init__.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: 0.1.0 9 | 10 | Generated by: https://github.com/swagger-api/swagger-codegen.git 11 | """ 12 | 13 | 14 | from __future__ import absolute_import 15 | 16 | # import models into model package 17 | from .async_response import AsyncResponse 18 | from .audio import Audio 19 | from .diarize_audio import DiarizeAudio 20 | from .emotion_score import EmotionScore 21 | from .audio_features import AudioFeatures 22 | from .diarize_segment import DiarizeSegment 23 | -------------------------------------------------------------------------------- /deepaffects/models/async_response.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | from pprint import pformat 10 | 11 | from six import iteritems 12 | import json 13 | 14 | 15 | class AsyncResponse(object): 16 | """ 17 | NOTE: This class is auto generated by the swagger code generator program. 18 | Do not edit the class manually. 19 | """ 20 | def __init__(self, request_id=None, api=None): 21 | """ 22 | AsyncResponse - a model defined in Swagger 23 | 24 | :param dict swaggerTypes: The key is attribute name 25 | and the value is attribute type. 26 | :param dict attributeMap: The key is attribute name 27 | and the value is json key in definition. 28 | """ 29 | self.swagger_types = { 30 | 'request_id': 'str', 31 | 'api': 'str' 32 | } 33 | 34 | self.attribute_map = { 35 | 'request_id': 'request_id', 36 | 'api': 'api' 37 | } 38 | 39 | self._request_id = request_id 40 | self._api = api 41 | 42 | @property 43 | def request_id(self): 44 | """ 45 | Gets the request_id of this AsyncResponse. 46 | Unique identifier for the api call 47 | 48 | :return: The request_id of this AsyncResponse. 49 | :rtype: str 50 | """ 51 | return self._request_id 52 | 53 | @request_id.setter 54 | def request_id(self, request_id): 55 | """ 56 | Sets the request_id of this AsyncResponse. 57 | Unique identifier for the api call 58 | 59 | :param request_id: The request_id of this AsyncResponse. 60 | :type: str 61 | """ 62 | if request_id is None: 63 | raise ValueError("Invalid value for `request_id`, must not be `None`") 64 | 65 | self._request_id = request_id 66 | 67 | @property 68 | def api(self): 69 | """ 70 | Gets the api of this AsyncResponse. 71 | API hit 72 | 73 | :return: The api of this AsyncResponse. 74 | :rtype: str 75 | """ 76 | return self._api 77 | 78 | @api.setter 79 | def api(self, api): 80 | """ 81 | Sets the api of this AsyncResponse. 82 | API hit 83 | 84 | :param api: The api of this AsyncResponse. 
85 | :type: str 86 | """ 87 | if api is None: 88 | raise ValueError("Invalid value for `api`, must not be `None`") 89 | 90 | self._api = api 91 | 92 | def to_dict(self): 93 | """ 94 | Returns the model properties as a dict 95 | """ 96 | result = {} 97 | 98 | for attr, _ in iteritems(self.swagger_types): 99 | value = getattr(self, attr) 100 | if isinstance(value, list): 101 | result[attr] = list(map( 102 | lambda x: x.to_dict() if hasattr(x, "to_dict") else x, 103 | value 104 | )) 105 | elif hasattr(value, "to_dict"): 106 | result[attr] = value.to_dict() 107 | elif isinstance(value, dict): 108 | result[attr] = dict(map( 109 | lambda item: (item[0], item[1].to_dict()) 110 | if hasattr(item[1], "to_dict") else item, 111 | value.items() 112 | )) 113 | else: 114 | result[attr] = value 115 | 116 | return result 117 | 118 | def to_str(self): 119 | """ 120 | Returns the string representation of the model 121 | """ 122 | return pformat(self.to_dict()) 123 | 124 | def __repr__(self): 125 | """ 126 | For `print` and `pprint` 127 | """ 128 | return self.to_str() 129 | 130 | def __eq__(self, other): 131 | """ 132 | Returns true if both objects are equal 133 | """ 134 | if not isinstance(other, AsyncResponse): 135 | return False 136 | 137 | return self.__dict__ == other.__dict__ 138 | 139 | def __ne__(self, other): 140 | """ 141 | Returns true if both objects are not equal 142 | """ 143 | return not self == other 144 | 145 | @staticmethod 146 | def from_json(content_str): 147 | content = json.loads(content_str) 148 | async_response = AsyncResponse(api=content["api"], request_id=content["request_id"]) 149 | return async_response 150 | 151 | -------------------------------------------------------------------------------- /deepaffects/models/audio.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | try: 9 | import StringIO as SIO 10 | except ImportError: 11 | import io as SIO 12 | import base64 13 | import json 14 | from pprint import pformat 15 | 16 | from pymediainfo import MediaInfo 17 | from six import iteritems 18 | 19 | class Audio(object): 20 | def __init__(self, encoding=None, sample_rate=None, language_code='en-US', content=None): 21 | """ 22 | Audio - a model defined in Swagger 23 | 24 | :param dict swaggerTypes: The key is attribute name 25 | and the value is attribute type. 26 | :param dict attributeMap: The key is attribute name 27 | and the value is json key in definition. 28 | """ 29 | self.swagger_types = { 30 | 'encoding': 'str', 31 | 'sample_rate': 'str', 32 | 'language_code': 'str', 33 | 'content': 'str' 34 | } 35 | 36 | self.attribute_map = { 37 | 'encoding': 'encoding', 38 | 'sample_rate': 'sampleRate', 39 | 'language_code': 'languageCode', 40 | 'content': 'content' 41 | } 42 | 43 | self._encoding = encoding 44 | self._sample_rate = sample_rate 45 | self._language_code = language_code 46 | self._content = content 47 | 48 | @property 49 | def encoding(self): 50 | """ 51 | Gets the encoding of this Audio. 52 | Encoding of audio file like MP3, WAV etc. 53 | 54 | :return: The encoding of this Audio. 55 | :rtype: str 56 | """ 57 | return self._encoding 58 | 59 | @encoding.setter 60 | def encoding(self, encoding): 61 | """ 62 | Sets the encoding of this Audio. 63 | Encoding of audio file like MP3, WAV etc. 64 | 65 | :param encoding: The encoding of this Audio. 
66 | :type: str 67 | """ 68 | if encoding is None: 69 | raise ValueError("Invalid value for `encoding`, must not be `None`") 70 | 71 | self._encoding = encoding 72 | 73 | @property 74 | def sample_rate(self): 75 | """ 76 | Gets the sample_rate of this Audio. 77 | Sample rate of the audio file. 78 | 79 | :return: The sample_rate of this Audio. 80 | :rtype: str 81 | """ 82 | return self._sample_rate 83 | 84 | @sample_rate.setter 85 | def sample_rate(self, sample_rate): 86 | """ 87 | Sets the sample_rate of this Audio. 88 | Sample rate of the audio file. 89 | 90 | :param sample_rate: The sample_rate of this Audio. 91 | :type: int 92 | """ 93 | if sample_rate is None: 94 | raise ValueError("Invalid value for `sample_rate`, must not be `None`") 95 | 96 | self._sample_rate = sample_rate 97 | 98 | @property 99 | def language_code(self): 100 | """ 101 | Gets the language_code of this Audio. 102 | Language spoken in the audio file. 103 | 104 | :return: The language_code of this Audio. 105 | :rtype: str 106 | """ 107 | return self._language_code 108 | 109 | @language_code.setter 110 | def language_code(self, language_code): 111 | """ 112 | Sets the language_code of this Audio. 113 | Language spoken in the audio file. 114 | 115 | :param language_code: The language_code of this Audio. 116 | :type: str 117 | """ 118 | if language_code is None: 119 | raise ValueError("Invalid value for `language_code`, must not be `None`") 120 | 121 | self._language_code = language_code 122 | 123 | @property 124 | def content(self): 125 | """ 126 | Gets the content of this Audio. 127 | base64 encoding of the audio file. 128 | 129 | :return: The content of this Audio. 130 | :rtype: str 131 | """ 132 | return self._content 133 | 134 | @content.setter 135 | def content(self, content): 136 | """ 137 | Sets the content of this Audio. 138 | base64 encoding of the audio file. 139 | 140 | :param content: The content of this Audio. 
141 | :type: str 142 | """ 143 | if content is None: 144 | raise ValueError("Invalid value for `content`, must not be `None`") 145 | 146 | self._content = content 147 | 148 | def to_dict(self): 149 | """ 150 | Returns the model properties as a dict 151 | """ 152 | result = {} 153 | 154 | for attr, _ in iteritems(self.swagger_types): 155 | value = getattr(self, attr) 156 | if isinstance(value, list): 157 | result[attr] = list(map( 158 | lambda x: x.to_dict() if hasattr(x, "to_dict") else x, 159 | value 160 | )) 161 | elif hasattr(value, "to_dict"): 162 | result[attr] = value.to_dict() 163 | elif isinstance(value, dict): 164 | result[attr] = dict(map( 165 | lambda item: (item[0], item[1].to_dict()) 166 | if hasattr(item[1], "to_dict") else item, 167 | value.items() 168 | )) 169 | else: 170 | result[attr] = value 171 | 172 | return result 173 | 174 | def to_str(self): 175 | """ 176 | Returns the string representation of the model 177 | """ 178 | return pformat(self.to_dict()) 179 | 180 | def __repr__(self): 181 | """ 182 | For `print` and `pprint` 183 | """ 184 | return self.to_str() 185 | 186 | def __eq__(self, other): 187 | """ 188 | Returns true if both objects are equal 189 | """ 190 | if not isinstance(other, Audio): 191 | return False 192 | 193 | return self.__dict__ == other.__dict__ 194 | 195 | def __ne__(self, other): 196 | """ 197 | Returns true if both objects are not equal 198 | """ 199 | return not self == other 200 | 201 | @staticmethod 202 | def from_file(file_name, language_code='en-US'): 203 | media_info = MediaInfo.parse(file_name) 204 | codec = media_info.tracks[0].__dict__['codec'] 205 | sampling_rate = media_info.tracks[1].__dict__['sampling_rate'] 206 | fout = SIO.StringIO() 207 | with open(file_name, 'rb') as fin: 208 | audio_content = fin.read() 209 | audio = Audio(encoding=codec, sample_rate=sampling_rate, language_code=language_code, 210 | content=base64.b64encode(audio_content).decode('utf-8')) 211 | fout.close() 212 | return audio 213 | 214 | @staticmethod 215 | def from_json(content_str): 216 | content = json.loads(content_str) 217 | audio = Audio(encoding=content['encoding'], sample_rate=content['sample_rate'], 218 | language_code=content['language_code'], content=content['content']) 219 | return audio 220 | 221 | def to_file(self, file_name): 222 | base64.decode(SIO.StringIO(self.content), output=open(file_name, 'wb')) -------------------------------------------------------------------------------- /deepaffects/models/audio_features.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs # noqa: E501 7 | 8 | OpenAPI spec version: 0.1.0 9 | 10 | Generated by: https://github.com/swagger-api/swagger-codegen.git 11 | """ 12 | 13 | import pprint 14 | import re # noqa: F401 15 | 16 | import six 17 | 18 | 19 | class AudioFeatures(object): 20 | """NOTE: This class is auto generated by the swagger code generator program. 21 | 22 | Do not edit the class manually. 23 | """ 24 | 25 | """ 26 | Attributes: 27 | swagger_types (dict): The key is attribute name 28 | and the value is attribute type. 29 | attribute_map (dict): The key is attribute name 30 | and the value is json key in definition. 
31 | """ 32 | swagger_types = { 33 | 'mfccs': 'list[list[float]]', 34 | 'zcr': 'list[float]', 35 | 'energy': 'list[float]' 36 | } 37 | 38 | attribute_map = { 39 | 'mfccs': 'mfccs', 40 | 'zcr': 'zcr', 41 | 'energy': 'energy' 42 | } 43 | 44 | def __init__(self, mfccs=None, zcr=None, energy=None): # noqa: E501 45 | """AudioFeatures - a model defined in Swagger""" # noqa: E501 46 | 47 | self._mfccs = None 48 | self._zcr = None 49 | self._energy = None 50 | self.discriminator = None 51 | 52 | 53 | @property 54 | def mfccs(self): 55 | """Gets the mfccs of this AudioFeatures. # noqa: E501 56 | 57 | mel frequency cepstral coefficients # noqa: E501 58 | 59 | :return: The mfccs of this AudioFeatures. # noqa: E501 60 | :rtype: list[list[float]] 61 | """ 62 | return self._mfccs 63 | 64 | @mfccs.setter 65 | def mfccs(self, mfccs): 66 | """Sets the mfccs of this AudioFeatures. 67 | 68 | mel frequency cepstral coefficients # noqa: E501 69 | 70 | :param mfccs: The mfccs of this AudioFeatures. # noqa: E501 71 | :type: list[list[float]] 72 | """ 73 | if mfccs is None: 74 | raise ValueError("Invalid value for `mfccs`, must not be `None`") # noqa: E501 75 | 76 | self._mfccs = mfccs 77 | 78 | @property 79 | def zcr(self): 80 | """Gets the zcr of this AudioFeatures. # noqa: E501 81 | 82 | zero crossing rate # noqa: E501 83 | 84 | :return: The zcr of this AudioFeatures. # noqa: E501 85 | :rtype: list[float] 86 | """ 87 | return self._zcr 88 | 89 | @zcr.setter 90 | def zcr(self, zcr): 91 | """Sets the zcr of this AudioFeatures. 92 | 93 | zero crossing rate # noqa: E501 94 | 95 | :param zcr: The zcr of this AudioFeatures. # noqa: E501 96 | :type: list[float] 97 | """ 98 | if zcr is None: 99 | raise ValueError("Invalid value for `zcr`, must not be `None`") # noqa: E501 100 | 101 | self._zcr = zcr 102 | 103 | @property 104 | def energy(self): 105 | """Gets the energy of this AudioFeatures. # noqa: E501 106 | 107 | energy # noqa: E501 108 | 109 | :return: The energy of this AudioFeatures. # noqa: E501 110 | :rtype: list[float] 111 | """ 112 | return self._energy 113 | 114 | @energy.setter 115 | def energy(self, energy): 116 | """Sets the energy of this AudioFeatures. 117 | 118 | energy # noqa: E501 119 | 120 | :param energy: The energy of this AudioFeatures. 
# noqa: E501 121 | :type: list[float] 122 | """ 123 | if energy is None: 124 | raise ValueError("Invalid value for `energy`, must not be `None`") # noqa: E501 125 | 126 | self._energy = energy 127 | 128 | def to_dict(self): 129 | """Returns the model properties as a dict""" 130 | result = {} 131 | 132 | for attr, _ in six.iteritems(self.swagger_types): 133 | value = getattr(self, attr) 134 | if isinstance(value, list): 135 | result[attr] = list(map( 136 | lambda x: x.to_dict() if hasattr(x, "to_dict") else x, 137 | value 138 | )) 139 | elif hasattr(value, "to_dict"): 140 | result[attr] = value.to_dict() 141 | elif isinstance(value, dict): 142 | result[attr] = dict(map( 143 | lambda item: (item[0], item[1].to_dict()) 144 | if hasattr(item[1], "to_dict") else item, 145 | value.items() 146 | )) 147 | else: 148 | result[attr] = value 149 | 150 | return result 151 | 152 | def to_str(self): 153 | """Returns the string representation of the model""" 154 | return pprint.pformat(self.to_dict()) 155 | 156 | def __repr__(self): 157 | """For `print` and `pprint`""" 158 | return self.to_str() 159 | 160 | def __eq__(self, other): 161 | """Returns true if both objects are equal""" 162 | if not isinstance(other, AudioFeatures): 163 | return False 164 | 165 | return self.__dict__ == other.__dict__ 166 | 167 | def __ne__(self, other): 168 | """Returns true if both objects are not equal""" 169 | return not self == other 170 | -------------------------------------------------------------------------------- /deepaffects/models/diarize_audio.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | """ 9 | 10 | 11 | try: 12 | import StringIO as SIO 13 | except ImportError: 14 | import io as SIO 15 | import base64 16 | import json 17 | from pprint import pformat 18 | 19 | from pymediainfo import MediaInfo 20 | from six import iteritems 21 | 22 | 23 | class DiarizeAudio(object): 24 | 25 | def __init__(self, encoding=None, sample_rate=None, language_code='en-US', content=None, speakers=-1, 26 | merge_segments=True, audio_type="default"): 27 | """ 28 | DiarizeAudio - a model defined in Swagger 29 | 30 | :param dict swaggerTypes: The key is attribute name 31 | and the value is attribute type. 32 | :param dict attributeMap: The key is attribute name 33 | and the value is json key in definition. 34 | """ 35 | self.swagger_types = { 36 | 'encoding': 'str', 37 | 'sample_rate': 'int', 38 | 'language_code': 'str', 39 | 'content': 'str', 40 | 'speakers': 'int', 41 | 'audio_type': 'str', 42 | 'merge_segments': 'bool' 43 | } 44 | 45 | self.attribute_map = { 46 | 'encoding': 'encoding', 47 | 'sample_rate': 'sampleRate', 48 | 'language_code': 'languageCode', 49 | 'content': 'content', 50 | 'speakers': 'speakers', 51 | 'audio_type': 'audioType', 52 | 'merge_segments': 'vad' 53 | } 54 | 55 | self._encoding = encoding 56 | self._sample_rate = sample_rate 57 | self._language_code = language_code 58 | self._content = content 59 | self._speakers = speakers 60 | self._merge_segments = merge_segments 61 | self._audio_type = audio_type 62 | 63 | @property 64 | def encoding(self): 65 | """ 66 | Gets the encoding of this DiarizeAudio. 67 | Encoding of audio file like MP3, WAV etc. 68 | 69 | :return: The encoding of this DiarizeAudio. 70 | :rtype: str 71 | """ 72 | return self._encoding 73 | 74 | @encoding.setter 75 | def encoding(self, encoding): 76 | """ 77 | Sets the encoding of this DiarizeAudio. 
78 | Encoding of audio file like MP3, WAV etc. 79 | 80 | :param encoding: The encoding of this DiarizeAudio. 81 | :type: str 82 | """ 83 | if encoding is None: 84 | raise ValueError("Invalid value for `encoding`, must not be `None`") 85 | 86 | self._encoding = encoding 87 | 88 | @property 89 | def sample_rate(self): 90 | """ 91 | Gets the sample_rate of this DiarizeAudio. 92 | Sample rate of the audio file. 93 | 94 | :return: The sample_rate of this DiarizeAudio. 95 | :rtype: int 96 | """ 97 | return self._sample_rate 98 | 99 | @sample_rate.setter 100 | def sample_rate(self, sample_rate): 101 | """ 102 | Sets the sample_rate of this DiarizeAudio. 103 | Sample rate of the audio file. 104 | 105 | :param sample_rate: The sample_rate of this DiarizeAudio. 106 | :type: int 107 | """ 108 | if sample_rate is None: 109 | raise ValueError("Invalid value for `sample_rate`, must not be `None`") 110 | 111 | self._sample_rate = sample_rate 112 | 113 | @property 114 | def language_code(self): 115 | """ 116 | Gets the language_code of this DiarizeAudio. 117 | Language spoken in the audio file. 118 | 119 | :return: The language_code of this DiarizeAudio. 120 | :rtype: str 121 | """ 122 | return self._language_code 123 | 124 | @language_code.setter 125 | def language_code(self, language_code): 126 | """ 127 | Sets the language_code of this DiarizeAudio. 128 | Language spoken in the audio file. 129 | 130 | :param language_code: The language_code of this DiarizeAudio. 131 | :type: str 132 | """ 133 | if language_code is None: 134 | raise ValueError("Invalid value for `language_code`, must not be `None`") 135 | 136 | self._language_code = language_code 137 | 138 | @property 139 | def content(self): 140 | """ 141 | Gets the content of this DiarizeAudio. 142 | base64 encoding of the audio file. 143 | 144 | :return: The content of this DiarizeAudio. 145 | :rtype: str 146 | """ 147 | return self._content 148 | 149 | @content.setter 150 | def content(self, content): 151 | """ 152 | Sets the content of this DiarizeAudio. 153 | base64 encoding of the audio file. 154 | 155 | :param content: The content of this DiarizeAudio. 156 | :type: str 157 | """ 158 | if content is None: 159 | raise ValueError("Invalid value for `content`, must not be `None`") 160 | 161 | self._content = content 162 | 163 | @property 164 | def speakers(self): 165 | """ 166 | Gets the speakers of this DiarizeAudio. 167 | Number of speakers in the file (-1 for unknown speakers) 168 | 169 | :return: The speakers of this DiarizeAudio. 170 | :rtype: int 171 | """ 172 | return self._speakers 173 | 174 | @speakers.setter 175 | def speakers(self, speakers): 176 | """ 177 | Sets the speakers of this DiarizeAudio. 178 | Number of speakers in the file (-1 for unknown speakers) 179 | 180 | :param speakers: The speakers of this DiarizeAudio. 181 | :type: int 182 | """ 183 | if speakers is None: 184 | raise ValueError("Invalid value for `speakers`, must not be `None`") 185 | 186 | self._speakers = speakers 187 | 188 | @property 189 | def audio_type(self): 190 | """ 191 | Gets the corresponding type of audio file 192 | example: meeting, call-center, default 193 | 194 | :return: The audio_type of this DiarizeAudio. 195 | :rtype: str 196 | """ 197 | return self._audio_type 198 | 199 | @audio_type.setter 200 | def audio_type(self, audio_type): 201 | """ 202 | Sets the audio_type of this DiarizeAudio. 203 | Corresponding type of audio file like meeting, call-center, default 204 | 205 | :param encoding: The audio_type of this DiarizeAudio. 
206 | :type: str 207 | """ 208 | if audio_type is None: 209 | raise ValueError("Invalid value for `audio_type`, must not be `None`") 210 | 211 | self._audio_type = audio_type 212 | 213 | @property 214 | def merge_segments(self): 215 | """ 216 | Whether the consecutive segments of same speaker should be merged 217 | 218 | :return: The merge_segments of this DiarizeAudio. 219 | :rtype: bool 220 | """ 221 | return self._merge_segments 222 | 223 | @merge_segments.setter 224 | def merge_segments(self, merge_segments): 225 | """ 226 | Sets the merge_segments of this DiarizeAudio. 227 | Whether the consecutive segments of same speaker should be merged 228 | 229 | :param encoding: The merge_segments of this DiarizeAudio. 230 | :type: str 231 | """ 232 | if merge_segments is None: 233 | raise ValueError("Invalid value for `merge_segments`, must not be `None`") 234 | 235 | self._merge_segments = merge_segments 236 | 237 | def to_dict(self): 238 | """ 239 | Returns the model properties as a dict 240 | """ 241 | result = {} 242 | 243 | for attr, _ in iteritems(self.swagger_types): 244 | value = getattr(self, attr) 245 | if isinstance(value, list): 246 | result[attr] = list(map( 247 | lambda x: x.to_dict() if hasattr(x, "to_dict") else x, 248 | value 249 | )) 250 | elif hasattr(value, "to_dict"): 251 | result[attr] = value.to_dict() 252 | elif isinstance(value, dict): 253 | result[attr] = dict(map( 254 | lambda item: (item[0], item[1].to_dict()) 255 | if hasattr(item[1], "to_dict") else item, 256 | value.items() 257 | )) 258 | else: 259 | result[attr] = value 260 | 261 | return result 262 | 263 | def to_str(self): 264 | """ 265 | Returns the string representation of the model 266 | """ 267 | return pformat(self.to_dict()) 268 | 269 | def __repr__(self): 270 | """ 271 | For `print` and `pprint` 272 | """ 273 | return self.to_str() 274 | 275 | def __eq__(self, other): 276 | """ 277 | Returns true if both objects are equal 278 | """ 279 | if not isinstance(other, DiarizeAudio): 280 | return False 281 | 282 | return self.__dict__ == other.__dict__ 283 | 284 | def __ne__(self, other): 285 | """ 286 | Returns true if both objects are not equal 287 | """ 288 | return not self == other 289 | 290 | @staticmethod 291 | def from_file(file_name, language_code='en-US', speakers=-1, merge_segments=True, audio_type='default'): 292 | media_info = MediaInfo.parse(file_name) 293 | codec = media_info.tracks[0].__dict__['codec'] 294 | sampling_rate = media_info.tracks[1].__dict__['sampling_rate'] 295 | fout = SIO.StringIO() 296 | with open(file_name, 'rb') as fin: 297 | audio_content = fin.read() 298 | audio = DiarizeAudio(encoding=codec, sample_rate=sampling_rate, language_code=language_code, 299 | content=base64.b64encode(audio_content).decode('utf-8'), speakers=speakers, 300 | merge_segments=merge_segments, audio_type=audio_type) 301 | fout.close() 302 | return audio 303 | 304 | @staticmethod 305 | def from_json(content_str): 306 | content = json.loads(content_str) 307 | audio = DiarizeAudio(encoding=content['encoding'], sample_rate=content['sample_rate'], 308 | language_code=content['language_code'], content=content['content'], 309 | speakers=content['speakers'], merge_segments=content['merge_segments'], 310 | audio_type=content['audio_type']) 311 | return audio 312 | -------------------------------------------------------------------------------- /deepaffects/models/diarize_segment.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 
6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | """ 9 | 10 | 11 | from pprint import pformat 12 | from six import iteritems 13 | 14 | class DiarizeSegment(object): 15 | 16 | def __init__(self, start=None, end=None, speaker_id=None): 17 | """ 18 | DiarizeSegment - a model defined in Swagger 19 | 20 | :param dict swaggerTypes: The key is attribute name 21 | and the value is attribute type. 22 | :param dict attributeMap: The key is attribute name 23 | and the value is json key in definition. 24 | """ 25 | self.swagger_types = { 26 | 'start': 'float', 27 | 'end': 'float', 28 | 'speaker_id': 'int' 29 | } 30 | 31 | self.attribute_map = { 32 | 'start': 'start', 33 | 'end': 'end', 34 | 'speaker_id': 'speakerId' 35 | } 36 | 37 | self._start = start 38 | self._end = end 39 | self._speaker_id = speaker_id 40 | 41 | @property 42 | def start(self): 43 | """ 44 | Gets the start of this DiarizeSegment. 45 | start of the corresponding diarized segment. 46 | 47 | :return: The encoding of this DiarizeSegment. 48 | :rtype: float 49 | """ 50 | return self._start 51 | 52 | @start.setter 53 | def start(self, start): 54 | """ 55 | Sets the start of this DiarizeSegment. 56 | start of the corresponding diarized segment. 57 | 58 | :param start: The start of this DiarizeSegment. 59 | :type: float 60 | """ 61 | if start is None: 62 | raise ValueError("Invalid value for `start`, must not be `None`") 63 | 64 | self._start = start 65 | 66 | @property 67 | def end(self): 68 | """ 69 | Gets the end of this DiarizeSegment. 70 | end of the corresponding diarized segment. 71 | 72 | :return: The end of this DiarizeSegment. 73 | :rtype: float 74 | """ 75 | return self._end 76 | 77 | @end.setter 78 | def end(self, end): 79 | """ 80 | Sets the end of this DiarizeSegment. 81 | end of the corresponding diarized segment. 82 | 83 | :param end: The end of this DiarizeSegment. 84 | :type: float 85 | """ 86 | if end is None: 87 | raise ValueError("Invalid value for `end`, must not be `None`") 88 | 89 | self._end = end 90 | 91 | @property 92 | def speaker_id(self): 93 | """ 94 | Gets the speaker_id of this DiarizeSegment. 95 | Unique identifier of the speaker speaking in the segment 96 | 97 | :return: The speaker_id of this DiarizeSegment. 98 | :rtype: int 99 | """ 100 | return self._speaker_id 101 | 102 | @speaker_id.setter 103 | def speaker_id(self, speaker_id): 104 | """ 105 | Sets the speaker_id of this DiarizeSegment. 106 | Unique identifier of the speaker speaking in the segment 107 | 108 | :param speaker_id: The speaker_id of this DiarizeSegment. 
109 | :type: int 110 | """ 111 | if speaker_id is None: 112 | raise ValueError("Invalid value for `speaker_id`, must not be `None`") 113 | 114 | self._speaker_id = speaker_id 115 | 116 | def to_dict(self): 117 | """ 118 | Returns the model properties as a dict 119 | """ 120 | result = {} 121 | 122 | for attr, _ in iteritems(self.swagger_types): 123 | value = getattr(self, attr) 124 | if isinstance(value, list): 125 | result[attr] = list(map( 126 | lambda x: x.to_dict() if hasattr(x, "to_dict") else x, 127 | value 128 | )) 129 | elif hasattr(value, "to_dict"): 130 | result[attr] = value.to_dict() 131 | elif isinstance(value, dict): 132 | result[attr] = dict(map( 133 | lambda item: (item[0], item[1].to_dict()) 134 | if hasattr(item[1], "to_dict") else item, 135 | value.items() 136 | )) 137 | else: 138 | result[attr] = value 139 | 140 | return result 141 | 142 | def to_str(self): 143 | """ 144 | Returns the string representation of the model 145 | """ 146 | return pformat(self.to_dict()) 147 | 148 | def __repr__(self): 149 | """ 150 | For `print` and `pprint` 151 | """ 152 | return self.to_str() 153 | 154 | def __eq__(self, other): 155 | """ 156 | Returns true if both objects are equal 157 | """ 158 | if not isinstance(other, DiarizeSegment): 159 | return False 160 | 161 | return self.__dict__ == other.__dict__ 162 | 163 | def __ne__(self, other): 164 | """ 165 | Returns true if both objects are not equal 166 | """ 167 | return not self == other 168 | -------------------------------------------------------------------------------- /deepaffects/models/emotion_score.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects audio APIs 7 | """ 8 | 9 | from pprint import pformat 10 | from six import iteritems 11 | import re 12 | 13 | 14 | class EmotionScore(object): 15 | """NOTE: This class is auto generated by the swagger code generator program. 16 | 17 | Do not edit the class manually. 18 | """ 19 | 20 | """ 21 | Attributes: 22 | swagger_types (dict): The key is attribute name 23 | and the value is attribute type. 24 | attribute_map (dict): The key is attribute name 25 | and the value is json key in definition. 26 | """ 27 | swagger_types = { 28 | 'emotion': 'str', 29 | 'score': 'float' 30 | } 31 | 32 | attribute_map = { 33 | 'emotion': 'emotion', 34 | 'score': 'score' 35 | } 36 | 37 | def __init__(self, emotion=None, score=None): # noqa: E501 38 | """EmotionScore - a model defined in Swagger""" # noqa: E501 39 | 40 | self._emotion = None 41 | self._score = None 42 | self.discriminator = None 43 | 44 | self.emotion = emotion 45 | self.score = score 46 | 47 | @property 48 | def emotion(self): 49 | """Gets the emotion of this EmotionScore. # noqa: E501 50 | 51 | Type of emotion like Happy, Sad, Surprised etc. # noqa: E501 52 | 53 | :return: The emotion of this EmotionScore. # noqa: E501 54 | :rtype: str 55 | """ 56 | return self._emotion 57 | 58 | @emotion.setter 59 | def emotion(self, emotion): 60 | """Sets the emotion of this EmotionScore. 61 | 62 | Type of emotion like Happy, Sad, Surprised etc. # noqa: E501 63 | 64 | :param emotion: The emotion of this EmotionScore. # noqa: E501 65 | :type: str 66 | """ 67 | 68 | self._emotion = emotion 69 | 70 | @property 71 | def score(self): 72 | """Gets the score of this EmotionScore. # noqa: E501 73 | 74 | Probability score or confidence of the corresponding emotion. # noqa: E501 75 | 76 | :return: The score of this EmotionScore.
# noqa: E501 77 | :rtype: float 78 | """ 79 | return self._score 80 | 81 | @score.setter 82 | def score(self, score): 83 | """Sets the score of this EmotionScore. 84 | 85 | Probability score or confidence of the corresponding emotion. # noqa: E501 86 | 87 | :param score: The score of this EmotionScore. # noqa: E501 88 | :type: float 89 | """ 90 | 91 | self._score = score 92 | 93 | def to_dict(self): 94 | """ 95 | Returns the model properties as a dict 96 | """ 97 | result = {} 98 | 99 | for attr, _ in iteritems(self.swagger_types): 100 | value = getattr(self, attr) 101 | if isinstance(value, list): 102 | result[attr] = list(map( 103 | lambda x: x.to_dict() if hasattr(x, "to_dict") else x, 104 | value 105 | )) 106 | elif hasattr(value, "to_dict"): 107 | result[attr] = value.to_dict() 108 | elif isinstance(value, dict): 109 | result[attr] = dict(map( 110 | lambda item: (item[0], item[1].to_dict()) 111 | if hasattr(item[1], "to_dict") else item, 112 | value.items() 113 | )) 114 | else: 115 | result[attr] = value 116 | 117 | return result 118 | 119 | def to_str(self): 120 | """ 121 | Returns the string representation of the model 122 | """ 123 | return pformat(self.to_dict()) 124 | 125 | def __repr__(self): 126 | """ 127 | For `print` and `pprint` 128 | """ 129 | return self.to_str() 130 | 131 | def __eq__(self, other): 132 | """ 133 | Returns true if both objects are equal 134 | """ 135 | if not isinstance(other, EmotionScore): 136 | return False 137 | 138 | return self.__dict__ == other.__dict__ 139 | 140 | def __ne__(self, other): 141 | """ 142 | Returns true if both objects are not equal 143 | """ 144 | return not self == other 145 | -------------------------------------------------------------------------------- /deepaffects/realtime/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SEERNET/deepaffects-python/e137b50f267f7aa133a0123d92be3552c5daa1bc/deepaffects/realtime/__init__.py -------------------------------------------------------------------------------- /deepaffects/realtime/api.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import random 4 | import time 5 | import grpc 6 | import os 7 | import io 8 | import sys 9 | import pydub 10 | import base64 11 | from pydub import AudioSegment 12 | 13 | import deepaffects.realtime.deepaffects_realtime_pb2_grpc as deepaffects_grpc 14 | import deepaffects.realtime.deepaffects_realtime_pb2 as deepaffects_types 15 | 16 | 17 | configuration = { 18 | "TIMEOUT_SECONDS": 2000, 19 | "API_KEY": "" 20 | } 21 | 22 | 23 | def get_deepaffects_client(): 24 | channel = grpc.insecure_channel('realtime.deepaffects.com:80') 25 | stub = deepaffects_grpc.DeepAffectsRealtimeStub(channel) 26 | return stub 27 | 28 | 29 | # def generate_chunks(): 30 | # # split sound in 1-second slices and export 31 | # audio_clip = AudioSegment.from_file("modi_video.mp4") 32 | # for i, chunk in enumerate(audio_clip[::1000]): 33 | # time.sleep(1) 34 | # chunk_name = "chunk.wav" 35 | # with open(chunk_name, "wb") as f: 36 | # chunk.export(f, format="wav") 37 | # base64_chunk = encode_to_base64(chunk_name) 38 | # os.remove(chunk_name) 39 | # print("Sending chunk %s" % (i)) 40 | # yield make_input_audio_segment(i, base64_chunk) 41 | 42 | 43 | # def identify_speaker(stub): 44 | # responses = stub.GetSpeaker(generate_chunks()) 45 | # for response in responses: 46 | # print("Received message %s at %s" % (response.id, 47 | # 
response.speaker)) 48 | 49 | 50 | # def run(): 51 | # channel = grpc.insecure_channel('localhost:50051') 52 | # stub = audio_stream_pb2_grpc.DeepAffectsApiStub(channel) 53 | # print("-------------- Realtime Api --------------") 54 | # get_speaker(stub) 55 | 56 | 57 | # if __name__ == '__main__': 58 | # run() 59 | -------------------------------------------------------------------------------- /deepaffects/realtime/deepaffects_realtime_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 2 | # source: deepaffects-realtime.proto 3 | 4 | import sys 5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import message as _message 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import symbol_database as _symbol_database 10 | from google.protobuf import descriptor_pb2 11 | # @@protoc_insertion_point(imports) 12 | 13 | _sym_db = _symbol_database.Default() 14 | 15 | 16 | 17 | 18 | DESCRIPTOR = _descriptor.FileDescriptor( 19 | name='deepaffects-realtime.proto', 20 | package='deepaffectsrealtime', 21 | syntax='proto3', 22 | serialized_pb=_b('\n\x1a\x64\x65\x65paffects-realtime.proto\x12\x13\x64\x65\x65paffectsrealtime\"\xb3\x01\n\x0cSegmentChunk\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\t\x12\x10\n\x08\x65ncoding\x18\x03 \x01(\t\x12\x14\n\x0clanguageCode\x18\x04 \x01(\t\x12\x12\n\nsampleRate\x18\x05 \x01(\x05\x12\x12\n\nspeakerIds\x18\x06 \x03(\t\x12\x15\n\rsegmentOffset\x18\x07 \x01(\x01\x12\x10\n\x08\x64uration\x18\x08 \x01(\x01\x12\r\n\x05\x45rror\x18\t \x01(\t\"y\n\x0eSegmentSpeaker\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x12\n\nspeaker_id\x18\x02 \x01(\t\x12\r\n\x05start\x18\x03 \x01(\x01\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x01\x12\r\n\x05\x45rror\x18\x05 \x01(\t\x12\x1c\n\x14identification_score\x18\x06 \x01(\x02\"o\n\x0eSegmentEmotion\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05start\x18\x02 \x01(\x01\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x01\x12\x0f\n\x07\x65motion\x18\x04 \x01(\t\x12\r\n\x05\x45rror\x18\x05 \x01(\t\x12\x15\n\remotion_score\x18\x06 \x01(\x01\"\xa8\x01\n\x15SegmentDiarizeEmotion\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05start\x18\x02 \x01(\x01\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x01\x12\x0f\n\x07\x65motion\x18\x04 \x01(\t\x12\x12\n\nspeaker_id\x18\x05 \x01(\t\x12\r\n\x05\x45rror\x18\x06 \x01(\t\x12\x15\n\remotion_score\x18\x07 \x01(\x01\x12\x1c\n\x14identification_score\x18\x08 \x01(\x01\x32\xbe\x02\n\x13\x44\x65\x65pAffectsRealtime\x12_\n\x0fIdentifySpeaker\x12!.deepaffectsrealtime.SegmentChunk\x1a#.deepaffectsrealtime.SegmentSpeaker\"\x00(\x01\x30\x01\x12_\n\x0fIdentifyEmotion\x12!.deepaffectsrealtime.SegmentChunk\x1a#.deepaffectsrealtime.SegmentEmotion\"\x00(\x01\x30\x01\x12\x65\n\x0e\x44iarizeEmotion\x12!.deepaffectsrealtime.SegmentChunk\x1a*.deepaffectsrealtime.SegmentDiarizeEmotion\"\x00(\x01\x30\x01\x42;\n\x1cio.grpc.examples.deepaffectsB\x13\x44\x65\x65paffectsRealtimeP\x01\xa2\x02\x03\x44RIb\x06proto3') 23 | ) 24 | 25 | 26 | 27 | 28 | _SEGMENTCHUNK = _descriptor.Descriptor( 29 | name='SegmentChunk', 30 | full_name='deepaffectsrealtime.SegmentChunk', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | containing_type=None, 34 | fields=[ 35 | _descriptor.FieldDescriptor( 36 | name='id', full_name='deepaffectsrealtime.SegmentChunk.id', index=0, 37 | number=1, type=5, cpp_type=1, label=1, 38 | has_default_value=False, 
default_value=0, 39 | message_type=None, enum_type=None, containing_type=None, 40 | is_extension=False, extension_scope=None, 41 | options=None, file=DESCRIPTOR), 42 | _descriptor.FieldDescriptor( 43 | name='content', full_name='deepaffectsrealtime.SegmentChunk.content', index=1, 44 | number=2, type=9, cpp_type=9, label=1, 45 | has_default_value=False, default_value=_b("").decode('utf-8'), 46 | message_type=None, enum_type=None, containing_type=None, 47 | is_extension=False, extension_scope=None, 48 | options=None, file=DESCRIPTOR), 49 | _descriptor.FieldDescriptor( 50 | name='encoding', full_name='deepaffectsrealtime.SegmentChunk.encoding', index=2, 51 | number=3, type=9, cpp_type=9, label=1, 52 | has_default_value=False, default_value=_b("").decode('utf-8'), 53 | message_type=None, enum_type=None, containing_type=None, 54 | is_extension=False, extension_scope=None, 55 | options=None, file=DESCRIPTOR), 56 | _descriptor.FieldDescriptor( 57 | name='languageCode', full_name='deepaffectsrealtime.SegmentChunk.languageCode', index=3, 58 | number=4, type=9, cpp_type=9, label=1, 59 | has_default_value=False, default_value=_b("").decode('utf-8'), 60 | message_type=None, enum_type=None, containing_type=None, 61 | is_extension=False, extension_scope=None, 62 | options=None, file=DESCRIPTOR), 63 | _descriptor.FieldDescriptor( 64 | name='sampleRate', full_name='deepaffectsrealtime.SegmentChunk.sampleRate', index=4, 65 | number=5, type=5, cpp_type=1, label=1, 66 | has_default_value=False, default_value=0, 67 | message_type=None, enum_type=None, containing_type=None, 68 | is_extension=False, extension_scope=None, 69 | options=None, file=DESCRIPTOR), 70 | _descriptor.FieldDescriptor( 71 | name='speakerIds', full_name='deepaffectsrealtime.SegmentChunk.speakerIds', index=5, 72 | number=6, type=9, cpp_type=9, label=3, 73 | has_default_value=False, default_value=[], 74 | message_type=None, enum_type=None, containing_type=None, 75 | is_extension=False, extension_scope=None, 76 | options=None, file=DESCRIPTOR), 77 | _descriptor.FieldDescriptor( 78 | name='segmentOffset', full_name='deepaffectsrealtime.SegmentChunk.segmentOffset', index=6, 79 | number=7, type=1, cpp_type=5, label=1, 80 | has_default_value=False, default_value=float(0), 81 | message_type=None, enum_type=None, containing_type=None, 82 | is_extension=False, extension_scope=None, 83 | options=None, file=DESCRIPTOR), 84 | _descriptor.FieldDescriptor( 85 | name='duration', full_name='deepaffectsrealtime.SegmentChunk.duration', index=7, 86 | number=8, type=1, cpp_type=5, label=1, 87 | has_default_value=False, default_value=float(0), 88 | message_type=None, enum_type=None, containing_type=None, 89 | is_extension=False, extension_scope=None, 90 | options=None, file=DESCRIPTOR), 91 | _descriptor.FieldDescriptor( 92 | name='Error', full_name='deepaffectsrealtime.SegmentChunk.Error', index=8, 93 | number=9, type=9, cpp_type=9, label=1, 94 | has_default_value=False, default_value=_b("").decode('utf-8'), 95 | message_type=None, enum_type=None, containing_type=None, 96 | is_extension=False, extension_scope=None, 97 | options=None, file=DESCRIPTOR), 98 | ], 99 | extensions=[ 100 | ], 101 | nested_types=[], 102 | enum_types=[ 103 | ], 104 | options=None, 105 | is_extendable=False, 106 | syntax='proto3', 107 | extension_ranges=[], 108 | oneofs=[ 109 | ], 110 | serialized_start=52, 111 | serialized_end=231, 112 | ) 113 | 114 | 115 | _SEGMENTSPEAKER = _descriptor.Descriptor( 116 | name='SegmentSpeaker', 117 | full_name='deepaffectsrealtime.SegmentSpeaker', 118 | 
filename=None, 119 | file=DESCRIPTOR, 120 | containing_type=None, 121 | fields=[ 122 | _descriptor.FieldDescriptor( 123 | name='id', full_name='deepaffectsrealtime.SegmentSpeaker.id', index=0, 124 | number=1, type=5, cpp_type=1, label=1, 125 | has_default_value=False, default_value=0, 126 | message_type=None, enum_type=None, containing_type=None, 127 | is_extension=False, extension_scope=None, 128 | options=None, file=DESCRIPTOR), 129 | _descriptor.FieldDescriptor( 130 | name='speaker_id', full_name='deepaffectsrealtime.SegmentSpeaker.speaker_id', index=1, 131 | number=2, type=9, cpp_type=9, label=1, 132 | has_default_value=False, default_value=_b("").decode('utf-8'), 133 | message_type=None, enum_type=None, containing_type=None, 134 | is_extension=False, extension_scope=None, 135 | options=None, file=DESCRIPTOR), 136 | _descriptor.FieldDescriptor( 137 | name='start', full_name='deepaffectsrealtime.SegmentSpeaker.start', index=2, 138 | number=3, type=1, cpp_type=5, label=1, 139 | has_default_value=False, default_value=float(0), 140 | message_type=None, enum_type=None, containing_type=None, 141 | is_extension=False, extension_scope=None, 142 | options=None, file=DESCRIPTOR), 143 | _descriptor.FieldDescriptor( 144 | name='end', full_name='deepaffectsrealtime.SegmentSpeaker.end', index=3, 145 | number=4, type=1, cpp_type=5, label=1, 146 | has_default_value=False, default_value=float(0), 147 | message_type=None, enum_type=None, containing_type=None, 148 | is_extension=False, extension_scope=None, 149 | options=None, file=DESCRIPTOR), 150 | _descriptor.FieldDescriptor( 151 | name='Error', full_name='deepaffectsrealtime.SegmentSpeaker.Error', index=4, 152 | number=5, type=9, cpp_type=9, label=1, 153 | has_default_value=False, default_value=_b("").decode('utf-8'), 154 | message_type=None, enum_type=None, containing_type=None, 155 | is_extension=False, extension_scope=None, 156 | options=None, file=DESCRIPTOR), 157 | _descriptor.FieldDescriptor( 158 | name='identification_score', full_name='deepaffectsrealtime.SegmentSpeaker.identification_score', index=5, 159 | number=6, type=2, cpp_type=6, label=1, 160 | has_default_value=False, default_value=float(0), 161 | message_type=None, enum_type=None, containing_type=None, 162 | is_extension=False, extension_scope=None, 163 | options=None, file=DESCRIPTOR), 164 | ], 165 | extensions=[ 166 | ], 167 | nested_types=[], 168 | enum_types=[ 169 | ], 170 | options=None, 171 | is_extendable=False, 172 | syntax='proto3', 173 | extension_ranges=[], 174 | oneofs=[ 175 | ], 176 | serialized_start=233, 177 | serialized_end=354, 178 | ) 179 | 180 | 181 | _SEGMENTEMOTION = _descriptor.Descriptor( 182 | name='SegmentEmotion', 183 | full_name='deepaffectsrealtime.SegmentEmotion', 184 | filename=None, 185 | file=DESCRIPTOR, 186 | containing_type=None, 187 | fields=[ 188 | _descriptor.FieldDescriptor( 189 | name='id', full_name='deepaffectsrealtime.SegmentEmotion.id', index=0, 190 | number=1, type=5, cpp_type=1, label=1, 191 | has_default_value=False, default_value=0, 192 | message_type=None, enum_type=None, containing_type=None, 193 | is_extension=False, extension_scope=None, 194 | options=None, file=DESCRIPTOR), 195 | _descriptor.FieldDescriptor( 196 | name='start', full_name='deepaffectsrealtime.SegmentEmotion.start', index=1, 197 | number=2, type=1, cpp_type=5, label=1, 198 | has_default_value=False, default_value=float(0), 199 | message_type=None, enum_type=None, containing_type=None, 200 | is_extension=False, extension_scope=None, 201 | options=None, 
file=DESCRIPTOR), 202 | _descriptor.FieldDescriptor( 203 | name='end', full_name='deepaffectsrealtime.SegmentEmotion.end', index=2, 204 | number=3, type=1, cpp_type=5, label=1, 205 | has_default_value=False, default_value=float(0), 206 | message_type=None, enum_type=None, containing_type=None, 207 | is_extension=False, extension_scope=None, 208 | options=None, file=DESCRIPTOR), 209 | _descriptor.FieldDescriptor( 210 | name='emotion', full_name='deepaffectsrealtime.SegmentEmotion.emotion', index=3, 211 | number=4, type=9, cpp_type=9, label=1, 212 | has_default_value=False, default_value=_b("").decode('utf-8'), 213 | message_type=None, enum_type=None, containing_type=None, 214 | is_extension=False, extension_scope=None, 215 | options=None, file=DESCRIPTOR), 216 | _descriptor.FieldDescriptor( 217 | name='Error', full_name='deepaffectsrealtime.SegmentEmotion.Error', index=4, 218 | number=5, type=9, cpp_type=9, label=1, 219 | has_default_value=False, default_value=_b("").decode('utf-8'), 220 | message_type=None, enum_type=None, containing_type=None, 221 | is_extension=False, extension_scope=None, 222 | options=None, file=DESCRIPTOR), 223 | _descriptor.FieldDescriptor( 224 | name='emotion_score', full_name='deepaffectsrealtime.SegmentEmotion.emotion_score', index=5, 225 | number=6, type=1, cpp_type=5, label=1, 226 | has_default_value=False, default_value=float(0), 227 | message_type=None, enum_type=None, containing_type=None, 228 | is_extension=False, extension_scope=None, 229 | options=None, file=DESCRIPTOR), 230 | ], 231 | extensions=[ 232 | ], 233 | nested_types=[], 234 | enum_types=[ 235 | ], 236 | options=None, 237 | is_extendable=False, 238 | syntax='proto3', 239 | extension_ranges=[], 240 | oneofs=[ 241 | ], 242 | serialized_start=356, 243 | serialized_end=467, 244 | ) 245 | 246 | 247 | _SEGMENTDIARIZEEMOTION = _descriptor.Descriptor( 248 | name='SegmentDiarizeEmotion', 249 | full_name='deepaffectsrealtime.SegmentDiarizeEmotion', 250 | filename=None, 251 | file=DESCRIPTOR, 252 | containing_type=None, 253 | fields=[ 254 | _descriptor.FieldDescriptor( 255 | name='id', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.id', index=0, 256 | number=1, type=5, cpp_type=1, label=1, 257 | has_default_value=False, default_value=0, 258 | message_type=None, enum_type=None, containing_type=None, 259 | is_extension=False, extension_scope=None, 260 | options=None, file=DESCRIPTOR), 261 | _descriptor.FieldDescriptor( 262 | name='start', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.start', index=1, 263 | number=2, type=1, cpp_type=5, label=1, 264 | has_default_value=False, default_value=float(0), 265 | message_type=None, enum_type=None, containing_type=None, 266 | is_extension=False, extension_scope=None, 267 | options=None, file=DESCRIPTOR), 268 | _descriptor.FieldDescriptor( 269 | name='end', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.end', index=2, 270 | number=3, type=1, cpp_type=5, label=1, 271 | has_default_value=False, default_value=float(0), 272 | message_type=None, enum_type=None, containing_type=None, 273 | is_extension=False, extension_scope=None, 274 | options=None, file=DESCRIPTOR), 275 | _descriptor.FieldDescriptor( 276 | name='emotion', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.emotion', index=3, 277 | number=4, type=9, cpp_type=9, label=1, 278 | has_default_value=False, default_value=_b("").decode('utf-8'), 279 | message_type=None, enum_type=None, containing_type=None, 280 | is_extension=False, extension_scope=None, 281 | options=None, file=DESCRIPTOR), 282 | 
_descriptor.FieldDescriptor( 283 | name='speaker_id', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.speaker_id', index=4, 284 | number=5, type=9, cpp_type=9, label=1, 285 | has_default_value=False, default_value=_b("").decode('utf-8'), 286 | message_type=None, enum_type=None, containing_type=None, 287 | is_extension=False, extension_scope=None, 288 | options=None, file=DESCRIPTOR), 289 | _descriptor.FieldDescriptor( 290 | name='Error', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.Error', index=5, 291 | number=6, type=9, cpp_type=9, label=1, 292 | has_default_value=False, default_value=_b("").decode('utf-8'), 293 | message_type=None, enum_type=None, containing_type=None, 294 | is_extension=False, extension_scope=None, 295 | options=None, file=DESCRIPTOR), 296 | _descriptor.FieldDescriptor( 297 | name='emotion_score', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.emotion_score', index=6, 298 | number=7, type=1, cpp_type=5, label=1, 299 | has_default_value=False, default_value=float(0), 300 | message_type=None, enum_type=None, containing_type=None, 301 | is_extension=False, extension_scope=None, 302 | options=None, file=DESCRIPTOR), 303 | _descriptor.FieldDescriptor( 304 | name='identification_score', full_name='deepaffectsrealtime.SegmentDiarizeEmotion.identification_score', index=7, 305 | number=8, type=1, cpp_type=5, label=1, 306 | has_default_value=False, default_value=float(0), 307 | message_type=None, enum_type=None, containing_type=None, 308 | is_extension=False, extension_scope=None, 309 | options=None, file=DESCRIPTOR), 310 | ], 311 | extensions=[ 312 | ], 313 | nested_types=[], 314 | enum_types=[ 315 | ], 316 | options=None, 317 | is_extendable=False, 318 | syntax='proto3', 319 | extension_ranges=[], 320 | oneofs=[ 321 | ], 322 | serialized_start=470, 323 | serialized_end=638, 324 | ) 325 | 326 | DESCRIPTOR.message_types_by_name['SegmentChunk'] = _SEGMENTCHUNK 327 | DESCRIPTOR.message_types_by_name['SegmentSpeaker'] = _SEGMENTSPEAKER 328 | DESCRIPTOR.message_types_by_name['SegmentEmotion'] = _SEGMENTEMOTION 329 | DESCRIPTOR.message_types_by_name['SegmentDiarizeEmotion'] = _SEGMENTDIARIZEEMOTION 330 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 331 | 332 | SegmentChunk = _reflection.GeneratedProtocolMessageType('SegmentChunk', (_message.Message,), dict( 333 | DESCRIPTOR = _SEGMENTCHUNK, 334 | __module__ = 'deepaffects_realtime_pb2' 335 | # @@protoc_insertion_point(class_scope:deepaffectsrealtime.SegmentChunk) 336 | )) 337 | _sym_db.RegisterMessage(SegmentChunk) 338 | 339 | SegmentSpeaker = _reflection.GeneratedProtocolMessageType('SegmentSpeaker', (_message.Message,), dict( 340 | DESCRIPTOR = _SEGMENTSPEAKER, 341 | __module__ = 'deepaffects_realtime_pb2' 342 | # @@protoc_insertion_point(class_scope:deepaffectsrealtime.SegmentSpeaker) 343 | )) 344 | _sym_db.RegisterMessage(SegmentSpeaker) 345 | 346 | SegmentEmotion = _reflection.GeneratedProtocolMessageType('SegmentEmotion', (_message.Message,), dict( 347 | DESCRIPTOR = _SEGMENTEMOTION, 348 | __module__ = 'deepaffects_realtime_pb2' 349 | # @@protoc_insertion_point(class_scope:deepaffectsrealtime.SegmentEmotion) 350 | )) 351 | _sym_db.RegisterMessage(SegmentEmotion) 352 | 353 | SegmentDiarizeEmotion = _reflection.GeneratedProtocolMessageType('SegmentDiarizeEmotion', (_message.Message,), dict( 354 | DESCRIPTOR = _SEGMENTDIARIZEEMOTION, 355 | __module__ = 'deepaffects_realtime_pb2' 356 | # @@protoc_insertion_point(class_scope:deepaffectsrealtime.SegmentDiarizeEmotion) 357 | )) 358 | 
_sym_db.RegisterMessage(SegmentDiarizeEmotion) 359 | 360 | 361 | DESCRIPTOR.has_options = True 362 | DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034io.grpc.examples.deepaffectsB\023DeepaffectsRealtimeP\001\242\002\003DRI')) 363 | 364 | _DEEPAFFECTSREALTIME = _descriptor.ServiceDescriptor( 365 | name='DeepAffectsRealtime', 366 | full_name='deepaffectsrealtime.DeepAffectsRealtime', 367 | file=DESCRIPTOR, 368 | index=0, 369 | options=None, 370 | serialized_start=641, 371 | serialized_end=959, 372 | methods=[ 373 | _descriptor.MethodDescriptor( 374 | name='IdentifySpeaker', 375 | full_name='deepaffectsrealtime.DeepAffectsRealtime.IdentifySpeaker', 376 | index=0, 377 | containing_service=None, 378 | input_type=_SEGMENTCHUNK, 379 | output_type=_SEGMENTSPEAKER, 380 | options=None, 381 | ), 382 | _descriptor.MethodDescriptor( 383 | name='IdentifyEmotion', 384 | full_name='deepaffectsrealtime.DeepAffectsRealtime.IdentifyEmotion', 385 | index=1, 386 | containing_service=None, 387 | input_type=_SEGMENTCHUNK, 388 | output_type=_SEGMENTEMOTION, 389 | options=None, 390 | ), 391 | _descriptor.MethodDescriptor( 392 | name='DiarizeEmotion', 393 | full_name='deepaffectsrealtime.DeepAffectsRealtime.DiarizeEmotion', 394 | index=2, 395 | containing_service=None, 396 | input_type=_SEGMENTCHUNK, 397 | output_type=_SEGMENTDIARIZEEMOTION, 398 | options=None, 399 | ), 400 | ]) 401 | _sym_db.RegisterServiceDescriptor(_DEEPAFFECTSREALTIME) 402 | 403 | DESCRIPTOR.services_by_name['DeepAffectsRealtime'] = _DEEPAFFECTSREALTIME 404 | 405 | # @@protoc_insertion_point(module_scope) 406 | -------------------------------------------------------------------------------- /deepaffects/realtime/deepaffects_realtime_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | import grpc 3 | 4 | import deepaffects.realtime.deepaffects_realtime_pb2 as deepaffects__realtime__pb2 5 | 6 | 7 | class DeepAffectsRealtimeStub(object): 8 | """Interface exported by the server. 9 | """ 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 16 | """ 17 | self.IdentifySpeaker = channel.stream_stream( 18 | '/deepaffectsrealtime.DeepAffectsRealtime/IdentifySpeaker', 19 | request_serializer=deepaffects__realtime__pb2.SegmentChunk.SerializeToString, 20 | response_deserializer=deepaffects__realtime__pb2.SegmentSpeaker.FromString, 21 | ) 22 | self.IdentifyEmotion = channel.stream_stream( 23 | '/deepaffectsrealtime.DeepAffectsRealtime/IdentifyEmotion', 24 | request_serializer=deepaffects__realtime__pb2.SegmentChunk.SerializeToString, 25 | response_deserializer=deepaffects__realtime__pb2.SegmentEmotion.FromString, 26 | ) 27 | self.DiarizeEmotion = channel.stream_stream( 28 | '/deepaffectsrealtime.DeepAffectsRealtime/DiarizeEmotion', 29 | request_serializer=deepaffects__realtime__pb2.SegmentChunk.SerializeToString, 30 | response_deserializer=deepaffects__realtime__pb2.SegmentDiarizeEmotion.FromString, 31 | ) 32 | 33 | 34 | class DeepAffectsRealtimeServicer(object): 35 | """Interface exported by the server. 36 | """ 37 | 38 | def IdentifySpeaker(self, request_iterator, context): 39 | """A Bidirectional streaming RPC. 40 | 41 | Accepts a stream of SegmentChunk sent while a route is being traversed, 42 | while receiving other SegmentSpeaker (e.g. from other users). 
43 | """ 44 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 45 | context.set_details('Method not implemented!') 46 | raise NotImplementedError('Method not implemented!') 47 | 48 | def IdentifyEmotion(self, request_iterator, context): 49 | # missing associated documentation comment in .proto file 50 | pass 51 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 52 | context.set_details('Method not implemented!') 53 | raise NotImplementedError('Method not implemented!') 54 | 55 | def DiarizeEmotion(self, request_iterator, context): 56 | # missing associated documentation comment in .proto file 57 | pass 58 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 59 | context.set_details('Method not implemented!') 60 | raise NotImplementedError('Method not implemented!') 61 | 62 | 63 | def add_DeepAffectsRealtimeServicer_to_server(servicer, server): 64 | rpc_method_handlers = { 65 | 'IdentifySpeaker': grpc.stream_stream_rpc_method_handler( 66 | servicer.IdentifySpeaker, 67 | request_deserializer=deepaffects__realtime__pb2.SegmentChunk.FromString, 68 | response_serializer=deepaffects__realtime__pb2.SegmentSpeaker.SerializeToString, 69 | ), 70 | 'IdentifyEmotion': grpc.stream_stream_rpc_method_handler( 71 | servicer.IdentifyEmotion, 72 | request_deserializer=deepaffects__realtime__pb2.SegmentChunk.FromString, 73 | response_serializer=deepaffects__realtime__pb2.SegmentEmotion.SerializeToString, 74 | ), 75 | 'DiarizeEmotion': grpc.stream_stream_rpc_method_handler( 76 | servicer.DiarizeEmotion, 77 | request_deserializer=deepaffects__realtime__pb2.SegmentChunk.FromString, 78 | response_serializer=deepaffects__realtime__pb2.SegmentDiarizeEmotion.SerializeToString, 79 | ), 80 | } 81 | generic_handler = grpc.method_handlers_generic_handler( 82 | 'deepaffectsrealtime.DeepAffectsRealtime', rpc_method_handlers) 83 | server.add_generic_rpc_handlers((generic_handler,)) 84 | -------------------------------------------------------------------------------- /deepaffects/realtime/types.py: -------------------------------------------------------------------------------- 1 | from deepaffects.realtime.deepaffects_realtime_pb2 import SegmentChunk 2 | 3 | 4 | def segment_chunk(content, encoding="wav", languageCode="en-US", sampleRate=8000, segmentOffset=0, duration=0): 5 | """segment_chunk. 
6 | 7 | Args: 8 | encoding : Audio Encoding, 9 | languageCode: language code , 10 | sampleRate: sample rate of audio , 11 | content: base64 encoded audio, 12 | duration: in seconds, 13 | segmentOffset: offset of the segment in complete audio stream 14 | """ 15 | 16 | if duration < 3: 17 | raise ValueError('Chunk duration should be greater than 3 sec.') 18 | 19 | return SegmentChunk( 20 | content=content, 21 | encoding=encoding, 22 | languageCode=languageCode, 23 | sampleRate=sampleRate, 24 | duration=duration, 25 | segmentOffset=segmentOffset) 26 | -------------------------------------------------------------------------------- /deepaffects/realtime/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import random 3 | import time 4 | import grpc 5 | import os 6 | import uuid 7 | import io 8 | import sys 9 | import m3u8 10 | import pydub 11 | import requests 12 | import base64 13 | import uuid 14 | from pydub import AudioSegment 15 | from pytube.request import get 16 | from pytube.compat import urlopen 17 | from pytube import YouTube 18 | 19 | import deepaffects.realtime.deepaffects_realtime_pb2_grpc as deepaffects_grpc 20 | import deepaffects.realtime.deepaffects_realtime_pb2 as deepaffects_types 21 | from deepaffects.realtime.types import segment_chunk 22 | 23 | MAX_MESSAGE_LENGTH = 1000000000 24 | 25 | def encode_to_base64(file): 26 | with open(file, "rb") as f1: 27 | return base64.b64encode(f1.read()).decode('utf-8') 28 | 29 | 30 | def get_segment_chunk_from_pydub_chunk(chunk, offset, i): 31 | base64_chunk = pydub_segment_to_base64(chunk,i) 32 | print("Sending chunk %s - with size :- %s sec" % (i, len(chunk) / 1000)) 33 | audio_segments = segment_chunk( 34 | encoding="wav", 35 | languageCode="en-US", 36 | sampleRate=chunk.frame_rate, 37 | content=base64_chunk, 38 | duration=len(chunk) / 1000, 39 | segmentOffset=offset) 40 | offset = (offset + len(chunk) / 1000) 41 | return audio_segments, offset 42 | 43 | 44 | def pydub_segment_to_base64(chunk,i): 45 | chunk_file = "chunk-{}-{}.wav".format(i, str(uuid.uuid4())) 46 | chunk.export(chunk_file, format="wav") 47 | base64_chunk = encode_to_base64(chunk_file) 48 | try: 49 | os.remove(chunk_file) 50 | except: 51 | pass 52 | return base64_chunk 53 | 54 | 55 | def get_deepaffects_client(host_url='realtime.deepaffects.com:80'): 56 | channel = grpc.insecure_channel(host_url, options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)]) 57 | stub = deepaffects_grpc.DeepAffectsRealtimeStub(channel) 58 | return stub 59 | 60 | 61 | def chunk_generator_from_playlist(file_path=None, out_file_name=None, buffer_size=30000, download_audio=True): 62 | chunk = None 63 | try: 64 | offset = 0 65 | last_processed = -1 66 | endlist = False 67 | # for playlists with m3u8 extensions 68 | m3u8_obj_outer = m3u8.load(file_path) 69 | base_uri = m3u8_obj_outer.base_uri 70 | base_audio = m3u8_obj_outer.data['playlists'][0]['uri'] 71 | audio_stream_url = base_uri + base_audio 72 | chunk_index = 1 73 | index = 0 74 | unsent_segment = False 75 | while endlist is not True: 76 | try: 77 | m3u8_obj = m3u8.load(audio_stream_url) 78 | if last_processed < m3u8_obj.media_sequence: 79 | for i, segment in enumerate(m3u8_obj.data['segments']): 80 | response = urlopen(base_uri + segment['uri']) 81 | buff = response.read() 82 | new_chunk = AudioSegment.from_file(io.BytesIO(buff), "aac") 83 | 84 | if (chunk_index == 1) and (last_processed == -1): 
85 | chunk = new_chunk 86 | else: 87 | chunk = chunk + new_chunk 88 | 89 | offset_in_milliseconds = offset * 1000 90 | if (len(chunk) - (offset_in_milliseconds)) > buffer_size: 91 | segment_chunk = chunk[offset_in_milliseconds: offset_in_milliseconds + buffer_size] 92 | audio_segment, offset = get_segment_chunk_from_pydub_chunk(segment_chunk, offset, index) 93 | index = index + 1 94 | yield audio_segment 95 | chunk_index = chunk_index + 1 96 | last_processed = m3u8_obj.media_sequence 97 | 98 | if m3u8_obj.data['is_endlist'] == True: 99 | endlist = True 100 | else: 101 | time.sleep(2) 102 | except Exception as e: 103 | print(e) 104 | endlist = True 105 | 106 | if (len(chunk) - (offset * 1000)) > 0: 107 | segment_chunk = chunk[offset * 1000:] 108 | audio_segment, offset = get_segment_chunk_from_pydub_chunk(segment_chunk, offset, index) 109 | index = index + 1 110 | yield audio_segment 111 | 112 | except Exception as e: 113 | print(e) 114 | finally: 115 | if download_audio and (out_file_name is not None) and (chunk is not None): 116 | dir_path = "./output" 117 | if not os.path.exists(dir_path): 118 | os.makedirs(dir_path) 119 | chunk.export(dir_path + out_file_name + "-audio-out.wav", format="wav") 120 | 121 | 122 | 123 | def chunk_generator_from_file(file_path, buffer_size=30000): 124 | # Implement this generator function to yield Audio segments 125 | # To generate Audio Segments use segment_chunk 126 | # from deepaffects.realtime.types import segment_chunk 127 | # yield segment_chunk(Args) 128 | """segment_chunk. 129 | 130 | Args: 131 | encoding : Audio Encoding, 132 | languageCode: language code , 133 | sampleRate: sample rate of audio , 134 | content: base64 encoded audio, 135 | segmentOffset: offset of the segment in complete audio stream 136 | """ 137 | 138 | """ 139 | Sample implementation which reads audio from a file and splits it into 140 | segments more than 3 sec 141 | AudioSegment and yields base64 encoded audio segment objects asynchronously 142 | """ 143 | audio_clip = AudioSegment.from_file(file_path) 144 | offset = None 145 | buffer_chunk = None 146 | index = 0 147 | for i, chunk in enumerate(audio_clip[::buffer_size]): 148 | if offset is None: 149 | offset = 0 150 | if i == 0: 151 | buffer_chunk = chunk 152 | else: 153 | buffer_chunk = buffer_chunk + chunk 154 | offset_in_milliseconds = offset * 1000 155 | if ((len(buffer_chunk) - (offset_in_milliseconds)) > buffer_size): 156 | segment_chunk = buffer_chunk[offset_in_milliseconds: offset_in_milliseconds + buffer_size] 157 | audio_segment, offset = get_segment_chunk_from_pydub_chunk(segment_chunk, offset, index) 158 | index = index + 1 159 | yield audio_segment 160 | 161 | if ((len(buffer_chunk) - (offset * 1000)) != 0): 162 | segment_chunk = buffer_chunk[offset * 1000:] 163 | audio_segment, offset = get_segment_chunk_from_pydub_chunk(segment_chunk, offset, index) 164 | index = index + 1 165 | yield audio_segment 166 | 167 | 168 | 169 | def chunk_generator_from_url(file_path, is_youtube_url=False, buffer_size=30000, chunk_size=15 * 8192): 170 | if is_youtube_url: 171 | yt = YouTube(file_path) 172 | stream = yt.streams.filter(only_audio=True).first() 173 | download_url = stream.url 174 | else: 175 | download_url = file_path 176 | offset = None 177 | buffer_chunk = None 178 | index = 0 179 | for i, chunk in enumerate(get(url=download_url, streaming=True, chunk_size=chunk_size)): 180 | if offset is None: 181 | offset = 0 182 | if i == 0: 183 | buffer_chunk_raw = chunk 184 | else: 185 | buffer_chunk_raw = buffer_chunk_raw + chunk 
186 | 187 | buffer_chunk = AudioSegment.from_file(io.BytesIO( 188 | buffer_chunk_raw)) 189 | 190 | offset_in_milliseconds = offset * 1000 191 | if (len(buffer_chunk) - (offset_in_milliseconds)) > buffer_size: 192 | segment_chunk = buffer_chunk[offset_in_milliseconds: offset_in_milliseconds + buffer_size] 193 | audio_segment, offset = get_segment_chunk_from_pydub_chunk(segment_chunk, offset, index) 194 | index = index + 1 195 | yield audio_segment 196 | if (len(buffer_chunk) - (offset * 1000)) != 0: 197 | segment_chunk = buffer_chunk[offset * 1000:] 198 | audio_segment, offset = get_segment_chunk_from_pydub_chunk(segment_chunk, offset, index) 199 | index = index + 1 200 | yield audio_segment 201 | -------------------------------------------------------------------------------- /deepaffects/rest.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: 0.1.0 9 | 10 | Generated by: https://github.com/swagger-api/swagger-codegen.git 11 | """ 12 | 13 | 14 | from __future__ import absolute_import 15 | 16 | import io 17 | import json 18 | import ssl 19 | import certifi 20 | import logging 21 | import re 22 | 23 | # python 2 and python 3 compatibility library 24 | from six import PY3 25 | from six.moves.urllib.parse import urlencode 26 | 27 | from .configuration import Configuration 28 | 29 | try: 30 | import urllib3 31 | except ImportError: 32 | raise ImportError('Swagger python client requires urllib3.') 33 | 34 | 35 | logger = logging.getLogger(__name__) 36 | 37 | 38 | class RESTResponse(io.IOBase): 39 | 40 | def __init__(self, resp): 41 | self.urllib3_response = resp 42 | self.status = resp.status 43 | self.reason = resp.reason 44 | self.data = resp.data 45 | 46 | def getheaders(self): 47 | """ 48 | Returns a dictionary of the response headers. 49 | """ 50 | return self.urllib3_response.getheaders() 51 | 52 | def getheader(self, name, default=None): 53 | """ 54 | Returns a given response header. 55 | """ 56 | return self.urllib3_response.getheader(name, default) 57 | 58 | 59 | class RESTClientObject(object): 60 | 61 | def __init__(self, pools_size=4, maxsize=4): 62 | # urllib3.PoolManager will pass all kw parameters to connectionpool 63 | # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 64 | # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 65 | # maxsize is the number of requests to host that are allowed in parallel 66 | # ca_certs vs cert_file vs key_file 67 | # http://stackoverflow.com/a/23957365/2985775 68 | 69 | # cert_reqs 70 | if Configuration().verify_ssl: 71 | cert_reqs = ssl.CERT_REQUIRED 72 | else: 73 | cert_reqs = ssl.CERT_NONE 74 | 75 | # ca_certs 76 | if Configuration().ssl_ca_cert: 77 | ca_certs = Configuration().ssl_ca_cert 78 | else: 79 | # if not set certificate file, use Mozilla's root certificates. 
80 | ca_certs = certifi.where() 81 | 82 | # cert_file 83 | cert_file = Configuration().cert_file 84 | 85 | # key file 86 | key_file = Configuration().key_file 87 | 88 | # https pool manager 89 | self.pool_manager = urllib3.PoolManager( 90 | num_pools=pools_size, 91 | maxsize=maxsize, 92 | cert_reqs=cert_reqs, 93 | ca_certs=ca_certs, 94 | cert_file=cert_file, 95 | key_file=key_file 96 | ) 97 | 98 | def request(self, method, url, query_params=None, headers=None, 99 | body=None, post_params=None, _preload_content=True, _request_timeout=None): 100 | """ 101 | :param method: http request method 102 | :param url: http request url 103 | :param query_params: query parameters in the url 104 | :param headers: http request headers 105 | :param body: request json body, for `application/json` 106 | :param post_params: request post parameters, 107 | `application/x-www-form-urlencoded` 108 | and `multipart/form-data` 109 | :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without 110 | reading/decoding response data. Default is True. 111 | :param _request_timeout: timeout setting for this request. If one number provided, it will be total request 112 | timeout. It can also be a pair (tuple) of (connection, read) timeouts. 113 | """ 114 | method = method.upper() 115 | assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS'] 116 | 117 | if post_params and body: 118 | raise ValueError( 119 | "body parameter cannot be used with post_params parameter." 120 | ) 121 | 122 | post_params = post_params or {} 123 | headers = headers or {} 124 | 125 | timeout = None 126 | if _request_timeout: 127 | if isinstance(_request_timeout, (int, ) if PY3 else (int, long)): 128 | timeout = urllib3.Timeout(total=_request_timeout) 129 | elif isinstance(_request_timeout, tuple) and len(_request_timeout) == 2: 130 | timeout = urllib3.Timeout(connect=_request_timeout[0], read=_request_timeout[1]) 131 | 132 | if 'Content-Type' not in headers: 133 | headers['Content-Type'] = 'application/json' 134 | 135 | try: 136 | # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` 137 | if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']: 138 | if query_params: 139 | url += '?' + urlencode(query_params) 140 | if re.search('json', headers['Content-Type'], re.IGNORECASE): 141 | request_body = None 142 | if body: 143 | request_body = json.dumps(body) 144 | r = self.pool_manager.request(method, url, 145 | body=request_body, 146 | preload_content=_preload_content, 147 | timeout=timeout, 148 | headers=headers) 149 | elif headers['Content-Type'] == 'application/x-www-form-urlencoded': 150 | r = self.pool_manager.request(method, url, 151 | fields=post_params, 152 | encode_multipart=False, 153 | preload_content=_preload_content, 154 | timeout=timeout, 155 | headers=headers) 156 | elif headers['Content-Type'] == 'multipart/form-data': 157 | # must del headers['Content-Type'], or the correct Content-Type 158 | # which generated by urllib3 will be overwritten. 
159 | del headers['Content-Type'] 160 | r = self.pool_manager.request(method, url, 161 | fields=post_params, 162 | encode_multipart=True, 163 | preload_content=_preload_content, 164 | timeout=timeout, 165 | headers=headers) 166 | # Pass a `string` parameter directly in the body to support 167 | # other content types than Json when `body` argument is provided 168 | # in serialized form 169 | elif isinstance(body, str): 170 | request_body = body 171 | r = self.pool_manager.request(method, url, 172 | body=request_body, 173 | preload_content=_preload_content, 174 | timeout=timeout, 175 | headers=headers) 176 | else: 177 | # Cannot generate the request from given parameters 178 | msg = """Cannot prepare a request message for provided arguments. 179 | Please check that your arguments match declared content type.""" 180 | raise ApiException(status=0, reason=msg) 181 | # For `GET`, `HEAD` 182 | else: 183 | r = self.pool_manager.request(method, url, 184 | fields=query_params, 185 | preload_content=_preload_content, 186 | timeout=timeout, 187 | headers=headers) 188 | except urllib3.exceptions.SSLError as e: 189 | msg = "{0}\n{1}".format(type(e).__name__, str(e)) 190 | raise ApiException(status=0, reason=msg) 191 | 192 | if _preload_content: 193 | r = RESTResponse(r) 194 | 195 | # In the python 3, the response.data is bytes. 196 | # we need to decode it to string. 197 | if PY3: 198 | r.data = r.data.decode('utf8') 199 | 200 | # log response body 201 | logger.debug("response body: %s", r.data) 202 | 203 | if r.status not in range(200, 206): 204 | raise ApiException(http_resp=r) 205 | 206 | return r 207 | 208 | def GET(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None): 209 | return self.request("GET", url, 210 | headers=headers, 211 | _preload_content=_preload_content, 212 | _request_timeout=_request_timeout, 213 | query_params=query_params) 214 | 215 | def HEAD(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None): 216 | return self.request("HEAD", url, 217 | headers=headers, 218 | _preload_content=_preload_content, 219 | _request_timeout=_request_timeout, 220 | query_params=query_params) 221 | 222 | def OPTIONS(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, 223 | _request_timeout=None): 224 | return self.request("OPTIONS", url, 225 | headers=headers, 226 | query_params=query_params, 227 | post_params=post_params, 228 | _preload_content=_preload_content, 229 | _request_timeout=_request_timeout, 230 | body=body) 231 | 232 | def DELETE(self, url, headers=None, query_params=None, body=None, _preload_content=True, _request_timeout=None): 233 | return self.request("DELETE", url, 234 | headers=headers, 235 | query_params=query_params, 236 | _preload_content=_preload_content, 237 | _request_timeout=_request_timeout, 238 | body=body) 239 | 240 | def POST(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, 241 | _request_timeout=None): 242 | return self.request("POST", url, 243 | headers=headers, 244 | query_params=query_params, 245 | post_params=post_params, 246 | _preload_content=_preload_content, 247 | _request_timeout=_request_timeout, 248 | body=body) 249 | 250 | def PUT(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, 251 | _request_timeout=None): 252 | return self.request("PUT", url, 253 | headers=headers, 254 | query_params=query_params, 255 | post_params=post_params, 256 | 
_preload_content=_preload_content, 257 | _request_timeout=_request_timeout, 258 | body=body) 259 | 260 | def PATCH(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, 261 | _request_timeout=None): 262 | return self.request("PATCH", url, 263 | headers=headers, 264 | query_params=query_params, 265 | post_params=post_params, 266 | _preload_content=_preload_content, 267 | _request_timeout=_request_timeout, 268 | body=body) 269 | 270 | 271 | class ApiException(Exception): 272 | 273 | def __init__(self, status=None, reason=None, http_resp=None): 274 | if http_resp: 275 | self.status = http_resp.status 276 | self.reason = http_resp.reason 277 | self.body = http_resp.data 278 | self.headers = http_resp.getheaders() 279 | else: 280 | self.status = status 281 | self.reason = reason 282 | self.body = None 283 | self.headers = None 284 | 285 | def __str__(self): 286 | """ 287 | Custom error messages for exception 288 | """ 289 | error_message = "({0})\n"\ 290 | "Reason: {1}\n".format(self.status, self.reason) 291 | if self.headers: 292 | error_message += "HTTP response headers: {0}\n".format(self.headers) 293 | 294 | if self.body: 295 | error_message += "HTTP response body: {0}\n".format(self.body) 296 | 297 | return error_message 298 | -------------------------------------------------------------------------------- /docs/AsyncResponse.md: -------------------------------------------------------------------------------- 1 | # AsyncResponse 2 | 3 | ## Properties 4 | Name | Type | Description | Notes 5 | ------------ | ------------- | ------------- | ------------- 6 | **request_id** | **str** | Unique identifier for the api call | 7 | **api** | **str** | API hit | 8 | 9 | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) 10 | 11 | 12 | -------------------------------------------------------------------------------- /docs/Audio.md: -------------------------------------------------------------------------------- 1 | # Audio 2 | 3 | ## Properties 4 | Name | Type | Description | Notes 5 | ------------ | ------------- | ------------- | ------------- 6 | **encoding** | **str** | Encoding of audio file like MP3, WAV etc. | 7 | **sample_rate** | **int** | Sample rate of the audio file. | 8 | **language_code** | **str** | Language spoken in the audio file. | [default to 'en-US'] 9 | **content** | **str** | base64 encoding of the audio file. | 10 | 11 | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) 12 | 13 | 14 | -------------------------------------------------------------------------------- /docs/DenoiseApi.md: -------------------------------------------------------------------------------- 1 | # deepaffects.DenoiseApi 2 | 3 | All URIs are relative to *https://localhost* 4 | 5 | Method | HTTP request | Description 6 | ------------- | ------------- | ------------- 7 | [**async_denoise_audio**](DenoiseApi.md#async_denoise_audio) | **POST** /api/v1/async/denoise | Denoise an audio file 8 | [**sync_denoise_audio**](DenoiseApi.md#sync_denoise_audio) | **POST** /api/v1/sync/denoise | Denoise an audio file 9 | 10 | 11 | # **async_denoise_audio** 12 | > AsyncResponse async_denoise_audio(body, webhook, request_id=request_id) 13 | 14 | Denoise an audio file. 
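The call returns an `AsyncResponse` immediately; the denoised result is posted to the supplied `webhook` URL once processing completes. If `Audio.from_file` is not convenient (for example, when the audio bytes are already in memory), the request body can also be assembled by hand. The sketch below is a minimal example and assumes the generated `Audio` model accepts its documented properties (`encoding`, `sample_rate`, `language_code`, `content`) as keyword arguments.

```python
# Minimal sketch: build the Audio request body by hand instead of using
# Audio.from_file. Assumes the generated model takes its documented
# properties as keyword arguments.
import base64

import deepaffects

with open("/path/to/file.wav", "rb") as f:
    content = base64.b64encode(f.read()).decode("utf-8")

body = deepaffects.Audio(
    encoding="wav",          # encoding of the audio file (MP3, WAV, ...)
    sample_rate=16000,       # sample rate of the audio file in Hz
    language_code="en-US",   # language spoken in the audio file
    content=content)         # base64 encoding of the audio file
```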
15 | 16 | ### Example 17 | ```python 18 | from __future__ import print_function 19 | import time 20 | import deepaffects 21 | from deepaffects.rest import ApiException 22 | from pprint import pprint 23 | 24 | # Configure API key authorization: UserSecurity 25 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 26 | 27 | # create an instance of the API class 28 | api_instance = deepaffects.DenoiseApi() 29 | body = deepaffects.Audio.from_file(file_name="/path/to/file") # Audio | Audio object that needs to be denoised. 30 | webhook = 'webhook_example' # str | The webhook url where result from async resource is posted 31 | request_id = 'request_id_example' # str | Unique identifier for the request (optional) 32 | 33 | try: 34 | # Denoise an audio file 35 | api_response = api_instance.async_denoise_audio(body, webhook, request_id=request_id) 36 | pprint(api_response) 37 | except ApiException as e: 38 | print("Exception when calling DenoiseApi->async_denoise_audio: %s\n" % e) 39 | ``` 40 | 41 | ### Parameters 42 | 43 | Name | Type | Description | Notes 44 | ------------- | ------------- | ------------- | ------------- 45 | **body** | [**Audio**](Audio.md)| Audio object that needs to be denoised. | 46 | **webhook** | **str**| The webhook url where result from async resource is posted | 47 | **request_id** | **str**| Unique identifier for the request | [optional] 48 | 49 | ### Return type 50 | 51 | [**AsyncResponse**](AsyncResponse.md) 52 | 53 | ### Authorization 54 | 55 | [UserSecurity](../README.md#UserSecurity) 56 | 57 | ### HTTP request headers 58 | 59 | - **Content-Type**: application/json 60 | - **Accept**: application/json 61 | 62 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 63 | 64 | # **sync_denoise_audio** 65 | > Audio sync_denoise_audio(body) 66 | 67 | Denoise an audio file 68 | 69 | 70 | ### Example 71 | ```python 72 | from __future__ import print_function 73 | import time 74 | import deepaffects 75 | from deepaffects.rest import ApiException 76 | from pprint import pprint 77 | 78 | # Configure API key authorization: UserSecurity 79 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 80 | 81 | # create an instance of the API class 82 | api_instance = deepaffects.DenoiseApi() 83 | body = deepaffects.Audio.from_file(file_name="/path/to/file") # Audio | Audio object that needs to be denoised. 84 | 85 | try: 86 | # Denoise an audio file 87 | api_response = api_instance.sync_denoise_audio(body) 88 | pprint(api_response) 89 | except ApiException as e: 90 | print("Exception when calling DenoiseApi->sync_denoise_audio: %s\n" % e) 91 | ``` 92 | 93 | ### Parameters 94 | 95 | Name | Type | Description | Notes 96 | ------------- | ------------- | ------------- | ------------- 97 | **body** | [**Audio**](Audio.md)| Audio object that needs to be denoised. 
| 98 | 99 | ### Return type 100 | 101 | [**Audio**](Audio.md) 102 | 103 | ### Authorization 104 | 105 | [UserSecurity](../README.md#UserSecurity) 106 | 107 | ### HTTP request headers 108 | 109 | - **Content-Type**: application/json 110 | - **Accept**: application/json 111 | 112 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 113 | 114 | -------------------------------------------------------------------------------- /docs/DiarizeApi.md: -------------------------------------------------------------------------------- 1 | # deepaffects.DiarizeApi 2 | 3 | All URIs are relative to *https://localhost* 4 | 5 | Method | HTTP request | Description 6 | ------------- | ------------- | ------------- 7 | [**async_diarize_audio**](DiarizeApi.md#async_diarize_audio) | **POST** /api/v1/async/diarize | Diarize an audio file 8 | [**sync_diarize_audio**](DiarizeApi.md#sync_diarize_audio) | **POST** /api/v1/sync/diarize | Diarize an audio file 9 | 10 | 11 | # **async_diarize_audio** 12 | > AsyncResponse async_diarize_audio(body, webhook, request_id=request_id) 13 | 14 | Diarize an audio file. 15 | 16 | ### Example 17 | ```python 18 | from __future__ import print_function 19 | import time 20 | import deepaffects 21 | from deepaffects.rest import ApiException 22 | from pprint import pprint 23 | 24 | # Configure API key authorization: UserSecurity 25 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 26 | 27 | # create an instance of the API class 28 | api_instance = deepaffects.DiarizeApi() 29 | body = deepaffects.DiarizeAudio.from_file(file_name="/path/to/file") # Audio | Audio object that needs to be diarized. 30 | webhook = 'https://your_webhook.url' # str | The webhook url where result from async resource is posted 31 | request_id = 'request_id_example' # str | Unique identifier for the request (optional) 32 | 33 | try: 34 | # Diarize an audio file 35 | api_response = api_instance.async_diarize_audio(body, webhook, request_id=request_id) 36 | pprint(api_response) 37 | except ApiException as e: 38 | print("Exception when calling DiarizeApi->async_diarize_audio: %s\n" % e) 39 | ``` 40 | 41 | ### Parameters 42 | 43 | Name | Type | Description | Notes 44 | ------------- | ------------- | ------------- | ------------- 45 | **body** | [**DiarizeAudio**](DiarizeAudio.md)| Audio object that needs to be diarized. | 46 | **webhook** | **str**| The webhook url where result from async resource is posted | 47 | **request_id** | **str**| Unique identifier for the request | [optional] 48 | 49 | ### Return type 50 | 51 | [**AsyncResponse**](AsyncResponse.md) 52 | 53 | ### Authorization 54 | 55 | [UserSecurity](../README.md#UserSecurity) 56 | 57 | ### HTTP request headers 58 | 59 | - **Content-Type**: application/json 60 | - **Accept**: application/json 61 | 62 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 63 | 64 | # **sync_diarize_audio** 65 | > list[Audio] sync_diarize_audio(body) 66 | 67 | Diarize an audio file 68 | 69 | Diarize an audio file. 
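The request body is a `DiarizeAudio` object, which extends the plain `Audio` payload with a `speakers` field (`-1` when the number of speakers is unknown). When the speaker count is known in advance it can be set explicitly. The sketch below is a minimal example and assumes the generated `DiarizeAudio` model accepts its documented properties as keyword arguments.

```python
# Minimal sketch: build a DiarizeAudio body by hand with an explicit speaker
# count. Assumes the generated model takes its documented properties
# (encoding, sample_rate, language_code, content, speakers) as keyword arguments.
import base64

import deepaffects

with open("/path/to/file.wav", "rb") as f:
    content = base64.b64encode(f.read()).decode("utf-8")

body = deepaffects.DiarizeAudio(
    encoding="wav",
    sample_rate=8000,
    language_code="en-US",
    content=content,
    speakers=2)  # set to -1 when the number of speakers is unknown
```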
70 | 71 | ### Example 72 | ```python 73 | from __future__ import print_function 74 | import time 75 | import deepaffects 76 | from deepaffects.rest import ApiException 77 | from pprint import pprint 78 | 79 | # Configure API key authorization: UserSecurity 80 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 81 | 82 | # create an instance of the API class 83 | api_instance = deepaffects.DiarizeApi() 84 | body = deepaffects.DiarizeAudio.from_file(file_name="/path/to/file") # Audio | Audio object that needs to be diarized. 85 | 86 | try: 87 | # Diarize an audio file 88 | api_response = api_instance.sync_diarize_audio(body) 89 | pprint(api_response) 90 | except ApiException as e: 91 | print("Exception when calling DiarizeApi->sync_diarize_audio: %s\n" % e) 92 | ``` 93 | 94 | ### Parameters 95 | 96 | Name | Type | Description | Notes 97 | ------------- | ------------- | ------------- | ------------- 98 | **body** | [**DiarizeAudio**](DiarizeAudio.md)| Audio object that needs to be diarized. | 99 | 100 | ### Return type 101 | 102 | [**list[Audio]**](Audio.md) 103 | 104 | ### Authorization 105 | 106 | [UserSecurity](../README.md#UserSecurity) 107 | 108 | ### HTTP request headers 109 | 110 | - **Content-Type**: application/json 111 | - **Accept**: application/json 112 | 113 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 114 | 115 | -------------------------------------------------------------------------------- /docs/DiarizeApiV2.md: -------------------------------------------------------------------------------- 1 | # deepaffects.DiarizeApi 2 | 3 | All URIs are relative to *https://localhost* 4 | 5 | Method | HTTP request | Description 6 | ------------- | ------------- | ------------- 7 | [**async_diarize_audio**](DiarizeApiV2.md#async_diarize_audio) | **POST** /api/v1/async/diarize | Diarize an audio file 8 | 9 | 10 | # **async_diarize_audio** 11 | > AsyncResponse async_diarize_audio(body, webhook, request_id=request_id) 12 | 13 | Diarize an audio file. 14 | 15 | ### Example 16 | ```python 17 | from __future__ import print_function 18 | import deepaffects 19 | from deepaffects.rest import ApiException 20 | from pprint import pprint 21 | 22 | # Configure API key authorization: UserSecurity 23 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 24 | 25 | # create an instance of the API class 26 | api_instance = deepaffects.DiarizeApiV2() 27 | body = deepaffects.DiarizeAudio.from_file(file_name="/path/to/file") # DiarizeAudio | audio object that needs to be diarized. 28 | webhook = 'https://your_webhook.url' # str | The webhook url where result from async resource is posted 29 | request_id = 'request_id_example' # str | Unique identifier for the request (optional) 30 | 31 | try: 32 | # Diarize an audio file 33 | api_response = api_instance.async_diarize_audio(body, webhook, request_id=request_id) 34 | pprint(api_response) 35 | except ApiException as e: 36 | print("Exception when calling DiarizeApi->async_diarize_audio: %s\n" % e) 37 | ``` 38 | 39 | ### Parameters 40 | 41 | Name | Type | Description | Notes 42 | ------------- | ------------- | ------------- | ------------- 43 | **body** | [**DiarizeAudio**](DiarizeAudio.md)| Audio object that needs to be diarized. 
| 44 | **webhook** | **str**| The webhook url where result from async resource is posted | 45 | **request_id** | **str**| Unique identifier for the request | [optional] 46 | 47 | ### Return type 48 | 49 | [**AsyncResponse**](AsyncResponse.md) 50 | 51 | ### Authorization 52 | 53 | [UserSecurity](../README.md#UserSecurity) 54 | 55 | ### HTTP request headers 56 | 57 | - **Content-Type**: application/json 58 | - **Accept**: application/json 59 | 60 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 61 | -------------------------------------------------------------------------------- /docs/DiarizeAudio.md: -------------------------------------------------------------------------------- 1 | # DiarizeAudio 2 | 3 | ## Properties 4 | Name | Type | Description | Notes 5 | ------------ | ------------- | ------------- | ------------- 6 | **encoding** | **str** | Encoding of audio file like MP3, WAV etc. | 7 | **sample_rate** | **int** | Sample rate of the audio file. | 8 | **language_code** | **str** | Language spoken in the audio file. | [default to 'en-US'] 9 | **content** | **str** | base64 encoding of the audio file. | 10 | **speakers** | **int** | Number of speakers in the file (-1 for unknown speakers) | [default to -1] 11 | 12 | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) 13 | 14 | 15 | -------------------------------------------------------------------------------- /docs/EllipsisApi.md: -------------------------------------------------------------------------------- 1 | # deepaffects.EllipsisApi 2 | 3 | All URIs are relative to *https://localhost* 4 | 5 | Method | HTTP request | Description 6 | ------------- | ------------- | ------------- 7 | [**is_depressed**](EllipsisApi.md#is_depressed) | **POST** /api/v1/audio/ellipsis/is_depressed | Find if a person is depressed from audio. 8 | 9 | 10 | # **is_depressed** 11 | > bool is_depressed(body) 12 | 13 | Find if a person is depressed from audio. 14 | 15 | 16 | ### Example 17 | ```python 18 | from __future__ import print_function 19 | import time 20 | import deepaffects 21 | from deepaffects.rest import ApiException 22 | from pprint import pprint 23 | 24 | # Configure API key authorization: UserSecurity 25 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 26 | 27 | # create an instance of the API class 28 | api_instance = deepaffects.EllipsisAPI() 29 | body = deepaffects.Audio.from_file(file_name="/path/to/file") # Audio | Audio object to predict depression. 30 | 31 | try: 32 | # Find if a person is depressed from audio. 33 | api_response = api_instance.sync_is_depressed(body) 34 | pprint(api_response) 35 | except ApiException as e: 36 | print("Exception when calling EllipsisApi->is_depressed: %s\n" % e) 37 | ``` 38 | 39 | ### Parameters 40 | 41 | Name | Type | Description | Notes 42 | ------------- | ------------- | ------------- | ------------- 43 | **body** | [**Audio**](Audio.md)| Audio object that needs to be featurized. 
| 44 | 45 | ### Return type 46 | 47 | **bool** 48 | 49 | ### Authorization 50 | 51 | [UserSecurity](../README.md#UserSecurity) 52 | 53 | ### HTTP request headers 54 | 55 | - **Content-Type**: application/json 56 | - **Accept**: application/json 57 | 58 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 59 | 60 | -------------------------------------------------------------------------------- /docs/EmotionApi.md: -------------------------------------------------------------------------------- 1 | # deepaffects.EmotionApi 2 | 3 | All URIs are relative to *https://localhost* 4 | 5 | Method | HTTP request | Description 6 | ------------- | ------------- | ------------- 7 | [**async_recognise_emotion**](EmotionApi.md#async_recognise_emotion) | **POST** /api/v1/async/recognise_emotion | Find emotion in an audio file 8 | [**sync_recognise_emotion**](EmotionApi.md#sync_recognise_emotion) | **POST** /api/v1/sync/recognise_emotion | Find emotion in an audio file 9 | 10 | 11 | # **async_recognise_emotion** 12 | > AsyncResponse async_recognise_emotion(body, webhook, request_id=request_id) 13 | 14 | Find emotion in an audio file 15 | 16 | Extract emotion from an audio file. 17 | 18 | ### Example 19 | ```python 20 | from __future__ import print_function 21 | import time 22 | import deepaffects 23 | from deepaffects.rest import ApiException 24 | from pprint import pprint 25 | 26 | # Configure API key authorization: UserSecurity 27 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 28 | 29 | # create an instance of the API class 30 | api_instance = deepaffects.EmotionApi() 31 | body = deepaffects.Audio.from_file(file_name="/path/to/file") # Audio | Audio object to extract emotions from. 32 | webhook = 'https://your_webhook.url' # str | The webhook url where result from async resource is posted 33 | request_id = 'request_id_example' # str | Unique identifier for the request (optional) 34 | 35 | try: 36 | # Find emotion in an audio file 37 | api_response = api_instance.async_recognise_emotion(body, webhook, request_id=request_id) 38 | pprint(api_response) 39 | except ApiException as e: 40 | print("Exception when calling EmotionApi->async_recognise_emotion: %s\n" % e) 41 | ``` 42 | 43 | ### Parameters 44 | 45 | Name | Type | Description | Notes 46 | ------------- | ------------- | ------------- | ------------- 47 | **body** | [**Audio**](Audio.md)| Audio object that needs to be featurized. | 48 | **webhook** | **str**| The webhook url where result from async resource is posted | 49 | **request_id** | **str**| Unique identifier for the request | [optional] 50 | 51 | ### Return type 52 | 53 | [**AsyncResponse**](AsyncResponse.md) 54 | 55 | ### Authorization 56 | 57 | [UserSecurity](../README.md#UserSecurity) 58 | 59 | ### HTTP request headers 60 | 61 | - **Content-Type**: application/json 62 | - **Accept**: application/json 63 | 64 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 65 | 66 | # **sync_recognise_emotion** 67 | > list[EmotionScore] sync_recognise_emotion(body) 68 | 69 | Find emotion in an audio file 70 | 71 | Extract emotion from an audio file. 
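Each element of the returned `list[EmotionScore]` carries the documented emotion probabilities. A common post-processing step is to reduce each score to its most likely label. The helper below is a minimal sketch and assumes the generated `EmotionScore` model exposes its documented properties (`happy`, `angry`, `sad`, `disgust`, `pleasant`, `neutral`, `fear`) as attributes; the trailing comments show usage once `api_instance` and `body` are set up as in the example below.

```python
# Minimal sketch: reduce an EmotionScore to its most likely emotion label.
# Assumes the documented probabilities are exposed as attributes; missing
# (optional) probabilities are treated as 0.0.
EMOTIONS = ["happy", "angry", "sad", "disgust", "pleasant", "neutral", "fear"]

def dominant_emotion(score):
    probs = {name: getattr(score, name, None) or 0.0 for name in EMOTIONS}
    return max(probs, key=probs.get)

# api_response = api_instance.sync_recognise_emotion(body)
# labels = [dominant_emotion(score) for score in api_response]
```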
72 | 73 | ### Example 74 | ```python 75 | from __future__ import print_function 76 | import time 77 | import deepaffects 78 | from deepaffects.rest import ApiException 79 | from pprint import pprint 80 | 81 | # Configure API key authorization: UserSecurity 82 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 83 | 84 | # create an instance of the API class 85 | api_instance = deepaffects.EmotionApi() 86 | body = deepaffects.Audio.from_file(file_name="/path/to/file") # Audio | Audio object to extract emotions from. 87 | 88 | try: 89 | # Find emotion in an audio file 90 | api_response = api_instance.sync_recognise_emotion(body) 91 | pprint(api_response) 92 | except ApiException as e: 93 | print("Exception when calling EmotionApi->sync_recognise_emotion: %s\n" % e) 94 | ``` 95 | 96 | ### Parameters 97 | 98 | Name | Type | Description | Notes 99 | ------------- | ------------- | ------------- | ------------- 100 | **body** | [**Audio**](Audio.md)| Audio object that needs to be featurized. | 101 | 102 | ### Return type 103 | 104 | [**list[EmotionScore]**](EmotionScore.md) 105 | 106 | ### Authorization 107 | 108 | [UserSecurity](../README.md#UserSecurity) 109 | 110 | ### HTTP request headers 111 | 112 | - **Content-Type**: application/json 113 | - **Accept**: application/json 114 | 115 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 116 | 117 | -------------------------------------------------------------------------------- /docs/EmotionScore.md: -------------------------------------------------------------------------------- 1 | # EmotionScore 2 | 3 | ## Properties 4 | Name | Type | Description | Notes 5 | ------------ | ------------- | ------------- | ------------- 6 | **happy** | **float** | Probability of happy emotion | [optional] 7 | **angry** | **float** | Probability of angry emotion | [optional] 8 | **sad** | **float** | Probability of sad emotion | [optional] 9 | **disgust** | **float** | Probability of disgust emotion | [optional] 10 | **pleasant** | **float** | Probability of pleasant emotion | [optional] 11 | **neutral** | **float** | Probability of neutral emotion | [optional] 12 | **fear** | **float** | Probability of fear emotion | [optional] 13 | 14 | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/FeaturizeApi.md: -------------------------------------------------------------------------------- 1 | # deepaffects.FeaturizeApi 2 | 3 | All URIs are relative to *https://localhost* 4 | 5 | Method | HTTP request | Description 6 | ------------- | ------------- | ------------- 7 | [**async_featurize_audio**](FeaturizeApi.md#async_featurize_audio) | **POST** /api/v1/async/featurize | featurize an audio file 8 | [**sync_featurize_audio**](FeaturizeApi.md#sync_featurize_audio) | **POST** /api/v1/sync/featurize | featurize an audio file 9 | 10 | 11 | # **async_featurize_audio** 12 | > AsyncResponse async_featurize_audio(body, webhook, request_id=request_id) 13 | 14 | featurize an audio file 15 | 16 | Extract paralinguistic feature from an audio file. 
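The async call returns an `AsyncResponse` right away; the extracted features are posted to the supplied `webhook` URL when processing completes. The callback payload schema is not specified in this document, so the receiver below is only a hypothetical sketch (the endpoint path and port are illustrative) that accepts the callback and logs whatever JSON is posted.

```python
# Hypothetical webhook receiver: the route and port are made up for
# illustration, and the callback body is treated as opaque JSON because
# its schema is not documented here.
from flask import Flask, request

app = Flask(__name__)

@app.route("/deepaffects/callback", methods=["POST"])
def deepaffects_callback():
    payload = request.get_json(force=True, silent=True)
    print("DeepAffects callback:", payload)
    return "", 200

if __name__ == "__main__":
    app.run(port=8000)
```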
17 | 18 | ### Example 19 | ```python 20 | from __future__ import print_function 21 | import time 22 | import deepaffects 23 | from deepaffects.rest import ApiException 24 | from pprint import pprint 25 | 26 | # Configure API key authorization: UserSecurity 27 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 28 | 29 | # create an instance of the API class 30 | api_instance = deepaffects.FeaturizeApi() 31 | body = deepaffects.Audio.from_file('/path/to/file') # Audio | Audio object that needs to be featurized. 32 | webhook = 'webhook_example' # str | The webhook url where result from async resource is posted 33 | request_id = 'request_id_example' # str | Unique identifier for the request (optional) 34 | 35 | try: 36 | # featurize an audio file 37 | api_response = api_instance.async_featurize_audio(body, webhook, request_id=request_id) 38 | pprint(api_response) 39 | except ApiException as e: 40 | print("Exception when calling FeaturizeApi->async_featurize_audio: %s\n" % e) 41 | ``` 42 | 43 | ### Parameters 44 | 45 | Name | Type | Description | Notes 46 | ------------- | ------------- | ------------- | ------------- 47 | **body** | [**Audio**](Audio.md)| Audio object that needs to be featurized. | 48 | **webhook** | **str**| The webhook url where result from async resource is posted | 49 | **request_id** | **str**| Unique identifier for the request | [optional] 50 | 51 | ### Return type 52 | 53 | [**AsyncResponse**](AsyncResponse.md) 54 | 55 | ### Authorization 56 | 57 | [UserSecurity](../README.md#UserSecurity) 58 | 59 | ### HTTP request headers 60 | 61 | - **Content-Type**: application/json 62 | - **Accept**: application/json 63 | 64 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 65 | 66 | # **sync_featurize_audio** 67 | > list[list[float]] sync_featurize_audio(body) 68 | 69 | featurize an audio file 70 | 71 | Extract paralinguistic feature from an audio file. 72 | 73 | ### Example 74 | ```python 75 | from __future__ import print_function 76 | import time 77 | import deepaffects 78 | from deepaffects.rest import ApiException 79 | from pprint import pprint 80 | 81 | # Configure API key authorization: UserSecurity 82 | deepaffects.configuration.api_key['apikey'] = 'YOUR_API_KEY' 83 | 84 | # create an instance of the API class 85 | api_instance = deepaffects.FeaturizeApi() 86 | body = deepaffects.Audio.from_file('/path/to/file') # Audio | Audio object that needs to be featurized. 87 | 88 | try: 89 | # featurize an audio file 90 | api_response = api_instance.sync_featurize_audio(body) 91 | pprint(api_response) 92 | except ApiException as e: 93 | print("Exception when calling FeaturizeApi->sync_featurize_audio: %s\n" % e) 94 | ``` 95 | 96 | ### Parameters 97 | 98 | Name | Type | Description | Notes 99 | ------------- | ------------- | ------------- | ------------- 100 | **body** | [**Audio**](Audio.md)| Audio object that needs to be featurized. 
| 101 | 102 | ### Return type 103 | 104 | **list[list[float]]** 105 | 106 | ### Authorization 107 | 108 | [UserSecurity](../README.md#UserSecurity) 109 | 110 | ### HTTP request headers 111 | 112 | - **Content-Type**: application/json 113 | - **Accept**: application/json 114 | 115 | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) 116 | 117 | -------------------------------------------------------------------------------- /examples/diarize_emotion_example.py: -------------------------------------------------------------------------------- 1 | from deepaffects.realtime.util import get_deepaffects_client, chunk_generator_from_file, chunk_generator_from_url 2 | 3 | TIMEOUT_SECONDS = 10000 4 | apikey = "YOUR_API_KEY" 5 | file_path = "FILE_PATH" 6 | is_youtube_url = False 7 | languageCode = "en-Us" 8 | sampleRate = "16000" 9 | encoding = "wav" 10 | speakerIds = "list of userids for for speaker verification seperated by ','" 11 | verbose = "True" 12 | # DeepAffects realtime Api client 13 | client = get_deepaffects_client() 14 | 15 | metadata = [ 16 | ('apikey', apikey), 17 | ('encoding', encoding), 18 | ('speakerids', speakerIds), 19 | ('samplerate', sampleRate), 20 | ('languagecode', languageCode), 21 | ('verbose', verbose) 22 | ] 23 | 24 | # Implement chunk_generator() is a generator function which yields segment_chunk objects asynchronously 25 | # from deepaffects.realtime.types import segment_chunk 26 | # yield segment_chunk(Args) 27 | """segment_chunk. 28 | 29 | Args: 30 | encoding : Audio Encoding, 31 | languageCode: language code , 32 | sampleRate: sample rate of audio , 33 | content: base64 encoded audio, 34 | segmentOffset: offset of the segment in complete audio stream 35 | """ 36 | 37 | """ 38 | Sample implementation which reads audio from a file and splits it into 39 | segments more than 3 sec 40 | AudioSegment and yields base64 encoded audio segment objects asynchronously 41 | """ 42 | 43 | """Stream audio from url or youtube. 44 | 45 | responses = client.DiarizeEmotion( 46 | chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url), TIMEOUT_SECONDS, metadata=metadata) 47 | """ 48 | 49 | """Stream audio from local file. 50 | """ 51 | responses = client.DiarizeEmotion( 52 | chunk_generator_from_file(file_path), TIMEOUT_SECONDS, metadata=metadata) 53 | 54 | 55 | # responses is the iterator for all the response values 56 | for response in responses: 57 | print("Received message") 58 | print(response) 59 | 60 | """Response. 
61 | response = { 62 | userId: userId of the speaker identified in the segment, 63 | emotion: Emotion identified in the segment, 64 | start: start of the segment, 65 | end: end of the segment 66 | } 67 | """ 68 | -------------------------------------------------------------------------------- /examples/emotion_identify_example.py: -------------------------------------------------------------------------------- 1 | from deepaffects.realtime.util import get_deepaffects_client, chunk_generator_from_file, chunk_generator_from_url 2 | 3 | TIMEOUT_SECONDS = 10000 4 | apikey = "YOUR_API_KEY" 5 | file_path = "FILE_PATH" 6 | is_youtube_url = False 7 | languageCode = "en-US" 8 | sampleRate = "16000" 9 | encoding = "wav" 10 | 11 | # DeepAffects realtime API client 12 | client = get_deepaffects_client() 13 | 14 | metadata = [ 15 | ('apikey', apikey), 16 | ('encoding', encoding), 17 | ('samplerate', sampleRate), 18 | ('languagecode', languageCode) 19 | ] 20 | 21 | # Implement chunk_generator(), a generator function which yields segment_chunk objects asynchronously 22 | # from deepaffects.realtime.types import segment_chunk 23 | # yield segment_chunk(Args) 24 | """segment_chunk. 25 | 26 | Args: 27 | encoding: audio encoding, 28 | languageCode: language code, 29 | sampleRate: sample rate of the audio, 30 | content: base64 encoded audio, 31 | segmentOffset: offset of the segment in the complete audio stream 32 | """ 33 | 34 | """ 35 | Sample implementation which reads audio from a file, splits it into 36 | AudioSegment chunks longer than 3 seconds, 37 | and yields base64 encoded audio segment objects asynchronously. 38 | """ 39 | 40 | """Stream audio from a URL or YouTube. 41 | 42 | responses = client.IdentifyEmotion( 43 | chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url), TIMEOUT_SECONDS, metadata=metadata) 44 | """ 45 | 46 | """Stream audio from a local file. 47 | """ 48 | responses = client.IdentifyEmotion( 49 | chunk_generator_from_file(file_path), TIMEOUT_SECONDS, metadata=metadata) 50 | 51 | # responses is an iterator over all the response values 52 | for response in responses: 53 | print("Received message") 54 | print(response) 55 | 56 | """Response. 57 | response = { 58 | emotion: Emotion identified in the segment, 59 | start: start of the segment, 60 | end: end of the segment 61 | } 62 | """ 63 | -------------------------------------------------------------------------------- /examples/playlist_chunk_generator.py: -------------------------------------------------------------------------------- 1 | from deepaffects.realtime.util import get_deepaffects_client, chunk_generator_from_file, chunk_generator_from_url, chunk_generator_from_playlist 2 | 3 | TIMEOUT_SECONDS = 10000 4 | apikey = "YOUR_API_KEY" 5 | file_path = "PLAYLIST_PATH" 6 | speakerIds = "list of user ids for speaker verification, separated by ','" 7 | verbose = "True" 8 | 9 | # DeepAffects realtime API client 10 | client = get_deepaffects_client() 11 | 12 | metadata = [ 13 | ('apikey', apikey), 14 | ('speakerids', speakerIds), 15 | ('verbose', verbose) 16 | ] 17 | 18 | # Implement chunk_generator(), a generator function which yields segment_chunk objects asynchronously 19 | # from deepaffects.realtime.types import segment_chunk 20 | # yield segment_chunk(Args) 21 | """segment_chunk.
22 | 23 | Args: 24 | encoding: audio encoding, 25 | languageCode: language code, 26 | sampleRate: sample rate of the audio, 27 | content: base64 encoded audio, 28 | segmentOffset: offset of the segment in the complete audio stream 29 | """ 30 | 31 | """ 32 | Sample implementation which reads audio from a file, splits it into 33 | AudioSegment chunks longer than 3 seconds, 34 | and yields base64 encoded audio segment objects asynchronously. 35 | """ 36 | 37 | """Stream audio from a playlist. 38 | """ 39 | responses = client.DiarizeEmotion( 40 | chunk_generator_from_playlist(file_path), TIMEOUT_SECONDS, metadata=metadata) 41 | 42 | # responses is an iterator over all the response values 43 | for response in responses: 44 | print("Received message") 45 | print(response) 46 | 47 | """Response. 48 | response = { 49 | emotion: Emotion identified in the segment, 50 | start: start of the segment, 51 | end: end of the segment 52 | } 53 | """ 54 | -------------------------------------------------------------------------------- /examples/speaker_identify_example.py: -------------------------------------------------------------------------------- 1 | from deepaffects.realtime.util import get_deepaffects_client, chunk_generator_from_file, chunk_generator_from_url 2 | 3 | TIMEOUT_SECONDS = 10000 4 | apikey = "YOUR_API_KEY" 5 | file_path = "FILE_PATH" 6 | is_youtube_url = False 7 | languageCode = "en-US" 8 | sampleRate = "16000" 9 | encoding = "wav" 10 | apiVersion = "v2" 11 | speakerIds = "list of user ids for speaker verification, separated by ','" 12 | verbose = "True" 13 | 14 | # DeepAffects realtime API client 15 | client = get_deepaffects_client() 16 | 17 | # chunk_generator() is a generator function which yields audio segment objects asynchronously 18 | metadata = [ 19 | ('apikey', apikey), 20 | ('speakerids', speakerIds), 21 | ('encoding', encoding), 22 | ('samplerate', sampleRate), 23 | ('languagecode', languageCode), 24 | ('apiversion', apiVersion), 25 | ('verbose', verbose) 26 | ] 27 | 28 | """Stream audio from a URL or YouTube. 29 | 30 | responses = client.IdentifySpeaker( 31 | chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url), TIMEOUT_SECONDS, metadata=metadata) 32 | """ 33 | 34 | """Stream audio from a local file. 35 | """ 36 | responses = client.IdentifySpeaker( 37 | chunk_generator_from_file(file_path), TIMEOUT_SECONDS, metadata=metadata) 38 | 39 | # responses is an iterator over all the response values 40 | for response in responses: 41 | print("Received message") 42 | print(response) 43 | 44 | """Response.
45 | 46 | response = { 47 | userId: userId of the speaker identified in the segment, 48 | start: start of the segment, 49 | end: end of the segment 50 | } 51 | """ 52 | -------------------------------------------------------------------------------- /examples/ticker_based_earnings_call_identification.py: -------------------------------------------------------------------------------- 1 | from deepaffects.realtime.util import get_deepaffects_client, chunk_generator_from_file, chunk_generator_from_url, chunk_generator_from_playlist 2 | 3 | TIMEOUT_SECONDS = 10000 4 | apikey = "YOUR_API_KEY" 5 | file_path = "PLAYLIST_PATH" 6 | ticker = "TICKER_SYMBOL" 7 | out_file_name = "OUT_FILE_NAME" 8 | verbose = "True" 9 | 10 | metadata = [ 11 | ('apikey', apikey), 12 | ('ticker', ticker), 13 | ('verbose', verbose) 14 | ] 15 | 16 | # Implement chunk_generator(), a generator function which yields segment_chunk objects asynchronously 17 | # from deepaffects.realtime.types import segment_chunk 18 | # yield segment_chunk(Args) 19 | """segment_chunk. 20 | 21 | Args: 22 | encoding: audio encoding, 23 | languageCode: language code, 24 | sampleRate: sample rate of the audio, 25 | content: base64 encoded audio, 26 | segmentOffset: offset of the segment in the complete audio stream 27 | """ 28 | 29 | """ 30 | Sample implementation which reads audio from a file, splits it into 31 | AudioSegment chunks longer than 3 seconds, 32 | and yields base64 encoded audio segment objects asynchronously. 33 | """ 34 | 35 | """Stream audio from an earnings call playlist. 36 | """ 37 | # DeepAffects realtime API client 38 | client = get_deepaffects_client() 39 | 40 | responses = client.DiarizeEmotion( 41 | chunk_generator_from_playlist(out_file_name=out_file_name, file_path=file_path), TIMEOUT_SECONDS, metadata=metadata) 42 | 43 | # responses is an iterator over all the response values 44 | for response in responses: 45 | print("Received message") 46 | print(response) 47 | 48 | """Response.
49 | response = { 50 | emotion: Emotion identified in the segment, 51 | start: start of the segment, 52 | end: end of the segment 53 | } 54 | """ 55 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | certifi >= 14.05.14 2 | python_dateutil >= 2.5.3 3 | setuptools >= 21.0.0 4 | urllib3 >= 1.15.1 5 | pymediainfo >= 2.1.9 6 | pydub==0.22.1 7 | six==1.11.0 8 | grpcio==1.13.0 9 | protobuf==3.6.0 10 | pytube==9.2.2 11 | m3u8==0.3.6 12 | requests==2.20.0 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | 4 | from setuptools import setup, find_packages 5 | import os 6 | 7 | NAME = "deepaffects" 8 | VERSION = "1.4.1" 9 | # To install the library, run the following 10 | # 11 | # python setup.py install 12 | # 13 | # prerequisite: setuptools 14 | # http://pypi.python.org/pypi/setuptools 15 | 16 | REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil", "pymediainfo >= 2.1.9", "grpcio==1.13.0", 17 | "protobuf==3.6.0", "pydub==0.22.1", "pytube==9.2.2"] 18 | 19 | 20 | def readme(): 21 | with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme: 22 | return readme.read() 23 | 24 | setup( 25 | name=NAME, 26 | version=VERSION, 27 | description="Python bindings for DeepAffects APIs", 28 | author_email="support@seernet.io", 29 | url="https://github.com/SEERNET/deepaffects-python", 30 | author="Sushant Hiray, Venkatesh Duppada", 31 | setup_requires=[ 32 | "six >= 1.3.0", 33 | "pillow >= 2.8.1" 34 | ], 35 | install_requires=REQUIRES, 36 | packages=find_packages(), 37 | include_package_data=True, 38 | long_description=readme(), 39 | long_description_content_type="text/markdown" 40 | ) 41 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | coverage>=4.0.3 2 | nose>=1.3.7 3 | pluggy>=0.3.1 4 | py>=1.4.31 5 | randomize>=0.13 6 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SEERNET/deepaffects-python/e137b50f267f7aa133a0123d92be3552c5daa1bc/test/__init__.py -------------------------------------------------------------------------------- /test/data/clean.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SEERNET/deepaffects-python/e137b50f267f7aa133a0123d92be3552c5daa1bc/test/data/clean.wav -------------------------------------------------------------------------------- /test/data/happy.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SEERNET/deepaffects-python/e137b50f267f7aa133a0123d92be3552c5daa1bc/test/data/happy.mp3 -------------------------------------------------------------------------------- /test/data/noisy.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SEERNET/deepaffects-python/e137b50f267f7aa133a0123d92be3552c5daa1bc/test/data/noisy.wav -------------------------------------------------------------------------------- /test/data/reconstructed.wav: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SEERNET/deepaffects-python/e137b50f267f7aa133a0123d92be3552c5daa1bc/test/data/reconstructed.wav -------------------------------------------------------------------------------- /test/test_async_response.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: 0.1.0 9 | """ 10 | 11 | 12 | from __future__ import absolute_import 13 | 14 | import os 15 | import sys 16 | import unittest 17 | 18 | import deepaffects 19 | from deepaffects.rest import ApiException 20 | from deepaffects.models.async_response import AsyncResponse 21 | 22 | 23 | class TestAsyncResponse(unittest.TestCase): 24 | """ AsyncResponse unit test stubs """ 25 | 26 | def setUp(self): 27 | pass 28 | 29 | def tearDown(self): 30 | pass 31 | 32 | def testAsyncResponse(self): 33 | """ 34 | Test AsyncResponse 35 | """ 36 | model = deepaffects.models.async_response.AsyncResponse() 37 | 38 | 39 | if __name__ == '__main__': 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /test/test_audio.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: v1 9 | """ 10 | 11 | 12 | from __future__ import absolute_import 13 | 14 | import os 15 | import sys 16 | import unittest 17 | 18 | import deepaffects 19 | from deepaffects.rest import ApiException 20 | from deepaffects.models.audio import Audio 21 | 22 | 23 | class TestAudio(unittest.TestCase): 24 | """ Audio unit test stubs """ 25 | 26 | def setUp(self): 27 | pass 28 | 29 | def tearDown(self): 30 | pass 31 | 32 | def testAudio(self): 33 | """ 34 | Test Audio 35 | """ 36 | model = deepaffects.models.audio.Audio() 37 | 38 | 39 | if __name__ == '__main__': 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /test/test_base_setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest import TestCase, SkipTest 3 | 4 | import deepaffects 5 | 6 | DIR = os.path.dirname(os.path.realpath(__file__)) 7 | 8 | class AudioTest(TestCase): 9 | def initialize_api(self): 10 | pass 11 | 12 | def setUp(self): 13 | self.api_key = os.getenv("DEEPAFFECTS_API_KEY") 14 | if not self.api_key: 15 | raise ValueError( 16 | "API Key needs to be defined in an environment variable DEEPAFFECTS_API_KEY to run tests." 
17 | ) 18 | deepaffects.configuration.api_key = self.api_key 19 | self.initialize_api() 20 | 21 | @staticmethod 22 | def _require_numpy(): 23 | try: 24 | import numpy as np 25 | return np 26 | except ImportError: 27 | raise SkipTest("Numpy is not installed!") 28 | 29 | -------------------------------------------------------------------------------- /test/test_denoise_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | import os 13 | import sys 14 | import unittest 15 | 16 | import deepaffects 17 | from deepaffects.models.audio import Audio 18 | from .test_base_setup import DIR 19 | 20 | 21 | class TestDenoiseApi(unittest.TestCase): 22 | """ DenoiseApi unit test stubs """ 23 | 24 | @staticmethod 25 | def sdr(clean, reconstructed): 26 | # TODO: Implement SDR here 27 | return 5.1 28 | 29 | def setUp(self): 30 | deepaffects.configuration.api_key['apikey'] = os.environ['DEEPAFFECTS_API_KEY'] 31 | self.api = deepaffects.DenoiseApi() 32 | 33 | def tearDown(self): 34 | pass 35 | 36 | def test_sync_denoise_audio(self): 37 | """ 38 | Test case for sync_denoise_audio 39 | 40 | Denoise an audio file 41 | """ 42 | test_noisy_audio = os.path.normpath(os.path.join(DIR, "data/noisy.wav")) 43 | test_clean_audio = os.path.normpath(os.path.join(DIR, "data/clean.wav")) 44 | test_reconstructed_audio = os.path.normpath(os.path.join(DIR, "data/reconstructed.wav")) 45 | body = Audio.from_file(file_name=test_noisy_audio) 46 | api_response = self.api.sync_denoise_audio(body=body) 47 | api_response.to_file(test_reconstructed_audio) 48 | self.assertTrue(TestDenoiseApi.sdr(test_clean_audio, test_reconstructed_audio) > 5) 49 | pass 50 | 51 | 52 | if __name__ == '__main__': 53 | unittest.main() 54 | -------------------------------------------------------------------------------- /test/test_diarize_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | import os 13 | import sys 14 | import unittest 15 | 16 | import deepaffects 17 | from deepaffects.rest import ApiException 18 | from deepaffects.apis.diarize_api import DiarizeApi 19 | 20 | 21 | class TestDiarizeApi(unittest.TestCase): 22 | """ DiarizeApi unit test stubs """ 23 | 24 | def setUp(self): 25 | self.api = deepaffects.apis.diarize_api.DiarizeApi() 26 | 27 | def tearDown(self): 28 | pass 29 | 30 | def test_async_diarize_audio(self): 31 | """ 32 | Test case for diarize_audio 33 | 34 | Diarize an audio file 35 | """ 36 | pass 37 | 38 | 39 | if __name__ == '__main__': 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /test/test_diarize_api_v2.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI spec version: v1 7 | """ 8 | 9 | 10 | from __future__ import absolute_import 11 | 12 | import os 13 | import sys 14 | import unittest 15 | 16 | import deepaffects 17 | from deepaffects import Audio 18 | from deepaffects.rest import ApiException 19 | from deepaffects.apis.diarize_api_v2 import DiarizeApiV2 20 | from .test_base_setup import DIR 21 | import uuid 22 | 23 | 24 | class TestDiarizeApiV2(unittest.TestCase): 25 | """ DiarizeApiV2 unit test stubs """ 26 
| 27 | def setUp(self): 28 | deepaffects.configuration.api_key['apikey'] = os.environ['DEEPAFFECTS_API_KEY'] 29 | self.webhook_url = os.environ["DEEPAFFECTS_API_WEBHOOK"] 30 | self.api = deepaffects.apis.diarize_api_v2.DiarizeApiV2() 31 | self.request_id = str(uuid.uuid4()) 32 | 33 | def tearDown(self): 34 | pass 35 | 36 | def test_async_diarize_audio(self): 37 | """ 38 | Test case for diarize_audio 39 | 40 | Diarize an audio file 41 | """ 42 | test_conversation_audio = os.path.normpath(os.path.join(DIR, "data/happy.mp3")) 43 | body = Audio.from_file(file_name=test_conversation_audio) 44 | 45 | api_response = self.api.async_diarize_audio(body=body, webhook=self.webhook_url, request_id=self.request_id) 46 | self.assertTrue(api_response.request_id, self.request_id) 47 | 48 | if __name__ == '__main__': 49 | unittest.main() 50 | -------------------------------------------------------------------------------- /test/test_diarize_audio.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: v1 9 | """ 10 | 11 | 12 | from __future__ import absolute_import 13 | 14 | import os 15 | import sys 16 | import unittest 17 | 18 | import deepaffects 19 | from deepaffects.rest import ApiException 20 | from deepaffects.models.diarize_audio import DiarizeAudio 21 | 22 | 23 | class TestDiarizeAudio(unittest.TestCase): 24 | """ DiarizeAudio unit test stubs """ 25 | 26 | def setUp(self): 27 | pass 28 | 29 | def tearDown(self): 30 | pass 31 | 32 | def testDiarizeAudio(self): 33 | """ 34 | Test DiarizeAudio 35 | """ 36 | model = deepaffects.models.diarize_audio.DiarizeAudio() 37 | 38 | 39 | if __name__ == '__main__': 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /test/test_emotion_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: v1 9 | """ 10 | 11 | from __future__ import absolute_import 12 | 13 | import os 14 | import sys 15 | import unittest 16 | 17 | from deepaffects import Audio 18 | 19 | import deepaffects 20 | from deepaffects.rest import ApiException 21 | from deepaffects.apis.emotion_api import EmotionApi 22 | from deepaffects.models.emotion_score import EmotionScore 23 | from .test_base_setup import DIR, AudioTest 24 | 25 | 26 | class TestEmotionApi(unittest.TestCase): 27 | """ EmotionApi unit test stubs """ 28 | 29 | def setUp(self): 30 | deepaffects.configuration.api_key['apikey'] = os.environ['DEEPAFFECTS_API_KEY'] 31 | self.api = deepaffects.apis.emotion_api.EmotionApi() 32 | 33 | def tearDown(self): 34 | pass 35 | 36 | def test_async_recognise_emotion(self): 37 | """ 38 | Test case for async_recognise_emotion 39 | 40 | Find emotion in an audio file 41 | """ 42 | pass 43 | 44 | def test_sync_recognise_emotion(self): 45 | """ 46 | Test case for sync_recognise_emotion 47 | 48 | Find emotion in an audio file 49 | """ 50 | test_happy_audio = os.path.normpath(os.path.join(DIR, "data/happy.mp3")) 51 | body = Audio.from_file(file_name=test_happy_audio) 52 | api_response = self.api.sync_recognise_emotion(body=body) 53 | for obj in api_response: 54 | if obj.emotion == 'Happy': 55 | assert obj.score > 0.8 56 | 57 | 58 | if __name__ == '__main__': 59 | unittest.main() 60 | 
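Note: the `sdr()` helper in `test/test_denoise_api.py` above is left as a stub that returns a constant (`# TODO: Implement SDR here`). A minimal sketch of one way it could be filled in is shown below, assuming numpy is installed (the suite already guards numpy imports via `_require_numpy` in `test_base_setup.py`) and that both paths point to WAV files of comparable length. This is only an illustrative signal-to-distortion ratio, not part of the library.

```python
# Illustrative sketch only; not part of the deepaffects package.
# One way TestDenoiseApi.sdr(clean, reconstructed) could be implemented,
# assuming both arguments are paths to WAV files of roughly equal length.
import numpy as np
from pydub import AudioSegment


def sdr(clean_path, reconstructed_path):
    """Plain signal-to-distortion ratio (dB) between a clean and a reconstructed WAV."""
    clean = np.array(AudioSegment.from_wav(clean_path).get_array_of_samples(), dtype=np.float64)
    recon = np.array(AudioSegment.from_wav(reconstructed_path).get_array_of_samples(), dtype=np.float64)
    n = min(len(clean), len(recon))           # tolerate a few trailing samples of difference
    clean, recon = clean[:n], recon[:n]
    noise = clean - recon                     # residual distortion after denoising
    return 10.0 * np.log10(np.sum(clean ** 2) / (np.sum(noise ** 2) + 1e-12))
```

With something like this in place, the existing `sdr(...) > 5` assertion in `test_sync_denoise_audio` measures actual reconstruction quality instead of comparing against a hard-coded value.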
-------------------------------------------------------------------------------- /test/test_emotion_score.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: v1 9 | """ 10 | 11 | 12 | from __future__ import absolute_import 13 | 14 | import os 15 | import sys 16 | import unittest 17 | 18 | import deepaffects 19 | from deepaffects.rest import ApiException 20 | from deepaffects.models.emotion_score import EmotionScore 21 | 22 | 23 | class TestEmotionScore(unittest.TestCase): 24 | """ EmotionScore unit test stubs """ 25 | 26 | def setUp(self): 27 | pass 28 | 29 | def tearDown(self): 30 | pass 31 | 32 | def testEmotionScore(self): 33 | """ 34 | Test EmotionScore 35 | """ 36 | model = deepaffects.models.emotion_score.EmotionScore() 37 | 38 | 39 | if __name__ == '__main__': 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /test/test_featurize_api.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | """ 4 | DeepAffects 5 | 6 | OpenAPI Specification of DeepAffects APIs 7 | 8 | OpenAPI spec version: v1 9 | """ 10 | 11 | 12 | from __future__ import absolute_import 13 | 14 | import os 15 | import sys 16 | import unittest 17 | 18 | import deepaffects 19 | from deepaffects.rest import ApiException 20 | from deepaffects.apis.featurize_api import FeaturizeApi 21 | 22 | 23 | class TestFeaturizeApi(unittest.TestCase): 24 | """ FeaturizeApi unit test stubs """ 25 | 26 | def setUp(self): 27 | self.api = deepaffects.apis.featurize_api.FeaturizeApi() 28 | 29 | def tearDown(self): 30 | pass 31 | 32 | def test_async_featurize_audio(self): 33 | """ 34 | Test case for featurize_audio 35 | 36 | featurize an audio file 37 | """ 38 | pass 39 | 40 | 41 | if __name__ == '__main__': 42 | unittest.main() 43 | --------------------------------------------------------------------------------
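Each realtime example above asks the caller to implement `chunk_generator()`, a generator that yields `segment_chunk` objects; the shipped helpers `chunk_generator_from_file`, `chunk_generator_from_url`, and `chunk_generator_from_playlist` in `deepaffects.realtime.util` already do this. The sketch below is only illustrative: it assumes `segment_chunk` accepts the documented fields as keyword arguments, that `content` is a base64-encoded string, and that `segmentOffset` is expressed in seconds; check `deepaffects/realtime/types.py` and `deepaffects/realtime/util.py` for the authoritative behaviour.

```python
# Illustrative sketch only; the helpers in deepaffects.realtime.util are the
# reference implementation. Field names follow the segment_chunk Args
# documented in the examples above; segmentOffset in seconds is an assumption.
import base64

from pydub import AudioSegment
from deepaffects.realtime.types import segment_chunk


def chunk_generator(file_path, chunk_ms=3000, encoding="wav",
                    language_code="en-US", sample_rate=16000):
    """Yield base64-encoded, roughly 3 second segments of a local audio file."""
    audio = AudioSegment.from_file(file_path)
    audio = audio.set_frame_rate(sample_rate).set_channels(1)  # normalise before slicing
    for offset_ms in range(0, len(audio), chunk_ms):
        raw = audio[offset_ms:offset_ms + chunk_ms].export(format=encoding).read()
        yield segment_chunk(
            encoding=encoding,
            languageCode=language_code,
            sampleRate=sample_rate,
            content=base64.b64encode(raw).decode("utf-8"),
            segmentOffset=offset_ms / 1000.0,
        )
```

A generator like this can be passed wherever the examples call `chunk_generator_from_file(file_path)`, for example `client.IdentifyEmotion(chunk_generator(file_path), TIMEOUT_SECONDS, metadata=metadata)`.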