├── tests
├── __init__.py
├── test_client.py
├── test_results.py
├── test_tags.py
├── test_models.py
└── test_jobs.py
├── samples
├── __init__.py
├── config.json
├── image.png
├── model_sample.py
├── job_with_text_input_sample.py
├── job_with_file_input_sample.py
├── job_with_embedded_input_sample.py
└── job_with_aws_input_sample.py
├── modzy
├── edge
│ ├── proto
│ │ ├── __init__.py
│ │ ├── jobs
│ │ │ ├── __init__.py
│ │ │ └── v1
│ │ │ │ └── __init__.py
│ │ ├── common
│ │ │ ├── __init__.py
│ │ │ └── v1
│ │ │ │ ├── __init__.py
│ │ │ │ ├── errors_pb2.pyi
│ │ │ │ ├── errors_pb2.py
│ │ │ │ ├── common_pb2.pyi
│ │ │ │ └── common_pb2.py
│ │ ├── accounting
│ │ │ ├── __init__.py
│ │ │ └── v1
│ │ │ │ ├── __init__.py
│ │ │ │ ├── accounting_pb2.pyi
│ │ │ │ └── accounting_pb2.py
│ │ ├── inferences
│ │ │ ├── __init__.py
│ │ │ └── api
│ │ │ │ ├── __init__.py
│ │ │ │ └── v1
│ │ │ │ ├── __init__.py
│ │ │ │ ├── inferences_pb2_grpc.py
│ │ │ │ └── inferences_pb2.pyi
│ │ ├── results
│ │ │ └── v1
│ │ │ │ ├── __init__.py
│ │ │ │ ├── results_pb2.pyi
│ │ │ │ └── results_pb2_grpc.py
│ │ └── protoc_gen_openapiv2
│ │ │ └── options
│ │ │ ├── __init__.py
│ │ │ ├── annotations_pb2.pyi
│ │ │ └── annotations_pb2.py
│ ├── __init__.py
│ ├── client.py
│ └── jobs.py
├── __init__.py
├── _size.py
├── _api_object.py
├── client.py
├── tags.py
├── error.py
├── http.py
├── _util.py
└── results.py
├── key.png
├── python.gif
├── install.gif
├── python-sdk-github-banner.png
├── pyproject.toml
├── .editorconfig
├── MANIFEST.in
├── requirements_dev.txt
├── .github
├── PULL_REQUEST_TEMPLATE.md
└── ISSUE_TEMPLATE.md
├── tox.ini
├── setup.cfg
├── HISTORY.rst
├── setup.py
├── .gitignore
├── Makefile
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.adoc
└── LICENSE
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/samples/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/jobs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/common/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/common/v1/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/jobs/v1/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/accounting/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/accounting/v1/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/inferences/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/inferences/api/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/results/v1/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/inferences/api/v1/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modzy/edge/proto/protoc_gen_openapiv2/options/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modzy/sdk-python/HEAD/key.png
--------------------------------------------------------------------------------
/python.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modzy/sdk-python/HEAD/python.gif
--------------------------------------------------------------------------------
/install.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modzy/sdk-python/HEAD/install.gif
--------------------------------------------------------------------------------
/samples/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "languages": ["eng", "deu", "fra"]
3 | }
4 |
--------------------------------------------------------------------------------
/modzy/edge/__init__.py:
--------------------------------------------------------------------------------
1 | from .proto.inferences.api.v1.inferences_pb2 import InputSource
--------------------------------------------------------------------------------
/samples/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modzy/sdk-python/HEAD/samples/image.png
--------------------------------------------------------------------------------
/python-sdk-github-banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modzy/sdk-python/HEAD/python-sdk-github-banner.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools>=42",
4 | "wheel"
5 | ]
6 | build-backend = "setuptools.build_meta"
7 |
--------------------------------------------------------------------------------
/modzy/__init__.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""Modzy Python API Client."""

import logging

# Public re-exports: ApiClient (HTTP API) and EdgeClient (edge/gRPC API).
from .client import ApiClient  # noqa
from .edge.client import EdgeClient  # noqa
__version__ = '0.11.6'

# Library convention: attach a NullHandler so applications that import modzy
# without configuring logging do not see "No handler found" warnings.
logging.getLogger(__name__).addHandler(logging.NullHandler())
11 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # http://editorconfig.org
2 |
3 | root = true
4 |
5 | [*]
6 | indent_style = space
7 | indent_size = 4
8 | trim_trailing_whitespace = true
9 | insert_final_newline = true
10 | charset = utf-8
11 |
12 | [*.bat]
13 | indent_style = tab
14 | end_of_line = crlf
15 |
16 | [Makefile]
17 | indent_style = tab
18 |
--------------------------------------------------------------------------------
/tests/test_client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """Tests for `modzy` package."""
5 |
6 | from modzy import ApiClient
7 |
8 |
def test_can_construct_client():
    """Smoke test: an ApiClient can be built from a base URL and an API key."""
    api = ApiClient('https://example.com', 'my-key')
    assert api is not None
12 |
13 | # TODO: actual test suite
14 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include CONTRIBUTING.adoc
2 | include HISTORY.rst
 3 | include LICENSE
 4 | include README.md
5 |
6 | recursive-include tests *
7 | recursive-exclude * __pycache__
8 | recursive-exclude * *.py[co]
9 |
10 | recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif
11 |
12 | prune samples
13 | prune tests
14 |
--------------------------------------------------------------------------------
/requirements_dev.txt:
--------------------------------------------------------------------------------
1 | certifi==2021.5.30
2 | chardet==4.0.0
3 | idna==3.1
4 | requests==2.26.0
5 | urllib3==1.26.7
6 |
7 | pip>=20.0.0
8 | bump2version==1.0.1
9 | wheel==0.37.0
10 | watchdog==2.1.5
11 | flake8==3.9.2
12 | tox==3.24.4
13 | coverage==5.5
14 | Sphinx==4.2.0
15 |
16 | pytest==6.2.5
17 | python-dotenv==0.19.0
18 | pytest-dotenv==0.5.2
19 |
20 | deprecation==2.1.0
21 |
22 | grpcio-tools
23 | grpcio
24 | grpcio-reflection
 25 | protobuf~=4.21.10
26 | pyyaml
27 | six
28 | gcloud
29 | boto3
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 |
4 |
5 | ## Related issues
6 |
7 |
8 |
9 | ## Tests
10 |
11 |
12 |
13 | ## Checklist
14 |
15 | - [ ] I read the Contributing guide.
16 | - [ ] I updated the documentation and, if relevant, the README.md file.
17 | - [ ] I am willing to follow-up on review comments in a timely manner.
18 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
 2 | envlist = py37, flake8
3 |
4 | [testenv:flake8]
5 | basepython = python
6 | deps = flake8
7 | commands = flake8 modzy tests
8 |
9 | [testenv]
10 | setenv =
11 | PYTHONPATH = {toxinidir}
12 | deps =
13 | -r{toxinidir}/requirements_dev.txt
14 | ; If you want to make tox run the tests with the same versions, create a
15 | ; requirements.txt with the pinned versions and uncomment the following line:
16 | ; -r{toxinidir}/requirements.txt
17 | commands =
18 | pip install -U pip
19 | py.test --basetemp={envtmpdir}
20 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bumpversion]
 2 | current_version = 0.11.6
3 | commit = True
4 | tag = True
5 |
6 | [bumpversion:file:setup.py]
7 | search = version='{current_version}'
8 | replace = version='{new_version}'
9 |
10 | [bumpversion:file:modzy/__init__.py]
11 | search = __version__ = '{current_version}'
12 | replace = __version__ = '{new_version}'
13 |
14 | [bdist_wheel]
15 | universal = 1
16 |
17 | [flake8]
18 | exclude = docs
19 | max-line-length = 119
20 |
21 | [aliases]
22 | test = pytest
23 |
24 | [tool:pytest]
25 | collect_ignore = ['setup.py']
26 | log_cli = true
27 | log_cli_level = DEBUG
28 |
29 |
--------------------------------------------------------------------------------
/modzy/_size.py:
--------------------------------------------------------------------------------
import re
from enum import IntEnum

# A human-readable size: an integer or decimal number followed by an optional
# unit suffix of up to two letters (e.g. "100", "1.5K", "2Mi").
# Raw string literal: '\d' / '\.' in a plain string are invalid escape
# sequences and raise a SyntaxWarning on modern Python.
SIZE_PATTERN = re.compile(r'^(\d+(\.\d+)?)([a-zA-Z]{0,2})$')
5 |
6 |
class DataUnit(IntEnum):
    """Byte multipliers keyed by size-suffix string (see SIZE_PATTERN)."""

    i = 1                 # plain bytes
    K = 1000              # kilobytes (decimal)
    M = 1000 ** 2         # megabytes
    G = 1000 ** 3         # gigabytes
    T = 1000 ** 4         # terabytes
    Ki = 1024             # kibibytes (binary)
    Mi = 1024 ** 2        # mebibytes
    Gi = 1024 ** 3        # gibibytes
    Ti = 1024 ** 4        # tebibytes
    KB = 1024             # alias of Ki -- binary interpretation kept for compatibility
    MB = 1024 ** 2        # alias of Mi
    GB = 1024 ** 3        # alias of Gi
    TB = 1024 ** 4        # alias of Ti
21 |
22 |
def human_read_to_bytes(human_size):
    """Convert a human-readable size string to an integer byte count.

    Accepts an integer or decimal number with an optional unit suffix
    understood by :class:`DataUnit` (e.g. ``"100i"``, ``"1.5K"``, ``"2Mi"``).
    A missing suffix is treated as plain bytes.

    Raises:
        ValueError: if the string is not a recognizable size.
        KeyError: if the unit suffix is not a known ``DataUnit`` member
            (unchanged from the previous behavior).
    """
    match = SIZE_PATTERN.match(human_size)
    if match is None:
        # Previously this fell through to an opaque AttributeError on
        # `match.group`; name the actual problem instead.
        raise ValueError(f"invalid size string: {human_size!r}")
    number, _, unit = match.groups()
    # Empty suffix means plain bytes; DataUnit[''] used to raise KeyError
    # even though the pattern accepts a bare number.
    multiplier = DataUnit[unit] if unit else 1
    # float() rather than int(): the pattern explicitly allows decimals
    # like "1.5K", which int() rejected with a confusing ValueError.
    return int(float(number) * multiplier)
26 |
--------------------------------------------------------------------------------
/modzy/edge/proto/common/v1/errors_pb2.pyi:
--------------------------------------------------------------------------------
1 | from google.protobuf import descriptor as _descriptor
2 | from google.protobuf import message as _message
3 | from typing import ClassVar as _ClassVar, Optional as _Optional
4 |
5 | DESCRIPTOR: _descriptor.FileDescriptor
6 |
class Error(_message.Message):
    # Generated type stub mirroring the fields of the common.v1.Error
    # protobuf message; regenerate from protos/modzy/common/v1/errors.proto
    # rather than editing by hand.
    __slots__ = ["message", "report_error_url", "status", "status_code"]
    MESSAGE_FIELD_NUMBER: _ClassVar[int]
    REPORT_ERROR_URL_FIELD_NUMBER: _ClassVar[int]
    STATUS_CODE_FIELD_NUMBER: _ClassVar[int]
    STATUS_FIELD_NUMBER: _ClassVar[int]
    message: str
    report_error_url: str
    status: str
    status_code: int
    def __init__(self, message: _Optional[str] = ..., report_error_url: _Optional[str] = ..., status: _Optional[str] = ..., status_code: _Optional[int] = ...) -> None: ...
18 |
--------------------------------------------------------------------------------
/modzy/edge/proto/protoc_gen_openapiv2/options/annotations_pb2.pyi:
--------------------------------------------------------------------------------
1 | from google.protobuf import descriptor_pb2 as _descriptor_pb2
2 | from modzy.edge.proto.protoc_gen_openapiv2.options import openapiv2_pb2 as _openapiv2_pb2
3 | from google.protobuf import descriptor as _descriptor
4 | from typing import ClassVar as _ClassVar
5 |
6 | DESCRIPTOR: _descriptor.FileDescriptor
7 | OPENAPIV2_FIELD_FIELD_NUMBER: _ClassVar[int]
8 | OPENAPIV2_OPERATION_FIELD_NUMBER: _ClassVar[int]
9 | OPENAPIV2_SCHEMA_FIELD_NUMBER: _ClassVar[int]
10 | OPENAPIV2_SWAGGER_FIELD_NUMBER: _ClassVar[int]
11 | OPENAPIV2_TAG_FIELD_NUMBER: _ClassVar[int]
12 | openapiv2_field: _descriptor.FieldDescriptor
13 | openapiv2_operation: _descriptor.FieldDescriptor
14 | openapiv2_schema: _descriptor.FieldDescriptor
15 | openapiv2_swagger: _descriptor.FieldDescriptor
16 | openapiv2_tag: _descriptor.FieldDescriptor
17 |
--------------------------------------------------------------------------------
/HISTORY.rst:
--------------------------------------------------------------------------------
1 | =======
2 | History
3 | =======
4 |
5 | 0.5.2
6 | ------------------
7 | * Update readme file
8 |
9 | 0.5.1
10 | ------------------
11 | * Update version to match Modzy version
12 |
13 | 0.3.2 (2020-05-12)
14 | ------------------
15 |
16 | * Update model and version client
17 | * Added samples
18 | * Review readme file, contributing guide
19 | * Added .env support
20 |
21 |
22 | 0.3.1 (2019-11-22)
23 | ------------------
24 |
25 | * Added basic logging.
26 | * Switched to `requests` library for HTTP requests.
27 |
28 | 0.3.0 (2019-11-14)
29 | ------------------
30 |
31 | * Added Tag support.
32 | * Added ability to query job history.
33 | * Added ability to query related models.
34 |
 35 | 0.2.0 (2019-09-02)
36 | ------------------
37 |
38 | * Rename library to `modzy`.
39 |
40 | 0.1.1 (2019-08-27)
41 | ------------------
42 |
43 | * Added docstrings.
44 |
45 | 0.1.0 (2019-08-26)
46 | ------------------
47 |
48 | * Initial release.
49 |
--------------------------------------------------------------------------------
/tests/test_results.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import logging
5 | import os
6 | import dotenv
7 | import pytest
8 | from datetime import datetime, timedelta
9 | from modzy import ApiClient, error
10 |
11 | dotenv.load_dotenv()
12 |
13 | BASE_URL = os.getenv('MODZY_BASE_URL')
14 | API_KEY = os.getenv('MODZY_API_KEY')
15 |
16 | MODEL_ID = 'ed542963de' # sentiment-analysis
17 | BLOCK_TIMEOUT = 600 # how long to wait until giving up on real api
18 |
@pytest.fixture()
def client():
    """Fresh ApiClient pointed at the env-configured Modzy instance."""
    api = ApiClient(base_url=BASE_URL, api_key=API_KEY)
    return api
22 |
@pytest.fixture()
def logger():
    """Module logger for test diagnostics."""
    log = logging.getLogger(__name__)
    return log
26 |
def test_get_results(client, logger):
    """Submit a text job, wait for completion, then fetch results two ways."""
    submitted = client.jobs.submit_text(MODEL_ID, '0.0.27', {'input.txt': 'Modzy is great!'})
    logger.debug("job %s", submitted)
    submitted.block_until_complete(timeout=BLOCK_TIMEOUT)
    logger.debug("job after block %s", submitted)
    by_id = client.results.get(submitted.job_identifier)  # lookup by id string
    logger.debug("job results by id %s: %s", submitted, by_id)
    by_object = client.results.get(submitted)  # lookup by job object
    logger.debug("job results by object %s: %s", submitted, by_object)
    # Two independent fetches: distinct objects describing the same job.
    assert by_id is not by_object
    assert by_id.job_identifier == by_object.job_identifier
38 |
--------------------------------------------------------------------------------
/modzy/edge/proto/accounting/v1/accounting_pb2.pyi:
--------------------------------------------------------------------------------
1 | from google.api import field_behavior_pb2 as _field_behavior_pb2
2 | from google.protobuf import descriptor as _descriptor
3 | from google.protobuf import message as _message
4 | from typing import ClassVar as _ClassVar, Optional as _Optional
5 |
6 | DESCRIPTOR: _descriptor.FileDescriptor
7 |
8 | class APIKeyIdentifier(_message.Message):
9 | __slots__ = ["prefix"]
10 | PREFIX_FIELD_NUMBER: _ClassVar[int]
11 | prefix: str
12 | def __init__(self, prefix: _Optional[str] = ...) -> None: ...
13 |
14 | class AccountIdentifier(_message.Message):
15 | __slots__ = ["identifier"]
16 | IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
17 | identifier: str
18 | def __init__(self, identifier: _Optional[str] = ...) -> None: ...
19 |
20 | class TeamIdentifier(_message.Message):
21 | __slots__ = ["identifier"]
22 | IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
23 | identifier: str
24 | def __init__(self, identifier: _Optional[str] = ...) -> None: ...
25 |
26 | class UserIdentifier(_message.Message):
27 | __slots__ = ["email", "identifier", "name"]
28 | EMAIL_FIELD_NUMBER: _ClassVar[int]
29 | IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
30 | NAME_FIELD_NUMBER: _ClassVar[int]
31 | email: str
32 | identifier: str
33 | name: str
34 | def __init__(self, identifier: _Optional[str] = ..., email: _Optional[str] = ..., name: _Optional[str] = ...) -> None: ...
35 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Checklist
2 |
3 | Please review first that the issue is fully related with this SDK by checking the relevant checkboxes (`[x]`).
4 |
 5 | - [ ] I have an active Modzy API key and the entitlements to perform the desired action.
 6 | - [ ] I have verified that I have access to the Modzy API host.
 7 | - [ ] I believe this is an error specific to the SDK.
 8 | - [ ] I have reviewed the documentation and existing issues so as not to file a duplicate.
 9 | - [ ] I am willing to follow up on comments in a timely manner.
10 |
11 | ### Info
12 |
13 | * Modzy SDK version:
14 | * Python version:
15 | * Operating System:
16 |
17 | ### Description
18 |
19 |
20 |
21 | ### Steps to reproduce
22 |
23 |
24 |
25 | ```
26 | Paste the command(s) you ran and the output.
27 | ```
28 |
29 | **Expected results:**
30 |
31 | **Actual results:**
32 |
33 | ### Traceback
34 |
35 |
36 | Logs
37 |
42 |
43 | ```
44 | Paste the logs that you consider useful for diagnostic.
45 | ```
46 |
47 |
--------------------------------------------------------------------------------
/modzy/edge/proto/common/v1/errors_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: protos/modzy/common/v1/errors.proto
4 | """Generated protocol buffer code."""
5 | from google.protobuf.internal import builder as _builder
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import descriptor_pool as _descriptor_pool
8 | from google.protobuf import symbol_database as _symbol_database
9 | # @@protoc_insertion_point(imports)
10 |
11 | _sym_db = _symbol_database.Default()
12 |
13 |
14 |
15 |
16 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#protos/modzy/common/v1/errors.proto\x12\tcommon.v1\"\x84\x01\n\x05\x45rror\x12\x18\n\x07message\x18\x01 \x01(\tR\x07message\x12(\n\x10report_error_url\x18\x02 \x01(\tR\x0ereportErrorUrl\x12\x16\n\x06status\x18\x03 \x01(\tR\x06status\x12\x1f\n\x0bstatus_code\x18\x04 \x01(\rR\nstatusCodeBa\n#com.modzy.platform.protos.common.v1P\x01Z8github.modzy.engineering/platform/protos/modzy/common/v1b\x06proto3')
17 |
18 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
19 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'protos.modzy.common.v1.errors_pb2', globals())
20 | if _descriptor._USE_C_DESCRIPTORS == False:
21 |
22 | DESCRIPTOR._options = None
23 | DESCRIPTOR._serialized_options = b'\n#com.modzy.platform.protos.common.v1P\001Z8github.modzy.engineering/platform/protos/modzy/common/v1'
24 | _ERROR._serialized_start=51
25 | _ERROR._serialized_end=183
26 | # @@protoc_insertion_point(module_scope)
27 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import find_packages, setup

# Explicit encoding: open()'s default is platform-dependent and these
# files may contain non-ASCII text.
with open('README.md', encoding='utf-8') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst', encoding='utf-8') as history_file:
    history = history_file.read()

requirements = ['requests', 'python-dotenv', 'deprecation', 'protobuf~=4.21.10', 'grpcio', 'google-api-python-client', 'boto3']

setup(
    author='Modzy',
    author_email='support@modzy.com',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        # The 3.4-3.6 classifiers contradicted python_requires='>=3.7'
        # below; only advertise versions the package can install on.
        'Programming Language :: Python :: 3.7'
    ],
    description="Modzy's Python SDK queries and deploys models, submits inference jobs and returns results directly to your editor.",
    python_requires='>=3.7',
    install_requires=requirements,
    long_description=readme,
    long_description_content_type='text/markdown',
    include_package_data=True,
    keywords='modzy, sdk',
    name='modzy-sdk',
    packages=find_packages(),
    package_data={
        # Ship the generated .pyi stubs alongside the protobuf modules.
        "": ["*.pyi"]
    },
    url='https://github.com/modzy/sdk-python',
    version='0.11.6',
    zip_safe=False,
)
49 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Windows OS
2 | $RECYCLE.BIN/
3 | Desktop.ini
4 | Thumbs.db
5 | ehthumbs.db
6 |
7 | # Mac OS
8 | .Spotlight-V100
9 | .AppleDouble
10 | .LSOverride
11 | .DS_Store
12 | .Trashes
13 |
14 | # Byte-compiled / optimized / DLL files
15 | __pycache__/
16 | *.py[cod]
17 | *$py.class
18 |
19 | # C extensions
20 | *.so
21 |
22 | # Distribution / packaging
23 | .Python
24 | env/
25 | build/
26 | develop-eggs/
27 | dist/
28 | downloads/
29 | eggs/
30 | .eggs/
31 | lib/
32 | lib64/
33 | parts/
34 | sdist/
35 | var/
36 | wheels/
37 | *.egg-info/
38 | .installed.cfg
39 | *.egg
40 |
41 | # PyInstaller
42 | # Usually these files are written by a python script from a template
43 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
44 | *.manifest
45 | *.spec
46 |
47 | # Installer logs
48 | pip-log.txt
49 | pip-delete-this-directory.txt
50 |
51 | # Unit test / coverage reports
52 | htmlcov/
53 | .tox/
54 | .coverage
55 | .coverage.*
56 | .cache
57 | nosetests.xml
58 | coverage.xml
59 | *.cover
60 | .hypothesis/
61 | .pytest_cache/
62 |
63 | # Translations
64 | *.mo
65 | *.pot
66 |
67 | # Django stuff:
68 | *.log
69 | local_settings.py
70 |
71 | # Flask stuff:
72 | instance/
73 | .webassets-cache
74 |
75 | # Scrapy stuff:
76 | .scrapy
77 |
78 | # Sphinx documentation
79 | docs/_build/
80 |
81 | # PyBuilder
82 | target/
83 |
84 | # Jupyter Notebook
85 | .ipynb_checkpoints
86 |
87 | # files
88 | deploy-test.py
89 | new-job-routes-test.py
90 |
91 | # pyenv
92 | .python-version
93 |
94 | # celery beat schedule file
95 | celerybeat-schedule
96 |
97 | # SageMath parsed files
98 | *.sage.py
99 |
100 | # dotenv
101 | .env
102 |
103 | # virtualenv
104 | .venv
105 | venv/
106 | ENV/
107 |
108 | # Spyder project settings
109 | .spyderproject
110 | .spyproject
111 |
112 | # Rope project settings
113 | .ropeproject
114 |
115 | # mkdocs documentation
116 | /site
117 |
118 | # mypy
119 | .mypy_cache/
120 |
121 | # IntelliJ
122 | .idea
123 |
124 | # Visual Studio Code
125 | .vscode
126 |
--------------------------------------------------------------------------------
/modzy/_api_object.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import json
4 | import re
5 | from keyword import iskeyword
6 |
7 |
8 | def to_snake_case(name):
9 | return re.sub('((?<=[a-z0-9])[A-Z]|(?!^)(? None: ...
19 | class SortEntry(_message.Message):
20 | __slots__ = ["key", "value"]
21 | KEY_FIELD_NUMBER: _ClassVar[int]
22 | VALUE_FIELD_NUMBER: _ClassVar[int]
23 | key: str
24 | value: str
25 | def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
26 | FILTERS_FIELD_NUMBER: _ClassVar[int]
27 | PAGE_FIELD_NUMBER: _ClassVar[int]
28 | SORT_FIELD_NUMBER: _ClassVar[int]
29 | filters: _containers.ScalarMap[str, str]
30 | page: Pagination
31 | sort: _containers.ScalarMap[str, str]
32 | def __init__(self, filters: _Optional[_Mapping[str, str]] = ..., sort: _Optional[_Mapping[str, str]] = ..., page: _Optional[_Union[Pagination, _Mapping]] = ...) -> None: ...
33 |
34 | class ModelAutoscalingConfig(_message.Message):
35 | __slots__ = ["maximum", "minimum", "model", "model_library_id"]
36 | MAXIMUM_FIELD_NUMBER: _ClassVar[int]
37 | MINIMUM_FIELD_NUMBER: _ClassVar[int]
38 | MODEL_FIELD_NUMBER: _ClassVar[int]
39 | MODEL_LIBRARY_ID_FIELD_NUMBER: _ClassVar[int]
40 | maximum: int
41 | minimum: int
42 | model: ModelIdentifier
43 | model_library_id: str
44 | def __init__(self, model: _Optional[_Union[ModelIdentifier, _Mapping]] = ..., minimum: _Optional[int] = ..., maximum: _Optional[int] = ..., model_library_id: _Optional[str] = ...) -> None: ...
45 |
46 | class ModelIdentifier(_message.Message):
47 | __slots__ = ["identifier", "name", "version"]
48 | IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
49 | NAME_FIELD_NUMBER: _ClassVar[int]
50 | VERSION_FIELD_NUMBER: _ClassVar[int]
51 | identifier: str
52 | name: str
53 | version: str
54 | def __init__(self, identifier: _Optional[str] = ..., version: _Optional[str] = ..., name: _Optional[str] = ...) -> None: ...
55 |
56 | class ModelLibrary(_message.Message):
57 | __slots__ = ["ca_certificate", "identifier", "name", "registry", "url"]
58 | CA_CERTIFICATE_FIELD_NUMBER: _ClassVar[int]
59 | IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
60 | NAME_FIELD_NUMBER: _ClassVar[int]
61 | REGISTRY_FIELD_NUMBER: _ClassVar[int]
62 | URL_FIELD_NUMBER: _ClassVar[int]
63 | ca_certificate: str
64 | identifier: str
65 | name: str
66 | registry: Registry
67 | url: str
68 | def __init__(self, identifier: _Optional[str] = ..., url: _Optional[str] = ..., name: _Optional[str] = ..., ca_certificate: _Optional[str] = ..., registry: _Optional[_Union[Registry, _Mapping]] = ...) -> None: ...
69 |
70 | class Page(_message.Message):
71 | __slots__ = ["number", "page_count", "size", "total_elements"]
72 | NUMBER_FIELD_NUMBER: _ClassVar[int]
73 | PAGE_COUNT_FIELD_NUMBER: _ClassVar[int]
74 | SIZE_FIELD_NUMBER: _ClassVar[int]
75 | TOTAL_ELEMENTS_FIELD_NUMBER: _ClassVar[int]
76 | number: int
77 | page_count: int
78 | size: int
79 | total_elements: int
80 | def __init__(self, number: _Optional[int] = ..., size: _Optional[int] = ..., page_count: _Optional[int] = ..., total_elements: _Optional[int] = ...) -> None: ...
81 |
82 | class Pagination(_message.Message):
83 | __slots__ = ["number", "size"]
84 | NUMBER_FIELD_NUMBER: _ClassVar[int]
85 | SIZE_FIELD_NUMBER: _ClassVar[int]
86 | number: int
87 | size: int
88 | def __init__(self, number: _Optional[int] = ..., size: _Optional[int] = ...) -> None: ...
89 |
90 | class Registry(_message.Message):
91 | __slots__ = ["credentials", "host", "port", "skip_tls_verify"]
92 | CREDENTIALS_FIELD_NUMBER: _ClassVar[int]
93 | HOST_FIELD_NUMBER: _ClassVar[int]
94 | PORT_FIELD_NUMBER: _ClassVar[int]
95 | SKIP_TLS_VERIFY_FIELD_NUMBER: _ClassVar[int]
96 | credentials: str
97 | host: str
98 | port: int
99 | skip_tls_verify: bool
100 | def __init__(self, host: _Optional[str] = ..., port: _Optional[int] = ..., credentials: _Optional[str] = ..., skip_tls_verify: bool = ...) -> None: ...
101 |
--------------------------------------------------------------------------------
/modzy/edge/proto/common/v1/common_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: protos/modzy/common/v1/common.proto
4 | """Generated protocol buffer code."""
5 | from google.protobuf.internal import builder as _builder
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import descriptor_pool as _descriptor_pool
8 | from google.protobuf import symbol_database as _symbol_database
9 | # @@protoc_insertion_point(imports)
10 |
11 | _sym_db = _symbol_database.Default()
12 |
13 |
14 | from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
15 | from ...protoc_gen_openapiv2.options import annotations_pb2 as protoc__gen__openapiv2_dot_options_dot_annotations__pb2
16 |
17 |
# Generated by the protocol buffer compiler from
# protos/modzy/common/v1/common.proto -- DO NOT EDIT by hand.
# The bytes below are the serialized FileDescriptorProto for that file.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#protos/modzy/common/v1/common.proto\x12\tcommon.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a.protoc-gen-openapiv2/options/annotations.proto\"\xa4\x01\n\x0fModelIdentifier\x12\x34\n\nidentifier\x18\x01 \x01(\tB\x14\xe0\x41\x02\x92\x41\x0eJ\x0c\"ed542963de\"R\nidentifier\x12)\n\x07version\x18\x02 \x01(\tB\x0f\xe0\x41\x02\x92\x41\tJ\x07\"1.0.1\"R\x07version\x12\x30\n\x04name\x18\x03 \x01(\tB\x1c\xe0\x41\x03\x92\x41\x16J\x14\"Sentiment Analysis\"R\x04name\"\xac\x01\n\x0cModelLibrary\x12\x1e\n\nidentifier\x18\x05 \x01(\tR\nidentifier\x12\x10\n\x03url\x18\x01 \x01(\tR\x03url\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12%\n\x0e\x63\x61_certificate\x18\x03 \x01(\tR\rcaCertificate\x12/\n\x08registry\x18\x04 \x01(\x0b\x32\x13.common.v1.RegistryR\x08registry\"\xa8\x01\n\x16ModelAutoscalingConfig\x12\x30\n\x05model\x18\x01 \x01(\x0b\x32\x1a.common.v1.ModelIdentifierR\x05model\x12\x18\n\x07minimum\x18\x02 \x01(\rR\x07minimum\x12\x18\n\x07maximum\x18\x03 \x01(\rR\x07maximum\x12(\n\x10model_library_id\x18\x04 \x01(\tR\x0emodelLibraryId\"|\n\x08Registry\x12\x12\n\x04host\x18\x01 \x01(\tR\x04host\x12\x12\n\x04port\x18\x02 \x01(\rR\x04port\x12 \n\x0b\x63redentials\x18\x03 \x01(\tR\x0b\x63redentials\x12&\n\x0fskip_tls_verify\x18\x05 \x01(\x08R\rskipTlsVerify\"\xa2\x02\n\x0bListOptions\x12=\n\x07\x66ilters\x18\x01 \x03(\x0b\x32#.common.v1.ListOptions.FiltersEntryR\x07\x66ilters\x12\x34\n\x04sort\x18\x02 \x03(\x0b\x32 .common.v1.ListOptions.SortEntryR\x04sort\x12)\n\x04page\x18\x03 \x01(\x0b\x32\x15.common.v1.PaginationR\x04page\x1a:\n\x0c\x46iltersEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x1a\x37\n\tSortEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"8\n\nPagination\x12\x16\n\x06number\x18\x01 \x01(\rR\x06number\x12\x12\n\x04size\x18\x02 \x01(\rR\x04size\"\x82\x01\n\x04Page\x12\x16\n\x06number\x18\x01 \x01(\rR\x06number\x12\x12\n\x04size\x18\x02 \x01(\rR\x04size\x12\"\n\npage_count\x18\x03 \x01(\rB\x03\xe0\x41\x03R\tpageCount\x12*\n\x0etotal_elements\x18\x04 \x01(\rB\x03\xe0\x41\x03R\rtotalElementsBa\n#com.modzy.platform.protos.common.v1P\x01Z8github.modzy.engineering/platform/protos/modzy/common/v1b\x06proto3')

# Create the Python message classes (ModelIdentifier, ModelLibrary,
# ModelAutoscalingConfig, Registry, ListOptions, Pagination, Page) in this
# module's namespace from the descriptor registered above.
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'protos.modzy.common.v1.common_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors only: re-attach the serialized options and record
  # the start/end byte offsets of each message inside the serialized file.

  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n#com.modzy.platform.protos.common.v1P\001Z8github.modzy.engineering/platform/protos/modzy/common/v1'
  _MODELIDENTIFIER.fields_by_name['identifier']._options = None
  _MODELIDENTIFIER.fields_by_name['identifier']._serialized_options = b'\340A\002\222A\016J\014\"ed542963de\"'
  _MODELIDENTIFIER.fields_by_name['version']._options = None
  _MODELIDENTIFIER.fields_by_name['version']._serialized_options = b'\340A\002\222A\tJ\007\"1.0.1\"'
  _MODELIDENTIFIER.fields_by_name['name']._options = None
  _MODELIDENTIFIER.fields_by_name['name']._serialized_options = b'\340A\003\222A\026J\024\"Sentiment Analysis\"'
  _LISTOPTIONS_FILTERSENTRY._options = None
  _LISTOPTIONS_FILTERSENTRY._serialized_options = b'8\001'
  _LISTOPTIONS_SORTENTRY._options = None
  _LISTOPTIONS_SORTENTRY._serialized_options = b'8\001'
  _PAGE.fields_by_name['page_count']._options = None
  _PAGE.fields_by_name['page_count']._serialized_options = b'\340A\003'
  _PAGE.fields_by_name['total_elements']._options = None
  _PAGE.fields_by_name['total_elements']._serialized_options = b'\340A\003'
  _MODELIDENTIFIER._serialized_start=132
  _MODELIDENTIFIER._serialized_end=296
  _MODELLIBRARY._serialized_start=299
  _MODELLIBRARY._serialized_end=471
  _MODELAUTOSCALINGCONFIG._serialized_start=474
  _MODELAUTOSCALINGCONFIG._serialized_end=642
  _REGISTRY._serialized_start=644
  _REGISTRY._serialized_end=768
  _LISTOPTIONS._serialized_start=771
  _LISTOPTIONS._serialized_end=1061
  _LISTOPTIONS_FILTERSENTRY._serialized_start=946
  _LISTOPTIONS_FILTERSENTRY._serialized_end=1004
  _LISTOPTIONS_SORTENTRY._serialized_start=1006
  _LISTOPTIONS_SORTENTRY._serialized_end=1061
  _PAGINATION._serialized_start=1063
  _PAGINATION._serialized_end=1119
  _PAGE._serialized_start=1122
  _PAGE._serialized_end=1252
# @@protoc_insertion_point(module_scope)
59 |
--------------------------------------------------------------------------------
/tests/test_models.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """Test for Models & Model classes."""
5 |
6 | import os
7 | import dotenv
8 | import pytest
9 | from modzy import ApiClient, error
10 | import logging
11 |
dotenv.load_dotenv()

# Connection settings for the target Modzy instance, taken from the
# environment (dotenv also loads a local `.env` file if present).
BASE_URL = os.getenv('MODZY_BASE_URL')  # e.g. https://modzy.example.com/api
API_KEY = os.getenv('MODZY_API_KEY')


MODEL_ID = 'ed542963de'  # sentiment-analysis
19 |
20 |
@pytest.fixture()
def client():
    """Provide an `ApiClient` wired to the instance configured via env vars."""
    api_client = ApiClient(base_url=BASE_URL, api_key=API_KEY)
    return api_client
24 |
25 |
@pytest.fixture()
def logger():
    """Provide a module-named logger for test diagnostics."""
    log = logging.getLogger(__name__)
    return log
29 |
30 |
def test_get_all_model_objects(client, logger):
    """List every model and sanity-check the key fields on each entry."""

    def verify(entry):
        # Each listed model must expose an id, a latest version, and versions.
        logger.debug("model: %s", entry)
        logger.debug("model keys: %s", entry.keys())
        assert entry.modelId
        logger.debug("latestVersion: %s", entry.latestVersion)
        assert "latestVersion" in entry.keys()
        logger.debug("versions: %s", entry.versions)
        assert hasattr(entry, 'versions')

    models = client.models.get_all()
    logger.debug("models: %s", len(models))
    for entry in models:
        verify(entry)

    assert len(models)  # just going to assume there should be some models
44 |
45 |
def test_get_single_model(client, logger):
    """Fetch one model by id, then again by passing the model object itself."""
    model = client.models.get(MODEL_ID)  # lookup by id
    logger.debug("model_modelId: %s", model.modelId)
    assert model.modelId
    logger.debug("model_latestVersion: %s", model.latestVersion)
    assert model.latestVersion
    logger.debug("model_versions: %s", model.versions)
    assert len(model.versions) >= 0

    duplicate = client.models.get(model)  # lookup from an existing object
    assert model.modelId == duplicate.modelId
57 |
def test_get_model_by_name(client, logger):
    """Resolve a model through its human-readable name."""
    found = client.models.get_by_name("Military Equipment Classification")  # by name
    logger.debug("model_modelId: %s", found.modelId)
    assert found.modelId
    logger.debug("model_latestVersion: %s", found.latestVersion)
    assert found.latestVersion
    logger.debug("model_versions: %s", found.versions)
    assert len(found.versions) >= 0
66 |
67 |
def test_get_single_model_invalid(client, logger):
    """An unknown model identifier must surface as a NotFoundError (HTTP 404)."""
    # pytest.raises gives a clean assertion failure when no exception is
    # raised; the previous try/except left api_error as None and crashed with
    # an AttributeError on `api_error.message` instead.
    with pytest.raises(error.NotFoundError) as excinfo:
        client.models.get('notamodelidentifier')
    api_error = excinfo.value
    assert api_error.message
    assert api_error.url
    assert api_error.response.status_code == 404
77 |
78 |
def test_get_related_models(client, logger):
    """The related-model listing returns identifiable entries."""
    related = client.models.get_related(MODEL_ID)
    logger.debug("models related to sentiment-analysis: %s", len(related))
    for entry in related:
        logger.debug("model: %s", entry)
        assert entry.modelId
    assert len(related)  # just going to assume there should be some models
86 |
87 |
def test_model_sync(client, logger):
    """`sync()` must restore server-side state over a local mutation."""
    model = client.models.get(MODEL_ID)  # by id
    logger.debug("models sentiment-analysis: %s", model)
    server_value = model.latestVersion
    model.latestVersion = 'Not.The.Latest'  # clobber locally, then refresh
    model.sync()
    logger.debug("models sentiment-analysis sync: %s", model)
    assert model.latestVersion == server_value
96 |
97 |
def test_get_model_versions(client, logger):
    """Every version entry of the model carries a version string."""
    versions = client.models.get_versions(MODEL_ID)
    logger.debug("versions related to sentiment-analysis: %s", len(versions))
    for version in versions:
        logger.debug("version: %s", version)
        assert version.version
    # Bug fix: this previously asserted `len(version)` -- the last loop
    # variable -- which checked a single entry's key count and raised a
    # NameError when the listing was empty. The intent is to require at
    # least one version overall.
    assert len(versions)  # just going to assume there should be some versions
105 |
106 |
def test_get_model_version(client, logger):
    """A specific model version can be fetched directly."""
    details = client.models.get_version(MODEL_ID, '0.0.27')
    logger.debug("version: %s", details)
    assert details.version
111 |
112 |
def test_get_model_version_input_sample(client, logger):
    """The version's sample input payload is available and non-empty."""
    sample = client.models.get_version_input_sample(MODEL_ID, '0.0.27')
    logger.debug("version: %s", sample)
    assert sample
117 |
118 |
def test_get_model_version_output_sample(client, logger):
    """The version's sample output payload is available and non-empty."""
    sample = client.models.get_version_output_sample(MODEL_ID, '0.0.27')
    logger.debug("version: %s", sample)
    assert sample
123 |
124 |
def test_get_model_processing_details(client):
    """Fetching processing details for a known model version must not raise."""
    client.models.get_model_processing_details(MODEL_ID, "0.0.27")
127 |
128 |
def test_get_minimum_engines(client):
    """Querying the minimum-engine configuration must not raise."""
    client.models.get_minimum_engines()
131 |
132 |
def test_update_processing_state(client):
    """Engine bounds of (0, 1) are accepted without error."""
    client.models.update_processing_engines(MODEL_ID, "0.0.27", 0, 1)
135 |
136 |
def test_update_processing_state_wait(client):
    """Engine bounds of (2, 2) with an explicit None fifth argument are accepted."""
    # NOTE(review): the fifth argument is presumably a wait/timeout setting --
    # confirm against Models.update_processing_engines.
    client.models.update_processing_engines(MODEL_ID, "0.0.27", 2, 2, None)
139 |
140 |
@pytest.mark.parametrize("engine_min_max", [(100, 200), (1, 0)])
def test_processing_state_errors(client, engine_min_max):
    """Out-of-range or inverted engine bounds must raise ValueError."""
    low, high = engine_min_max
    with pytest.raises(ValueError):
        client.models.update_processing_engines(MODEL_ID, "0.0.27", low, high)
146 |
--------------------------------------------------------------------------------
/modzy/error.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Package specific exceptions."""
3 |
4 |
class Error(Exception):
    """Root of the Modzy exception hierarchy.

    Attributes:
        message (str): Human readable error description.
    """

    def __init__(self, message):
        """Initialize the error.

        Args:
            message (str): Human readable error description; coerced to `str`
                and stored on `self.message`.
        """
        super().__init__(message)
        self.message = str(message)
20 |
21 |
class ApiError(Error):
    """Raised for failures while communicating with the API.

    Attributes:
        message (str): Human readable error description.
        url (str): The API URL the request targeted.
        reason (Optional[Exception]): The underlying source exception, or None.
    """

    def __init__(self, message, url, reason=None):
        """Initialize the API error.

        Args:
            message (str): Human readable error description.
            url (str): The API URL the request targeted.
            reason (Optional[Exception]): The underlying source exception.
                Defaults to None.
        """
        super().__init__(message)
        self.url = url
        self.reason = reason
42 |
43 |
class NetworkError(ApiError):
    """Error connecting to the API over the network.

    Unlike `ResponseError`, no HTTP response is attached.
    """
46 |
47 |
class ResponseError(ApiError):
    """An HTTP error response returned by the API.

    Attributes:
        message (str): Human readable error description.
        url (str): The API URL.
        response (requests.Response): The requests `Response` object.
    """

    def __init__(self, message, url, response):
        """Initialize the response error.

        Args:
            message (str): Human readable error description.
            url (str): The API URL.
            response (requests.Response): The requests `Response` object.
        """
        super().__init__(message, url)
        self.response = response
67 |
68 |
class ClientError(ResponseError):  # 4xx
    """Base class for all HTTP 4xx Client Errors."""


class BadRequestError(ClientError):  # 400
    """HTTP 400 Bad Request Error.

    Raised if the client sends something that the API cannot or will not handle.
    """


class UnauthorizedError(ClientError):  # 401
    """HTTP 401 Unauthorized Error.

    Raised if the access key is not authorized to access the API.
    """


class ForbiddenError(ClientError):  # 403
    """HTTP 403 Forbidden Error.

    Raised if the access key doesn't have the permission for the requested action
    but was authenticated.
    """


class NotFoundError(ClientError):  # 404
    """HTTP 404 Not Found Error.

    Raised if a resource does not exist.
    """


class MethodNotAllowedError(ClientError):  # 405
    """HTTP 405 Method Not Allowed Error.

    Raised if the client used a method the API does not handle. For
    example `POST` if the resource is view only.
    """


class NotAcceptableError(ClientError):  # 406
    """HTTP 406 Not Acceptable Error.

    Raised if the API can't return any content conforming to the
    `Accept` headers of the client.
    """


class ConflictError(ClientError):  # 409
    """HTTP 409 Conflict Error.

    Raised to signal that a request cannot be completed because it conflicts
    with the current API state.
    """


class RequestEntityTooLargeError(ClientError):  # 413
    """HTTP 413 Request Entity Too Large Error.

    Raised if the data submitted exceeded the limit.
    """


class UnprocessableEntityError(ClientError):  # 422
    """HTTP 422 Unprocessable Entity Error.

    Raised if the API was able to understand the request but was unable
    to process the request.
    """


class ServerError(ResponseError):  # 5xx
    """Base class for all HTTP 5xx Server Errors."""


class InternalServerError(ServerError):  # 500
    """HTTP 500 Internal Server Error."""
147 |
148 |
# Maps the well-known HTTP status codes to their specific exception classes.
# Codes absent from this table fall back to the generic ClientError /
# ServerError / ResponseError buckets in `_create_response_error` below.
_response_error_classes = {
    400: BadRequestError,
    401: UnauthorizedError,
    403: ForbiddenError,
    404: NotFoundError,
    405: MethodNotAllowedError,
    406: NotAcceptableError,
    409: ConflictError,
    413: RequestEntityTooLargeError,
    422: UnprocessableEntityError,
    500: InternalServerError,
}
161 |
162 |
def _create_response_error(message, url, response):
    """Build the most specific `ResponseError` subclass for an HTTP response.

    Args:
        message (str): Human readable error description.
        url (str): The API URL.
        response (requests.Response): The response that triggered the error.

    Returns:
        ResponseError: An instance of the most specific matching error class.
    """
    status = response.status_code
    specific = _response_error_classes.get(status)
    if specific is not None:
        return specific(message, url, status is not None and response)
173 |
174 |
class ResultsError(Error):  # TODO: revisit this class name
    """Model run failed for a given input source."""


class Timeout(Error):
    """A blocking function timed out."""
181 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 |
2 | # Contributor Covenant Code of Conduct
3 |
4 | ## Our Pledge
5 |
6 | We as members, contributors, and leaders pledge to make participation in our
7 | community a harassment-free experience for everyone, regardless of age, body
8 | size, visible or invisible disability, ethnicity, sex characteristics, gender
9 | identity and expression, level of experience, education, socio-economic status,
10 | nationality, personal appearance, race, religion, or sexual identity
11 | and orientation.
12 |
13 | We pledge to act and interact in ways that contribute to an open, welcoming,
14 | diverse, inclusive, and healthy community.
15 |
16 | ## Our Standards
17 |
18 | Examples of behavior that contributes to a positive environment for our
19 | community include:
20 |
21 | * Demonstrating empathy and kindness toward other people
22 | * Being respectful of differing opinions, viewpoints, and experiences
23 | * Giving and gracefully accepting constructive feedback
24 | * Accepting responsibility and apologizing to those affected by our mistakes,
25 | and learning from the experience
26 | * Focusing on what is best not just for us as individuals, but for the
27 | overall community
28 |
29 | Examples of unacceptable behavior include:
30 |
31 | * The use of sexualized language or imagery, and sexual attention or
32 | advances of any kind
33 | * Trolling, insulting or derogatory comments, and personal or political attacks
34 | * Public or private harassment
35 | * Publishing others' private information, such as a physical or email
36 | address, without their explicit permission
37 | * Other conduct which could reasonably be considered inappropriate in a
38 | professional setting
39 |
40 | ## Enforcement Responsibilities
41 |
42 | Community leaders are responsible for clarifying and enforcing our standards of
43 | acceptable behavior and will take appropriate and fair corrective action in
44 | response to any behavior that they deem inappropriate, threatening, offensive,
45 | or harmful.
46 |
47 | Community leaders have the right and responsibility to remove, edit, or reject
48 | comments, commits, code, wiki edits, issues, and other contributions that are
49 | not aligned to this Code of Conduct, and will communicate reasons for moderation
50 | decisions when appropriate.
51 |
52 | ## Scope
53 |
54 | This Code of Conduct applies within all community spaces, and also applies when
55 | an individual is officially representing the community in public spaces.
56 | Examples of representing our community include using an official e-mail address,
57 | posting via an official social media account, or acting as an appointed
58 | representative at an online or offline event.
59 |
60 | ## Enforcement
61 |
62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
63 | reported to the community leaders responsible for enforcement at
64 | opensource@modzy.com.
65 | All complaints will be reviewed and investigated promptly and fairly.
66 |
67 | All community leaders are obligated to respect the privacy and security of the
68 | reporter of any incident.
69 |
70 | ## Enforcement Guidelines
71 |
72 | Community leaders will follow these Community Impact Guidelines in determining
73 | the consequences for any action they deem in violation of this Code of Conduct:
74 |
75 | ### 1. Correction
76 |
77 | **Community Impact**: Use of inappropriate language or other behavior deemed
78 | unprofessional or unwelcome in the community.
79 |
80 | **Consequence**: A private, written warning from community leaders, providing
81 | clarity around the nature of the violation and an explanation of why the
82 | behavior was inappropriate. A public apology may be requested.
83 |
84 | ### 2. Warning
85 |
86 | **Community Impact**: A violation through a single incident or series
87 | of actions.
88 |
89 | **Consequence**: A warning with consequences for continued behavior. No
90 | interaction with the people involved, including unsolicited interaction with
91 | those enforcing the Code of Conduct, for a specified period of time. This
92 | includes avoiding interactions in community spaces as well as external channels
93 | like social media. Violating these terms may lead to a temporary or
94 | permanent ban.
95 |
96 | ### 3. Temporary Ban
97 |
98 | **Community Impact**: A serious violation of community standards, including
99 | sustained inappropriate behavior.
100 |
101 | **Consequence**: A temporary ban from any sort of interaction or public
102 | communication with the community for a specified period of time. No public or
103 | private interaction with the people involved, including unsolicited interaction
104 | with those enforcing the Code of Conduct, is allowed during this period.
105 | Violating these terms may lead to a permanent ban.
106 |
107 | ### 4. Permanent Ban
108 |
109 | **Community Impact**: Demonstrating a pattern of violation of community
110 | standards, including sustained inappropriate behavior, harassment of an
111 | individual, or aggression toward or disparagement of classes of individuals.
112 |
113 | **Consequence**: A permanent ban from any sort of public interaction within
114 | the community.
115 |
116 | ## Attribution
117 |
118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119 | version 2.0, available at
120 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
121 |
122 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
123 | enforcement ladder](https://github.com/mozilla/diversity).
124 |
125 | [homepage]: https://www.contributor-covenant.org
126 |
127 | For answers to common questions about this code of conduct, see the FAQ at
128 | https://www.contributor-covenant.org/faq. Translations are available at
129 | https://www.contributor-covenant.org/translations.
130 |
131 |
--------------------------------------------------------------------------------
/CONTRIBUTING.adoc:
--------------------------------------------------------------------------------
1 | :doctype: article
2 | :icons: font
3 | :source-highlighter: highlightjs
4 | :docname: Python SDK
5 |
6 |
7 |
8 |
9 |
10 | ++++
11 |
12 |
13 |
14 |
Contributing to Modzy's Python SDK
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |

24 |
25 |

26 |
27 |

28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
47 |
48 |
49 |
50 |
51 |
52 |
53 | ++++
54 |
55 | Contributions are welcome and they are greatly appreciated! Every little bit helps, and credit will always be given.
56 |
57 | Below you'll find our contributing requirements, a step-by-step guideline, and our features roadmap.
58 |
59 |
60 | == Requirements
61 |
62 | - Search previous link:https://github.com/modzy/sdk-python/issues[issues] before making new ones to avoid duplicates.
63 | - If you are reporting a bug, please include:
64 | . Your operating system name and version.
65 | . Any details about your local setup that might be helpful in troubleshooting.
66 | . Detailed steps to reproduce the bug.
67 | - If you are proposing a feature:
68 | . Explain in detail how it would work.
69 | . Keep the scope as narrow as possible, to make it easier to implement.
70 | . Remember that this is a volunteer-driven project, and that contributions are welcome.
71 | - File an issue to let us know what you're working on.
72 | - Fork the repo, develop and test your code changes, and add supporting documentation.
73 | - Use descriptive comments throughout your code.
74 | - Add test cases and comment them.
75 | //- Check your spelling and grammar.
76 | - Use descriptive commit messages that clearly explain the changes. Reference the original issue in the pull request.
77 | - Make an individual pull request for each issue.
78 |
79 |
80 |
81 | == Guidelines
82 |
83 |
84 | === 1. Fork the repo and set it for local development
85 |
86 | Clone the repository:
87 |
88 | - `$ git clone https://github.com/modzy/sdk-python.git`
89 |
90 | Setup a virtual environment from the local git directory:
91 |
92 | - `$ conda create --name VIRTUAL_ENVIRON_NAME --file requirements_dev.txt -c conda-forge python=3.9`
93 | or for non-conda python distros:
94 | - `$ python3 -m venv /path/to/VIRTUAL_ENVIRON_NAME`
95 |
96 | Activate the virtual environment:
97 |
98 | - `$ conda activate VIRTUAL_ENVIRON_NAME`
99 |
100 | or for non-conda python distros there are different commands for Linux vs. Windows.
101 |
102 | On Linux use source to activate
103 |
104 | - `$ source /path/to/VIRTUAL_ENVIRON_NAME/bin/activate`
105 |
106 | On Windows run the activate.bat file
107 |
108 | - `C:>\path\to\VIRTUAL_ENVIRON_NAME\Scripts\activate.bat`
109 |
110 |
111 | Install dependencies (if not using conda):
112 |
113 | - `$ pip3 install -r requirements_dev.txt`
114 |
115 |
116 | Create a branch for your awesome new feature:
117 |
118 | - `$ git checkout -b my-awesome-new-feature`
119 |
120 |
121 | === 2. Develop your changes
122 |
123 | Fix that bug or build your feature.
124 |
125 | === 3. Run unit tests
126 |
127 | Configure the environment variables to easily run all the tests with a single configuration point.
128 | Note that the environment variables must be set before opening the terminal or program that you will be running your tests from. Environmental variables do not update on the fly.
129 |
130 | You can:
131 |
132 | ==== Set environment variables in bash
133 | There are 2 environmental variables that the test scripts will need.
134 | MODZY_BASE_URL: This variable holds the network address of the Modzy installation. Notice the '/api' appended to the end of the URL. This is mandatory and omitting it will cause errors.
135 | MODZY_API_KEY: This variable holds a full copy of a modzy API key. Modzy's role based access controls will cause some of the tests to fail if your key doesn't have all the roles assigned.
136 | ===== Windows
137 |
138 | [source,bash]
139 | ----
140 | set MODZY_BASE_URL=https://modzy.example.com/api
141 | set MODZY_API_KEY=
142 | ----
143 |
144 | ===== Unix
145 |
146 | [source,bash]
147 | ----
148 | export MODZY_BASE_URL=https://modzy.example.com/api
149 | export MODZY_API_KEY=
150 | ----
151 |
152 | ==== Use `.env` file
153 |
154 | Create a `.env` file in the root folder with your API key:
155 |
156 | [source,python]
157 | ----
158 | MODZY_BASE_URL=https://modzy.example.com/api
159 | MODZY_API_KEY=
160 | ----
161 |
162 | Run tests:
163 |
164 | - `$ py.test`
165 |
166 | Or specify the test that you want to run:
167 |
168 | - `$ py.test tests/test_client.py`
169 |
170 | === 4. Document your changes
171 |
172 | Add supporting documentation for your code.
173 |
174 | //what else would be useful for maintainers?
175 |
176 | === 5. Send a pull request
177 |
178 | Add and commit your changes:
179 |
180 | - `git add .`
181 |
182 | - `$ git commit -m "A descriptive message"`
183 |
184 | Push your branch to GitHub:
185 |
186 | - `$ git push origin my-new-awesome-feature`
187 |
188 | Initiate a Pull Request:
189 |
190 | If your PR doesn't pass all tests due to role based access controls with your key, please provide log information so that we may test the PR under appropriate conditions.
191 |
192 |
193 |
194 |
195 | //== Code of conduct
196 |
197 |
198 |
199 | == Roadmap
200 |
201 | - Documentation improvement.
202 | - Comprehensive unit tests.
203 | - Wider API coverage (custom models, accounting, audit, etc).
204 | - [Maybe] Add retry logic for possibly transient issues.
205 | - [Maybe] Consider moving to concrete classes for the API JSON - objects, or else move the ApiObject to a public module.
206 | - [Maybe] Python 2.7 compatibility.
207 |
208 |
209 |
210 |
211 | == Support
212 |
213 | Use GitHub to report bugs and send feature requests. +
214 | Reach out to https://www.modzy.com/support/ for support requests.
215 |
--------------------------------------------------------------------------------
/tests/test_jobs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import logging
5 | import os
6 | import dotenv
7 | import pytest
8 | import time
9 | from datetime import datetime, timedelta
10 | from modzy import ApiClient, error
11 | from modzy.jobs import Jobs
12 |
dotenv.load_dotenv()

# Connection settings for the target Modzy instance, taken from the
# environment (dotenv also loads a local `.env` file if present).
BASE_URL = os.getenv('MODZY_BASE_URL')  # e.g. https://modzy.example.com/api
API_KEY = os.getenv('MODZY_API_KEY')

MODEL_ID = 'ed542963de'  # sentiment-analysis
19 |
20 |
@pytest.fixture()
def client():
    """Provide an `ApiClient` wired to the instance configured via env vars."""
    api_client = ApiClient(base_url=BASE_URL, api_key=API_KEY)
    return api_client
24 |
25 |
@pytest.fixture()
def logger():
    """Provide a module-named logger for test diagnostics."""
    log = logging.getLogger(__name__)
    return log
29 |
30 |
def test_get_job_history(client, logger):
    """The unfiltered job history returns fully populated records."""

    def check(job):
        # Every history record must identify the job, its submitter, its
        # model (id, version, name), and its current status.
        assert job.job_identifier
        assert job.submitted_by
        assert job.model
        assert job.model.identifier
        assert job.model.version
        assert job.model.name
        assert job.status

    jobs = client.jobs.get_history()
    logger.debug("jobs history: %d", len(jobs))
    for job in jobs:
        logger.debug("job: %s", job)
        check(job)
    assert len(jobs)
44 |
45 |
def test_get_job_history_by_user(client, logger):
    """History filtered by (partial) user name still yields complete records."""
    params = {'user': 'a'}
    jobs = client.jobs.get_history(**params)
    logger.debug("jobs history: by %s %d", params, len(jobs))
    for job in jobs:
        for field in (job.job_identifier, job.submitted_by, job.model,
                      job.model.identifier, job.model.version,
                      job.model.name, job.status):
            assert field
    assert len(jobs)
59 |
60 |
def test_get_job_history_by_access_key(client, logger):
    """History can be filtered by the public part of the API key (before the dot)."""
    params = {'access_key': API_KEY.split('.')[0]}
    jobs = client.jobs.get_history(**params)
    logger.debug("jobs history: by %s %d", params, len(jobs))
    for job in jobs:
        assert job.job_identifier
        assert job.submitted_by
        model = job.model
        assert model
        assert model.identifier
        assert model.version
        assert model.name
        assert job.status
    assert len(jobs)
74 |
75 |
def test_get_job_history_by_date(client, logger):
    """Date filters: valid ranges return jobs; invalid ones raise ApiError."""
    # by start date
    params = {'start_date': datetime.now() - timedelta(weeks=1)}
    jobs = client.jobs.get_history(**params)
    logger.debug("jobs history: by %s %d", params, len(jobs))
    assert len(jobs)
    # by end date alone -- the API rejects this (should return a 400).
    # pytest.raises replaces the try/except + assert pattern and fails with a
    # clear message if no exception is raised at all.
    params = {'end_date': datetime.now()}
    with pytest.raises(error.ApiError) as excinfo:
        client.jobs.get_history(**params)
    logger.debug("jobs history: by %s %s", params, excinfo.value)
    # by start and end date
    params = {'start_date': datetime(2019, 1, 1), 'end_date': datetime.now()}
    jobs = client.jobs.get_history(**params)
    logger.debug("jobs history: by %s %d", params, len(jobs))
    assert len(jobs)
    # by invalid (future) start date -- also rejected by the API
    params = {'start_date': datetime.now() + timedelta(days=1)}
    with pytest.raises(error.ApiError) as excinfo:
        client.jobs.get_history(**params)
    logger.debug("jobs history: by %s %s", params, excinfo.value)
105 |
106 |
def test_get_job_history_by_model(client, logger):
    """History can be filtered by model name."""
    # by model (duplicated logger.debug line removed)
    params = {'model': 'Sentiment Analysis'}
    jobs = client.jobs.get_history(**params)
    logger.debug("jobs history: by %s %d", params, len(jobs))
    assert len(jobs)
114 |
def test_get_job_history_by_status(client, logger):
    """History filtering by the status values all / pending / terminated."""

    def fetch(status):
        params = {'status': status}
        jobs = client.jobs.get_history(**params)
        logger.debug("jobs history: by %s %d", params, len(jobs))
        assert len(jobs)

    fetch("all")
    # Submit a job first so at least one is in flight before querying
    # "pending"; the short sleep lets the platform register it.
    client.jobs.submit_text(MODEL_ID, '0.0.27', {'input.txt': 'Modzy is great!'})
    time.sleep(5)
    fetch("pending")
    fetch("terminated")
133 |
134 |
def test_get_job_history_by_sort(client, logger):
    """Sorting by status works with the default, asc, and desc directions."""
    variants = (
        {'sort_by': "status"},                        # default direction
        {'sort_by': "status", 'direction': "asc"},
        {'sort_by': "status", 'direction': "desc"},
    )
    for params in variants:
        jobs = client.jobs.get_history(**params)
        logger.debug("jobs history: by %s %d", params, len(jobs))
        assert len(jobs)
151 |
152 |
def test_submit_job(client, logger):
    """A freshly submitted text job reports the SUBMITTED status."""
    submitted = client.jobs.submit_text(MODEL_ID, '0.0.27', {'input.txt': 'Modzy is great!'})
    logger.debug("job %s", submitted)
    assert submitted.job_identifier
    assert submitted.status == client.jobs.status.SUBMITTED
158 |
159 |
def test_get_job(client, logger):
    """A submitted job can be re-fetched through its identifier."""
    submitted = client.jobs.submit_text(MODEL_ID, '0.0.27', {'input.txt': 'Modzy is great!'})
    logger.debug("job %s", submitted)
    time.sleep(5)  # give the platform a moment before fetching
    fetched = client.jobs.get(submitted.job_identifier)  # by id
    logger.debug("job copy by id %s", fetched)
    assert fetched.job_identifier
    assert fetched.status
168 |
169 |
def test_cancel_job(client, logger):
    """A multi-input job that has not yet completed can be canceled."""
    job = client.jobs.submit_text(MODEL_ID, '0.0.27', {
        str(i): {'input.txt': 'Modzy is great!'}
        for i in range(2)
    })
    time.sleep(5)
    job = client.jobs.get(job.job_identifier)  # by id
    logger.debug("job %s", job)
    # Consistency fix: read the status constants off `client.jobs` as every
    # other test in this file does, rather than the separately imported Jobs
    # class.
    if job.status != client.jobs.status.COMPLETED:
        logger.debug("job before cancel %s", job)
        job = client.jobs.cancel(job.job_identifier)
        logger.debug("job after cancel %s", job)

    assert job.status == client.jobs.status.CANCELED or job.status == client.jobs.status.COMPLETED
184 |
185 |
--------------------------------------------------------------------------------
/samples/job_with_text_input_sample.py:
--------------------------------------------------------------------------------
import sys
import os
import logging
import dotenv

from modzy.error import ResultsError
from modzy.jobs import Jobs

sys.path.insert(0, '..')
from modzy import ApiClient


# Always configure the logger level (ie: DEBUG, INFO, WARNING, ERROR, CRITICAL)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# The system admin can provide the right base API URL, the API key can be downloaded from your profile page on Modzy.
# You can configure those params as is described in the README file (as environment variables, or by using the .env file),
# or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).

dotenv.load_dotenv()

# The MODZY_BASE_URL should point to the API services route which may be different from the Modzy page URL.
# (ie: https://modzy.example.com/api).
BASE_URL = os.getenv('MODZY_BASE_URL')
# The MODZY_API_KEY is your own personal API key. It is composed by a public part, a dot character, and a private part
# (ie: AzQBJ3h4B1z60xNmhAJF.uQyQh8putLIRDi1nOldh).
API_KEY = os.getenv('MODZY_API_KEY')

# Client initialization:
# Initialize the ApiClient instance with the BASE_URL and the API_KEY to store those arguments
# for the following API calls.
client = ApiClient(base_url=BASE_URL, api_key=API_KEY)

# Create a Job with a text input, wait, and retrieve results:

# Get the model object:
# If you already know the model identifier (i.e.: you got it from the URL of the model details page or from the input sample),
# you can skip this step. If you don't, you can find the model identifier by using its name as follows:
model = client.models.get_by_name("Sentiment Analysis")
# Or if you already know the model id and want to know more about the model, you can use this instead:
# model = client.models.get("ed542963de")
# You can find more information about how to query the models on the model_sample.py file.

# The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
# logger.info(", ".join("{} :: {}".format(key, value) for key, value in model.items()))
# Or just log the model identifier and the latest version
logger.info("The model identifier is {} and the latest version is {}".format(model.modelId, model.latest_version))

# Get the model version object:
# If you already know the model version and the input key(s) of the model version you can skip this step. Also, you can
# use the following code block to know about the inputs keys and skip the call on future job submissions.
modelVersion = client.models.get_version(model, model.latest_version)
# The info stored in modelVersion provides insights about the amount of time that the model can spend processing, the inputs, and
# output keys of the model.
logger.info("This model version is {}".format(modelVersion))
logger.info(" timeouts: status {}ms, run {}ms ".format(modelVersion.timeout.status, modelVersion.timeout.run))
logger.info(" inputs: ")
# Loop variables renamed so they don't shadow the `input` builtin.
for model_input in modelVersion.inputs:
    logger.info("  key {}, type {}, description: {}".format(model_input.name, model_input.acceptedMediaTypes, model_input.description))
logger.info(" outputs: ")
for model_output in modelVersion.outputs:
    logger.info("  key {}, type {}, description: {}".format(model_output.name, model_output.mediaType, model_output.description))

# Send the job:
# With the info about the model (identifier), the model version (version string, input/output keys), you are ready to
# submit the job. Just prepare the source dictionary:
sources = {"source-key": {"input.txt": "Modzy is great!"}}
# An inference job groups input data that you send to a model. You can send any amount of inputs to
# process and you can identify and refer to a specific input by the key that you assign, for example we can add:
sources["second-key"] = {"input.txt": "Sometimes I really hate ribs"}
sources["another-key"] = {"input.txt": "Born and raised in Pennsylvania, Swift moved to Nashville, Tennessee, at the age of 14 to pursue a career in country music"}
# If you send a wrong input key, the model fails to process the input.
sources["wrong-key"] = {"a.wrong.key": "This input is wrong!"}
# When you have all your inputs ready, you can use our helper method to submit the job as follows:
job = client.jobs.submit_text(model.modelId, modelVersion.version, sources)
# Modzy creates the job and queue for processing. The job object contains all the info that you need to keep track
# of the process, the most important being the job identifier and the job status.
logger.info("job: %s", job)
# The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
# We provide a helper method to listen until the job finishes processing. Its a good practice to set a max timeout
# if you're doing a test (ie: 2*status+run). Otherwise, if the timeout is set to None, it will listen until the job finishes and moves to
# COMPLETED, CANCELED, or TIMEOUT.
job.block_until_complete(timeout=None)

# Get the results:
# Check the status of the job. Jobs may be canceled or may reach a timeout.
if job.status == Jobs.status.COMPLETED:
    # A completed job means that all the inputs were processed by the model. Check the results for each
    # input key provided in the source dictionary to see the model output.
    result = job.get_result()
    # The result object has some useful info:
    logger.info("Result: finished: {}, total: {}, completed: {}, failed: {}".format(result.finished, result.total, result.completed, result.failed))
    # Notice that we are iterating through the same input source keys
    for key in sources:
        # The result object has the individual results of each job input. In this case the output key is called
        # results.json, so we can get the results as follows:
        try:
            model_res = result.get_source_outputs(key)['results.json']
            # The output for this model comes in a JSON format, so we can directly log the model results.
            # Inner generator variables renamed so they don't shadow the outer `key`.
            logger.info(
                " {}: ".format(key) + ", ".join("{}: {}".format(res_key, res_value) for res_key, res_value in model_res.items()))
        except ResultsError as failure:
            # If the model raises an error, we can get the specific error message:
            logger.warning(" {}: failure: {}".format(key, failure))

else:
    logger.warning("The job ends with status {}".format(job))
109 |
--------------------------------------------------------------------------------
/modzy/edge/proto/inferences/api/v1/inferences_pb2_grpc.py:
--------------------------------------------------------------------------------
1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 | """Client and server classes corresponding to protobuf-defined services."""
3 | import grpc
4 |
5 | from . import inferences_pb2 as protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2
6 |
7 |
class InferenceServiceStub(object):
    """Client-side stub for the inferences.api.v1.InferenceService gRPC service.

    Exposes one callable per RPC: ``PerformInference`` and
    ``GetInferenceDetails`` (unary-unary) and ``StreamInferences``
    (bidirectional streaming).
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary RPC: send one InferenceRequest, receive one Inference.
        self.PerformInference = channel.unary_unary(
                '/inferences.api.v1.InferenceService/PerformInference',
                request_serializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceRequest.SerializeToString,
                response_deserializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.FromString,
                )
        # Unary RPC: look up an existing Inference by its InferenceIdentifier.
        self.GetInferenceDetails = channel.unary_unary(
                '/inferences.api.v1.InferenceService/GetInferenceDetails',
                request_serializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceIdentifier.SerializeToString,
                response_deserializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.FromString,
                )
        # Bidirectional streaming RPC: a stream of requests yields a stream of Inference messages.
        self.StreamInferences = channel.stream_stream(
                '/inferences.api.v1.InferenceService/StreamInferences',
                request_serializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceRequest.SerializeToString,
                response_deserializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.FromString,
                )
32 |
33 |
class InferenceServiceServicer(object):
    """Server-side interface for the inferences.api.v1.InferenceService gRPC service.

    Subclass and override the methods below; each default implementation
    answers the call with the UNIMPLEMENTED status code.
    """

    def PerformInference(self, request, context):
        """Handles a single InferenceRequest; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetInferenceDetails(self, request, context):
        """Returns details for one inference; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StreamInferences(self, request_iterator, context):
        """Handles a stream of InferenceRequests; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
54 |
55 |
def add_InferenceServiceServicer_to_server(servicer, server):
    """Registers *servicer*'s three RPC handlers on *server* under the
    inferences.api.v1.InferenceService service name."""
    rpc_method_handlers = {
            'PerformInference': grpc.unary_unary_rpc_method_handler(
                    servicer.PerformInference,
                    request_deserializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceRequest.FromString,
                    response_serializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.SerializeToString,
            ),
            'GetInferenceDetails': grpc.unary_unary_rpc_method_handler(
                    servicer.GetInferenceDetails,
                    request_deserializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceIdentifier.FromString,
                    response_serializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.SerializeToString,
            ),
            'StreamInferences': grpc.stream_stream_rpc_method_handler(
                    servicer.StreamInferences,
                    request_deserializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceRequest.FromString,
                    response_serializer=protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'inferences.api.v1.InferenceService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
77 |
78 |
# This class is part of an EXPERIMENTAL API.
class InferenceService(object):
    """Stubless invocation helpers for the inferences.api.v1.InferenceService service.

    Each static method performs the corresponding RPC through
    ``grpc.experimental`` without requiring a pre-built channel and stub.
    """

    @staticmethod
    def PerformInference(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/inferences.api.v1.InferenceService/PerformInference',
            protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceRequest.SerializeToString,
            protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetInferenceDetails(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/inferences.api.v1.InferenceService/GetInferenceDetails',
            protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceIdentifier.SerializeToString,
            protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def StreamInferences(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.stream_stream(request_iterator, target, '/inferences.api.v1.InferenceService/StreamInferences',
            protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.InferenceRequest.SerializeToString,
            protos_dot_modzy_dot_inferences_dot_api_dot_v1_dot_inferences__pb2.Inference.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
133 |
--------------------------------------------------------------------------------
/modzy/http.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """The HTTP client implementation."""
3 |
4 | import json
5 | import logging
6 | from urllib.parse import urlparse
7 |
8 | import requests
9 |
10 | from ._api_object import ApiObject
11 | from .error import NetworkError, _create_response_error
12 |
13 |
14 | def _url_is_absolute(url):
15 | return bool(urlparse(url).netloc)
16 |
17 |
18 | def _urlappend(base, url):
19 | if _url_is_absolute(url):
20 | return url
21 | if not base.endswith('/'):
22 | base = base + '/'
23 | return base + url.lstrip('/')
24 |
25 |
class HttpClient:
    """The HTTP Client object.

    This object is responsible for making the actual HTTP requests to the API. User code
    should generally not need to directly access this object.

    This class should not be instantiated directly but rather accessed through the `http`
    attribute of an `ApiClient` instance.

    Attributes:
        session (requests.Session): The requests `Session` used to make HTTP requests.
    """

    def __init__(self, api_client, session=None):
        """Creates an `HttpClient` instance.

        Args:
            api_client (ApiClient): An `ApiClient` instance.
            session (Optional[requests.Session]): A requests `Session` used to make HTTP requests.
                If None is specified one will be created. Defaults to None.
        """
        self._api_client = api_client
        self.session = session if session is not None else requests.Session()
        self.logger = logging.getLogger(__name__)

    def request(self, method, url, json_data=None, file_data=None, params=None):
        """Sends an HTTP request.

        The client's API key will automatically be used for authentication.

        Args:
            method (str): The HTTP method for the request.
            url (str): URL to request. Relative URLs are resolved against the client's base URL.
            json_data (Optional[Any]): JSON serializeable object to include in the request body.
            file_data (Optional[Any]): Dictionary to be submitted as files part of the request.
            params (Optional[dict]): Query string parameters to append to the URL.

        Returns:
            dict: JSON object deserialized from the response body.

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """

        url = _urlappend(self._api_client.base_url, url)

        # Serialize the body whenever a JSON payload was supplied -- including
        # empty containers -- so the body stays consistent with the
        # Content-Type header set below (previously `if json_data:` dropped the
        # body for `{}`/`[]` while the header was still sent).
        if json_data is not None:
            data = json.dumps(json_data).encode('utf-8')
        else:
            data = None

        headers = {'Accept': 'application/json'}
        if self._api_client.api_key:  # will there be any endpoints that don't need an api key?
            headers['Authorization'] = 'ApiKey {}'.format(self._api_client.api_key)
        if json_data is not None:
            headers['Content-Type'] = 'application/json'
        self.logger.debug("%s: %s - [%s]", method, url, self._api_client.cert)

        try:
            response = self.session.request(method, url, data=data, headers=headers, files=file_data, verify=self._api_client.cert, params=params)
            self.logger.debug("response %s - length %s", response.status_code, len(response.content))
        except requests.exceptions.RequestException as ex:
            self.logger.exception('unable to make network request')
            raise NetworkError(str(ex), url, reason=ex)

        try:
            json_data = json.loads(response.content.decode('utf-8'), object_hook=ApiObject)
        except ValueError:
            # A non-empty body that fails to parse is flagged as invalid (None);
            # an empty body is treated as an empty JSON object.
            if len(response.content) > 0:
                json_data = None
            else:
                json_data = {}

        if not (200 <= response.status_code < 300):
            # Prefer the API-provided error message when the body parsed to a mapping.
            message = None
            if hasattr(json_data, 'get'):
                message = json_data.get('message')
            if not message:
                message = 'HTTP Error {}: {}'.format(response.status_code, response.reason)
            raise _create_response_error(str(message), url, response)

        if json_data is None:
            # will our API *always* return JSON? may change in future
            # do we need a different Exception class for malformed body?
            raise _create_response_error('API did not return valid JSON.', url, response)

        return json_data

    def get(self, url):
        """Sends a GET request.

        Args:
            url (str): URL to request.

        Returns:
            dict: JSON object.

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        return self.request('GET', url)

    def post(self, url, json_data=None, file_data=None, params=None):
        """Sends a POST request.

        Args:
            url (str): URL to request.
            json_data (Optional[dict]): JSON to include in the request body.
            file_data (Optional[Any]): Dictionary to be submitted as files part of the request.
            params (Optional[dict]): Query string parameters to append to the URL.

        Returns:
            dict: JSON object.

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        return self.request('POST', url, json_data=json_data, file_data=file_data, params=params)

    def patch(self, url, json_data=None):
        """Sends a PATCH request.

        Args:
            url (str): URL to request.
            json_data (Optional[dict]): JSON to include in the request body.

        Returns:
            dict: JSON object.

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        return self.request('PATCH', url, json_data=json_data)

    def put(self, url, json_data=None):
        """Sends a PUT request.

        Args:
            url (str): URL to request.
            json_data (Optional[dict]): JSON to include in the request body.

        Returns:
            dict: JSON object.

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        return self.request('PUT', url, json_data=json_data)

    def delete(self, url, json_data=None):
        """Sends a DELETE request.

        Args:
            url (str): URL to request.
            json_data (Optional[dict]): JSON to include in the request body.

        Returns:
            dict: JSON object.

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        return self.request('DELETE', url, json_data=json_data)
192 |
--------------------------------------------------------------------------------
/samples/job_with_file_input_sample.py:
--------------------------------------------------------------------------------
import json
import pathlib
import sys
import os
import logging
import dotenv

from modzy.error import ResultsError
from modzy.jobs import Jobs

sys.path.insert(0, '..')
from modzy import ApiClient
from modzy._util import file_to_bytes


# Always configure the logger level (ie: DEBUG, INFO, WARNING, ERROR, CRITICAL)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# The system admin can provide the right base API URL, the API key can be downloaded from your profile page on Modzy.
# You can configure those params as is described in the README file (as environment variables, or by using the .env file),
# or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).

dotenv.load_dotenv()

# The MODZY_BASE_URL should point to the API services route which may be different from the Modzy page URL.
# (ie: https://modzy.example.com/api).
BASE_URL = os.getenv('MODZY_BASE_URL')
# The MODZY_API_KEY is your own personal API key. It is composed by a public part, a dot character, and a private part
# (ie: AzQBJ3h4B1z60xNmhAJF.uQyQh8putLIRDi1nOldh).
API_KEY = os.getenv('MODZY_API_KEY')

# Client initialization:
# Initialize the ApiClient instance with the BASE_URL and the API_KEY to store those arguments
# for the following API calls.
client = ApiClient(base_url=BASE_URL, api_key=API_KEY)

# Create a Job with a file input, wait, and retrieve results:

# Get the model object:
# If you already know the model identifier (i.e.: you got it from the URL of the model details page or from the input sample),
# you can skip this step. If you don't, you can find the model identifier by using its name as follows:
model = client.models.get_by_name("Multi-Language OCR")
# Or if you already know the model id and want to know more about the model, you can use this instead:
# model = client.models.get("c60c8dbd79")
# You can find more information about how to query the models on the model_sample.py file.

# The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
# logger.info(", ".join("{} :: {}".format(key, value) for key, value in model.items()))
# Or just log the model identifier and the latest version
logger.info("The model identifier is {} and the latest version is {}".format(model.modelId, model.latest_version))

# Get the model version object:
# If you already know the model version and the input key(s) of the model version you can skip this step. Also, you can
# use the following code block to know about the inputs keys and skip the call on future job submissions.
modelVersion = client.models.get_version(model, model.latest_version)
# The info stored in modelVersion provides insights about the amount of time that the model can spend processing,
# the inputs, and output keys of the model.
logger.info("This model version is {}".format(modelVersion))
logger.info(" timeouts: status {}ms, run {}ms ".format(modelVersion.timeout.status, modelVersion.timeout.run))
logger.info(" inputs: ")
# Loop variables renamed so they don't shadow the `input` builtin.
for model_input in modelVersion.inputs:
    logger.info("  key {}, type {}, description: {}".format(model_input.name, model_input.acceptedMediaTypes, model_input.description))
logger.info(" outputs: ")
for model_output in modelVersion.outputs:
    logger.info("  key {}, type {}, description: {}".format(model_output.name, model_output.mediaType, model_output.description))

# Send the job:
# A file input can be a byte array or any file path. This input type fits for any size files.
image_path = pathlib.Path('./samples/image.png')
config_path = pathlib.Path('./samples/config.json')
# With the info about the model (identifier), the model version (version string, input/output keys), you are ready to
# submit the job. Just prepare the source dictionary:
sources = {"source-key": {"input": image_path.resolve(), "config.json": config_path.resolve()}}
# An inference job groups input data that you send to a model. You can send any amount of inputs to
# process and you can identify and refer to a specific input by the key that you assign, for example we can add:
sources["second-key"] = {"input": image_path, "config.json": config_path}
# You don't need to load all the inputs from files, you can just convert the files to bytes as follows:
image_bytes = file_to_bytes(image_path.resolve())
config_bytes = json.dumps({"languages":["spa"]}).encode('utf-8')
sources["another-key"] = {"input": image_bytes, "config.json": config_bytes}
# If you send a wrong input key, the model fails to process the input.
sources["wrong-key"] = {"a.wrong.key": image_bytes, "config.json":config_bytes}
# If you send a correct input key but some wrong values, the model fails too.
sources["wrong-value"] = {"input": config_bytes, "config.json":image_bytes}
# When you have all your inputs ready, you can use our helper method to submit the job as follows:
job = client.jobs.submit_file(model.modelId, modelVersion.version, sources)
# Modzy creates the job and queue for processing. The job object contains all the info that you need to keep track
# of the process, the most important being the job identifier and the job status.
logger.info("job: %s", job)
# The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
# We provide a helper method to listen until the job finishes processing. Its a good practice to set a max timeout
# if you're doing a test (ie: 2*status+run). Otherwise, if the timeout is set to None, it will listen until the job
# finishes and moves to COMPLETED, CANCELED, or TIMEOUT.
job.block_until_complete(timeout=None)

# Get the results:
# Check the status of the job. Jobs may be canceled or may reach a timeout.
if job.status == Jobs.status.COMPLETED:
    # A completed job means that all the inputs were processed by the model. Check the results for each
    # input key provided in the source dictionary to see the model output.
    result = job.get_result()
    # The result object has some useful info:
    logger.info("Result: finished: {}, total: {}, completed: {}, failed: {}"
                .format(result.finished, result.total, result.completed, result.failed))
    # Notice that we are iterating through the same input source keys
    for key in sources:
        # The result object has the individual results of each job input. In this case the output key is called
        # results.json, so we can get the results as follows:
        try:
            model_res = result.get_source_outputs(key)['results.json']
            # The output for this model comes in a JSON format, so we can directly log the model results.
            # Inner generator variables renamed so they don't shadow the outer `key`.
            logger.info(
                " {}: ".format(key) + ", ".join(
                    "{}: {}".format(res_key, str(res_value).replace('\n', '')) for res_key, res_value in model_res.items()))
        except ResultsError as failure:
            # If the model raises an error, we can get the specific error message:
            logger.warning(" {}: failure: {}".format(key, failure))

else:
    logger.warning("The job ends with status {}".format(job))
122 |
--------------------------------------------------------------------------------
/samples/job_with_embedded_input_sample.py:
--------------------------------------------------------------------------------
1 | import json
2 | import sys
3 | import os
4 | import logging
5 | import dotenv
6 |
7 | from modzy.error import ResultsError
8 | from modzy.jobs import Jobs
9 |
10 | sys.path.insert(0, '..')
11 | from modzy import ApiClient
12 | from modzy._util import file_to_bytes
13 |
14 |
15 | # Always configure the logger level (ie: DEBUG, INFO, WARNING, ERROR, CRITICAL)
16 | logging.basicConfig(level=logging.INFO)
17 | logger = logging.getLogger(__name__)
18 |
19 | # The system admin can provide the right base API URL, the API key can be downloaded from your profile page on Modzy.
20 | # You can configure those params as is described in the README file (as environment variables, or by using the .env file),
21 | # or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).
22 |
23 | dotenv.load_dotenv()
24 |
25 | # The MODZY_BASE_URL should point to the API services route which may be different from the Modzy page URL.
26 | # (ie: https://modzy.example.com/api).
27 | BASE_URL = os.getenv('MODZY_BASE_URL')
28 | # The MODZY_API_KEY is your own personal API key. It is composed by a public part, a dot character, and a private part
29 | # (ie: AzQBJ3h4B1z60xNmhAJF.uQyQh8putLIRDi1nOldh).
30 | API_KEY = os.getenv('MODZY_API_KEY')
31 |
32 | # Client initialization:
33 | # Initialize the ApiClient instance with the BASE_URL and the API_KEY to store those arguments
34 | # for the following API calls.
35 | client = ApiClient(base_url=BASE_URL, api_key=API_KEY)
36 |
37 | # Create a Job with a embedded input, wait and retrieve results:
38 |
39 | # Get the model object:
40 | # If you already know the model identifier (i.e.: you got it from the URL of the model details page or from the input sample),
41 | # you can skip this step. If you don't, you can find the model identifier by using its name as follows:
42 | model = client.models.get_by_name("Multi-Language OCR")
43 | # Or if you already know the model id and want to know more about the model, you can use this instead:
44 | # model = client.models.get("c60c8dbd79")
45 | # You can find more information about how to query the models on the model_sample.py file.
46 |
47 | # The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
48 | # logger.info(", ".join("{} :: {}".format(key, value) for key, value in model.items()))
49 | # Or just log the model identifier and the latest version
50 | logger.info("The model identifier is {} and the latest version is {}".format(model.modelId, model.latest_version))
51 |
52 | # Get the model version object:
53 | # If you already know the model version and the input key(s) of the model version you can skip this step. Also, you can
54 | # use the following code block to know about the inputs keys and skip the call on future job submissions.
55 | modelVersion = client.models.get_version(model, model.latest_version)
56 | # The info stored in modelVersion provides insights about the amount of time that the model can spend processing,
57 | # the inputs, and output keys of the model.
58 | logger.info("This model version is {}".format(modelVersion))
59 | logger.info(" timeouts: status {}ms, run {}ms ".format(modelVersion.timeout.status, modelVersion.timeout.run))
60 | logger.info(" inputs: ")
61 | for input in modelVersion.inputs:
62 | logger.info(" key {}, type {}, description: {}".format(input.name, input.acceptedMediaTypes, input.description))
63 | logger.info(" outputs: ")
64 | for output in modelVersion.outputs:
65 | logger.info(" key {}, type {}, description: {}".format(output.name, output.mediaType, output.description))
66 |
67 | # Send the job:
68 | # An embedded input is a byte array encoded as a string in Base64. This input type comes very handy for small to middle size files. However,
69 | # it requires to load and encode files in memory which can be an issue for larger files, use submit_files instead.
70 | image_bytes = file_to_bytes('../samples/image.png')
71 | config_bytes = file_to_bytes('../samples/config.json')
72 | # With the info about the model (identifier), the model version (version string, input/output keys), you are ready to
73 | # submit the job. Just prepare the source dictionary:
74 | sources = {"source-key": {"input": image_bytes, "config.json":config_bytes}}
75 | # An inference job groups input data that you send to a model. You can send any amount of inputs to
76 | # process and you can identify and refer to a specific input by the key that you assign, for example we can add:
77 | sources["second-key"] = {"input": image_bytes, "config.json":config_bytes}
78 | # You don't need to load all the inputs from files, you can just convert the files to bytes as follows:
79 | config_bytes = json.dumps({"languages":["spa"]}).encode('utf-8')
80 | sources["another-key"] = {"input": image_bytes, "config.json":config_bytes}
81 | # If you send a wrong input key, the model fails to process the input.
82 | sources["wrong-key"] = {"a.wrong.key": image_bytes, "config.json":config_bytes}
83 | # If you send a correct input key but some wrong values, the model fails too.
84 | sources["wrong-value"] = {"input": config_bytes, "config.json":image_bytes}
85 | # When you have all your inputs ready, you can use our helper method to submit the job as follows:
86 | job = client.jobs.submit_embedded(model.modelId, modelVersion.version, sources)
87 | # Modzy creates the job and queue for processing. The job object contains all the info that you need to keep track
88 | # of the process, the most important being the job identifier and the job status.
89 | logger.info("job: %s", job)
90 | # The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
91 | # We provide a helper method to listen until the job finishes processing. Its a good practice to set a max timeout
92 | # if you're doing a test (ie: 2*status+run). Otherwise, if the timeout is set to None, it will listen until the job
93 | # finishes and moves to COMPLETED, CANCELED, or TIMEOUT.
94 | job.block_until_complete(timeout=None)
95 |
96 | # Get the results:
97 | # Check the status of the job. Jobs may be canceled or may reach a timeout.
98 | if job.status == Jobs.status.COMPLETED:
99 | # A completed job means that all the inputs were processed by the model. Check the results for each
100 | # input key provided in the source dictionary to see the model output.
101 | result = job.get_result()
102 | # The result object has some useful info:
103 | logger.info("Result: finished: {}, total: {}, completed: {}, failed: {}"
104 | .format(result.finished, result.total, result.completed, result.failed))
105 | # Notice that we are iterating through the same input source keys
106 | for key in sources:
107 | # The result object has the individual results of each job input. In this case the output key is called
108 | # results.json, so we can get the results as follows:
109 | try:
110 | model_res = result.get_source_outputs(key)['results.json']
111 | # The output for this model comes in a JSON format, so we can directly log the model results:
112 | logger.info(
113 | " {}: ".format(key) + ", ".join(
114 | "{}: {}".format(key, str(value).replace('\n', '')) for key, value in model_res.items()))
115 | except ResultsError as failure:
116 | # If the model raises an error, we can get the specific error message:
117 | logger.warning(" {}: failure: {}".format(key, failure));
118 |
119 | else:
120 | logger.warning("The job ends with status {}".format(job))
121 |
--------------------------------------------------------------------------------
/samples/job_with_aws_input_sample.py:
--------------------------------------------------------------------------------
import sys
import os
import logging
import dotenv

from modzy.error import ResultsError
from modzy.jobs import Jobs

sys.path.insert(0, '..')
from modzy import ApiClient


# Always configure the logger level (ie: DEBUG, INFO, WARNING, ERROR, CRITICAL)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# The system admin can provide the right base API URL, the API key can be downloaded from your profile page on Modzy.
# You can configure those params as described in the README file (as environment variables, or by using the .env file),
# or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).

dotenv.load_dotenv()

# The MODZY_BASE_URL should point to the API services route which may be different from the Modzy page URL.
# (ie: https://modzy.example.com/api).
BASE_URL = os.getenv('MODZY_BASE_URL')
# The MODZY_API_KEY is your own personal API key. It is composed by a public part, a dot character, and a private part
# (ie: AzQBJ3h4B1z60xNmhAJF.uQyQh8putLIRDi1nOldh).
API_KEY = os.getenv('MODZY_API_KEY')

# Client initialization:
# Initialize the ApiClient instance with the BASE_URL and the API_KEY to store those arguments
# for the following API calls.
client = ApiClient(base_url=BASE_URL, api_key=API_KEY)

# Create a Job with an AWS S3 input, wait, and retrieve results:

# Get the model object:
# If you already know the model identifier (i.e.: you got it from the URL of the model details page or from the input sample),
# you can skip this step. If you don't, you can find the model identifier by using its name as follows:
model = client.models.get_by_name("Facial Embedding")
# Or if you already know the model id and want to know more about the model, you can use this instead:
# model = client.models.get("f7e252e26a")
# You can find more information about how to query the models on the model_sample.py file.

# The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
# logger.info(", ".join("{} :: {}".format(key, value) for key, value in model.items()))
# Or just log the model identifier and the latest version
logger.info("The model identifier is {} and the latest version is {}".format(model.modelId, model.latest_version))

# Get the model version object:
# If you already know the model version and the input key(s) of the model version you can skip this step. Also, you can
# use the following code block to know about the input keys and skip the call on future job submissions.
modelVersion = client.models.get_version(model, model.latest_version)
# The info stored in modelVersion provides insights about the amount of time that the model can spend processing,
# the input, and output keys of the model.
logger.info("This model version is {}".format(modelVersion))
logger.info("  timeouts: status {}ms, run {}ms ".format(modelVersion.timeout.status, modelVersion.timeout.run))
logger.info("  inputs: ")
for model_input in modelVersion.inputs:
    logger.info("  key {}, type {}, description: {}".format(model_input.name, model_input.acceptedMediaTypes, model_input.description))
logger.info("  outputs: ")
for model_output in modelVersion.outputs:
    logger.info("  key {}, type {}, description: {}".format(model_output.name, model_output.mediaType, model_output.description))

# Send the job:
# Amazon Simple Storage Service (AWS S3) is an object storage service (for more info visit: https://aws.amazon.com/s3/?nc1=h_ls).
# It allows you to store images, videos, or other content as files. In order to use it as an input type, provide the following properties:
# AWS Access Key: replace <>
ACCESS_KEY="<>"
# AWS Secret Access Key: replace <>
SECRET_ACCESS_KEY="<>"
# AWS Default Region : replace <>
REGION="<>"
# The Bucket Name: replace <>
BUCKET_NAME="<>"
# The File Key: replace <> (remember, this model needs an image as input)
FILE_KEY="<>"
# With the info about the model (identifier) and the model version (version string, input/output keys), you are ready to
# submit the job. Just prepare the source dictionary:
sources = {"source-key": {"image": {'bucket': BUCKET_NAME, 'key': FILE_KEY}}}
# An inference job groups input data sent to a model. You can send any amount of inputs to
# process and you can identify and refer to a specific input by the key assigned. For example we can add:
sources["second-key"] = {"image": {'bucket': BUCKET_NAME, 'key': FILE_KEY}}
sources["another-key"] = {"image": {'bucket': BUCKET_NAME, 'key': FILE_KEY}}
# If you send a wrong input key, the model fails to process the input.
sources["wrong-key"] = {"a.wrong.key": {'bucket': BUCKET_NAME, 'key': FILE_KEY}}
# If you send a correct input key, but a wrong AWS S3 value key, the model fails to process the input.
sources["wrong-value"] = {"image": {'bucket': BUCKET_NAME, 'key': "wrong-aws-file-key.png"}}
# When you have all your inputs ready, you can use our helper method to submit the job as follows.
# Note: pass the REGION configured above (previously this sample hard-coded "us-west-2",
# silently ignoring the REGION variable).
job = client.jobs.submit_aws_s3(model.modelId, modelVersion.version, sources, ACCESS_KEY, SECRET_ACCESS_KEY, REGION)
# Modzy creates the job and queue for processing. The job object contains all the info that you need to keep track
# of the process, the most important being the job identifier and the job status.
logger.info("job: %s", job)
# The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
# We provide a helper method to listen until the job finishes processing. It's a good practice to set a max timeout
# if you're doing a test (ie: 2*status+run). Otherwise, if the timeout is set to None, it listens until the job
# finishes and moves to COMPLETED, CANCELED, or TIMEOUT.
job.block_until_complete(timeout=None)

# Get the results:
# Check the status of the job. Jobs may be canceled or may reach a timeout.
if job.status == Jobs.status.COMPLETED:
    # A completed job means that all the inputs were processed by the model. Check the results for each
    # input key provided in the source dictionary to see the model output.
    result = job.get_result()
    # The results object has some useful info:
    logger.info("Result: finished: {}, total: {}, completed: {}, failed: {}"
                .format(result.finished, result.total, result.completed, result.failed))
    # Notice that we are iterating through the same input source keys
    for key in sources:
        # The results object has the individual results of each job input. In this case the output key is called
        # results.json, so we can get the results as follows:
        try:
            model_res = result.get_source_outputs(key)['results.json']
            # The output for this model comes in a JSON format, so we can directly log the model results:
            logger.info(
                "  {}: ".format(key) + ", ".join("{}: {}".format(out_key, out_value) for out_key, out_value in model_res.items()))
        except ResultsError as failure:
            # If the model raises an error, we can get the specific error message:
            logger.warning("  {}: failure: {}".format(key, failure))

else:
    logger.warning("The job ends with status {}".format(job))
--------------------------------------------------------------------------------
/modzy/_util.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import pathlib
3 | import time
4 | from base64 import b64encode
5 |
6 | from requests.adapters import HTTPAdapter
7 | from requests.packages.urllib3.util.retry import Retry
8 |
9 | from .error import InternalServerError, NetworkError
10 |
11 |
def encode_data_uri(bytes_like, mimetype='application/octet-stream'):
    """Encode a bytes-like object as an RFC 2397 ``data:`` URI string.

    Args:
        bytes_like: The raw bytes to embed in the URI.
        mimetype (str): MIME type recorded in the URI header. Defaults to
            'application/octet-stream'.

    Returns:
        str: A string of the form ``data:<mimetype>;base64,<payload>``.
    """
    payload = b64encode(bytes_like).decode('ascii')
    return f'data:{mimetype};base64,{payload}'
16 |
17 |
def file_to_bytes(file_like):
    """Read the full contents of a file path or binary file-like object.

    Args:
        file_like: A path (str, os.PathLike, or pathlib.Path) or an object
            exposing a ``read`` method that was opened in binary mode.

    Returns:
        bytes: The complete file contents.

    Raises:
        TypeError: If a file-like object's ``read`` returns something other
            than bytes (i.e. the file was not opened with mode 'rb').
    """
    if not hasattr(file_like, 'read'):
        # Not file-like: treat the argument as a path, normalizing the
        # different path flavors to something `open` accepts.
        # NOTE: we could pass the object directly to `open` instead of
        # resolving the path ourselves, but that would break pathlib.Path
        # support on Python<3.6.
        if hasattr(file_like, '__fspath__'):  # os.PathLike
            path = file_like.__fspath__()
        elif isinstance(file_like, pathlib.Path):  # Python 3.4-3.5
            path = str(file_like)
        else:
            path = file_like
        with open(path, 'rb') as file:
            return file.read()

    # File-like object: rewind when possible so we always read from the start.
    if hasattr(file_like, 'seekable') and file_like.seekable():
        file_like.seek(0)
    data = file_like.read()
    if not isinstance(data, bytes):
        raise TypeError("the file object's 'read' function must return bytes not {}; "
                        "files should be opened using binary mode 'rb'"
                        .format(type(data).__name__))
    return data
40 |
41 |
def file_to_chunks(file_like, chunk_size):
    """Lazily yield successive chunks from a file path or binary file-like object.

    Args:
        file_like: A path (str, os.PathLike, or pathlib.Path) or an object
            exposing a ``read`` method that was opened in binary mode.
        chunk_size (int): Maximum number of bytes per chunk.

    Yields:
        bytes: Consecutive chunks of at most ``chunk_size`` bytes.

    Raises:
        TypeError: If the file object's ``read`` returns something other than
            bytes (i.e. the file was not opened with mode 'rb').
    """
    if hasattr(file_like, 'read'):
        file = file_like
        should_close = False  # caller owns this file object
    else:
        if hasattr(file_like, '__fspath__'):  # os.PathLike
            path = file_like.__fspath__()
        elif isinstance(file_like, pathlib.Path):  # Python 3.4-3.5
            path = str(file_like)
        else:
            path = file_like
        file = open(path, 'rb')
        should_close = True  # we opened it, so we must close it

    # try/finally guarantees a file we opened is closed even when the
    # consumer abandons the generator early or a TypeError is raised
    # mid-stream (the previous implementation leaked it in those cases).
    try:
        if hasattr(file, 'seekable') and file.seekable():
            file.seek(0)

        while True:
            chunk = file.read(chunk_size)
            if not chunk:
                break
            if not isinstance(chunk, bytes):
                raise TypeError("the file object's 'read' function must return bytes not {}; "
                                "files should be opened using binary mode 'rb'"
                                .format(type(chunk).__name__))
            yield chunk
    finally:
        if should_close:
            file.close()
73 |
74 |
def bytes_to_chunks(byte_array, chunk_size):
    """Lazily split a bytes-like sequence into chunks of at most chunk_size bytes."""
    offset = 0
    total = len(byte_array)
    while offset < total:
        yield byte_array[offset:offset + chunk_size]
        offset += chunk_size
78 |
79 |
def depth(d):
    """Return the nesting depth of a dict: 0 for non-dicts or empty dicts."""
    if not d or not isinstance(d, dict):
        return 0
    return 1 + max(depth(value) for value in d.values())
84 |
85 |
86 | '''
87 | Model Deployment (models.deploy()) specific utilities
88 | '''
89 |
90 |
def load_model(client, logger, identifier, version):
    """Wait for the model's container image to be pulled, then start the load process.

    Polls the container-image endpoint until it reports 100%, then POSTs to
    the load-process endpoint (with retries on HTTP 400).

    Args:
        client: An `ApiClient` instance.
        logger: Logger used for progress reporting.
        identifier (str): The model identifier.
        version (str): The model version string.
    """
    start = time.time()
    # Before loading the model we need to ensure that it has been pulled.
    percentage = -1
    while percentage < 100:
        try:
            res = client.http.get(f"/models/{identifier}/versions/{version}/container-image")
            new_percentage = res.get("percentage")
        except (NetworkError, InternalServerError):
            # Transient API failure: back off before polling again instead of
            # retrying in a tight loop (the old `continue` skipped the sleep).
            time.sleep(1)
            continue

        if new_percentage != percentage:
            logger.info(f'Loading model at {new_percentage}%')
            print(f'Loading model at {new_percentage}%')
            percentage = new_percentage

        time.sleep(1)

    # The load-process call can return HTTP 400 while the container settles;
    # mount a retry strategy so those are retried with backoff.
    retry_strategy = Retry(
        total=10,
        backoff_factor=0.3,
        status_forcelist=[400],
        allowed_methods=frozenset(['POST']),
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    client.http.session.mount('https://', adapter)

    try:
        client.http.post(f"/models/{identifier}/versions/{version}/load-process")
    except (NetworkError, InternalServerError):
        # Best effort: give up silently, preserving the original contract.
        return

    logger.info(f'Loading container image took [{1000 * (time.time() - start)} ms]')
128 |
129 |
def upload_input_example(client, logger, identifier, version, model_data_metadata, input_sample_path):
    """Upload a sample input file to the model version's testInput endpoint.

    Args:
        client: An `ApiClient` instance.
        logger: Logger used for progress reporting.
        identifier (str): The model identifier.
        version (str): The model version string.
        model_data_metadata (dict): Model metadata; the first entry of its
            'inputs' list supplies the input name.
        input_sample_path: Path to the sample input file to upload.
    """
    start = time.time()

    input_filename = model_data_metadata['inputs'][0]['name']
    params = {'name': input_filename}
    # Use a context manager so the sample file handle is always closed
    # (the previous implementation leaked the open file object).
    with open(input_sample_path, 'rb') as sample_file:
        files = {'file': sample_file}
        client.http.post(f"/models/{identifier}/versions/{version}/testInput", params=params, file_data=files)

    logger.info(f'Uploading sample input took [{1000 * (time.time() - start)} ms]')
139 |
140 |
def run_model(client, logger, identifier, version):
    """Run the model version's sample inference test and store sample input/output.

    Kicks off the run-process, polls until it reports 100%, validates the test
    output, then uploads a sample input payload and the formatted test output
    as the version's sample output.

    Args:
        client: An `ApiClient` instance.
        logger: Logger used for progress reporting.
        identifier (str): The model identifier.
        version (str): The model version string.

    Raises:
        ValueError: If the sample inference test finished with a FAILED status.
    """
    start = time.time()
    res = client.http.post(f"/models/{identifier}/versions/{version}/run-process")

    percentage = -1
    while percentage < 100:
        try:
            res = client.http.get(f"/models/{identifier}/versions/{version}/run-process")
            new_percentage = res.get('percentage')
        except (NetworkError, InternalServerError):
            # Transient API failure: back off before polling again instead of
            # spinning; also matches load_model's error handling, which
            # tolerates InternalServerError as well.
            time.sleep(1)
            continue

        if new_percentage != percentage:
            logger.info(f'Running model at {new_percentage}%')
            print(f'Running model at {new_percentage}%')
            percentage = new_percentage

        time.sleep(1)

    test_output = res['result']
    # perform validation check on test_output and raise error if error exists
    if test_output["status"] == "FAILED":
        raise ValueError(f'Sample inference test failed with error {test_output["error"]}. Check model container and try again.')

    # Placeholder AWS S3 job payload stored as this version's sample input.
    sample_input = {'input': {'accessKeyID': '',
                              'region': '',
                              'secretAccessKey': '',
                              'sources': {'0001': {'input': {'bucket': '',
                                                             'key': '/path/to/s3/input'}}},
                              'type': 'aws-s3'},
                    'model': {'identifier': identifier, 'version': version}
                    }

    # Shape the raw test output like a job-results document for the sample output.
    formatted_sample_output = {'jobIdentifier': '',
                               'total': '',
                               'completed': '',
                               'failed': '',
                               'finished': '',
                               'submittedByKey': '',
                               'results': {'': {'model': None,
                                                'userIdentifier': None,
                                                'status': test_output['status'],
                                                'engine': test_output['engine'],
                                                'error': test_output['error'],
                                                'startTime': test_output['startTime'],
                                                'endTime': test_output['endTime'],
                                                'updateTime': test_output['updateTime'],
                                                'inputSize': test_output['inputSize'],
                                                'accessKey': None,
                                                'teamIdentifier': None,
                                                'accountIdentifier': None,
                                                'timeMeters': None,
                                                'datasourceCompletedTime': None,
                                                'elapsedTime': test_output['elapsedTime'],
                                                'results.json': test_output['results.json']}
                                           }
                               }

    client.http.put(f"/models/{identifier}/versions/{version}/sample-input", json_data=sample_input)
    client.http.put(f"/models/{identifier}/versions/{version}/sample-output", json_data=formatted_sample_output)

    logger.info(f'Inference test took [{1000 * (time.time() - start)} ms]')
203 |
204 |
def deploy_model(client, logger, identifier, version):
    """Activate a model version by patching its status to 'active'.

    Args:
        client: An `ApiClient` instance.
        logger: Logger used for progress reporting.
        identifier (str): The model identifier.
        version (str): The model version string.
    """
    started_at = time.time()

    client.http.patch(f"/models/{identifier}/versions/{version}", {'status': 'active'})

    logger.info(f'Model Deployment took [{1000 * (time.time() - started_at)} ms]')
212 |
--------------------------------------------------------------------------------
/modzy/results.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Classes for interacting with results."""
3 |
4 | import logging
5 | import time
6 |
7 | from ._api_object import ApiObject
8 | from .error import NotFoundError, ResultsError, Timeout
9 |
10 |
class Results:
    """The `Results` object.

    This object is used to retrieve information about results from the API.

    Note:
        This class should not be instantiated directly but rather accessed through the `results`
        attribute of an `ApiClient` instance.
    """

    _base_route = '/results'

    def __init__(self, api_client):
        """Creates a `Results` instance.

        Args:
            api_client (ApiClient): An `ApiClient` instance.
        """
        self._api_client = api_client
        self.logger = logging.getLogger(__name__)

    def get(self, result):
        """Gets a `Result` instance.

        Args:
            result (Union[str, Job, Result]): The job identifier or a `Job` or `Result` instance.

        Returns:
            Result: The `Result` instance.

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        identifier = Result._coerce_identifier(result)
        self.logger.debug("getting results %s", result)
        json_obj = self._api_client.http.get('{}/{}'.format(self._base_route, identifier))
        return Result(json_obj, self._api_client)

    def block_until_complete(self, result, timeout=60, poll_interval=5):
        """Blocks until the `Result` completes or a timeout is reached.

        This is accomplished by polling the API until the `Result` is marked finished. This may mean
        that the underlying `Job` was completed or canceled.

        Args:
            result (Union[str, Job, Result]): The job identifier or a `Job` or `Result` instance.
            timeout (Optional[float]): Seconds to wait until timeout. `None` indicates wait forever.
                Defaults to 60.
            poll_interval (Optional[float]): Seconds between polls. Defaults to 5.

        Returns:
            Result: The `Result` instance.

        Raises:
            Timeout: The `Result` did not complete before the timeout was reached.
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        identifier = Result._coerce_identifier(result)
        # Absolute deadline computed up front; `None` means poll forever.
        endby = time.time() + timeout if (timeout is not None) else None
        ignore404 = False
        while True:  # poll at least once
            try:
                result = self.get(identifier)
                self.logger.debug("result %s", result)
            except NotFoundError:
                # work around 404 for recently accepted jobs: the job can exist
                # before its result record does, so tolerate 404s once the job
                # itself is confirmed to exist
                if not ignore404:
                    self._api_client.jobs.get(identifier)  # this didn't error so job must exist
                    # TODO: short-circuit on job cancelation
                    ignore404 = True
            else:
                if result.finished:  # this covers CANCELED/COMPLETED
                    return result
            # Give up one poll_interval early so we never sleep past the deadline.
            if (endby is not None) and (time.time() > endby - poll_interval):
                raise Timeout('timed out before completion')
            self.logger.debug("waiting... %d", poll_interval)
            time.sleep(poll_interval)
            # TODO: should probably ramp up poll_interval as wait time increases
91 |
92 |
class Result(ApiObject):
    """A result object.

    This object contains a parsed copy of the information returned from the server about a certain result.

    Attributes:
        job_identifier (str): The job identifier.

    Note:
        This class should not be instantiated directly. Instead, it is returned by various package
        functions.

        This object is a `dict` subclass that also supports attribute access. Information can be
        accessed through dotted attribute notation using "snake_case" or the original "camelCase" JSON
        key name (``result.job_identifier`` or ``result.jobIdentifier``). Alternatively, the original
        "camelCase" JSON key can be used with bracketed key access notation (``result['jobIdentifier']``).
    """

    @classmethod
    def _coerce_identifier(cls, maybe_result):
        """Extracts a job identifier string from a `Result`/`Job`-like object or passes a str through.

        Raises:
            TypeError: If no string identifier can be derived from the argument.
        """
        identifier = getattr(maybe_result, 'job_identifier', maybe_result)
        if not isinstance(identifier, str):
            raise TypeError('the identifier must be {} or str, not {}'
                            .format(cls.__name__, type(maybe_result).__name__))
        return identifier

    def sync(self):
        """Updates the `Result` instance's data in-place with new data from the API.

        Returns:
            Result: The `Result` instance (self).

        Raises:
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        updated = self._api_client.results.get(self.job_identifier)
        self.update(updated)  # is updating in place a bad idea?
        return self

    def block_until_complete(self, timeout=60, poll_interval=5):
        """Block until the `Result` completes or a timeout is reached.

        This is accomplished by polling the API until the `Result` is marked finished. This may mean
        that the underlying `Job` was completed or canceled.

        Args:
            timeout (Optional[float]): Seconds to wait until timeout. `None` indicates wait forever.
                Defaults to 60.
            poll_interval (Optional[float]): Seconds between polls. Defaults to 5.

        Returns:
            Result: The `Result` instance (self).

        Raises:
            Timeout: The `Result` did not complete before the timeout was reached.
            ApiError: A subclass of ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        # once we can tell if a source exists or not vs isn't finished from the results API
        # we should provide a way to block until a specific source finishes
        if self.finished:
            return self

        updated = self._api_client.results.block_until_complete(self.job_identifier,
                                                                poll_interval=poll_interval, timeout=timeout)
        self.update(updated)  # is updating in place a bad idea?
        return self

    def get_source_outputs(self, source_name):
        """Gets the model outputs for a given source.

        Args:
            source_name (str): The source name.

        Returns:
            dict: A `dict` mapping the output's filenames to JSON parsed data.

        Raises:
            ResultsError: The results for this source indicate a model failure.
            KeyError: The source name was not found.
        """
        # Successful sources live under `results`; a missing key or attribute
        # falls through to the failure lookup below.
        try:
            source = self.results[source_name]
            if source_name in source:  # deal with legacy double nesting of source source_name
                source = source[source_name]
            return source
        except (KeyError, AttributeError):
            pass

        # Failed sources live under `failures` and carry the model's error
        # message; a missing key/attribute falls through to the final KeyError.
        try:
            source = self.failures[source_name]
            if source_name in source:  # deal with legacy double nesting of source source_name
                source = source[source_name]
            raise ResultsError(source.error)
        except (KeyError, AttributeError):
            pass

        # TODO: can we give a better error message if job canceled?
        raise KeyError(source_name)

    def get_first_outputs(self):
        """Gets the first or only outputs found in this result.

        This is useful for retrieving the outputs after submitting a job with only a single
        input source where you do not know or care about the source name. For example::

            job = client.jobs.submit_files('my-model', '1.0.0', {'input': './my-file.dat'})
            result = client.results.block_until_complete(job)
            outputs = result.get_first_outputs()
            output_data = outputs['output-file.dat']

        Returns:
            dict: A `dict` mapping the output's filenames to JSON parsed data.

        Raises:
            ResultsError: The results for this source indicate a model failure.
            KeyError: No sources have completed.
        """
        source_name = self._get_first_source_name()
        return self.get_source_outputs(source_name)

    def _get_first_source_name(self):
        """Returns the first source name found in `results`, then `failures`.

        Raises:
            KeyError: If neither collection yields a source name.
        """
        try:
            return next(iter(self.results))
        except (StopIteration, AttributeError):
            pass

        try:
            return next(iter(self.failures))
        except (StopIteration, AttributeError):
            pass

        # TODO: can we give a better error message if job canceled?
        raise KeyError('no source outputs available, wait for job to complete')

    def __str__(self):
        return "Result(job_identifier='{}',finished='{}')".format(self.job_identifier, self.finished)
234 |
--------------------------------------------------------------------------------
/modzy/edge/client.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import grpc
4 |
5 | from .inferences import EdgeInferenceClient
6 | from .jobs import EdgeJobsClient
7 |
8 |
9 | class EdgeClient:
10 | """The Edge API client object.
11 |
12 | This class is used to interact with the Modzy Edge API.
13 |
14 | Attributes:
15 | host (str): The host for the Modzy Edge API.
16 | port (int): The port on which Modzy Edge is listening.
17 | """
18 |
    def __init__(self, host, port):
        """Creates an `EdgeClient` instance.

        Args:
            host (str): The host for the API.
            port (int): Port for the API.
        """
        self.logger = logging.getLogger(__name__)
        self.host = host
        self.port = port
        # gRPC target string, e.g. "localhost:55000".
        self.origin = '{}:{}'.format(self.host, self.port)
        # Channel and sub-clients are created lazily by connect().
        self._channel = None
        self.jobs: EdgeJobsClient | None = None
        self.inferences: EdgeInferenceClient | None = None
33 |
    def connect(self):
        """Opens the gRPC channel and builds the sub-clients.

        Idempotent: if a channel already exists, it is reused.

        Returns:
            EdgeClient: This client (self), allowing chained calls.
        """
        if self._channel is None:
            self._channel = grpc.insecure_channel(self.origin)
            self.jobs = EdgeJobsClient(self._channel, origin=self.origin)
            self.inferences = EdgeInferenceClient(self._channel, origin=self.origin)
        return self
40 |
41 | def close(self):
42 | self._channel.close()
43 |
    def __enter__(self):
        """Enters the context manager: connects and returns this client."""
        return self.connect()
46 |
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exits the context manager: closes the channel; exceptions propagate."""
        return self.close()
49 |
    def submit_embedded(self, identifier, version, sources, explain=False):
        """Submits a job containing embedded data.

        Deprecated: use ``EdgeClient.jobs.submit_embedded()`` instead.

        Args:
            identifier (str): The model identifier.
            version (str): The model version string.
            sources (dict): A mapping of source names to sources. Each source should be a
                mapping of model input filename to a bytes-like object.
            explain (bool): indicates if you desire an explainable result for your model.

        Returns:
            str: Job identifier returned by Modzy Edge.

        Raises:
            ApiError: An ApiError will be raised if the API returns an error status,
                or the client is unable to connect.

        Example:
            .. code-block::

                job = client.submit_embedded('model-identifier', '1.2.3',
                {
                    'source-name-1': {
                        'model-input-name-1': b'some bytes',
                        'model-input-name-2': bytearray([1,2,3,4]),
                    },
                    'source-name-2': {
                        'model-input-name-1': b'some bytes',
                        'model-input-name-2': bytearray([1,2,3,4]),
                    }
                })

        """
        self.logger.warning("Deprecated. Use EdgeClient.jobs.submit_embedded().")
        self.connect()
        return self.jobs.submit_embedded(identifier, version, sources, explain)
86 |
    def submit_text(self, identifier, version, sources, explain=False):
        """Submits text data for a multiple source `Job`.

        Deprecated: use ``EdgeClient.jobs.submit_text()`` instead.

        Args:
            identifier (str): The model identifier.
            version (str): The model version string.
            sources (dict): A mapping of source names to text sources. Each source should be a
                mapping of model input filename to a text string.
            explain (bool): indicates if you desire an explainable result for your model.

        Returns:
            str: Job identifier returned by Modzy Edge.

        Raises:
            ApiError: An ApiError will be raised if the API returns an error status,
                or the client is unable to connect.

        Example:
            .. code-block::

                job = client.submit_text('model-identifier', '1.2.3',
                {
                    'source-name-1': {
                        'model-input-name-1': 'some text',
                        'model-input-name-2': 'some more text',
                    },
                    'source-name-2': {
                        'model-input-name-1': 'some text 2',
                        'model-input-name-2': 'some more text 2',
                    }
                })

        """
        self.logger.warning("Deprecated. Use EdgeClient.jobs.submit_text().")
        self.connect()
        return self.jobs.submit_text(identifier, version, sources, explain)
123 |
    def submit_aws_s3(self, identifier, version, sources, access_key_id, secret_access_key, region, explain=False):
        """Submits AWS S3 hosted data for a multiple source `Job`.

        Deprecated: use ``EdgeClient.jobs.submit_aws_s3()`` instead.

        Args:
            identifier (str): The model identifier or a `Model` instance.
            version (str): The model version string.
            sources (dict): A mapping of source names to sources. Each source should be a
                mapping of model input filename to S3 bucket and key.
            access_key_id (str): The AWS Access Key ID.
            secret_access_key (str): The AWS Secret Access Key.
            region (str): The AWS Region.
            explain (bool): indicates if you desire an explainable result for your model.

        Returns:
            str: Job identifier returned by Modzy Edge.

        Raises:
            ApiError: An ApiError will be raised if the API returns an error status,
                or the client is unable to connect.

        Example:
            .. code-block::

                job = client.submit_aws_s3('model-identifier', '1.2.3',
                {
                    'source-name-1': {
                        'model-input-name-1': {
                            'bucket': 'my-bucket',
                            'key': '/my/data/file-1.dat'
                        },
                        'model-input-name-2': {
                            'bucket': 'my-bucket',
                            'key': '/my/data/file-2.dat'
                        }
                    },
                    'source-name-2': {
                        'model-input-name-1': {
                            'bucket': 'my-bucket',
                            'key': '/my/data/file-3.dat'
                        },
                        'model-input-name-2': {
                            'bucket': 'my-bucket',
                            'key': '/my/data/file-4.dat'
                        }
                    }
                },
                    access_key_id='AWS_ACCESS_KEY_ID',
                    secret_access_key='AWS_SECRET_ACCESS_KEY',
                    region='us-east-1',
                )
        """
        self.logger.warning("Deprecated. Use EdgeClient.jobs.submit_aws_s3().")
        self.connect()
        return self.jobs.submit_aws_s3(identifier, version, sources, access_key_id, secret_access_key, region, explain)
178 |
    def get_job_details(self, job_identifier):
        """Get job details.

        .. deprecated::
            Use ``EdgeClient.jobs.get_job_details()`` instead; this method
            only logs a warning and delegates to it.

        Args:
            job_identifier (str): The job identifier.

        Returns:
            dict: Details for requested job.

        Raises:
            ApiError: An ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        self.logger.warning("Deprecated. Use EdgeClient.jobs.get_job_details().")
        self.connect()
        return self.jobs.get_job_details(job_identifier)
195 |
    def get_all_job_details(self, timeout=None):
        """Get job details for all jobs.

        .. deprecated::
            Use ``EdgeClient.jobs.get_all_job_details()`` instead; this
            method only logs a warning and delegates to it.

        Args:
            timeout (int): Optional timeout value in seconds.

        Returns:
            dict: Details for all jobs that have been run.

        Raises:
            ApiError: An ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        self.logger.warning("Deprecated. Use EdgeClient.jobs.get_all_job_details().")
        self.connect()
        return self.jobs.get_all_job_details(timeout)
212 |
213 | def block_until_complete(self, job_identifier, poll_interval=0.01, timeout=30):
214 | """Block until job complete.
215 |
216 | Args:
217 | job_identifier (str): The job identifier.
218 |
219 | Returns:
220 | dict: Final job details.
221 |
222 | Raises:
223 | ApiError: An ApiError will be raised if the API returns an error status,
224 | or the client is unable to connect.
225 | """
226 | self.logger.warning("Deprecated. Use EdgeClient.jobs.block_until_complete().")
227 | self.connect()
228 | self.jobs.block_until_complete(job_identifier, poll_interval, timeout)
229 |
    def get_results(self, job_identifier):
        """Get results for a job.

        .. deprecated::
            Use ``EdgeClient.jobs.get_results()`` instead; this method only
            logs a warning and delegates to it.

        Args:
            job_identifier (str): The job identifier.

        Returns:
            dict: Results for the requested job.

        Raises:
            ApiError: An ApiError will be raised if the API returns an error status,
                or the client is unable to connect.
        """
        self.logger.warning("Deprecated. Use EdgeClient.jobs.get_results().")
        self.connect()
        return self.jobs.get_results(job_identifier)
246 |
--------------------------------------------------------------------------------
/modzy/edge/proto/inferences/api/v1/inferences_pb2.pyi:
--------------------------------------------------------------------------------
1 | from google.protobuf import timestamp_pb2 as _timestamp_pb2
2 | from google.protobuf import duration_pb2 as _duration_pb2
3 | from google.api import annotations_pb2 as _annotations_pb2
4 | from google.api import field_behavior_pb2 as _field_behavior_pb2
5 | from ....protoc_gen_openapiv2.options import annotations_pb2 as _annotations_pb2_1
6 | from ....common.v1 import common_pb2 as _common_pb2
7 | from ....common.v1 import errors_pb2 as _errors_pb2
8 | from google.protobuf.internal import containers as _containers
9 | from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
10 | from google.protobuf import descriptor as _descriptor
11 | from google.protobuf import message as _message
12 | from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union
13 |
# Module-level names exposed by the generated protobuf stub: the file
# DESCRIPTOR plus aliases for the InferenceStatus enum values.
CANCELED: InferenceStatus
COMPLETE: InferenceStatus
DESCRIPTOR: _descriptor.FileDescriptor
FAILED: InferenceStatus
FETCHING: InferenceStatus
IN_PROGRESS: InferenceStatus
QUEUED: InferenceStatus
UNKNOWN_INFERENCE_STATUS: InferenceStatus
22 |
class AzureInputSource(_message.Message):
    """Generated stub: Azure Blob Storage input source (storage account, account key, container, path)."""
    __slots__ = ["container", "path", "storage_account", "storage_account_key"]
    CONTAINER_FIELD_NUMBER: _ClassVar[int]
    PATH_FIELD_NUMBER: _ClassVar[int]
    STORAGE_ACCOUNT_FIELD_NUMBER: _ClassVar[int]
    STORAGE_ACCOUNT_KEY_FIELD_NUMBER: _ClassVar[int]
    container: str
    path: str
    storage_account: str
    storage_account_key: str
    def __init__(self, storage_account: _Optional[str] = ..., storage_account_key: _Optional[str] = ..., container: _Optional[str] = ..., path: _Optional[str] = ...) -> None: ...
34 |
class EmbeddedInputSource(_message.Message):
    """Generated stub: input source whose raw bytes are embedded in the request under a name."""
    __slots__ = ["data", "name"]
    DATA_FIELD_NUMBER: _ClassVar[int]
    NAME_FIELD_NUMBER: _ClassVar[int]
    data: bytes
    name: str
    def __init__(self, name: _Optional[str] = ..., data: _Optional[bytes] = ...) -> None: ...
42 |
class Inference(_message.Message):
    """Generated stub: a single inference record (model, redacted inputs, status, timing, result, tags)."""
    __slots__ = ["completed_at", "elapsed_time", "explain", "identifier", "inputs", "model", "result", "status", "submitted_at", "tags"]
    class TagsEntry(_message.Message):
        """Generated stub: one str->str tag map entry."""
        __slots__ = ["key", "value"]
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: str
        def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
    COMPLETED_AT_FIELD_NUMBER: _ClassVar[int]
    ELAPSED_TIME_FIELD_NUMBER: _ClassVar[int]
    EXPLAIN_FIELD_NUMBER: _ClassVar[int]
    IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    INPUTS_FIELD_NUMBER: _ClassVar[int]
    MODEL_FIELD_NUMBER: _ClassVar[int]
    RESULT_FIELD_NUMBER: _ClassVar[int]
    STATUS_FIELD_NUMBER: _ClassVar[int]
    SUBMITTED_AT_FIELD_NUMBER: _ClassVar[int]
    TAGS_FIELD_NUMBER: _ClassVar[int]
    completed_at: _timestamp_pb2.Timestamp
    elapsed_time: _duration_pb2.Duration
    explain: bool
    identifier: str
    inputs: _containers.RepeatedCompositeFieldContainer[RedactedInputSource]
    model: _common_pb2.ModelIdentifier
    result: InferenceResult
    status: InferenceStatus
    submitted_at: _timestamp_pb2.Timestamp
    tags: _containers.ScalarMap[str, str]
    def __init__(self, identifier: _Optional[str] = ..., model: _Optional[_Union[_common_pb2.ModelIdentifier, _Mapping]] = ..., inputs: _Optional[_Iterable[_Union[RedactedInputSource, _Mapping]]] = ..., tags: _Optional[_Mapping[str, str]] = ..., explain: bool = ..., status: _Optional[_Union[InferenceStatus, str]] = ..., submitted_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., completed_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., elapsed_time: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., result: _Optional[_Union[InferenceResult, _Mapping]] = ...) -> None: ...
73 |
class InferenceIdentifier(_message.Message):
    """Generated stub: wrapper holding an inference's string identifier."""
    __slots__ = ["identifier"]
    IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    identifier: str
    def __init__(self, identifier: _Optional[str] = ...) -> None: ...
79 |
class InferenceRequest(_message.Message):
    """Generated stub: request to run an inference (model identifier, input sources, tags, explain flag)."""
    __slots__ = ["explain", "inputs", "model", "tags"]
    class TagsEntry(_message.Message):
        """Generated stub: one str->str tag map entry."""
        __slots__ = ["key", "value"]
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: str
        def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
    EXPLAIN_FIELD_NUMBER: _ClassVar[int]
    INPUTS_FIELD_NUMBER: _ClassVar[int]
    MODEL_FIELD_NUMBER: _ClassVar[int]
    TAGS_FIELD_NUMBER: _ClassVar[int]
    explain: bool
    inputs: _containers.RepeatedCompositeFieldContainer[InputSource]
    model: _common_pb2.ModelIdentifier
    tags: _containers.ScalarMap[str, str]
    def __init__(self, model: _Optional[_Union[_common_pb2.ModelIdentifier, _Mapping]] = ..., inputs: _Optional[_Iterable[_Union[InputSource, _Mapping]]] = ..., tags: _Optional[_Mapping[str, str]] = ..., explain: bool = ...) -> None: ...
98 |
class InferenceResult(_message.Message):
    """Generated stub: inference result as a map of output name to typed binary output."""
    __slots__ = ["outputs"]
    class Output(_message.Message):
        """Generated stub: one output payload (raw bytes plus content type)."""
        __slots__ = ["content_type", "data"]
        CONTENT_TYPE_FIELD_NUMBER: _ClassVar[int]
        DATA_FIELD_NUMBER: _ClassVar[int]
        content_type: str
        data: bytes
        def __init__(self, data: _Optional[bytes] = ..., content_type: _Optional[str] = ...) -> None: ...
    class OutputsEntry(_message.Message):
        """Generated stub: one name->Output map entry."""
        __slots__ = ["key", "value"]
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: InferenceResult.Output
        def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[InferenceResult.Output, _Mapping]] = ...) -> None: ...
    OUTPUTS_FIELD_NUMBER: _ClassVar[int]
    outputs: _containers.MessageMap[str, InferenceResult.Output]
    def __init__(self, outputs: _Optional[_Mapping[str, InferenceResult.Output]] = ...) -> None: ...
118 |
class InputSource(_message.Message):
    """Generated stub: one named input, carried as text, embedded bytes, S3 reference, or Azure reference."""
    __slots__ = ["azure", "data", "key", "s3", "text"]
    AZURE_FIELD_NUMBER: _ClassVar[int]
    DATA_FIELD_NUMBER: _ClassVar[int]
    KEY_FIELD_NUMBER: _ClassVar[int]
    S3_FIELD_NUMBER: _ClassVar[int]
    TEXT_FIELD_NUMBER: _ClassVar[int]
    azure: AzureInputSource
    data: bytes
    key: str
    s3: S3InputSource
    text: str
    def __init__(self, key: _Optional[str] = ..., text: _Optional[str] = ..., data: _Optional[bytes] = ..., s3: _Optional[_Union[S3InputSource, _Mapping]] = ..., azure: _Optional[_Union[AzureInputSource, _Mapping]] = ...) -> None: ...
132 |
class RedactedInputSource(_message.Message):
    """Generated stub: an input source with secrets removed, keeping only content metadata (sizes, digests, locations)."""
    __slots__ = ["azure", "data", "key", "s3", "text"]
    class AzureContentInfo(_message.Message):
        """Generated stub: Azure blob metadata without the account key."""
        __slots__ = ["container", "path", "sha256_digest", "size_in_bytes", "storage_account"]
        CONTAINER_FIELD_NUMBER: _ClassVar[int]
        PATH_FIELD_NUMBER: _ClassVar[int]
        SHA256_DIGEST_FIELD_NUMBER: _ClassVar[int]
        SIZE_IN_BYTES_FIELD_NUMBER: _ClassVar[int]
        STORAGE_ACCOUNT_FIELD_NUMBER: _ClassVar[int]
        container: str
        path: str
        sha256_digest: str
        size_in_bytes: int
        storage_account: str
        def __init__(self, size_in_bytes: _Optional[int] = ..., sha256_digest: _Optional[str] = ..., storage_account: _Optional[str] = ..., container: _Optional[str] = ..., path: _Optional[str] = ...) -> None: ...
    class EmbeddedContentInfo(_message.Message):
        """Generated stub: embedded/text content metadata (size and digest only)."""
        __slots__ = ["sha256_digest", "size_in_bytes"]
        SHA256_DIGEST_FIELD_NUMBER: _ClassVar[int]
        SIZE_IN_BYTES_FIELD_NUMBER: _ClassVar[int]
        sha256_digest: str
        size_in_bytes: int
        def __init__(self, size_in_bytes: _Optional[int] = ..., sha256_digest: _Optional[str] = ...) -> None: ...
    class S3ContentInfo(_message.Message):
        """Generated stub: S3 object metadata without access credentials."""
        __slots__ = ["bucket", "endpoint", "path", "region", "sha256_digest", "size_in_bytes"]
        BUCKET_FIELD_NUMBER: _ClassVar[int]
        ENDPOINT_FIELD_NUMBER: _ClassVar[int]
        PATH_FIELD_NUMBER: _ClassVar[int]
        REGION_FIELD_NUMBER: _ClassVar[int]
        SHA256_DIGEST_FIELD_NUMBER: _ClassVar[int]
        SIZE_IN_BYTES_FIELD_NUMBER: _ClassVar[int]
        bucket: str
        endpoint: str
        path: str
        region: str
        sha256_digest: str
        size_in_bytes: int
        def __init__(self, size_in_bytes: _Optional[int] = ..., sha256_digest: _Optional[str] = ..., endpoint: _Optional[str] = ..., region: _Optional[str] = ..., bucket: _Optional[str] = ..., path: _Optional[str] = ...) -> None: ...
    AZURE_FIELD_NUMBER: _ClassVar[int]
    DATA_FIELD_NUMBER: _ClassVar[int]
    KEY_FIELD_NUMBER: _ClassVar[int]
    S3_FIELD_NUMBER: _ClassVar[int]
    TEXT_FIELD_NUMBER: _ClassVar[int]
    azure: RedactedInputSource.AzureContentInfo
    data: RedactedInputSource.EmbeddedContentInfo
    key: str
    s3: RedactedInputSource.S3ContentInfo
    text: RedactedInputSource.EmbeddedContentInfo
    def __init__(self, key: _Optional[str] = ..., text: _Optional[_Union[RedactedInputSource.EmbeddedContentInfo, _Mapping]] = ..., data: _Optional[_Union[RedactedInputSource.EmbeddedContentInfo, _Mapping]] = ..., s3: _Optional[_Union[RedactedInputSource.S3ContentInfo, _Mapping]] = ..., azure: _Optional[_Union[RedactedInputSource.AzureContentInfo, _Mapping]] = ...) -> None: ...
181 |
class S3InputSource(_message.Message):
    """Generated stub: S3-hosted input source (endpoint, region, bucket, path, access credentials)."""
    __slots__ = ["access_key_id", "access_key_secret", "bucket", "endpoint", "path", "region"]
    ACCESS_KEY_ID_FIELD_NUMBER: _ClassVar[int]
    ACCESS_KEY_SECRET_FIELD_NUMBER: _ClassVar[int]
    BUCKET_FIELD_NUMBER: _ClassVar[int]
    ENDPOINT_FIELD_NUMBER: _ClassVar[int]
    PATH_FIELD_NUMBER: _ClassVar[int]
    REGION_FIELD_NUMBER: _ClassVar[int]
    access_key_id: str
    access_key_secret: str
    bucket: str
    endpoint: str
    path: str
    region: str
    def __init__(self, endpoint: _Optional[str] = ..., region: _Optional[str] = ..., bucket: _Optional[str] = ..., path: _Optional[str] = ..., access_key_id: _Optional[str] = ..., access_key_secret: _Optional[str] = ...) -> None: ...
197 |
class TextInputSource(_message.Message):
    """Generated stub: named plain-text input source."""
    __slots__ = ["name", "text"]
    NAME_FIELD_NUMBER: _ClassVar[int]
    TEXT_FIELD_NUMBER: _ClassVar[int]
    name: str
    text: str
    def __init__(self, name: _Optional[str] = ..., text: _Optional[str] = ...) -> None: ...
205 |
class InferenceStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
    """Generated stub: protobuf enum of inference states (QUEUED, FETCHING, IN_PROGRESS, COMPLETE, FAILED, CANCELED, UNKNOWN_INFERENCE_STATUS)."""
    __slots__ = []
208 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/modzy/edge/jobs.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 |
4 | from google.protobuf.json_format import MessageToDict
5 | # NOTE: The following import line does work when the program runs
6 | # even though IntelliJ says it can't find "Struct" in the package.
7 | # This is due to the way that the Python files are generated from
8 | # the proto file in Protobuf 3.20.x+.
9 | from google.protobuf.struct_pb2 import Struct
10 | from grpc import Channel
11 | from grpc._channel import _InactiveRpcError
12 |
13 | from .proto.common.v1.common_pb2 import ModelIdentifier
14 | from .proto.jobs.v1.jobs_pb2 import JobFilter, JobIdentifier, JobInput, JobSubmission
15 | from .proto.jobs.v1.jobs_pb2_grpc import JobServiceStub
16 | from .proto.results.v1.results_pb2_grpc import ResultsServiceStub
17 | from .._util import depth, encode_data_uri
18 | from ..error import ApiError, Timeout
19 |
20 |
def _fix_single_source_job(sources, s3=False):
    """Wrap a single-source input mapping under a default source name.

    Compatibility helper: callers may pass either a full mapping of source
    names to input mappings, or a single source's input mapping directly.
    The single-source shape is detected by its nesting depth — one level
    deeper when the inputs are S3 bucket/key dicts — and wrapped under the
    default source name ``'job'``.

    Args:
        sources (dict): Either a full sources mapping or a single source's
            input mapping.
        s3 (bool): True when inputs are S3 bucket/key dicts, which adds one
            level of nesting to the expected shape.

    Returns:
        dict: A properly formatted sources dictionary keyed by source name.
    """
    single_source_depth = 1 + s3
    if depth(sources) == single_source_depth:
        return {'job': sources}
    return sources
36 |
37 |
38 | def _parse_inactive_rpc_error(inactive_rpc_error):
39 | """Parse relevant info from _InactiveRpcError.
40 |
41 | Args:
42 | inactive_rpc_error (_InactiveRpcError): Error to be parsed.
43 |
44 | Returns:
45 | str: the error string.
46 |
47 | """
48 | lines = str(inactive_rpc_error).splitlines()
49 | details_index = [lines.index(l) for l in lines if l.startswith('\tdetails')][0]
50 | details_message = lines[details_index].split('=')[1].strip().replace('"', '')
51 |
52 | return details_message
53 |
54 |
55 | class EdgeJobsClient:
56 |
    def __init__(self, channel: Channel, origin=""):
        """Initialize an EdgeJobsClient over an existing gRPC channel.

        Args:
            channel (Channel): Open gRPC channel to a Modzy Edge instance.
            origin (str): Optional origin string passed along with any
                ApiError raised by this client's methods.
        """
        self.logger = logging.getLogger(__name__)
        self.origin = origin
        self._channel = channel
        # Job-submission and results service stubs share the same channel.
        self._jobs_client = JobServiceStub(self._channel)
        self._results_client = ResultsServiceStub(self._channel)
63 |
64 | def submit_embedded(self, identifier, version, sources, explain=False):
65 | """Submits a job containing embedded data.
66 |
67 | Args:
68 | identifier (str): The model identifier.
69 | version (str): The model version string.
70 | sources (dict): A mapping of source names to text sources. Each source should be a
71 | mapping of model input filename to filepath or file-like object.
72 | explain (bool): indicates if you desire an explainable result for your model.`
73 |
74 | Returns:
75 | str: Job identifier returned by Modzy Edge.
76 |
77 | Raises:
78 | ApiError: An ApiError will be raised if the API returns an error status,
79 | or the client is unable to connect.
80 |
81 | Example:
82 | .. code-block::
83 |
84 | job = client.submit_embedded('model-identifier', '1.2.3',
85 | {
86 | 'source-name-1': {
87 | 'model-input-name-1': b'some bytes',
88 | 'model-input-name-2': bytearray([1,2,3,4]),
89 | },
90 | 'source-name-2': {
91 | 'model-input-name-1': b'some bytes',
92 | 'model-input-name-2': bytearray([1,2,3,4]),
93 | }
94 | })
95 |
96 | """
97 |
98 | sources = {
99 | source: {
100 | key: encode_data_uri(value)
101 | for key, value in inputs.items()
102 | }
103 | for source, inputs in _fix_single_source_job(sources).items()
104 | }
105 |
106 | sources_struct = Struct()
107 | for k, v in sources.items():
108 | sources_struct[k] = v
109 |
110 | job_input = JobInput(type="embedded", sources=sources_struct)
111 | model_identifier = ModelIdentifier(identifier=identifier, version=version)
112 | job_submission = JobSubmission(model=model_identifier, input=job_input, explain=explain)
113 |
114 | try:
115 | job_receipt = self._jobs_client.SubmitJob(job_submission)
116 | except _InactiveRpcError as e:
117 | raise ApiError(_parse_inactive_rpc_error(e), self.origin) from e
118 |
119 | return job_receipt.job_identifier
120 |
121 | def submit_text(self, identifier, version, sources, explain=False):
122 | """Submits text data for a multiple source `Job`.
123 |
124 | Args:
125 | identifier (str): The model identifier.
126 | version (str): The model version string.
127 | sources (dict): A mapping of source names to text sources. Each source should be a
128 | mapping of model input filename to filepath or file-like object.
129 | explain (bool): indicates if you desire an explainable result for your model.`
130 |
131 | Returns:
132 | str: Job identifier returned by Modzy Edge.
133 |
134 | Raises:
135 | ApiError: An ApiError will be raised if the API returns an error status,
136 | or the client is unable to connect.
137 |
138 | Example:
139 | .. code-block::
140 |
141 | job = client.submit_text('model-identifier', '1.2.3',
142 | {
143 | 'source-name-1': {
144 | 'model-input-name-1': 'some text',
145 | 'model-input-name-2': 'some more text',
146 | },
147 | 'source-name-2': {
148 | 'model-input-name-1': 'some text 2',
149 | 'model-input-name-2': 'some more text 2',
150 | }
151 | })
152 |
153 | """
154 | sources_struct = Struct()
155 | for k, v in _fix_single_source_job(sources).items():
156 | sources_struct[k] = v
157 |
158 | job_input = JobInput(type="text", sources=sources_struct)
159 | model_identifier = ModelIdentifier(identifier=identifier, version=version)
160 | job_submission = JobSubmission(model=model_identifier, input=job_input, explain=explain)
161 |
162 | try:
163 | job_receipt = self._jobs_client.SubmitJob(job_submission)
164 | except _InactiveRpcError as e:
165 | raise ApiError(_parse_inactive_rpc_error(e), self.origin) from e
166 |
167 | return job_receipt.job_identifier
168 |
169 | def submit_aws_s3(self, identifier, version, sources, access_key_id, secret_access_key, region, explain=False):
170 | """Submits AwS S3 hosted data for a multiple source `Job`.
171 |
172 | Args:
173 | identifier (str): The model identifier or a `Model` instance.
174 | version (str): The model version string.
175 | sources (dict): A mapping of source names to text sources. Each source should be a
176 | mapping of model input filename to S3 bucket and key.
177 | access_key_id (str): The AWS Access Key ID.
178 | secret_access_key (str): The AWS Secret Access Key.
179 | region (str): The AWS Region.
180 | explain (bool): indicates if you desire an explainable result for your model.`
181 |
182 | Returns:
183 | str: Job identifier returned by Modzy Edge.
184 |
185 | Raises:
186 | ApiError: An ApiError will be raised if the API returns an error status,
187 | or the client is unable to connect.
188 |
189 | Example:
190 | .. code-block::
191 |
192 | job = client.submit_aws_s3('model-identifier', '1.2.3',
193 | {
194 | 'source-name-1': {
195 | 'model-input-name-1': {
196 | 'bucket': 'my-bucket',
197 | 'key': '/my/data/file-1.dat'
198 | },
199 | 'model-input-name-2': {
200 | 'bucket': 'my-bucket',
201 | 'key': '/my/data/file-2.dat'
202 | }
203 | },
204 | 'source-name-2': {
205 | 'model-input-name-1': {
206 | 'bucket': 'my-bucket',
207 | 'key': '/my/data/file-3.dat'
208 | },
209 | 'model-input-name-2': {
210 | 'bucket': 'my-bucket',
211 | 'key': '/my/data/file-4.dat'
212 | }
213 | }
214 | },
215 | access_key_id='AWS_ACCESS_KEY_ID',
216 | secret_access_key='AWS_SECRET_ACCESS_KEY',
217 | region='us-east-1',
218 | )
219 | """
220 | sources_struct = Struct()
221 | for k, v in _fix_single_source_job(sources, s3=True).items():
222 | sources_struct[k] = v
223 |
224 | job_input = JobInput(type="aws-s3", accessKeyID=access_key_id, secretAccessKey=secret_access_key,
225 | region=region, sources=sources_struct)
226 |
227 | model_identifier = ModelIdentifier(identifier=identifier, version=version)
228 | job_submission = JobSubmission(model=model_identifier, input=job_input, explain=explain)
229 |
230 | try:
231 | job_receipt = self._jobs_client.SubmitJob(job_submission)
232 | except _InactiveRpcError as e:
233 | raise ApiError(_parse_inactive_rpc_error(e), self.origin) from e
234 |
235 | return job_receipt.job_identifier
236 |
237 | def get_job_details(self, job_identifier):
238 | """Get job details.
239 |
240 | Args:
241 | job_identifier (str): The job identifier.
242 |
243 | Returns:
244 | dict: Details for requested job.
245 |
246 | Raises:
247 | ApiError: An ApiError will be raised if the API returns an error status,
248 | or the client is unable to connect.
249 | """
250 | job_identifier = JobIdentifier(identifier=job_identifier)
251 |
252 | try:
253 | job_details = self._jobs_client.GetJob(job_identifier)
254 | except _InactiveRpcError as e:
255 | raise ApiError(_parse_inactive_rpc_error(e), self.origin) from e
256 |
257 | return MessageToDict(job_details)
258 |
259 | def get_all_job_details(self, timeout=None):
260 | """Get job details for all jobs.
261 |
262 | Args:
263 | timeout (int): Optional timeout value in seconds.
264 |
265 | Returns:
266 | dict: Details for all jobs that have been run.
267 |
268 | Raises:
269 | ApiError: An ApiError will be raised if the API returns an error status,
270 | or the client is unable to connect.
271 | """
272 | try:
273 | all_job_details = self._jobs_client.GetJobs(JobFilter(), timeout=timeout)
274 | except _InactiveRpcError as e:
275 | raise ApiError(_parse_inactive_rpc_error(e), self.origin) from e
276 |
277 | return MessageToDict(all_job_details)
278 |
279 | def block_until_complete(self, job_identifier, poll_interval=0.01, timeout=30):
280 | """Block until job complete.
281 |
282 | Args:
283 | job_identifier (str): The job identifier.
284 | poll_interval (int): Time interval in seconds between polls. Defaults to 0.01.
285 | timeout (int): Seconds amount to wait until timeout. `None` indicates waiting forever. Defaults to 30.
286 |
287 |
288 | Returns:
289 | dict: Final job details.
290 |
291 | Raises:
292 | ApiError: An ApiError will be raised if the API returns an error status,
293 | or the client is unable to connect.
294 | """
295 | endby = time.time() + timeout if (timeout is not None) else None
296 | while True:
297 | job_details = self.get_job_details(job_identifier)
298 | if job_details['status'] in {"COMPLETE", "CANCELLED", "FAILED"}:
299 | return job_details
300 | time.sleep(poll_interval)
301 | if (endby is not None) and (time.time() > endby - poll_interval):
302 | raise Timeout('timed out before completion')
303 |
304 | def get_results(self, job_identifier):
305 | """Block until job complete.
306 |
307 | Args:
308 | job_identifier (str): The job identifier.
309 |
310 | Returns:
311 | dict: Results for the requested job.
312 |
313 | Raises:
314 | ApiError: An ApiError will be raised if the API returns an error status,
315 | or the client is unable to connect.
316 | """
317 | job_identifier = JobIdentifier(identifier=job_identifier)
318 | try:
319 | results = self._results_client.GetResults(job_identifier)
320 | except _InactiveRpcError as e:
321 | raise ApiError(_parse_inactive_rpc_error(e), self.origin) from e
322 |
323 | return MessageToDict(results)
324 |
--------------------------------------------------------------------------------
/modzy/edge/proto/results/v1/results_pb2.pyi:
--------------------------------------------------------------------------------
1 | from google.protobuf import any_pb2 as _any_pb2
2 | from google.protobuf import empty_pb2 as _empty_pb2
3 | from google.protobuf import struct_pb2 as _struct_pb2
4 | from google.protobuf import timestamp_pb2 as _timestamp_pb2
5 | from google.api import annotations_pb2 as _annotations_pb2
6 | from google.api import field_behavior_pb2 as _field_behavior_pb2
7 | from ...protoc_gen_openapiv2.options import annotations_pb2 as _annotations_pb2_1
8 | from ...common.v1 import common_pb2 as _common_pb2
9 | from ...common.v1 import errors_pb2 as _errors_pb2
10 | from ...accounting.v1 import accounting_pb2 as _accounting_pb2
11 | from ...jobs.v1 import jobs_pb2 as _jobs_pb2
12 | from google.protobuf.internal import containers as _containers
13 | from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
14 | from google.protobuf import descriptor as _descriptor
15 | from google.protobuf import message as _message
16 | from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union
17 |
# File-level protobuf descriptor for the generated results.proto stubs.
DESCRIPTOR: _descriptor.FileDescriptor
19 |
# NOTE: generated protobuf type stub — regenerate from results.proto rather than
# editing field lists by hand.
class OutputIdentifier(_message.Message):
    """Typed stub for the ``OutputIdentifier`` message: a result identifier plus an output name."""
    __slots__ = ["output_identifier", "result_identifier"]
    OUTPUT_IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    RESULT_IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    output_identifier: str
    result_identifier: ResultIdentifier
    def __init__(self, result_identifier: _Optional[_Union[ResultIdentifier, _Mapping]] = ..., output_identifier: _Optional[str] = ...) -> None: ...
27 |
# NOTE: generated protobuf type stub — regenerate from results.proto rather than
# editing field lists by hand.
class Result(_message.Message):
    """Typed stub for the ``Result`` message: timing, engine, and output data for one processed input."""
    __slots__ = ["access_key", "end_time", "engine", "explained", "input_completed_time", "input_fetching_time", "input_name", "input_size", "job", "model_latency", "output_uploading_time", "queue_time", "result", "start_time", "update_time", "use_legacy_datasource_json", "voting"]
    class Job(_message.Message):
        """Nested stub carrying job-level metadata referenced by the ``job`` field."""
        __slots__ = ["account", "average_model_latency", "completed_input_count", "created_at", "elapsed_time", "failed_input_count", "finished", "identifier", "initial_queue_time", "input_size", "model", "result_summarizing_time", "result_summary_started_at", "submitted_at", "submitted_by", "tags", "team", "total_input_count", "total_queue_time", "updated_at", "user"]
        ACCOUNT_FIELD_NUMBER: _ClassVar[int]
        AVERAGE_MODEL_LATENCY_FIELD_NUMBER: _ClassVar[int]
        COMPLETED_INPUT_COUNT_FIELD_NUMBER: _ClassVar[int]
        CREATED_AT_FIELD_NUMBER: _ClassVar[int]
        ELAPSED_TIME_FIELD_NUMBER: _ClassVar[int]
        FAILED_INPUT_COUNT_FIELD_NUMBER: _ClassVar[int]
        FINISHED_FIELD_NUMBER: _ClassVar[int]
        IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
        INITIAL_QUEUE_TIME_FIELD_NUMBER: _ClassVar[int]
        INPUT_SIZE_FIELD_NUMBER: _ClassVar[int]
        MODEL_FIELD_NUMBER: _ClassVar[int]
        RESULT_SUMMARIZING_TIME_FIELD_NUMBER: _ClassVar[int]
        RESULT_SUMMARY_STARTED_AT_FIELD_NUMBER: _ClassVar[int]
        SUBMITTED_AT_FIELD_NUMBER: _ClassVar[int]
        SUBMITTED_BY_FIELD_NUMBER: _ClassVar[int]
        TAGS_FIELD_NUMBER: _ClassVar[int]
        TEAM_FIELD_NUMBER: _ClassVar[int]
        TOTAL_INPUT_COUNT_FIELD_NUMBER: _ClassVar[int]
        TOTAL_QUEUE_TIME_FIELD_NUMBER: _ClassVar[int]
        UPDATED_AT_FIELD_NUMBER: _ClassVar[int]
        USER_FIELD_NUMBER: _ClassVar[int]
        account: _accounting_pb2.AccountIdentifier
        average_model_latency: float
        completed_input_count: int
        created_at: int
        elapsed_time: int
        failed_input_count: int
        finished: bool
        identifier: str
        initial_queue_time: int
        input_size: int
        model: _common_pb2.ModelIdentifier
        result_summarizing_time: int
        result_summary_started_at: int
        submitted_at: int
        submitted_by: str
        tags: _containers.RepeatedScalarFieldContainer[str]
        team: _accounting_pb2.TeamIdentifier
        total_input_count: int
        total_queue_time: int
        updated_at: int
        user: _accounting_pb2.UserIdentifier
        def __init__(self, identifier: _Optional[str] = ..., created_at: _Optional[int] = ..., updated_at: _Optional[int] = ..., model: _Optional[_Union[_common_pb2.ModelIdentifier, _Mapping]] = ..., user: _Optional[_Union[_accounting_pb2.UserIdentifier, _Mapping]] = ..., submitted_by: _Optional[str] = ..., team: _Optional[_Union[_accounting_pb2.TeamIdentifier, _Mapping]] = ..., account: _Optional[_Union[_accounting_pb2.AccountIdentifier, _Mapping]] = ..., tags: _Optional[_Iterable[str]] = ..., total_input_count: _Optional[int] = ..., completed_input_count: _Optional[int] = ..., failed_input_count: _Optional[int] = ..., submitted_at: _Optional[int] = ..., initial_queue_time: _Optional[int] = ..., total_queue_time: _Optional[int] = ..., average_model_latency: _Optional[float] = ..., result_summary_started_at: _Optional[int] = ..., input_size: _Optional[int] = ..., finished: bool = ..., elapsed_time: _Optional[int] = ..., result_summarizing_time: _Optional[int] = ...) -> None: ...
    ACCESS_KEY_FIELD_NUMBER: _ClassVar[int]
    END_TIME_FIELD_NUMBER: _ClassVar[int]
    ENGINE_FIELD_NUMBER: _ClassVar[int]
    EXPLAINED_FIELD_NUMBER: _ClassVar[int]
    INPUT_COMPLETED_TIME_FIELD_NUMBER: _ClassVar[int]
    INPUT_FETCHING_TIME_FIELD_NUMBER: _ClassVar[int]
    INPUT_NAME_FIELD_NUMBER: _ClassVar[int]
    INPUT_SIZE_FIELD_NUMBER: _ClassVar[int]
    JOB_FIELD_NUMBER: _ClassVar[int]
    MODEL_LATENCY_FIELD_NUMBER: _ClassVar[int]
    OUTPUT_UPLOADING_TIME_FIELD_NUMBER: _ClassVar[int]
    QUEUE_TIME_FIELD_NUMBER: _ClassVar[int]
    RESULT_FIELD_NUMBER: _ClassVar[int]
    START_TIME_FIELD_NUMBER: _ClassVar[int]
    UPDATE_TIME_FIELD_NUMBER: _ClassVar[int]
    USE_LEGACY_DATASOURCE_JSON_FIELD_NUMBER: _ClassVar[int]
    VOTING_FIELD_NUMBER: _ClassVar[int]
    access_key: str
    end_time: int
    engine: str
    explained: bool
    input_completed_time: int
    input_fetching_time: int
    input_name: str
    input_size: int
    job: Result.Job
    model_latency: float
    output_uploading_time: int
    queue_time: int
    result: _struct_pb2.Struct
    start_time: int
    update_time: int
    use_legacy_datasource_json: bool
    voting: Voting
    def __init__(self, job: _Optional[_Union[Result.Job, _Mapping]] = ..., input_name: _Optional[str] = ..., start_time: _Optional[int] = ..., update_time: _Optional[int] = ..., end_time: _Optional[int] = ..., explained: bool = ..., engine: _Optional[str] = ..., queue_time: _Optional[int] = ..., input_fetching_time: _Optional[int] = ..., output_uploading_time: _Optional[int] = ..., model_latency: _Optional[float] = ..., input_completed_time: _Optional[int] = ..., access_key: _Optional[str] = ..., input_size: _Optional[int] = ..., use_legacy_datasource_json: bool = ..., result: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., voting: _Optional[_Union[Voting, _Mapping]] = ...) -> None: ...
110 |
# NOTE: generated protobuf type stub — regenerate from results.proto rather than
# editing field lists by hand.
class ResultIdentifier(_message.Message):
    """Typed stub for the ``ResultIdentifier`` message: a job identifier plus an input identifier."""
    __slots__ = ["input_identifier", "job_identifier"]
    INPUT_IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    JOB_IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    input_identifier: str
    job_identifier: _jobs_pb2.JobIdentifier
    def __init__(self, job_identifier: _Optional[_Union[_jobs_pb2.JobIdentifier, _Mapping]] = ..., input_identifier: _Optional[str] = ...) -> None: ...
118 |
# NOTE: generated protobuf type stub — regenerate from results.proto rather than
# editing field lists by hand.
class ResultOutput(_message.Message):
    """Typed stub for the ``ResultOutput`` message: raw output bytes for a single result output."""
    __slots__ = ["data"]
    DATA_FIELD_NUMBER: _ClassVar[int]
    data: bytes
    def __init__(self, data: _Optional[bytes] = ...) -> None: ...
124 |
# NOTE: generated protobuf type stub — regenerate from results.proto rather than
# editing field lists by hand.
class ResultVote(_message.Message):
    """Typed stub for the ``ResultVote`` message: a single vote value (UP/DOWN/NO_VOTES) for a result."""
    __slots__ = ["vote"]
    class Vote(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
        """Enum wrapper for the vote state."""
        __slots__ = []
    DOWN: ResultVote.Vote
    NO_VOTES: ResultVote.Vote
    UP: ResultVote.Vote
    VOTE_FIELD_NUMBER: _ClassVar[int]
    vote: ResultVote.Vote
    def __init__(self, vote: _Optional[_Union[ResultVote.Vote, str]] = ...) -> None: ...
135 |
# NOTE: generated protobuf type stub — regenerate from results.proto rather than
# editing field lists by hand.
class Results(_message.Message):
    """Typed stub for the ``Results`` message: aggregate status, timings, per-input results and failures for a job."""
    __slots__ = ["account_identifier", "average_model_latency", "completed", "elapsed_time", "explained", "failed", "failures", "finished", "initial_queue_time", "input_size", "job_identifier", "result_summarizing", "results", "starting_result_summarizing", "submitted_at", "submitted_by_key", "team", "total", "total_model_latency", "total_queue_time"]
    class FailuresEntry(_message.Message):
        """Map entry stub for the ``failures`` field (input key -> Any payload)."""
        __slots__ = ["key", "value"]
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: _any_pb2.Any
        def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_any_pb2.Any, _Mapping]] = ...) -> None: ...
    class InputResult(_message.Message):
        """Per-input result stub: status, engine, timing fields, result Struct, and voting."""
        __slots__ = ["end_time", "engine", "input_fetching", "model_latency", "output_uploading", "queue_time", "results", "start_time", "status", "update_time", "voting"]
        END_TIME_FIELD_NUMBER: _ClassVar[int]
        ENGINE_FIELD_NUMBER: _ClassVar[int]
        INPUT_FETCHING_FIELD_NUMBER: _ClassVar[int]
        MODEL_LATENCY_FIELD_NUMBER: _ClassVar[int]
        OUTPUT_UPLOADING_FIELD_NUMBER: _ClassVar[int]
        QUEUE_TIME_FIELD_NUMBER: _ClassVar[int]
        RESULTS_FIELD_NUMBER: _ClassVar[int]
        START_TIME_FIELD_NUMBER: _ClassVar[int]
        STATUS_FIELD_NUMBER: _ClassVar[int]
        UPDATE_TIME_FIELD_NUMBER: _ClassVar[int]
        VOTING_FIELD_NUMBER: _ClassVar[int]
        end_time: _timestamp_pb2.Timestamp
        engine: str
        input_fetching: int
        model_latency: float
        output_uploading: int
        queue_time: int
        results: _struct_pb2.Struct
        start_time: _timestamp_pb2.Timestamp
        status: str
        update_time: _timestamp_pb2.Timestamp
        voting: Voting
        def __init__(self, status: _Optional[str] = ..., engine: _Optional[str] = ..., input_fetching: _Optional[int] = ..., output_uploading: _Optional[int] = ..., model_latency: _Optional[float] = ..., queue_time: _Optional[int] = ..., start_time: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., update_time: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., end_time: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., results: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., voting: _Optional[_Union[Voting, _Mapping]] = ...) -> None: ...
    class ResultsEntry(_message.Message):
        """Map entry stub for the ``results`` field (input key -> InputResult)."""
        __slots__ = ["key", "value"]
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: Results.InputResult
        def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[Results.InputResult, _Mapping]] = ...) -> None: ...
    ACCOUNT_IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    AVERAGE_MODEL_LATENCY_FIELD_NUMBER: _ClassVar[int]
    COMPLETED_FIELD_NUMBER: _ClassVar[int]
    ELAPSED_TIME_FIELD_NUMBER: _ClassVar[int]
    EXPLAINED_FIELD_NUMBER: _ClassVar[int]
    FAILED_FIELD_NUMBER: _ClassVar[int]
    FAILURES_FIELD_NUMBER: _ClassVar[int]
    FINISHED_FIELD_NUMBER: _ClassVar[int]
    INITIAL_QUEUE_TIME_FIELD_NUMBER: _ClassVar[int]
    INPUT_SIZE_FIELD_NUMBER: _ClassVar[int]
    JOB_IDENTIFIER_FIELD_NUMBER: _ClassVar[int]
    RESULTS_FIELD_NUMBER: _ClassVar[int]
    RESULT_SUMMARIZING_FIELD_NUMBER: _ClassVar[int]
    STARTING_RESULT_SUMMARIZING_FIELD_NUMBER: _ClassVar[int]
    SUBMITTED_AT_FIELD_NUMBER: _ClassVar[int]
    SUBMITTED_BY_KEY_FIELD_NUMBER: _ClassVar[int]
    TEAM_FIELD_NUMBER: _ClassVar[int]
    TOTAL_FIELD_NUMBER: _ClassVar[int]
    TOTAL_MODEL_LATENCY_FIELD_NUMBER: _ClassVar[int]
    TOTAL_QUEUE_TIME_FIELD_NUMBER: _ClassVar[int]
    account_identifier: str
    average_model_latency: float
    completed: int
    elapsed_time: int
    explained: bool
    failed: int
    failures: _containers.MessageMap[str, _any_pb2.Any]
    finished: bool
    initial_queue_time: int
    input_size: int
    job_identifier: str
    result_summarizing: int
    results: _containers.MessageMap[str, Results.InputResult]
    starting_result_summarizing: _timestamp_pb2.Timestamp
    submitted_at: _timestamp_pb2.Timestamp
    submitted_by_key: str
    team: _accounting_pb2.TeamIdentifier
    total: int
    total_model_latency: float
    total_queue_time: int
    def __init__(self, job_identifier: _Optional[str] = ..., account_identifier: _Optional[str] = ..., team: _Optional[_Union[_accounting_pb2.TeamIdentifier, _Mapping]] = ..., total: _Optional[int] = ..., completed: _Optional[int] = ..., failed: _Optional[int] = ..., finished: bool = ..., submitted_by_key: _Optional[str] = ..., explained: bool = ..., submitted_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., initial_queue_time: _Optional[int] = ..., total_queue_time: _Optional[int] = ..., average_model_latency: _Optional[float] = ..., total_model_latency: _Optional[float] = ..., elapsed_time: _Optional[int] = ..., starting_result_summarizing: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., result_summarizing: _Optional[int] = ..., input_size: _Optional[int] = ..., results: _Optional[_Mapping[str, Results.InputResult]] = ..., failures: _Optional[_Mapping[str, _any_pb2.Any]] = ...) -> None: ...
218 |
# NOTE: generated protobuf type stub — regenerate from results.proto rather than
# editing field lists by hand.
class Voting(_message.Message):
    """Typed stub for the ``Voting`` message: counts of up and down votes."""
    __slots__ = ["down", "up"]
    DOWN_FIELD_NUMBER: _ClassVar[int]
    UP_FIELD_NUMBER: _ClassVar[int]
    down: int
    up: int
    def __init__(self, up: _Optional[int] = ..., down: _Optional[int] = ...) -> None: ...
226 |
--------------------------------------------------------------------------------
/modzy/edge/proto/results/v1/results_pb2_grpc.py:
--------------------------------------------------------------------------------
1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 | """Client and server classes corresponding to protobuf-defined services."""
3 | import grpc
4 |
5 | from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
6 | from ...jobs.v1 import jobs_pb2 as protos_dot_modzy_dot_jobs_dot_v1_dot_jobs__pb2
7 | from ..v1 import results_pb2 as protos_dot_modzy_dot_results_dot_v1_dot_results__pb2
8 |
9 |
class ResultsServiceStub(object):
    """Client-side stub for the ``results.v1.ResultsService`` gRPC service.

    Exposes one unary-unary callable per RPC (result retrieval and vote
    management) bound to the channel supplied at construction. Generated
    code — do not edit by hand.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Result retrieval RPCs.
        self.GetResults = channel.unary_unary(
                '/results.v1.ResultsService/GetResults',
                request_serializer=protos_dot_modzy_dot_jobs_dot_v1_dot_jobs__pb2.JobIdentifier.SerializeToString,
                response_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.Results.FromString,
                )
        self.GetResult = channel.unary_unary(
                '/results.v1.ResultsService/GetResult',
                request_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
                response_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.Result.FromString,
                )
        self.GetResultOutput = channel.unary_unary(
                '/results.v1.ResultsService/GetResultOutput',
                request_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.OutputIdentifier.SerializeToString,
                response_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultOutput.FromString,
                )
        # Vote management RPCs (respond with google.protobuf.Empty except GetVoteForResult).
        self.UpVoteResult = channel.unary_unary(
                '/results.v1.ResultsService/UpVoteResult',
                request_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.DownVoteResult = channel.unary_unary(
                '/results.v1.ResultsService/DownVoteResult',
                request_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.ResetVotesForResult = channel.unary_unary(
                '/results.v1.ResultsService/ResetVotesForResult',
                request_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.GetVoteForResult = channel.unary_unary(
                '/results.v1.ResultsService/GetVoteForResult',
                request_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
                response_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultVote.FromString,
                )
54 |
55 |
class ResultsServiceServicer(object):
    """Server-side interface for ``results.v1.ResultsService``.

    Subclass and override the RPC methods; each default implementation
    reports UNIMPLEMENTED. Generated code — do not edit by hand.
    """

    def GetResults(self, request, context):
        """Return the ``Results`` message for a job (request is a JobIdentifier)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetResult(self, request, context):
        """Return the ``Result`` message for one input (request is a ResultIdentifier)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetResultOutput(self, request, context):
        """Return the ``ResultOutput`` bytes for one output (request is an OutputIdentifier)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpVoteResult(self, request, context):
        """Record an up vote for a result; responds with Empty."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DownVoteResult(self, request, context):
        """Record a down vote for a result; responds with Empty."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ResetVotesForResult(self, request, context):
        """Clear the votes for a result; responds with Empty."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetVoteForResult(self, request, context):
        """Return the ``ResultVote`` for a result (request is a ResultIdentifier)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
100 |
101 |
def add_ResultsServiceServicer_to_server(servicer, server):
    """Register ``servicer``'s RPC handlers on ``server`` under ``results.v1.ResultsService``.

    Generated code — do not edit by hand.
    """
    # One unary-unary handler per RPC, wired to the matching servicer method.
    rpc_method_handlers = {
            'GetResults': grpc.unary_unary_rpc_method_handler(
                    servicer.GetResults,
                    request_deserializer=protos_dot_modzy_dot_jobs_dot_v1_dot_jobs__pb2.JobIdentifier.FromString,
                    response_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.Results.SerializeToString,
            ),
            'GetResult': grpc.unary_unary_rpc_method_handler(
                    servicer.GetResult,
                    request_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.FromString,
                    response_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.Result.SerializeToString,
            ),
            'GetResultOutput': grpc.unary_unary_rpc_method_handler(
                    servicer.GetResultOutput,
                    request_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.OutputIdentifier.FromString,
                    response_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultOutput.SerializeToString,
            ),
            'UpVoteResult': grpc.unary_unary_rpc_method_handler(
                    servicer.UpVoteResult,
                    request_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'DownVoteResult': grpc.unary_unary_rpc_method_handler(
                    servicer.DownVoteResult,
                    request_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'ResetVotesForResult': grpc.unary_unary_rpc_method_handler(
                    servicer.ResetVotesForResult,
                    request_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'GetVoteForResult': grpc.unary_unary_rpc_method_handler(
                    servicer.GetVoteForResult,
                    request_deserializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.FromString,
                    response_serializer=protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultVote.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'results.v1.ResultsService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
143 |
144 |
# This class is part of an EXPERIMENTAL API.
class ResultsService(object):
    """Channel-less invocation helpers for ``results.v1.ResultsService``.

    Each static method opens a one-shot call against ``target`` via
    ``grpc.experimental.unary_unary``. Generated code — do not edit by hand.
    """

    @staticmethod
    def GetResults(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot GetResults call: JobIdentifier in, Results out."""
        return grpc.experimental.unary_unary(request, target, '/results.v1.ResultsService/GetResults',
            protos_dot_modzy_dot_jobs_dot_v1_dot_jobs__pb2.JobIdentifier.SerializeToString,
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.Results.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetResult(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot GetResult call: ResultIdentifier in, Result out."""
        return grpc.experimental.unary_unary(request, target, '/results.v1.ResultsService/GetResult',
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.Result.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetResultOutput(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot GetResultOutput call: OutputIdentifier in, ResultOutput out."""
        return grpc.experimental.unary_unary(request, target, '/results.v1.ResultsService/GetResultOutput',
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.OutputIdentifier.SerializeToString,
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultOutput.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UpVoteResult(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot UpVoteResult call: ResultIdentifier in, Empty out."""
        return grpc.experimental.unary_unary(request, target, '/results.v1.ResultsService/UpVoteResult',
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DownVoteResult(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot DownVoteResult call: ResultIdentifier in, Empty out."""
        return grpc.experimental.unary_unary(request, target, '/results.v1.ResultsService/DownVoteResult',
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ResetVotesForResult(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot ResetVotesForResult call: ResultIdentifier in, Empty out."""
        return grpc.experimental.unary_unary(request, target, '/results.v1.ResultsService/ResetVotesForResult',
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetVoteForResult(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot GetVoteForResult call: ResultIdentifier in, ResultVote out."""
        return grpc.experimental.unary_unary(request, target, '/results.v1.ResultsService/GetVoteForResult',
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultIdentifier.SerializeToString,
            protos_dot_modzy_dot_results_dot_v1_dot_results__pb2.ResultVote.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
267 |
--------------------------------------------------------------------------------