├── proteus_api ├── app │ ├── __init__.py │ ├── routers │ │ └── __init__.py │ ├── logging.conf │ ├── helper.py │ └── main.py ├── Dockerfile ├── Dockerfile.prod ├── requirements.txt └── README.md ├── tools ├── templating │ ├── proteus │ │ └── tools │ │ │ └── templating │ │ │ ├── __init__.py │ │ │ ├── templates │ │ │ ├── proteus.models.{{cookiecutter.package_name}} │ │ │ │ ├── __init__.py │ │ │ │ ├── MANIFEST.in │ │ │ │ ├── README.rst │ │ │ │ ├── proteus │ │ │ │ │ └── models │ │ │ │ │ │ └── {{cookiecutter.package_name}} │ │ │ │ │ │ ├── config.template │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── client.py │ │ │ │ ├── setup.py │ │ │ │ ├── .gitignore │ │ │ │ └── tests │ │ │ │ │ └── test_{{cookiecutter.package_name}}.py │ │ │ └── cookiecutter.json │ │ │ └── command_line.py │ ├── __init__.py │ ├── README.md │ ├── setup.py │ └── .gitignore └── benchmarking │ ├── __init__.py │ ├── benchmarks │ ├── README.md │ ├── MaskRCNN │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── SuperRes │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── EfficientDetD0 │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── EfficientDetD2 │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── benchmarks.sh │ ├── MobileNet │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── ResNet50 │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── RetinaNet │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── EfficientNetLite4 │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── EfficientPoseI │ │ └── benchmark.json │ ├── EfficientPoseIV │ │ └── benchmark.json │ ├── EfficientPoseRT │ │ └── benchmark.json │ └── YoloV4 │ │ ├── benchmark.json │ │ └── Benchmark.md │ ├── proteus │ └── tools │ │ └── benchmarking │ │ └── templates │ │ └── Benchmark.md │ ├── README.md │ ├── configs │ └── example.json │ ├── setup.py │ └── .gitignore ├── packages ├── proteus.types │ ├── __init__.py │ ├── README.md │ ├── proteus │ │ └── types │ │ │ ├── __init__.py │ │ │ └── types.py │ ├── setup.py │ └── .gitignore ├── proteus.datasets │ ├── __init__.py │ ├── proteus │ │ └── datasets │ │ │ ├── datasets.py │ │ │ ├── __init__.py │ │ │ ├── mpii.py │ │ │ ├── imagenette.py │ │ │ ├── bsds.py │ │ │ └── coco.py │ ├── README.md │ ├── setup.py │ └── .gitignore ├── proteus.models │ ├── __init__.py │ ├── proteus │ │ └── models │ │ │ └── base │ │ │ ├── __init__.py │ │ │ └── modelconfigs.py │ ├── README.md │ ├── setup.py │ ├── .gitignore │ └── tests │ │ └── test_models.py ├── proteus.models.maskrcnn │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── maskrcnn │ │ │ ├── __init__.py │ │ │ ├── config.template │ │ │ ├── coco_names.txt │ │ │ ├── helpers.py │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ └── .gitignore ├── proteus.models.resnet50 │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── resnet50 │ │ │ ├── __init__.py │ │ │ ├── helpers.py │ │ │ ├── config.template │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ └── .gitignore ├── proteus.models.superres │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── superres │ │ │ ├── __init__.py │ │ │ ├── config.template │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ ├── .gitignore │ └── tests │ │ └── test_superres.py ├── proteus.models.yolov4 │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── yolov4 │ │ │ ├── yolov4_anchors.txt │ │ │ ├── __init__.py │ │ │ ├── config.template │ │ │ ├── coco_names.txt │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ └── .gitignore ├── proteus.models.efficientdet │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ 
└── models │ │ │ └── efficientdet │ │ │ ├── helpers.py │ │ │ ├── __init__.py │ │ │ ├── config.template │ │ │ ├── coco_names.txt │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ └── .gitignore ├── proteus.models.efficientpose │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── efficientpose │ │ │ ├── config_I.template │ │ │ ├── config_II.template │ │ │ ├── config_III.template │ │ │ ├── config_IV.template │ │ │ ├── config_RT.template │ │ │ ├── __init__.py │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ ├── .gitignore │ └── tests │ │ └── test_efficientpose.py ├── proteus.models.mobilenet │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── mobilenet │ │ │ ├── __init__.py │ │ │ ├── helpers.py │ │ │ ├── config.template │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ └── .gitignore ├── proteus.models.retinanet │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── retinanet │ │ │ ├── __init__.py │ │ │ ├── coco_names.txt │ │ │ ├── config.template │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ └── .gitignore ├── proteus.models.efficientnetlite4 │ ├── __init__.py │ ├── MANIFEST.in │ ├── proteus │ │ └── models │ │ │ └── efficientnetlite4 │ │ │ ├── __init__.py │ │ │ ├── helpers.py │ │ │ ├── config.template │ │ │ └── client.py │ ├── README.md │ ├── setup.py │ └── .gitignore └── package_install.txt ├── prometheus ├── Dockerfile ├── alert.yml └── prometheus.yml ├── pytest.ini ├── grafana ├── config.ini ├── provisioning │ ├── dashboards │ │ └── all.yml │ └── datasources │ │ └── all.yml └── Dockerfile ├── kubernetes ├── logs-persistentvolumeclaim.yaml ├── model-repository-persistentvolumeclaim.yaml ├── prometheus-data-persistentvolumeclaim.yaml ├── api-service.yaml ├── grafana-service.yaml ├── prometheus-service.yaml ├── triton-service.yaml ├── grafana-deployment.yaml ├── ingresses.yaml ├── prometheus-deployment.yaml ├── triton-deployment.yaml └── api-deployment.yaml ├── test_requirements.txt ├── LICENSE ├── docker-compose.yml ├── .github └── workflows │ └── main.yml ├── README.md └── .gitignore /proteus_api/app/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /proteus_api/app/routers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tools/templating/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.types/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /tools/benchmarking/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.datasets/__init__.py: 
-------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.resnet50/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.superres/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /prometheus/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM prom/prometheus:v2.21.0 2 | ADD . /etc/prometheus 3 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- 
/packages/proteus.models.resnet50/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /packages/proteus.models.superres/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | slow: marks tests as slow (deselect with '-m "not slow"') -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/proteus/models/yolov4/yolov4_anchors.txt: -------------------------------------------------------------------------------- 1 | 12,16, 19,36, 40,28, 36,75, 76,55, 72,146, 142,110, 192,243, 459,401 -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/__init__.py: -------------------------------------------------------------------------------- 1 | # Do not remove, needed for pytest 2 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include proteus/models/* *.txt *.py *.pbtxt -------------------------------------------------------------------------------- /prometheus/alert.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: ServiceAlerts 3 | rules: 4 | - alert: InstanceDown 5 | expr: up{job="services"} < 1 6 | for: 10s -------------------------------------------------------------------------------- /grafana/config.ini: -------------------------------------------------------------------------------- 1 | [paths] 2 | provisioning = 
/etc/grafana/provisioning 3 | 4 | [server] 5 | enable_gzip = true 6 | 7 | [users] 8 | default_theme = light 9 | 10 | -------------------------------------------------------------------------------- /grafana/provisioning/dashboards/all.yml: -------------------------------------------------------------------------------- 1 | - name: 'default' 2 | org_id: 1 3 | folder: '' 4 | type: 'file' 5 | options: 6 | folder: '/var/lib/grafana/dashboards' 7 | -------------------------------------------------------------------------------- /tools/templating/README.md: -------------------------------------------------------------------------------- 1 | ======== 2 | Templating 3 | ======== 4 | 5 | Small CLI to help with model templating. 6 | Execute command 'proteus.template' to generate a model package -------------------------------------------------------------------------------- /grafana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM grafana/grafana:latest 2 | ADD ./provisioning /etc/grafana/provisioning 3 | ADD ./config.ini /etc/grafana/config.ini 4 | ADD ./dashboards /var/lib/grafana/dashboards 5 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/README.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Proteus {{cookiecutter.model_name}} 3 | ======== 4 | 5 | Package for {{cookiecutter.model_name}} model usage in Proteus -------------------------------------------------------------------------------- /grafana/provisioning/datasources/all.yml: -------------------------------------------------------------------------------- 1 | datasources: 2 | - name: 'prom1' 3 | type: 'prometheus' 4 | access: 'proxy' 5 | org_id: 1 6 | url: 'http://prometheus:9090' 7 | is_default: true 8 | version: 1 9 | editable: true 10 | -------------------------------------------------------------------------------- /packages/proteus.types/README.md: -------------------------------------------------------------------------------- 1 | ======== 2 | Proteus Types 3 | ======== 4 | 5 | Available types: 6 | - BoundingBox 7 | - Class 8 | - Segmentation 9 | - Coordinate 10 | 11 | Note: Image has no separate type. 
You can use PIL.Image.Image -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/proteus/models/yolov4/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import YoloV4 6 | 7 | model_dict = {YoloV4.__name__: YoloV4} 8 | -------------------------------------------------------------------------------- /packages/proteus.models.resnet50/proteus/models/resnet50/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import Resnet50V2 6 | 7 | model_dict = {Resnet50V2.__name__: Resnet50V2} 8 | -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/proteus/models/mobilenet/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import MobileNetV2 6 | 7 | model_dict = {MobileNetV2.__name__: MobileNetV2} 8 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/command_line.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | template_path = Path(__file__).parent / "templates" 5 | 6 | 7 | def main(): 8 | print(template_path) 9 | os.system(f"cookiecutter {template_path}") 10 | -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/proteus/models/maskrcnn/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import MaskRCNN 6 | 7 | model_dict = { 8 | MaskRCNN.__name__: MaskRCNN, 9 | } 10 | -------------------------------------------------------------------------------- /packages/proteus.models.superres/proteus/models/superres/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import SuperResolution 6 | 7 | model_dict = {SuperResolution.__name__: SuperResolution} 8 | -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/proteus/models/retinanet/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import RetinaNet 6 | 7 | model_dict = { 8 | RetinaNet.__name__: RetinaNet, 9 | } 10 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/cookiecutter.json: -------------------------------------------------------------------------------- 1 | { 2 | "package_name": "retinanet", 3 | "model_name": "RetinaNet", 4 | "test_dataset": "Dataset", 5 | "model_url": "URL to download the model file", 6 | "model_description": "Description for model" 7 | } 8 | 
-------------------------------------------------------------------------------- /packages/proteus.types/proteus/types/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .types import BoundingBox, Class, Coordinate, Segmentation 6 | 7 | __all__ = ["BoundingBox", "Class", "Segmentation", "Coordinate"] 8 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/README.md: -------------------------------------------------------------------------------- 1 | ======== 2 | Workstation benchmarks 3 | ======== 4 | 5 | I used the benchmark tooling to run some benchmarks on my personal workstation, with following hardware: 6 | - GeForce GTX 1080 Ti with 12Gi memory 7 | - Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz (12 cores) -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/proteus/models/efficientnetlite4/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import EfficientNetLite4 6 | 7 | model_dict = {EfficientNetLite4.__name__: EfficientNetLite4} 8 | -------------------------------------------------------------------------------- /packages/proteus.models.resnet50/proteus/models/resnet50/helpers.py: -------------------------------------------------------------------------------- 1 | def read_class_names(class_file_name): 2 | """loads class name from a file""" 3 | names = {} 4 | with open(class_file_name, "r") as data: 5 | for ID, name in enumerate(data): 6 | names[ID] = name.strip("\n") 7 | return names 8 | -------------------------------------------------------------------------------- /prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 30s 3 | scrape_timeout: 10s 4 | 5 | rule_files: 6 | - alert.yml 7 | 8 | scrape_configs: 9 | - job_name: services 10 | metrics_path: /metrics 11 | static_configs: 12 | - targets: 13 | - 'prometheus:9090' 14 | - 'triton:8002' -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/proteus/models/mobilenet/helpers.py: -------------------------------------------------------------------------------- 1 | def read_class_names(class_file_name): 2 | """loads class name from a file""" 3 | names = {} 4 | with open(class_file_name, "r") as data: 5 | for ID, name in enumerate(data): 6 | names[ID] = name.strip("\n") 7 | return names 8 | -------------------------------------------------------------------------------- /kubernetes/logs-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: logs 7 | name: logs 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 1Gi 14 | status: {} 15 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/proteus/models/efficientdet/helpers.py: -------------------------------------------------------------------------------- 1 | def read_class_names(class_file_name): 2 | 
"""loads class name from a file""" 3 | names = {} 4 | with open(class_file_name, "r") as data: 5 | for ID, name in enumerate(data): 6 | names[ID] = name.strip("\n") 7 | return names 8 | -------------------------------------------------------------------------------- /packages/proteus.models/proteus/models/base/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .base import BaseModel 6 | from .classification import ClassificationModel 7 | 8 | __all__ = ["BaseModel", "ClassificationModel"] 9 | 10 | model_dict = {} 11 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/proteus/models/efficientnetlite4/helpers.py: -------------------------------------------------------------------------------- 1 | def read_class_names(class_file_name): 2 | """loads class name from a file""" 3 | names = {} 4 | with open(class_file_name, "r") as data: 5 | for ID, name in enumerate(data): 6 | names[ID] = name.strip("\n") 7 | return names 8 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/proteus/models/{{cookiecutter.package_name}}/config.template: -------------------------------------------------------------------------------- 1 | {% raw %} 2 | platform: "onnxruntime_onnx" 3 | max_batch_size : 0 4 | input [ 5 | ] 6 | output [ 7 | ] 8 | {{triton_optimization}} 9 | {{dynamic_batching}} 10 | {{num_instances}} 11 | {% endraw %} -------------------------------------------------------------------------------- /kubernetes/model-repository-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: model-repository 7 | name: model-repository 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 5Gi 14 | status: {} 15 | -------------------------------------------------------------------------------- /kubernetes/prometheus-data-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: prometheus-data 7 | name: prometheus-data 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 1Gi 14 | status: {} 15 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/proteus/models/efficientdet/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import EfficientDetD0, EfficientDetD2 6 | 7 | model_dict = { 8 | EfficientDetD0.__name__: EfficientDetD0, 9 | EfficientDetD2.__name__: EfficientDetD2, 10 | } 11 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/proteus/models/{{cookiecutter.package_name}}/__init__.py: -------------------------------------------------------------------------------- 
1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import {{cookiecutter.model_name}} 6 | 7 | model_dict = { {{cookiecutter.model_name}}.__name__: {{cookiecutter.model_name}}} 8 | -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/proteus/models/mobilenet/config.template: -------------------------------------------------------------------------------- 1 | platform: "onnxruntime_onnx" 2 | max_batch_size : 0 3 | input [ 4 | { 5 | name: "input" 6 | data_type: TYPE_FP32 7 | dims: [1, 3, 224, 224] 8 | } 9 | ] 10 | output [ 11 | { 12 | name: "output" 13 | data_type: TYPE_FP32 14 | dims: [-1, 1000] 15 | } 16 | ] 17 | {{triton_optimization}} 18 | {{num_instances}} -------------------------------------------------------------------------------- /packages/proteus.models.resnet50/proteus/models/resnet50/config.template: -------------------------------------------------------------------------------- 1 | platform: "onnxruntime_onnx" 2 | max_batch_size : 0 3 | input [ 4 | { 5 | name: "data" 6 | data_type: TYPE_FP32 7 | dims: [1, 3, 224, 224] 8 | } 9 | ] 10 | output [ 11 | { 12 | name: "resnetv24_dense0_fwd" 13 | data_type: TYPE_FP32 14 | dims: [1, 1000] 15 | } 16 | ] 17 | {{triton_optimization}} 18 | {{num_instances}} -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/proteus/models/efficientnetlite4/config.template: -------------------------------------------------------------------------------- 1 | platform: "onnxruntime_onnx" 2 | max_batch_size : 0 3 | input [ 4 | { 5 | name: "images:0" 6 | data_type: TYPE_FP32 7 | dims: [1, 224, 224, 3] 8 | } 9 | ] 10 | output [ 11 | { 12 | name: "Softmax:0" 13 | data_type: TYPE_FP32 14 | dims: [1, 1000] 15 | } 16 | ] 17 | {{triton_optimization}} 18 | {{num_instances}} -------------------------------------------------------------------------------- /kubernetes/api-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: api 10 | name: api 11 | spec: 12 | ports: 13 | - name: "80" 14 | port: 80 15 | targetPort: 80 16 | selector: 17 | io.kompose.service: api 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/README.md: -------------------------------------------------------------------------------- 1 | # Proteus MaskRCNN 2 | 3 | Package for MaskRCNN model usage in Proteus 4 | 5 | Model and implementation taken from https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/mask-rcnn 6 | MIT License 7 | 8 | ## Pre/Postprocessing 9 | Pre/postprocessing is taken from the example code in above repo 10 | 11 | ## Models 12 | 13 | Available Proteus configuration options: 14 | - Num Instances -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/MaskRCNN/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "MaskRCNN", 4 | "Dataset": "CocoValMask", 5 | "Throughput" : [ 6 | {"num_workers": 1, "num_instances": 1 }, 7 | {"num_workers": 2, 
"num_instances": 1 }, 8 | {"num_workers": 1, "num_instances": 2 }, 9 | {"num_workers": 4, "num_instances": 1 } 10 | ], 11 | "Latency" : [ 12 | {} 13 | ], 14 | "Score" : [ 15 | {} 16 | ] 17 | } -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/proteus/models/efficientdet/config.template: -------------------------------------------------------------------------------- 1 | platform: "onnxruntime_onnx" 2 | max_batch_size : 0 3 | input [ 4 | { 5 | name: "image_arrays:0" 6 | data_type: TYPE_UINT8 7 | format: FORMAT_NONE 8 | dims: [-1, -1, -1, 3] 9 | } 10 | ] 11 | output [ 12 | { 13 | name: "detections:0" 14 | data_type: TYPE_FP32 15 | dims: [1, -1, 7] 16 | } 17 | ] 18 | {{triton_optimization}} 19 | {{num_instances}} -------------------------------------------------------------------------------- /kubernetes/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: grafana 10 | name: grafana 11 | spec: 12 | ports: 13 | - name: "3000" 14 | port: 3000 15 | targetPort: 3000 16 | selector: 17 | io.kompose.service: grafana 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /packages/proteus.models.superres/proteus/models/superres/config.template: -------------------------------------------------------------------------------- 1 | 2 | platform: "onnxruntime_onnx" 3 | max_batch_size : 10 4 | input [ 5 | { 6 | name: "input" 7 | data_type: TYPE_FP32 8 | format: FORMAT_NONE 9 | dims: [1, 224, 224] 10 | } 11 | ] 12 | output [ 13 | { 14 | name: "output" 15 | data_type: TYPE_FP32 16 | dims: [1, 672, 672] 17 | } 18 | ] 19 | {{triton_optimization}} 20 | {{dynamic_batching}} 21 | {{num_instances}} -------------------------------------------------------------------------------- /test_requirements.txt: -------------------------------------------------------------------------------- 1 | ./packages/proteus.types 2 | ./packages/proteus.datasets 3 | ./packages/proteus.models 4 | ./packages/proteus.models.maskrcnn 5 | ./packages/proteus.models.mobilenet 6 | ./packages/proteus.models.resnet50 7 | ./packages/proteus.models.efficientnetlite4 8 | ./packages/proteus.models.retinanet 9 | ./packages/proteus.models.efficientdet 10 | ./packages/proteus.models.yolov4 11 | ./packages/proteus.models.superres 12 | ./packages/proteus.models.efficientpose -------------------------------------------------------------------------------- /packages/proteus.models/proteus/models/base/modelconfigs.py: -------------------------------------------------------------------------------- 1 | import pydantic 2 | 3 | 4 | class BaseModelConfig(pydantic.BaseModel): 5 | num_instances: int = 1 6 | 7 | 8 | class TritonOptimizationModelConfig(pydantic.BaseModel): 9 | triton_optimization: bool = True 10 | 11 | 12 | class QuantizationModelConfig(pydantic.BaseModel): 13 | quantize: bool = False 14 | 15 | 16 | class BatchingModelConfig(pydantic.BaseModel): 17 | dynamic_batching: bool = True 18 | -------------------------------------------------------------------------------- /tools/benchmarking/proteus/tools/benchmarking/templates/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for {{model}} 2 | 3 | ## Score 4 | 5 
| Score on {{num_samples_score}} samples from {{dataset}} dataset 6 | {{score_table}} 7 | 8 | ## Throughput 9 | Average throughput in FPS on {{num_samples_throughput}} samples from {{dataset}} dataset 10 | {{throughput_table}} 11 | 12 | ## Latency 13 | 14 | Average latency in ms on {{num_samples_latency}} samples from {{dataset}} dataset 15 | {{latency_table}} -------------------------------------------------------------------------------- /kubernetes/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: prometheus 10 | name: prometheus 11 | spec: 12 | ports: 13 | - name: "9090" 14 | port: 9090 15 | targetPort: 9090 16 | selector: 17 | io.kompose.service: prometheus 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /packages/package_install.txt: -------------------------------------------------------------------------------- 1 | -e /packages/proteus.types/. 2 | -e /packages/proteus.models/. 3 | -e /packages/proteus.models.maskrcnn/. 4 | -e /packages/proteus.models.mobilenet/. 5 | -e /packages/proteus.models.resnet50/. 6 | -e /packages/proteus.models.efficientnetlite4/. 7 | -e /packages/proteus.models.retinanet/. 8 | -e /packages/proteus.models.efficientdet/. 9 | -e /packages/proteus.models.superres/. 10 | -e /packages/proteus.models.efficientpose/. 11 | -e /packages/proteus.models.yolov4/. 12 | -------------------------------------------------------------------------------- /packages/proteus.models/README.md: -------------------------------------------------------------------------------- 1 | ======== 2 | Proteus Models 3 | ======== 4 | 5 | Common interfaces for models. 6 | 7 | base.py: Contains shared functionality for models. Subclasses in theory only need to reimplement the preprocessing and postprocessing functions 8 | classification.py: Superclass for classification models. Implementing a new classifier should be very easy 9 | modelconfigs.py: Contains the different configs that can be enabled for your model when deploying to Triton. Subclass the ones that apply to your model.
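10 | 11 | For illustration, a minimal sketch of how a model package might combine these configs (the class names are the ones defined in modelconfigs.py; the import path is an assumption based on the package layout): 12 | ``` 13 | from proteus.models.base.modelconfigs import ( 14 |     BaseModelConfig, 15 |     BatchingModelConfig, 16 |     TritonOptimizationModelConfig, 17 | ) 18 | 19 | # Hypothetical config for a model that supports Triton optimization 20 | # and dynamic batching on top of the base options. 21 | class MyModelConfig(BaseModelConfig, TritonOptimizationModelConfig, BatchingModelConfig): 22 |     pass 23 | ```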
-------------------------------------------------------------------------------- /packages/proteus.datasets/proteus/datasets/datasets.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | 3 | import requests 4 | 5 | tmpfolder = tempfile.gettempdir() 6 | 7 | 8 | class Dataset: 9 | """ 10 | Dataset interface: 11 | - implement __getitem__ to generate files 12 | - implement eval(preds) to predict a score 13 | """ 14 | 15 | def __getitem__(self, index): 16 | None 17 | 18 | def __len__(self): 19 | return 1 20 | 21 | def eval(self, preds): 22 | return 1.0 23 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/proteus/models/efficientpose/config_I.template: -------------------------------------------------------------------------------- 1 | 2 | platform: "onnxruntime_onnx" 3 | max_batch_size : 10 4 | input [ 5 | { 6 | name: "input_res1:0" 7 | data_type: TYPE_FP32 8 | format: FORMAT_NONE 9 | dims: [256, 256, 3] 10 | } 11 | ] 12 | output [ 13 | { 14 | name: "upscaled_confs/BiasAdd:0" 15 | data_type: TYPE_FP32 16 | dims: [-1, -1, 16] 17 | } 18 | ] 19 | {{triton_optimization}} 20 | {{dynamic_batching}} 21 | {{num_instances}} 22 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/proteus/models/efficientpose/config_II.template: -------------------------------------------------------------------------------- 1 | 2 | platform: "onnxruntime_onnx" 3 | max_batch_size : 10 4 | input [ 5 | { 6 | name: "input_res1:0" 7 | data_type: TYPE_FP32 8 | format: FORMAT_NONE 9 | dims: [368, 368, 3] 10 | } 11 | ] 12 | output [ 13 | { 14 | name: "upscaled_confs/BiasAdd:0" 15 | data_type: TYPE_FP32 16 | dims: [-1, -1, 16] 17 | } 18 | ] 19 | {{triton_optimization}} 20 | {{dynamic_batching}} 21 | {{num_instances}} 22 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/proteus/models/efficientpose/config_III.template: -------------------------------------------------------------------------------- 1 | 2 | platform: "onnxruntime_onnx" 3 | max_batch_size : 10 4 | input [ 5 | { 6 | name: "input_res1:0" 7 | data_type: TYPE_FP32 8 | format: FORMAT_NONE 9 | dims: [480, 480, 3] 10 | } 11 | ] 12 | output [ 13 | { 14 | name: "upscaled_confs/BiasAdd:0" 15 | data_type: TYPE_FP32 16 | dims: [-1, -1, 16] 17 | } 18 | ] 19 | {{triton_optimization}} 20 | {{dynamic_batching}} 21 | {{num_instances}} 22 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/proteus/models/efficientpose/config_IV.template: -------------------------------------------------------------------------------- 1 | 2 | platform: "onnxruntime_onnx" 3 | max_batch_size : 10 4 | input [ 5 | { 6 | name: "input_res1:0" 7 | data_type: TYPE_FP32 8 | format: FORMAT_NONE 9 | dims: [600, 600, 3] 10 | } 11 | ] 12 | output [ 13 | { 14 | name: "upscaled_confs/BiasAdd:0" 15 | data_type: TYPE_FP32 16 | dims: [-1, -1, 16] 17 | } 18 | ] 19 | {{triton_optimization}} 20 | {{dynamic_batching}} 21 | {{num_instances}} 22 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/proteus/models/efficientpose/config_RT.template: -------------------------------------------------------------------------------- 1 | 2 | platform: "onnxruntime_onnx" 3 | max_batch_size : 10 4 | input [ 5 | { 6 | name: 
"input_res1:0" 7 | data_type: TYPE_FP32 8 | format: FORMAT_NONE 9 | dims: [224, 224, 3] 10 | } 11 | ] 12 | output [ 13 | { 14 | name: "upscaled_confs/BiasAdd:0" 15 | data_type: TYPE_FP32 16 | dims: [-1, -1, 16] 17 | } 18 | ] 19 | {{triton_optimization}} 20 | {{dynamic_batching}} 21 | {{num_instances}} 22 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/SuperRes/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "SuperResolution", 4 | "Dataset": "BSDSSuperRes", 5 | "Throughput" : [ 6 | {"num_workers": 1, "num_instances": 1 }, 7 | {"num_workers": 2, "num_instances": 1 }, 8 | {"num_workers": 1, "num_instances": 2 }, 9 | {"num_workers": 4, "num_instances": 1 } 10 | ], 11 | "Latency" : [ 12 | {} 13 | ], 14 | "Score" : [ 15 | {} 16 | ] 17 | } -------------------------------------------------------------------------------- /packages/proteus.datasets/proteus/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .bsds import BSDSSuperRes 6 | from .coco import CocoValBBox, CocoValMask 7 | from .datasets import Dataset 8 | from .imagenette import ImageNette 9 | from .mpii import MPIIPoseEstimation 10 | 11 | __all__ = [ 12 | "Dataset", 13 | "ImageNette", 14 | "CocoValBBox", 15 | "CocoValMask", 16 | "BSDSSuperRes", 17 | "MPIIPoseEstimation", 18 | ] 19 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientDetD0/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "EfficientDetD0", 4 | "Dataset": "CocoValBBox", 5 | "Throughput" : [ 6 | {"num_workers": 1, "num_instances": 1 }, 7 | {"num_workers": 2, "num_instances": 1 }, 8 | {"num_workers": 4, "num_instances": 1 }, 9 | {"num_workers": 2, "num_instances": 2}, 10 | {"num_workers": 1, "num_instances": 2} 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true} 14 | ], 15 | "Score" : [ 16 | {} 17 | ] 18 | } -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientDetD2/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "EfficientDetD2", 4 | "Dataset": "CocoValBBox", 5 | "Throughput" : [ 6 | {"num_workers": 1, "num_instances": 1 }, 7 | {"num_workers": 2, "num_instances": 1 }, 8 | {"num_workers": 4, "num_instances": 1 }, 9 | {"num_workers": 2, "num_instances": 2}, 10 | {"num_workers": 1, "num_instances": 2} 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true} 14 | ], 15 | "Score" : [ 16 | {} 17 | ] 18 | } -------------------------------------------------------------------------------- /packages/proteus.datasets/proteus/datasets/mpii.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | 3 | import numpy as np 4 | import requests 5 | from PIL import Image 6 | 7 | from .imagenette import ImageNette 8 | 9 | tmpfolder = tempfile.gettempdir() 10 | 11 | 12 | class MPIIPoseEstimation(ImageNette): 13 | """ 14 | Just a dummy implementation. MPII dataset is huge and 15 | the evaluation code is written in matlab. 
Scoring is not implemented 16 | here; ImageNette is used instead 17 | """ 18 | 19 | def eval(self, preds): 20 | return 0.0 21 | -------------------------------------------------------------------------------- /proteus_api/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7 2 | 3 | RUN pip install fastapi uvicorn 4 | 5 | COPY proteus_api/requirements.txt ./requirements.txt 6 | 7 | # pin pip 20.2 to keep the old dependency resolver, to be safe 8 | RUN python -m pip install --upgrade pip==20.2 9 | 10 | # Install nvidia-pyindex first to have access to nvidia pypi 11 | RUN pip install nvidia-pyindex==1.0.4 12 | RUN pip install -r requirements.txt 13 | 14 | EXPOSE 80 15 | 16 | COPY proteus_api/app /app 17 | 18 | CMD ["sh","-c","pip install -r /packages/package_install.txt && uvicorn app.main:app --reload --host 0.0.0.0 --port 80"] -------------------------------------------------------------------------------- /tools/benchmarking/README.md: -------------------------------------------------------------------------------- 1 | ======== 2 | Benchmarking 3 | ======== 4 | 5 | Small CLI to help with model benchmarking. 6 | Usage: 'proteus.benchmark <config.json>' 7 | where <config.json> is the path to a JSON file containing the benchmark suite to be run. Output will be written to a markdown file. 8 | See e.g. configs/example.json for an example config file. 9 | 10 | Benchmarks from my own workstation are available under benchmarks/ 11 | 12 | Note: the benchmarking tool needs proteus.datasets. Install it from source: 13 | ``` 14 | pip install -e ../../packages/proteus.datasets 15 | ``` -------------------------------------------------------------------------------- /proteus_api/Dockerfile.prod: -------------------------------------------------------------------------------- 1 | FROM python:3.7 2 | 3 | RUN pip install fastapi uvicorn 4 | 5 | COPY proteus_api/requirements.txt ./requirements.txt 6 | 7 | # pin pip 20.2 to keep the old dependency resolver, to be safe 8 | RUN python -m pip install --upgrade pip==20.2 9 | 10 | # Install nvidia-pyindex first to have access to nvidia pypi 11 | RUN pip install nvidia-pyindex==1.0.4 12 | RUN pip install -r requirements.txt 13 | 14 | EXPOSE 80 15 | 16 | COPY proteus_api/app /app 17 | COPY packages/ /packages 18 | 19 | RUN pip install -r /packages/package_install.txt 20 | 21 | CMD ["sh","-c","uvicorn app.main:app --host 0.0.0.0 --port 80"] -------------------------------------------------------------------------------- /kubernetes/triton-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: triton 10 | name: triton 11 | spec: 12 | ports: 13 | - name: "8000" 14 | port: 8000 15 | targetPort: 8000 16 | - name: "8001" 17 | port: 8001 18 | targetPort: 8001 19 | - name: "8002" 20 | port: 8002 21 | targetPort: 8002 22 | selector: 23 | io.kompose.service: triton 24 | status: 25 | loadBalancer: {} 26 | -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/README.md: -------------------------------------------------------------------------------- 1 | # Proteus RetinaNet 2 | 3 | Package for RetinaNet model usage in Proteus 4 | 5 | Model and implementation taken from https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/retinanet, 6 | which bases its code on 
https://github.com/NVIDIA/retinanet-examples 7 | 8 | The NVIDIA source version has a BSD-3-Clause License 9 | 10 | ## Pre/Postprocessing 11 | Taken from https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/retinanet 12 | 13 | ## Models 14 | 15 | Available Proteus configuration options: 16 | - Num Instances -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/proteus/models/efficientpose/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = """Pieter Blomme""" 2 | __email__ = "pieter.blomme@gmail.com" 3 | __version__ = "0.0.1" 4 | 5 | from .client import ( 6 | EfficientPoseI, 7 | EfficientPoseII, 8 | EfficientPoseIII, 9 | EfficientPoseIV, 10 | EfficientPoseRT, 11 | ) 12 | 13 | model_dict = { 14 | EfficientPoseI.__name__: EfficientPoseI, 15 | EfficientPoseII.__name__: EfficientPoseII, 16 | EfficientPoseIII.__name__: EfficientPoseIII, 17 | EfficientPoseIV.__name__: EfficientPoseIV, 18 | EfficientPoseRT.__name__: EfficientPoseRT, 19 | } 20 | -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/README.md: -------------------------------------------------------------------------------- 1 | # Proteus MobileNet 2 | 3 | Package for MobileNet model usage in Proteus 4 | 5 | Model and implementation taken from https://github.com/onnx/models/tree/master/vision/classification/mobilenet 6 | MIT License 7 | 8 | ## Pre/Postprocessing 9 | I wrote shared preprocessing for all classification models to keep it simple. Essentially this is limited to a resize that preserves the aspect ratio. 10 | Postprocessing also comes from ClassificationModel 11 | 12 | ## Models 13 | 14 | Available Proteus configuration options: 15 | - Num Instances 16 | - Quantization (INT8 precision) 17 | - TritonOptimization -------------------------------------------------------------------------------- /proteus_api/requirements.txt: -------------------------------------------------------------------------------- 1 | click==7.1.1 2 | fastapi==0.65.2 3 | fastapi-utils==0.2.1 4 | file-read-backwards==2.0.0 5 | gevent==20.9.0 6 | geventhttpclient==1.4.4 7 | greenlet==0.4.17 8 | grpcio==1.32.0 9 | gunicorn==20.0.4 10 | h11==0.9.0 11 | httptools==0.1.1 12 | jinja2==2.11.2 13 | numpy==1.19.2 14 | nvidia-pyindex==1.0.4 15 | Pillow==9.0.1 16 | protobuf==3.15.0 17 | pydantic==1.5.1 18 | python-multipart==0.0.5 19 | python-rapidjson==0.9.1 20 | scipy==1.5.2 21 | six==1.15.0 22 | starlette==0.13.2 23 | tritonclient==2.3.0 24 | uvicorn==0.11.7 25 | uvloop==0.14.0 26 | websockets==8.1 27 | zope.event==4.5.0 28 | zope.interface==5.1.0 -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/README.md: -------------------------------------------------------------------------------- 1 | # Proteus EfficientNetLite4 2 | 3 | Package for EfficientNetLite4 model usage in Proteus 4 | 5 | Model and implementation taken from https://github.com/onnx/models/tree/master/vision/classification/efficientnet-lite4 6 | MIT License 7 | 8 | ## Pre/Postprocessing 9 | I wrote shared preprocessing for all classification models to keep it simple. Essentially this is limited to a resize that preserves the aspect ratio. 10 | Postprocessing also comes from ClassificationModel 11 | 12 | ## Models 13 | 14 | Available Proteus configuration options: 15 | - Num Instances 16 | - Quantization (INT8 precision) 17 | - TritonOptimization
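18 | 19 | For illustration, a minimal sketch of what such an aspect-ratio-preserving resize could look like (the actual implementation lives in the shared ClassificationModel code, which is not shown here, so treat this as an assumption rather than the real preprocessing): 20 | ``` 21 | from PIL import Image 22 | 23 | def resize_keep_aspect(img: Image.Image, target: int = 224) -> Image.Image: 24 |     # Scale the short side to `target`; the aspect ratio stays intact. 25 |     w, h = img.size 26 |     scale = target / min(w, h) 27 |     return img.resize((round(w * scale), round(h * scale))) 28 | ```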
-------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/benchmarks.sh: -------------------------------------------------------------------------------- 1 | 2 | proteus.benchmark ./MobileNet/benchmark.json 3 | proteus.benchmark ./ResNet50/benchmark.json 4 | proteus.benchmark ./EfficientNetLite4/benchmark.json 5 | proteus.benchmark ./EfficientPoseI/benchmark.json 6 | proteus.benchmark ./EfficientPoseIV/benchmark.json 7 | proteus.benchmark ./EfficientPoseRT/benchmark.json 8 | proteus.benchmark ./SuperRes/benchmark.json 9 | proteus.benchmark ./EfficientDetD0/benchmark.json 10 | proteus.benchmark ./EfficientDetD2/benchmark.json 11 | proteus.benchmark ./RetinaNet/benchmark.json 12 | proteus.benchmark ./YoloV4/benchmark.json 13 | proteus.benchmark ./MaskRCNN/benchmark.json 14 | -------------------------------------------------------------------------------- /packages/proteus.models.resnet50/README.md: -------------------------------------------------------------------------------- 1 | # Proteus ResNet50 2 | 3 | Package for ResNet50 model usage in Proteus 4 | 5 | Model and implementation taken from https://github.com/onnx/models/tree/master/vision/classification/resnet 6 | MIT License 7 | 8 | ## Pre/Postprocessing 9 | I wrote shared preprocessing for all classification models to keep it simple. Essentially this is limited to a resize that preserves the aspect ratio. 10 | Postprocessing also comes from ClassificationModel 11 | 12 | ## Models 13 | I am using the Resnet50 V2 version. 14 | 15 | Available Proteus configuration options: 16 | - Num Instances 17 | - Quantization (INT8 precision) 18 | - TritonOptimization -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/proteus/models/maskrcnn/config.template: -------------------------------------------------------------------------------- 1 | platform: "onnxruntime_onnx" 2 | max_batch_size : 0 3 | input [ 4 | { 5 | name: "image" 6 | data_type: TYPE_FP32 7 | dims: [3, -1, -1] 8 | } 9 | ] 10 | output [ 11 | { 12 | name: "6572" 13 | data_type: TYPE_FP32 14 | dims: [-1] 15 | }, 16 | { 17 | name: "6568" 18 | data_type: TYPE_FP32 19 | dims: [-1, 4] 20 | }, 21 | { 22 | name: "6887" 23 | data_type: TYPE_FP32 24 | dims: [-1, 1, 28, 28] 25 | }, 26 | { 27 | name: "6570" 28 | data_type: TYPE_INT64 29 | dims: [-1] 30 | } 31 | ] 32 | {{triton_optimization}} 33 | {{num_instances}} -------------------------------------------------------------------------------- /tools/benchmarking/configs/example.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://localhost", 3 | "Model": "EfficientNetLite4", 4 | "Dataset": "ImageNette", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "quantize": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "quantize": true, "num_workers": 1, "num_instances": 1 } 9 | ], 10 | "Latency" : [ 11 | {"triton_optimization": true, "quantize": true}, 12 | {"triton_optimization": true, "quantize": false} 13 | ], 14 | "Score" : [ 15 | {"quantize": true}, 16 | {"quantize": false} 17 | ] 18 | } 
-------------------------------------------------------------------------------- /packages/proteus.models.yolov4/proteus/models/yolov4/config.template: -------------------------------------------------------------------------------- 1 | platform: "onnxruntime_onnx" 2 | max_batch_size : 2 3 | input [ 4 | { 5 | name: "input_1:0" 6 | data_type: TYPE_FP32 7 | format: FORMAT_NONE 8 | dims: [416, 416, 3] 9 | } 10 | ] 11 | output [ 12 | { 13 | name: "Identity:0" 14 | data_type: TYPE_FP32 15 | dims: [-1, -1, 3, 85] 16 | }, 17 | { 18 | name: "Identity_1:0" 19 | data_type: TYPE_FP32 20 | dims: [-1, -1, 3, 85] 21 | }, 22 | { 23 | name: "Identity_2:0" 24 | data_type: TYPE_FP32 25 | dims: [-1, -1, 3, 85] 26 | } 27 | ] 28 | {{triton_optimization}} 29 | {{dynamic_batching}} 30 | {{num_instances}} -------------------------------------------------------------------------------- /packages/proteus.datasets/README.md: -------------------------------------------------------------------------------- 1 | ======== 2 | Proteus Datasets 3 | ======== 4 | 5 | Datasets to be used for testing and benchmarking 6 | 7 | Dataset template looks like this: 8 | ``` 9 | class Dataset: 10 | """ 11 | Dataset interface: 12 | - implement __getitem__ to generate files 13 | - implement eval(preds) to predict a score 14 | """ 15 | 16 | def __getitem__(self, index): 17 | None 18 | 19 | def __len__(self): 20 | return 1 21 | 22 | def eval(self, preds): 23 | return 1.0 24 | ``` 25 | 26 | Implemented datasets: 27 | 28 | - ImageNette 29 | - CocoValBBox 30 | - CocoValMask 31 | - BSDSSuperRes 32 | - MPIIPoseEstimation 33 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/MobileNet/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "MobileNetV2", 4 | "Dataset": "ImageNette", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "num_workers": 2, "num_instances": 1 }, 9 | {"triton_optimization": true, "num_workers": 1, "num_instances": 2 }, 10 | {"triton_optimization": true, "num_workers": 4, "num_instances": 1 } 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true}, 14 | {"triton_optimization": false} 15 | ], 16 | "Score" : [ 17 | {"triton_optimization": true} 18 | ] 19 | } -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/ResNet50/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "Resnet50V2", 4 | "Dataset": "ImageNette", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "num_workers": 2, "num_instances": 1 }, 9 | {"triton_optimization": true, "num_workers": 1, "num_instances": 2 }, 10 | {"triton_optimization": true, "num_workers": 4, "num_instances": 1 } 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true}, 14 | {"triton_optimization": false} 15 | ], 16 | "Score" : [ 17 | {"triton_optimization": true} 18 | ] 19 | } -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/RetinaNet/benchmark.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "RetinaNet", 4 | "Dataset": "CocoValBBox", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "num_workers": 2, "num_instances": 1 }, 9 | {"triton_optimization": true, "num_workers": 1, "num_instances": 2 }, 10 | {"triton_optimization": true, "num_workers": 4, "num_instances": 1 } 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true}, 14 | {"triton_optimization": false} 15 | ], 16 | "Score" : [ 17 | {"triton_optimization": true} 18 | ] 19 | } -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/README.md: -------------------------------------------------------------------------------- 1 | # Proteus YOLOV4 2 | 3 | Package for YOLOV4 model usage in Proteus 4 | 5 | Model and code taken from https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/yolov4, which in turn based the code on: 6 | - https://github.com/hunglc007/tensorflow-yolov4-tflite 7 | 8 | Licensing: the original implementation from hunglc007 has an MIT license 9 | 10 | The model acts on 416x416x3 inputs. Proteus rescales if needed. 11 | 12 | ## Pre/Postprocessing 13 | Taken from https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/yolov4 14 | 15 | ## Models 16 | 17 | Available Proteus configuration options: 18 | - Num Instances 19 | - Quantization (INT8 precision) 20 | - TritonOptimization 21 | - Dynamic Batching -------------------------------------------------------------------------------- /proteus_api/app/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root,predictions 3 | 4 | [handlers] 5 | keys=consoleHandler,fileHandler 6 | 7 | [formatters] 8 | keys=normalFormatter, predictionFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [logger_predictions] 15 | qualname=predictions 16 | handlers=fileHandler 17 | 18 | [handler_consoleHandler] 19 | class=StreamHandler 20 | formatter=normalFormatter 21 | args=(sys.stdout,) 22 | 23 | [handler_fileHandler] 24 | class=FileHandler 25 | level=INFO 26 | formatter=predictionFormatter 27 | args=('/logs/predictions.log', 'a+') 28 | 29 | [formatter_predictionFormatter] 30 | format=%(asctime)s|%(message)s 31 | 32 | [formatter_normalFormatter] 33 | format=%(asctime)s - %(levelname)s - %(name)s - %(message)s -------------------------------------------------------------------------------- /proteus_api/README.md: -------------------------------------------------------------------------------- 1 | # apiclient 2 | FastAPI for Triton 3 | 4 | Upon start-up, model discovery will happen by looking through the proteus.models namespace and registering all available models. 5 | For each model a load, unload and predict endpoint will be generated. 6 | 7 | Models can be loaded with certain configs, if exposed by the model. 8 | - num_instances: Triton allows running multiple copies of the same model on a GPU 9 | - triton_optimization: Whether to use Triton Optimization. This is recommended, but older Onnx models may not support it. 10 | - quantize: You can quantize models to make them smaller. There seems to be no performance benefit when serving via Triton, but the models take less space.
11 | - dynamic_batching: If the model allows batching, Triton can batch requests dynamically when many concurrent requests come in. -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/MaskRCNN/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for MaskRCNN 2 | 3 | ## Score 4 | 5 | Score on 100 samples from CocoValMask dataset 6 | |    |    score | 7 | |---:|---------:| 8 | |  0 | 0.327063 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from CocoValMask dataset 12 | |    |   num_workers |   num_instances |   throughput | 13 | |---:|--------------:|----------------:|-------------:| 14 | |  3 |             4 |               1 |      4.02101 | 15 | |  1 |             2 |               1 |      3.9751  | 16 | |  0 |             1 |               1 |      3.56431 | 17 | |  2 |             1 |               2 |      2.94482 | 18 | 19 | ## Latency 20 | 21 | Average latency in ms on 10 samples from CocoValMask dataset 22 | |    |   latency | 23 | |---:|----------:| 24 | |  0 |    452.66 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/SuperRes/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for SuperResolution 2 | 3 | ## Score 4 | 5 | Score on 100 samples from BSDSSuperRes dataset 6 | |    |   score | 7 | |---:|--------:| 8 | |  0 | 47.5618 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from BSDSSuperRes dataset 12 | |    |   num_workers |   num_instances |   throughput | 13 | |---:|--------------:|----------------:|-------------:| 14 | |  1 |             2 |               1 |     0.399698 | 15 | |  2 |             1 |               2 |     0.39951  | 16 | |  0 |             1 |               1 |     0.398853 | 17 | |  3 |             4 |               1 |     0.397348 | 18 | 19 | ## Latency 20 | 21 | Average latency in ms on 10 samples from BSDSSuperRes dataset 22 | |    |   latency | 23 | |---:|----------:| 24 | |  0 |   145.807 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientNetLite4/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "EfficientNetLite4", 4 | "Dataset": "ImageNette", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "quantize": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 2 }, 9 | {"triton_optimization": true, "quantize": false, "num_workers": 2, "num_instances": 1 }, 10 | {"triton_optimization": true, "quantize": false, "num_workers": 4, "num_instances": 1 } 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true, "quantize": false}, 14 | {"triton_optimization": false, "quantize": false} 15 | ], 16 | "Score" : [ 17 | {"quantize": false} 18 | ] 19 | } -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientPoseI/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "EfficientPoseI", 4 | "Dataset": "MPIIPoseEstimation", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "quantize": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 2 }, 9 | {"triton_optimization": true,
"quantize": false, "num_workers": 2, "num_instances": 1 }, 10 | {"triton_optimization": true, "quantize": false, "num_workers": 4, "num_instances": 1 } 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true, "quantize": false}, 14 | {"triton_optimization": false, "quantize": false} 15 | ], 16 | "Score" : [ 17 | {"quantize": false} 18 | ] 19 | } -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientPoseIV/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "EfficientPoseIV", 4 | "Dataset": "MPIIPoseEstimation", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "quantize": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 2 }, 9 | {"triton_optimization": true, "quantize": false, "num_workers": 2, "num_instances": 1 }, 10 | {"triton_optimization": true, "quantize": false, "num_workers": 4, "num_instances": 1 } 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true, "quantize": false}, 14 | {"triton_optimization": false, "quantize": false} 15 | ], 16 | "Score" : [ 17 | {"quantize": false} 18 | ] 19 | } -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientPoseRT/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "EfficientPoseRT", 4 | "Dataset": "MPIIPoseEstimation", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": false, "quantize": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": true, "quantize": false, "num_workers": 1, "num_instances": 2 }, 9 | {"triton_optimization": true, "quantize": false, "num_workers": 2, "num_instances": 1 }, 10 | {"triton_optimization": true, "quantize": false, "num_workers": 4, "num_instances": 1 } 11 | ], 12 | "Latency" : [ 13 | {"triton_optimization": true, "quantize": false}, 14 | {"triton_optimization": false, "quantize": false} 15 | ], 16 | "Score" : [ 17 | {"quantize": false} 18 | ] 19 | } -------------------------------------------------------------------------------- /kubernetes/grafana-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: grafana 10 | name: grafana 11 | spec: 12 | replicas: 0 13 | selector: 14 | matchLabels: 15 | io.kompose.service: grafana 16 | strategy: {} 17 | template: 18 | metadata: 19 | annotations: 20 | kompose.cmd: kompose convert 21 | kompose.version: 1.22.0 (955b78124) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.service: grafana 25 | spec: 26 | containers: 27 | - image: pieterblomme/grafana:0.3 28 | name: grafana 29 | ports: 30 | - containerPort: 3000 31 | resources: {} 32 | restartPolicy: Always 33 | status: {} 34 | -------------------------------------------------------------------------------- /kubernetes/ingresses.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | kubernetes.io/ingress.class: nginx 6 | ingress.kubernetes.io/proxy-body-size: "4000m" 7 | nginx.ingress.kubernetes.io/proxy-body-size: "4000m" 8 | ingress.kubernetes.io/proxy-connect-timeout: "1000" 9 | nginx.ingress.kubernetes.io/proxy-connect-timeout: "1000" 10 | ingress.kubernetes.io/proxy-send-timeout: "1000" 11 | nginx.ingress.kubernetes.io/proxy-send-timeout: "1000" 12 | ingress.kubernetes.io/proxy-read-timeout: "1000" 13 | nginx.ingress.kubernetes.io/proxy-read-timeout: "1000" 14 | nginx.org/mergeable-ingress-type: master 15 | name: ingress-paths 16 | labels: 17 | name: ingress-paths 18 | spec: 19 | rules: 20 | - host: proteus.link 21 | http: 22 | paths: 23 | - path: / 24 | backend: 25 | serviceName: api 26 | servicePort: 80 -------------------------------------------------------------------------------- /packages/proteus.types/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = [] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.types", 16 | version="0.0.1", 17 | description="Proteus types", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientDetD2/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for EfficientDetD2 2 | 3 | ## Score 4 | 5 | Score on 100 samples from CocoValBBox dataset 6 | | | score | 7 | |---:|--------:| 8 | | 0 | 0.2116 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from CocoValBBox dataset 12 | | | num_workers | num_instances | throughput | 13 | |---:|--------------:|----------------:|-------------:| 14 | | 2 | 4 | 1 | 7.58956 | 15 | | 1 | 2 | 1 | 7.29533 | 16 | | 0 | 1 | 1 | 6.95794 | 17 | | 3 | 2 | 2 | 4.91367 | 18 | | 4 | 1 | 2 | 4.42691 | 19 | 20 | ## Latency 21 | 22 | Average latency in ms on 10 samples from CocoValBBox dataset 23 | | | triton_optimization | latency | 24 | |---:|:----------------------|----------:| 25 | | 0 | True | 472.207 | -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/proteus/models/mobilenet/client.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from proteus.models.base import ClassificationModel 4 | from proteus.models.base.modelconfigs import BaseModelConfig 5 | 6 | from .helpers import read_class_names 7 | 8 | folder_path = Path(__file__).parent 9 | 10 | 11 | class ModelConfig(BaseModelConfig): 12 | pass 13 | 14 | 15 | class MobileNetV2(ClassificationModel): 16 | 17 | CHANNEL_FIRST = True 18 | DESCRIPTION = ( 19 | "Very efficient model with 70.94 % Top-1 accuracy on ImageNet. 
" 20 | " Taken from https://github.com/onnx/models." 21 | ) 22 | CLASSES = read_class_names(f"{folder_path}/imagenet_labels.txt") 23 | MODEL_URL = "https://github.com/onnx/models/raw/master/vision/classification/mobilenet/model/mobilenetv2-7.onnx" 24 | CONFIG_PATH = f"{folder_path}/config.template" 25 | INPUT_NAME = "input" 26 | OUTPUT_NAMES = ["output"] 27 | DTYPE = "FP32" 28 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientDetD0/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for EfficientDetD0 2 | 3 | ## Score 4 | 5 | Score on 100 samples from CocoValBBox dataset 6 | | | score | 7 | |---:|---------:| 8 | | 0 | 0.101062 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from CocoValBBox dataset 12 | | | num_workers | num_instances | throughput | 13 | |---:|--------------:|----------------:|-------------:| 14 | | 1 | 2 | 1 | 17.2073 | 15 | | 2 | 4 | 1 | 17.1759 | 16 | | 0 | 1 | 1 | 13.9253 | 17 | | 3 | 2 | 2 | 11.1501 | 18 | | 4 | 1 | 2 | 9.38441 | 19 | 20 | ## Latency 21 | 22 | Average latency in ms on 10 samples from CocoValBBox dataset 23 | | | triton_optimization | latency | 24 | |---:|:----------------------|----------:| 25 | | 0 | True | 215.727 | -------------------------------------------------------------------------------- /packages/proteus.models.superres/README.md: -------------------------------------------------------------------------------- 1 | # Proteus SuperResolution 2 | 3 | Package for SuperResolution model usage in Proteus 4 | 5 | Model and code taken from https://github.com/onnx/models/tree/master/vision/super_resolution/sub_pixel_cnn_2016, which in turn based the code on: 6 | - https://github.com/pytorch/examples/tree/master/super_resolution 7 | - https://arxiv.org/abs/1609.05158 8 | 9 | Licensing: the original Pytorch implementation has a BSD-3 license. 10 | 11 | The model takes a 224x224x3 channel input and upscales it to 672x672x3. Images of other sizes will be resized first, but ofcourse it makes little sense to use this model if you already have a hi-res image. 
12 | 13 | ## Pre/Postprocessing 14 | Taken from https://github.com/onnx/models/tree/master/vision/super_resolution/sub_pixel_cnn_2016 15 | 16 | Available Proteus configuration options: 17 | - Num Instances 18 | - Quantization (INT8 precision) 19 | - TritonOptimization 20 | - Dynamic Batching 21 | 22 | -------------------------------------------------------------------------------- /packages/proteus.datasets/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["pycocotools==2.0.2"] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.datasets", 16 | version="0.0.1", 17 | description="Proteus datasets", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/README.md: -------------------------------------------------------------------------------- 1 | # Proteus EfficientDet 2 | 3 | Package for EfficientDet model usage in Proteus 4 | 5 | Models and implementation courtesy of the original Google implementation at https://github.com/google/automl/tree/master/efficientdet 6 | 7 | The Google AutoML repo is licensed under Apache License 2.0 8 | 9 | ## Process for model conversion to ONNX 10 | I used the following example: https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/efficientdet.ipynb 11 | However, due to recent changes in the automl efficientdet repo, you should use commit 57621e8f3eaddd2c0b421c65c0bbd323ebcf8f2d 12 | when running this notebook. 13 | 14 | ## Pre/Postprocessing 15 | https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/efficientdet.ipynb was used as an example. 16 | However, hardly any pre/postprocessing is needed for EfficientDet.
17 | 18 | ## Models 19 | - EfficientDetD0 20 | - EfficientDetD2 21 | 22 | Available Proteus configuration options: 23 | - Num Instances -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = [] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.models.maskrcnn", 16 | version="0.0.1", 17 | description="Proteus Mask RCNN", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["scipy==1.5.2"] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.models.yolov4", 16 | version="0.0.1", 17 | description="Proteus yolov4", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = [] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.models.efficientdet", 16 | version="0.0.1", 17 | description="Proteus EfficientDet", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 |
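Every model package's setup.py above repeats the same `namespace_packages=["proteus", "proteus.models"]` / `find_namespace_packages` pattern; it is this shared namespace that lets the API register all installed models at start-up. A minimal sketch of such discovery using only the standard library (the API's real discovery logic may differ):

```
import importlib
import pkgutil

import proteus.models


def discover_models():
    """Import every package installed under the proteus.models namespace."""
    return {
        info.name: importlib.import_module(info.name)
        for info in pkgutil.iter_modules(proteus.models.__path__, "proteus.models.")
    }
```

Installing another `proteus.models.*` package then makes it discoverable without any change to the API itself, which is the point of the namespace-package design.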
-------------------------------------------------------------------------------- /packages/proteus.models.resnet50/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["opencv-python-headless==4.4.0.40"] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.models.resnet50", 16 | version="0.0.1", 17 | description="Proteus resnet50", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /packages/proteus.models.superres/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["python-resize-image==1.1.19"] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.models.superres", 16 | version="0.0.1", 17 | description="Proteus SuperResolution", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /packages/proteus.models.resnet50/proteus/models/resnet50/client.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from proteus.models.base import ClassificationModel 4 | from proteus.models.base.modelconfigs import BaseModelConfig 5 | 6 | from .helpers import read_class_names 7 | 8 | folder_path = Path(__file__).parent 9 | 10 | 11 | class ModelConfig(BaseModelConfig): 12 | pass 13 | 14 | 15 | class Resnet50V2(ClassificationModel): 16 | 17 | CHANNEL_FIRST = True 18 | DESCRIPTION = ( 19 | "ResNet models provide very high accuracies with affordable model sizes. " 20 | "75.81% Top-1 on ImageNet for ResNet50 V2. " 21 | "Taken from https://github.com/onnx/models."
22 | ) 23 | CLASSES = read_class_names(f"{folder_path}/imagenet_labels.txt") 24 | MODEL_URL = "https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx" 25 | CONFIG_PATH = f"{folder_path}/config.template" 26 | INPUT_NAME = "data" 27 | OUTPUT_NAMES = ["resnetv24_dense0_fwd"] 28 | DTYPE = "FP32" 29 | -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["torch==1.4.0", "torchvision==0.5.0"] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.models.retinanet", 16 | version="0.0.1", 17 | description="Proteus RetinaNet", 18 | author="Pieter Blomme", 19 | author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["opencv-python-headless==4.4.0.40"] 11 | 12 | 13 | test_requirements = ["pytest>=3"] 14 | 15 | setup( 16 | name="proteus.models.mobilenet", 17 | version="0.0.1", 18 | description="Proteus mobilenet", 19 | author="Pieter Blomme", 20 | author_email="pieter.blomme@gmail.com", 21 | python_requires=">=3.6", 22 | classifiers=[], 23 | keywords="", 24 | entry_points={}, 25 | install_requires=requirements, 26 | long_description=readme, 27 | include_package_data=True, 28 | namespace_packages=["proteus", "proteus.models"], 29 | packages=find_namespace_packages(exclude=["tests"]), 30 | test_suite="tests", 31 | tests_require=test_requirements, 32 | extras_require={"test": test_requirements}, 33 | zip_safe=False, 34 | ) 35 | -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/proteus/models/yolov4/coco_names.txt: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorcycle 5 | airplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 
| donut 56 | cake 57 | chair 58 | couch 59 | potted plant 60 | bed 61 | dining table 62 | toilet 63 | tv 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/proteus/models/retinanet/coco_names.txt: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorcycle 5 | airplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | couch 59 | potted plant 60 | bed 61 | dining table 62 | toilet 63 | tv 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/proteus/models/maskrcnn/coco_names.txt: -------------------------------------------------------------------------------- 1 | __background 2 | person 3 | bicycle 4 | car 5 | motorcycle 6 | airplane 7 | bus 8 | train 9 | truck 10 | boat 11 | traffic light 12 | fire hydrant 13 | stop sign 14 | parking meter 15 | bench 16 | bird 17 | cat 18 | dog 19 | horse 20 | sheep 21 | cow 22 | elephant 23 | bear 24 | zebra 25 | giraffe 26 | backpack 27 | umbrella 28 | handbag 29 | tie 30 | suitcase 31 | frisbee 32 | skis 33 | snowboard 34 | sports ball 35 | kite 36 | baseball bat 37 | baseball glove 38 | skateboard 39 | surfboard 40 | tennis racket 41 | bottle 42 | wine glass 43 | cup 44 | fork 45 | knife 46 | spoon 47 | bowl 48 | banana 49 | apple 50 | sandwich 51 | orange 52 | broccoli 53 | carrot 54 | hot dog 55 | pizza 56 | donut 57 | cake 58 | chair 59 | couch 60 | potted plant 61 | bed 62 | dining table 63 | toilet 64 | tv 65 | laptop 66 | mouse 67 | remote 68 | keyboard 69 | cell phone 70 | microwave 71 | oven 72 | toaster 73 | sink 74 | refrigerator 75 | book 76 | clock 77 | vase 78 | scissors 79 | teddy bear 80 | hair drier 81 | toothbrush -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["opencv-python-headless==4.4.0.40"] 11 | 12 | test_requirements = ["pytest>=3", "proteus.datasets"] 13 | 14 | setup( 15 | name="proteus.models.efficientnetlite4", 16 | version="0.0.1", 17 | description="Proteus EfficientNet-Lite4", 18 | author="Pieter Blomme", 19 | 
author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /packages/proteus.types/proteus/types/types.py: -------------------------------------------------------------------------------- 1 | class BoundingBox: 2 | def __init__( 3 | self, 4 | x1: int, 5 | y1: int, 6 | x2: int, 7 | y2: int, 8 | class_name: str = None, 9 | score: float = None, 10 | ): 11 | self.x1 = x1 12 | self.y1 = y1 13 | self.x2 = x2 14 | self.y2 = y2 15 | self.class_name = class_name 16 | self.score = score 17 | 18 | 19 | class Class: 20 | def __init__(self, class_name: str, score: float = None): 21 | self.class_name = class_name 22 | self.score = score 23 | 24 | 25 | class Segmentation: 26 | def __init__( 27 | self, 28 | segmentation: list, 29 | class_name: str = None, 30 | score: float = None, 31 | ): 32 | self.segmentation = segmentation 33 | self.class_name = class_name 34 | self.score = score 35 | 36 | 37 | class Coordinate: 38 | def __init__(self, name: str, x: int, y: int): 39 | self.name = name 40 | self.x = x 41 | self.y = y 42 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/YoloV4/benchmark.json: -------------------------------------------------------------------------------- 1 | { 2 | "BasePath": "http://proteus.localhost", 3 | "Model": "YoloV4", 4 | "Dataset": "CocoValBBox", 5 | "Throughput" : [ 6 | {"triton_optimization": true, "quantize": false, "dynamic_batching": true, "num_workers": 1, "num_instances": 1 }, 7 | {"triton_optimization": true, "quantize": false, "dynamic_batching": false, "num_workers": 1, "num_instances": 1 }, 8 | {"triton_optimization": false, "quantize": false, "dynamic_batching": false, "num_workers": 1, "num_instances": 1 }, 9 | {"triton_optimization": true, "quantize": false, "dynamic_batching": true, "num_workers": 2, "num_instances": 1 }, 10 | {"triton_optimization": true, "quantize": false, "dynamic_batching": true, "num_workers": 1, "num_instances": 2 }, 11 | {"triton_optimization": true, "quantize": false, "dynamic_batching": true, "num_workers": 4, "num_instances": 1 } 12 | ], 13 | "Latency" : [ 14 | {"triton_optimization": true, "quantize": false}, 15 | {"triton_optimization": false, "quantize": false} 16 | ], 17 | "Score" : [ 18 | {"quantize": false} 19 | ] 20 | } -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = [] 11 | 12 | test_requirements = ["pytest>=3"] 13 | 14 | setup( 15 | name="proteus.models.{{cookiecutter.package_name}}", 16 | version="0.0.1", 17 | description="Proteus {{cookiecutter.model_name}}", 18 | author="Pieter Blomme", 19 | 
author_email="pieter.blomme@gmail.com", 20 | python_requires=">=3.6", 21 | classifiers=[], 22 | keywords="", 23 | entry_points={}, 24 | install_requires=requirements, 25 | long_description=readme, 26 | include_package_data=True, 27 | namespace_packages=["proteus", "proteus.models"], 28 | packages=find_namespace_packages(exclude=["tests"]), 29 | test_suite="tests", 30 | tests_require=test_requirements, 31 | extras_require={"test": test_requirements}, 32 | zip_safe=False, 33 | ) 34 | -------------------------------------------------------------------------------- /tools/templating/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = ["cookiecutter==2.1.1"] 11 | test_requirements = [] 12 | 13 | setup( 14 | name="proteus.tools.templating", 15 | version="0.0.1", 16 | description="Proteus RetinaNet", 17 | author="Pieter Blomme", 18 | author_email="pieter.blomme@gmail.com", 19 | python_requires=">=3.6", 20 | classifiers=[], 21 | keywords="", 22 | entry_points={ 23 | "console_scripts": [ 24 | "proteus.template=proteus.tools.templating.command_line:main" 25 | ], 26 | }, 27 | install_requires=requirements, 28 | long_description=readme, 29 | include_package_data=True, 30 | namespace_packages=["proteus.tools"], 31 | packages=find_namespace_packages(exclude=["tests"]), 32 | test_suite="tests", 33 | tests_require=test_requirements, 34 | extras_require={"test": test_requirements}, 35 | zip_safe=False, 36 | ) 37 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/README.md: -------------------------------------------------------------------------------- 1 | # Proteus EfficientPose 2 | 3 | Package for EfficientPose model usage in Proteus 4 | 5 | Models and implementation courtesy of https://github.com/daniegr/EfficientPose (licensed under Creative Commons Attribution 4.0 International). 6 | 7 | ## Process for model conversion to ONNX 8 | ```` 9 | pip install tf2onnx==1.7.2 10 | git clone https://github.com/daniegr/EfficientPose 11 | cd models/tensorflow 12 | python -m tf2onnx.convert --graphdef EfficientPoseIV.pb --output EfficientPoseIV.onnx --inputs input_res1:0 --outputs upscaled_confs/BiasAdd:0 --opset 11 13 | ```` 14 | 15 | ## Pre/Postprocessing 16 | Almost exact copy paste from daniegr's repo. 17 | The only change is that the normalized coords are converted to actual pixel values. 
18 | 19 | ## Models 20 | - EfficientPoseRT: 224x224 resolution 21 | - EfficientPoseI: 256x256 22 | - EfficientPoseII: 368x368 23 | - EfficientPoseIII: 480x480 24 | - EfficientPoseIV: 600x600 25 | 26 | Available Proteus configuration options: 27 | - Num Instances 28 | - Quantization (INT8 precision) 29 | - TritonOptimization 30 | - Dynamic Batching -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 PieterBlomme 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /packages/proteus.models/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = [ 11 | "opencv-python-headless==4.4.0.40", 12 | "requests==2.25.1", 13 | "onnx==1.8.0", 14 | "onnxruntime==1.6.0", 15 | "jinja2==2.11.2", 16 | "pydantic==1.5.1", 17 | "tritonclient[http]==2.3.0", 18 | ] 19 | 20 | test_requirements = ["pytest>=3"] 21 | 22 | setup( 23 | name="proteus.models.base", 24 | version="0.0.1", 25 | description="Proteus models", 26 | author="Pieter Blomme", 27 | author_email="pieter.blomme@gmail.com", 28 | python_requires=">=3.6", 29 | classifiers=[], 30 | keywords="", 31 | entry_points={}, 32 | install_requires=requirements, 33 | long_description=readme, 34 | include_package_data=True, 35 | namespace_packages=["proteus.models"], 36 | packages=find_namespace_packages(exclude=["tests"]), 37 | test_suite="tests", 38 | tests_require=test_requirements, 39 | extras_require={"test": test_requirements}, 40 | zip_safe=False, 41 | ) 42 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/proteus/models/efficientdet/coco_names.txt: -------------------------------------------------------------------------------- 1 | __background__ 2 | person 3 | bicycle 4 | car 5 | motorcycle 6 | airplane 7 | bus 8 | train 9 | truck 10 | boat 11 | traffic light 12 | fire hydrant 13 | bad class 14 | stop sign 15 | parking meter 16 | bench 17 | bird 18 | cat 19 | dog 20 | horse 21 | sheep 22 | cow 23 | elephant 24 | bear 25 | zebra 26 | giraffe 27 | bad class 28 
| backpack 29 | umbrella 30 | bad class 31 | bad class 32 | handbag 33 | tie 34 | suitcase 35 | frisbee 36 | skis 37 | snowboard 38 | sports ball 39 | kite 40 | baseball bat 41 | baseball glove 42 | skateboard 43 | surfboard 44 | tennis racket 45 | bottle 46 | bad class 47 | wine glass 48 | cup 49 | fork 50 | knife 51 | spoon 52 | bowl 53 | banana 54 | apple 55 | sandwich 56 | orange 57 | broccoli 58 | carrot 59 | hot dog 60 | pizza 61 | donut 62 | cake 63 | chair 64 | couch 65 | potted plant 66 | bed 67 | bad class 68 | dining table 69 | bad class 70 | bad class 71 | toilet 72 | bad class 73 | tv 74 | laptop 75 | mouse 76 | remote 77 | keyboard 78 | cell phone 79 | microwave 80 | oven 81 | toaster 82 | sink 83 | refrigerator 84 | bad class 85 | book 86 | clock 87 | vase 88 | scissors 89 | teddy bear 90 | hair drier 91 | toothbrush -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/MobileNet/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for MobileNetV2 2 | 3 | ## Score 4 | 5 | Score on 100 samples from ImageNette dataset 6 | | | triton_optimization | score | 7 | |---:|:----------------------|--------:| 8 | | 0 | True | 0.67 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from ImageNette dataset 12 | | | triton_optimization | num_workers | num_instances | throughput | 13 | |---:|:----------------------|--------------:|----------------:|-------------:| 14 | | 4 | True | 4 | 1 | 212.988 | 15 | | 2 | True | 2 | 1 | 143.151 | 16 | | 3 | True | 1 | 2 | 88.5616 | 17 | | 0 | True | 1 | 1 | 80.648 | 18 | | 1 | False | 1 | 1 | 35.4668 | 19 | 20 | ## Latency 21 | 22 | Average latency in ms on 10 samples from ImageNette dataset 23 | | | triton_optimization | latency | 24 | |---:|:----------------------|----------:| 25 | | 0 | True | 9.6533 | 26 | | 1 | False | 69.8804 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/ResNet50/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for Resnet50V2 2 | 3 | ## Score 4 | 5 | Score on 100 samples from ImageNette dataset 6 | | | triton_optimization | score | 7 | |---:|:----------------------|--------:| 8 | | 0 | True | 0.74 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from ImageNette dataset 12 | | | triton_optimization | num_workers | num_instances | throughput | 13 | |---:|:----------------------|--------------:|----------------:|-------------:| 14 | | 4 | True | 4 | 1 | 192.942 | 15 | | 2 | True | 2 | 1 | 123.461 | 16 | | 3 | True | 1 | 2 | 76.5906 | 17 | | 0 | True | 1 | 1 | 76.5442 | 18 | | 1 | False | 1 | 1 | 64.2332 | 19 | 20 | ## Latency 21 | 22 | Average latency in ms on 10 samples from ImageNette dataset 23 | | | triton_optimization | latency | 24 | |---:|:----------------------|----------:| 25 | | 0 | True | 11.8449 | 26 | | 1 | False | 18.6604 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/RetinaNet/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for RetinaNet 2 | 3 | ## Score 4 | 5 | Score on 100 samples from CocoValBBox dataset 6 | | | triton_optimization | score | 7 | |---:|:----------------------|---------:| 8 | | 0 | True | 0.353822 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from CocoValBBox dataset 
12 | | | triton_optimization | num_workers | num_instances | throughput | 13 | |---:|:----------------------|--------------:|----------------:|-------------:| 14 | | 4 | True | 4 | 1 | 23.2188 | 15 | | 2 | True | 2 | 1 | 13.9999 | 16 | | 3 | True | 1 | 2 | 9.29552 | 17 | | 0 | True | 1 | 1 | 9.12073 | 18 | | 1 | False | 1 | 1 | 7.6443 | 19 | 20 | ## Latency 21 | 22 | Average latency in ms on 10 samples from CocoValBBox dataset 23 | | | triton_optimization | latency | 24 | |---:|:----------------------|----------:| 25 | | 0 | True | 111.923 | 26 | | 1 | False | 179.619 | -------------------------------------------------------------------------------- /tools/benchmarking/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = [ 11 | "proteus.datasets==0.0.1", 12 | "requests==2.25.1", 13 | "jinja2==2.11.2", 14 | "pandas==1.2.0", 15 | "tabulate==0.8.7", 16 | ] 17 | test_requirements = [] 18 | 19 | setup( 20 | name="proteus.tools.benchmarking", 21 | version="0.0.1", 22 | description="Proteus benchmarking tool", 23 | author="Pieter Blomme", 24 | author_email="pieter.blomme@gmail.com", 25 | python_requires=">=3.6", 26 | classifiers=[], 27 | keywords="", 28 | entry_points={ 29 | "console_scripts": [ 30 | "proteus.benchmark=proteus.tools.benchmarking.suite:main", 31 | ], 32 | }, 33 | install_requires=requirements, 34 | long_description=readme, 35 | include_package_data=True, 36 | namespace_packages=["proteus.tools"], 37 | packages=find_namespace_packages(exclude=["tests"]), 38 | test_suite="tests", 39 | tests_require=test_requirements, 40 | extras_require={"test": test_requirements}, 41 | zip_safe=False, 42 | ) 43 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """The setup script.""" 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | with open("README.md") as readme_file: 8 | readme = readme_file.read() 9 | 10 | requirements = [ 11 | "pymediainfo==4.1", 12 | "numpy>=1.16.0", 13 | "tensorflow==2.5.3", 14 | "scikit-image==0.16.2", 15 | "opencv-python-headless>=3.3.1.11", 16 | "sk-video==1.1.10", 17 | "torch==1.4.0", 18 | "h5py==2.10.0", 19 | ] 20 | 21 | test_requirements = ["pytest>=3"] 22 | 23 | setup( 24 | name="proteus.models.efficientpose", 25 | version="0.0.1", 26 | description="Proteus EfficientPose", 27 | author="Pieter Blomme", 28 | author_email="pieter.blomme@gmail.com", 29 | python_requires=">=3.6", 30 | classifiers=[], 31 | keywords="", 32 | entry_points={}, 33 | install_requires=requirements, 34 | long_description=readme, 35 | include_package_data=True, 36 | namespace_packages=["proteus", "proteus.models"], 37 | packages=find_namespace_packages(exclude=["tests"]), 38 | test_suite="tests", 39 | tests_require=test_requirements, 40 | extras_require={"test": test_requirements}, 41 | zip_safe=False, 42 | ) 43 | -------------------------------------------------------------------------------- /kubernetes/prometheus-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | 
kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: prometheus 10 | name: prometheus 11 | spec: 12 | replicas: 0 13 | selector: 14 | matchLabels: 15 | io.kompose.service: prometheus 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert 22 | kompose.version: 1.22.0 (955b78124) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.service: prometheus 26 | spec: 27 | containers: 28 | - args: 29 | - --web.enable-lifecycle 30 | - --config.file=/etc/prometheus/prometheus.yml 31 | image: pieterblomme/prometheus:0.1 32 | name: prometheus 33 | ports: 34 | - containerPort: 9090 35 | resources: {} 36 | volumeMounts: 37 | - mountPath: /prometheus 38 | name: prometheus-data 39 | restartPolicy: Always 40 | volumes: 41 | - name: prometheus-data 42 | persistentVolumeClaim: 43 | claimName: prometheus-data 44 | status: {} 45 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/EfficientNetLite4/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for EfficientNetLite4 2 | 3 | ## Score 4 | 5 | Score on 100 samples from ImageNette dataset 6 | | | quantize | score | 7 | |---:|:-----------|--------:| 8 | | 0 | False | 0.85 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from ImageNette dataset 12 | | | triton_optimization | quantize | num_workers | num_instances | throughput | 13 | |---:|:----------------------|:-----------|--------------:|----------------:|-------------:| 14 | | 4 | True | False | 4 | 1 | 215.22 | 15 | | 3 | True | False | 2 | 1 | 133.814 | 16 | | 2 | True | False | 1 | 2 | 81.008 | 17 | | 0 | True | False | 1 | 1 | 80.4053 | 18 | | 1 | False | False | 1 | 1 | 17.0931 | 19 | 20 | ## Latency 21 | 22 | Average latency in ms on 10 samples from ImageNette dataset 23 | | | triton_optimization | quantize | latency | 24 | |---:|:----------------------|:-----------|----------:| 25 | | 0 | True | False | 11.5929 | 26 | | 1 | False | False | 192.211 | -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/proteus/models/retinanet/config.template: -------------------------------------------------------------------------------- 1 | platform: "onnxruntime_onnx" 2 | max_batch_size : 0 3 | input [ 4 | { 5 | name: "input" 6 | data_type: TYPE_FP32 7 | format: FORMAT_NONE 8 | dims: [ 1, 3, 480, 640 ] 9 | } 10 | ] 11 | output [ 12 | { 13 | name: "output10" 14 | data_type: TYPE_FP32 15 | dims: [1, 36, 4, 5] 16 | }, 17 | { 18 | name: "output9" 19 | data_type: TYPE_FP32 20 | dims: [1, 36, 8, 10] 21 | }, 22 | { 23 | name: "output8" 24 | data_type: TYPE_FP32 25 | dims: [1, 36, 15, 20] 26 | }, 27 | { 28 | name: "output7" 29 | data_type: TYPE_FP32 30 | dims: [1, 36, 30, 40] 31 | }, 32 | { 33 | name: "output1" 34 | data_type: TYPE_FP32 35 | dims: [1, 720, 60, 80] 36 | }, 37 | { 38 | name: "output2" 39 | data_type: TYPE_FP32 40 | dims: [1, 720, 30, 40] 41 | }, 42 | { 43 | name: "output4" 44 | data_type: TYPE_FP32 45 | dims: [1, 720, 8, 10] 46 | }, 47 | { 48 | name: "output6" 49 | data_type: TYPE_FP32 50 | dims: [1, 36, 60, 80] 51 | }, 52 | { 53 | name: "output3" 54 | data_type: TYPE_FP32 55 | dims: [1, 720, 15, 20] 56 | }, 57 | { 58 | name: "output5" 59 | data_type: TYPE_FP32 60 | dims: [1, 720, 4, 5] 61 | } 62 | ] 63 | {{triton_optimization}} 64 | {{num_instances}} 
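The `{{...}}` placeholders at the bottom of config templates like the RetinaNet one above are filled in at model-load time; since `jinja2` is already a dependency of `proteus.models.base`, rendering presumably looks something like this sketch (the exact substituted snippets and parameter names here are assumptions, not the package's actual code):

```
from jinja2 import Template


def render_config(template_path, num_instances=1, triton_optimization=True):
    """Render a Triton config.template; an empty string simply omits a block."""
    with open(template_path) as f:
        template = Template(f.read())
    return template.render(
        # e.g. "instance_group [{ count: 2 }]" to run two copies of the model
        num_instances=f"instance_group [{{ count: {num_instances} }}]",
        triton_optimization=(
            "optimization { execution_accelerators { "
            'gpu_execution_accelerator : [ { name : "tensorrt" } ] } }'
            if triton_optimization
            else ""
        ),
    )
```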
-------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | triton: 5 | image: nvcr.io/nvidia/tritonserver:20.12-py3 6 | command: tritonserver --model-repository=/models --strict-model-config=False --model-control-mode=explicit 7 | shm_size: '2gb' 8 | ports: 9 | - "8000:8000" 10 | - "8001:8001" 11 | - "8002:8002" 12 | volumes: 13 | - model-repository:/models 14 | api: 15 | build: 16 | context: . 17 | dockerfile: proteus_api/Dockerfile 18 | ports: 19 | - "80:80" 20 | depends_on: 21 | - triton 22 | volumes: 23 | - model-repository:/models 24 | - logs:/logs 25 | - ./packages:/packages 26 | environment: 27 | - LOGLEVEL=INFO 28 | - MAX_ACTIVE_MODELS=3 29 | - MODEL_INACTIVITY=1 30 | - TRITON_CONCURRENCY=1 31 | - TRITON_CLIENT_TIMEOUT=300 32 | - TIMEOUT=120 # gunicorn setting 33 | - KEEP_ALIVE=2 # gunicorn setting 34 | prometheus: 35 | image: prom/prometheus:v2.21.0 36 | ports: 37 | - 9090:9090 38 | volumes: 39 | - ./prometheus:/etc/prometheus 40 | - prometheus-data:/prometheus 41 | command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml 42 | grafana: 43 | build: './grafana' 44 | container_name: 'grafana' 45 | ports: 46 | - '3000:3000' 47 | 48 | volumes: 49 | model-repository: 50 | logs: 51 | prometheus-data: -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/proteus/models/efficientnetlite4/client.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from proteus.models.base import ClassificationModel 4 | from proteus.models.base.modelconfigs import ( 5 | BaseModelConfig, 6 | QuantizationModelConfig, 7 | TritonOptimizationModelConfig, 8 | ) 9 | 10 | from .helpers import read_class_names 11 | 12 | folder_path = Path(__file__).parent 13 | 14 | 15 | class ModelConfig( 16 | BaseModelConfig, QuantizationModelConfig, TritonOptimizationModelConfig 17 | ): 18 | pass 19 | 20 | 21 | class EfficientNetLite4(ClassificationModel): 22 | 23 | CHANNEL_FIRST = False 24 | DESCRIPTION = ( 25 | "EfficientNet-Lite 4 is the largest variant and most accurate " 26 | "of the set of EfficientNet-Lite model. It is an integer-only quantized " 27 | "model that produces the highest accuracy of all of the EfficientNet models. " 28 | "It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time " 29 | "(e.g. 30ms/image) on a Pixel 4 CPU. Taken from https://github.com/onnx/models." 
30 | ) 31 | CLASSES = read_class_names(f"{folder_path}/imagenet_labels.txt") 32 | MODEL_URL = "https://github.com/onnx/models/raw/master/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx" 33 | CONFIG_PATH = f"{folder_path}/config.template" 34 | INPUT_NAME = "images:0" 35 | OUTPUT_NAMES = ["Softmax:0"] 36 | DTYPE = "FP32" 37 | MODEL_CONFIG = ModelConfig 38 | -------------------------------------------------------------------------------- /kubernetes/triton-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: triton 10 | name: triton 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: triton 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert 22 | kompose.version: 1.22.0 (955b78124) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.service: triton 26 | spec: 27 | containers: 28 | - args: 29 | - tritonserver 30 | - --model-repository=/models 31 | - --strict-model-config=False 32 | - --model-control-mode=explicit 33 | image: nvcr.io/nvidia/tritonserver:20.09-py3 34 | name: triton 35 | ports: 36 | - containerPort: 8000 37 | - containerPort: 8001 38 | - containerPort: 8002 39 | resources: 40 | limits: 41 | nvidia.com/gpu: 0 # set to 1 to request a GPU 42 | volumeMounts: 43 | - mountPath: /models 44 | name: model-repository 45 | restartPolicy: Always 46 | volumes: 47 | - name: model-repository 48 | persistentVolumeClaim: 49 | claimName: model-repository 50 | status: {} 51 | -------------------------------------------------------------------------------- /tools/benchmarking/benchmarks/YoloV4/Benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark for YoloV4 2 | 3 | ## Score 4 | 5 | Score on 100 samples from CocoValBBox dataset 6 | |    | quantize   |    score | 7 | |---:|:-----------|---------:| 8 | |  0 | False      | 0.342349 | 9 | 10 | ## Throughput 11 | Average throughput in FPS on 50 samples from CocoValBBox dataset 12 | |    | triton_optimization   | quantize   | dynamic_batching   |   num_workers |   num_instances |   throughput | 13 | |---:|:----------------------|:-----------|:-------------------|--------------:|----------------:|-------------:| 14 | |  5 | True                  | False      | True               |             4 |               1 |     59.0431  | 15 | |  3 | True                  | False      | True               |             2 |               1 |     43.8421  | 16 | |  1 | True                  | False      | False              |             1 |               1 |     27.4329  | 17 | |  0 | True                  | False      | True               |             1 |               1 |     27.2565  | 18 | |  4 | True                  | False      | True               |             1 |               2 |     24.9829  | 19 | |  2 | False                 | False      | False              |             1 |               1 |      9.23008 | 20 | 21 | ## Latency 22 | 23 | Average latency in ms on 10 samples from CocoValBBox dataset 24 | |    | triton_optimization   | quantize   |   latency | 25 | |---:|:----------------------|:-----------|----------:| 26 | |  0 | True                  | False      |   42.3883 | 27 | |  1 | False                 | False      |  125.323  | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI for push 2 | 3 | # Controls when the action will run.
Triggers the workflow on push 4 | on: 5 | push: 6 | branches: 7 | - '*' 8 | - '!master' 9 | 10 | jobs: 11 | test: 12 | # The type of runner that the job will run on 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | python-version: [3.7] 17 | 18 | steps: 19 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 20 | - uses: actions/checkout@v2 21 | 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip==20.2 30 | pip install black autoflake isort pytest requests Pillow 31 | pip install nvidia-pyindex==1.0.4 32 | if [ -f test_requirements.txt ]; then pip install -r test_requirements.txt; fi 33 | 34 | - name: Lint 35 | run: | 36 | autoflake --check --recursive . 37 | black --check --exclude=/tools/templating/proteus/tools/templating/templates . 38 | isort --check-only --multi-line=3 --trailing-comma --force-grid-wrap=0 --use-parentheses --line-width=88 --diff . 39 | 40 | - name: Build the docker-compose stack 41 | run: docker-compose up -d 42 | 43 | - name: Test with pytest 44 | run: | 45 | pytest -s --ignore=tools/templating/proteus/tools/templating/templates -m "not slow" 46 | 47 | - name: Test slow tests with pytest 48 | run: | 49 | pytest -s --ignore=tools/templating/proteus/tools/templating/templates -m "slow" -------------------------------------------------------------------------------- /kubernetes/api-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.22.0 (955b78124) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: api 10 | name: api 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: api 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert 22 | kompose.version: 1.22.0 (955b78124) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.service: api 26 | spec: 27 | containers: 28 | - env: 29 | - name: LOGLEVEL 30 | value: "INFO" 31 | - name: MAX_ACTIVE_MODELS 32 | value: "3" 33 | - name: MODEL_INACTIVITY 34 | value: "1" 35 | - name: TRITON_CONCURRENCY 36 | value: "1" 37 | - name: "TRITON_CLIENT_TIMEOUT" 38 | value: "1000" 39 | - name: "TIMEOUT" 40 | value: "1000" 41 | - name: "KEEP_ALIVE" 42 | value: "1000" 43 | - name: "WEB_CONCURRENCY" 44 | value: "4" 45 | image: pieterblomme/api:release-0.0.2 46 | name: api 47 | ports: 48 | - containerPort: 80 49 | resources: {} 50 | volumeMounts: 51 | - mountPath: /models 52 | name: model-repository 53 | - mountPath: /logs 54 | name: logs 55 | restartPolicy: Always 56 | volumes: 57 | - name: model-repository 58 | persistentVolumeClaim: 59 | claimName: model-repository 60 | - name: logs 61 | persistentVolumeClaim: 62 | claimName: logs 63 | status: {} 64 | -------------------------------------------------------------------------------- /packages/proteus.datasets/proteus/datasets/imagenette.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import random 4 | import tarfile 5 | import tempfile 6 | import urllib.request 7 | 8 | import requests 9 | 10 | from .datasets import Dataset 11 | 12 | tmpfolder = tempfile.gettempdir() 13 | 14 | 15 | class ImageNette(Dataset): 16 | 17 
|     lbl_dict = dict(
18 |         n01440764="tench",
19 |         n02102040="english springer",
20 |         n02979186="cassette player",
21 |         n03000684="chain saw",
22 |         n03028079="church",
23 |         n03394916="french horn",
24 |         n03417042="garbage truck",
25 |         n03425413="gas pump",
26 |         n03445777="golf ball",
27 |         n03888257="parachute",
28 |     )
29 | 
30 |     def __init__(self, k=50):
31 |         self.maybe_download()
32 |         files = glob.glob(f"{tmpfolder}/datasets/imagenette2-320/val/*/*")
33 |         random.shuffle(files)
34 |         self.files = files[:k]
35 | 
36 |     def maybe_download(self):
37 |         if not os.path.isdir(f"{tmpfolder}/datasets/imagenette2-320"):
38 |             print("Downloading ImageNette 320 px")
39 |             thetarfile = (
40 |                 "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz"
41 |             )
42 |             ftpstream = urllib.request.urlopen(thetarfile)
43 |             thetarfile = tarfile.open(fileobj=ftpstream, mode="r|gz")
44 |             thetarfile.extractall(path=f"{tmpfolder}/datasets")
45 | 
46 |     def __getitem__(self, index):
47 |         fpath = self.files[index]
48 |         synset = fpath.split("/")[-2]
49 |         target = self.lbl_dict[synset].lower()
50 |         return fpath, target
51 | 
52 |     def __len__(self):
53 |         return len(self.files)
54 | 
55 |     def eval(self, preds):
56 |         preds = [pred[0][0]["class_name"].lower() for pred in preds]
57 |         targets = [self.__getitem__(i)[1] for i in range(self.__len__())]
58 |         correct = 0
59 |         for p, t in zip(preds, targets):
60 |             if p == t:
61 |                 correct += 1
62 |         return correct / self.__len__()
63 | 
-------------------------------------------------------------------------------- /packages/proteus.datasets/proteus/datasets/bsds.py: --------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import random
4 | import tarfile
5 | import tempfile
6 | import urllib.request
7 | 
8 | import numpy as np
9 | import requests
10 | from PIL import Image
11 | 
12 | from .datasets import Dataset
13 | 
14 | tmpfolder = tempfile.gettempdir()
15 | 
16 | 
17 | class BSDSSuperRes(Dataset):
18 |     """
19 |     Will yield resized (224,224) images of BSDS500
20 |     For evaluation, we resize to 672,672 and check average MSE
21 |     It's not a very good metric, but it'll do ...
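22 |     (The model upscales 3x, so the 224x224 inputs come back as 672x672 images; hence the 672x672 comparison in eval.)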
22 | """ 23 | 24 | def __init__(self, k=50): 25 | self.maybe_download() 26 | files = glob.glob(f"{tmpfolder}/datasets/BSR/BSDS500/data/images/test/*.jpg") 27 | random.shuffle(files) 28 | self.files = files[:k] 29 | 30 | def maybe_download(self): 31 | if not os.path.isdir(f"{tmpfolder}/datasets/BSR"): 32 | print("Downloading BSDS500") 33 | thetarfile = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz" 34 | ftpstream = urllib.request.urlopen(thetarfile) 35 | thetarfile = tarfile.open(fileobj=ftpstream, mode="r|gz") 36 | thetarfile.extractall(path=f"{tmpfolder}/datasets") 37 | 38 | def __getitem__(self, index): 39 | fpath = self.files[index] 40 | with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp: 41 | Image.open(fpath).resize((224, 224)).save(tmp.name) 42 | Image.open(tmp.name) 43 | return tmp.name, None 44 | 45 | def __len__(self): 46 | return len(self.files) 47 | 48 | def eval(self, preds): 49 | originals = [self.files[i] for i in range(self.__len__())] 50 | mses = [] 51 | for original, pred in zip(originals, preds): 52 | np_original = np.array(Image.open(original).resize((672, 672))) 53 | np_pred = np.array(Image.open(pred)) 54 | mse = np.square(np.subtract(np_original, np_pred)).mean() 55 | mses.append(mse) 56 | return np.array(mses).mean() 57 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/proteus/models/{{cookiecutter.package_name}}/client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | from proteus.models.base import BaseModel 5 | from proteus.models.base.modelconfigs import ( 6 | BaseModelConfig, 7 | BatchingModelConfig, 8 | QuantizationModelConfig, 9 | TritonOptimizationModelConfig, 10 | ) 11 | 12 | folder_path = Path(__file__).parent 13 | logger = logging.getLogger(__name__) 14 | 15 | class ModelConfig( 16 | BaseModelConfig, 17 | TritonOptimizationModelConfig, 18 | BatchingModelConfig, 19 | QuantizationModelConfig, # this will require ONNX opset 11 20 | ): 21 | pass 22 | 23 | class {{cookiecutter.model_name}}(BaseModel): 24 | 25 | DESCRIPTION = ( 26 | "{{cookiecutter.model_description}}" 27 | ) 28 | MODEL_URL = "{{cookiecutter.model_url}}" 29 | 30 | """ 31 | Note: if CONFIG_PATH is None, Triton will figure out a default configuration from the ONNX file. 32 | The {{cookiecutter.model_name}}/load endpoint will return the used configuration, which can then be 33 | used to fill the actual config.template. It is not recommended to leave CONFIG_PATH empty in production 34 | because it will not support features like batching, num_instances and TritonOptimization. 35 | """ 36 | CONFIG_PATH = None 37 | #CONFIG_PATH = f"{folder_path}/config.template" 38 | 39 | INPUT_NAME = None 40 | OUTPUT_NAMES = None 41 | DTYPE = None 42 | MODEL_CONFIG = ModelConfig 43 | 44 | @classmethod 45 | def preprocess(cls, img): 46 | """ 47 | Pre-process an image to meet the size, type and format 48 | requirements specified by the parameters. 49 | 50 | :param img: Pillow image 51 | 52 | :returns: 53 | - model_input: input as required by the model 54 | - extra_data: dict of data that is needed by the postprocess function 55 | """ 56 | extra_data = {} 57 | return img, extra_data 58 | 59 | @classmethod 60 | def postprocess(cls, results, extra_data, batch_size, batching): 61 | """ 62 | Post-process results to return valid outputs. 
63 | :param results: model outputs 64 | :param extra_data: dict of data that is needed by the postprocess function 65 | :param batch_size 66 | :param batching: boolean flag indicating if batching 67 | 68 | :returns: json result 69 | """ 70 | return results 71 | -------------------------------------------------------------------------------- /tools/templating/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.types/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /tools/benchmarking/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.datasets/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.resnet50/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.superres/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.yolov4/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientdet/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.mobilenet/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientnetlite4/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #Vscode 132 | .vscode 133 | -------------------------------------------------------------------------------- /tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
95 | __pypackages__/
96 | 
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 | 
101 | # SageMath parsed files
102 | *.sage.py
103 | 
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 | 
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 | 
117 | # Rope project settings
118 | .ropeproject
119 | 
120 | # mkdocs documentation
121 | /site
122 | 
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 | 
128 | # Pyre type checker
129 | .pyre/
130 | 
131 | #Vscode
132 | .vscode
133 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # Proteus
2 | 
3 | ## Goals
4 | 
5 | This was a personal project, born from the following frustrations:
6 | - There's a lot of open-sourced code available, but often it is written towards training/evaluation on a specific dataset
7 | - It can be time-consuming to rework the code towards an inference-oriented scenario
8 | - It's not easy to benchmark the accuracy of models in a standardized environment
9 | - It's not easy to benchmark the speed of models against each other.
10 | 
11 | With Proteus, I wanted to:
12 | - Unify inference for a bunch of models
13 | - Benchmark them easily against each other
14 | - Have a reusable framework where I can easily add extra models.
15 | 
16 | It is my intention to add more vision models to this repository, especially state of the art as they come out. The repo is open-source and I very much welcome any help!
17 | Unfortunately some models will not fit within this framework, e.g. for Transformer-based architectures it's very hard to convert them to an ONNX model. I am still figuring out how to handle this.
18 | 
19 | ## Architecture
20 | 
21 | Proteus works with ONNX model files. Model inference is handled by a Triton backend, with FastAPI providing an API service in front of it. I also supply Prometheus and Grafana for monitoring, but that is still work in progress.
22 | 
23 | ## Howto (Development)
24 | 
25 | You will need docker and docker-compose installed. Development can be done using the docker-compose.yml.
26 | In development mode, the proteus_api/Dockerfile is used. This mounts the packages folder and watches for file changes, so you do not have to restart for every code change. For changes to the API, or new package requirements, you will need to restart docker-compose.
27 | 
28 | packages/package_install.txt contains the list of packages to install. If you only need one or two models for your use case, I recommend installing only those.
29 | 
30 | You can create boilerplate code for a new model implementation using tools/templating.
31 | In general you will need three things to deploy a model:
32 | - ONNX model file
33 | - preprocessing code
34 | - postprocessing code
35 | 
36 | ## Howto (Production)
37 | 
38 | For production, you need to build using proteus_api/Dockerfile.prod. This will pre-install the model packages.
39 | I am also providing a Kubernetes example; this is the recommended way to deploy in production.
40 | 
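41 | ## Example usage
42 | 
43 | Once the stack is up, a model is loaded and queried through the API. Below is a minimal Python sketch; the /load endpoint is described by the model template, but the predict route and upload field name here are assumptions, so check the interactive docs of your deployment:
44 | 
45 | ```python
46 | import requests
47 | 
48 | API = "http://localhost:80"
49 | 
50 | # Load the model into Triton; the load endpoint returns the configuration that was used.
51 | requests.post(f"{API}/YoloV4/load")
52 | 
53 | # Run inference on a local image (route and field name are illustrative).
54 | with open("example.jpg", "rb") as f:
55 |     print(requests.post(f"{API}/YoloV4/predict", files={"file": f}).json())
56 | ```
57 | 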
58 | ## Available models
59 | 
60 | ### Classification
61 | MobileNetV2
62 | ResNet50V2
63 | EfficientNetLite4
64 | 
65 | ### Instance Segmentation
66 | MaskRCNN
67 | 
68 | ### Detection
69 | RetinaNet
70 | EfficientDetD0
71 | EfficientDetD2
72 | YoloV4
73 | 
74 | ### Pose Estimation
75 | Note: there is a known issue when images are smaller than the expected input size; this still needs to be fixed.
76 | 
77 | EfficientPoseRT
78 | EfficientPoseI
79 | EfficientPoseII
80 | EfficientPoseIII
81 | EfficientPoseIV
82 | 
83 | ### Misc
84 | SuperResolution
-------------------------------------------------------------------------------- /.gitignore: --------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 | 
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 | 
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 | 
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | target/
76 | 
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 | 
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 | 
84 | # pyenv
85 | .python-version
86 | 
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 | 
94 | # PEP 582; used by e.g.
github.com/David-OConnor/pyflow
95 | __pypackages__/
96 | 
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 | 
101 | # SageMath parsed files
102 | *.sage.py
103 | 
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 | 
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 | 
117 | # Rope project settings
118 | .ropeproject
119 | 
120 | # mkdocs documentation
121 | /site
122 | 
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 | 
128 | # Pyre type checker
129 | .pyre/
130 | 
131 | #Vscode
132 | .vscode
133 | 
134 | #Vue
135 | .DS_Store
136 | node_modules/
137 | /dist/
138 | 
139 | # local env files
140 | .env.local
141 | .env.*.local
142 | 
143 | # Log files
144 | npm-debug.log*
145 | yarn-debug.log*
146 | yarn-error.log*
147 | 
148 | # Editor directories and files
149 | .idea
150 | .vscode
151 | *.suo
152 | *.ntvs*
153 | *.njsproj
154 | *.sln
155 | *.sw*
156 | 
157 | # model files
158 | *.onnx
-------------------------------------------------------------------------------- /proteus_api/app/helper.py: --------------------------------------------------------------------------------
1 | import datetime
2 | import importlib
3 | import logging
4 | import os
5 | import pkgutil
6 | from pathlib import Path
7 | 
8 | import proteus.models
9 | import tritonclient.http as httpclient
10 | from file_read_backwards import FileReadBackwards
11 | from jinja2 import Environment, FileSystemLoader
12 | 
13 | currdir = os.path.dirname(os.path.abspath(__file__))
14 | 
15 | env = Environment(
16 |     loader=FileSystemLoader([f"{currdir}/routers/templates"]),
17 | )
18 | template = env.get_template("template.py")
19 | 
20 | logger = logging.getLogger(__name__)
21 | 
22 | 
23 | def get_triton_client():
24 |     # set up Triton connection
25 |     TRITONURL = "triton:8000"
26 |     # TODO check that always available ...
27 |     try:
28 |         # Specify large enough concurrency to handle the number of requests.
29 |         concurrency = 1
30 |         triton_client = httpclient.InferenceServerClient(
31 |             url=TRITONURL, concurrency=concurrency
32 |         )
33 |         logger.info(f"Server ready? {triton_client.is_server_ready()}")
34 |     except Exception as e:
35 |         logger.error("client creation failed: " + str(e))
36 |         raise  # re-raise: without a client the return below would hit an unbound variable
37 |     return triton_client
38 | 
39 | 
40 | def get_model_dict():
41 |     # discover models
42 |     def iter_namespace(ns_pkg):
43 |         # Specifying the second argument (prefix) to iter_modules makes the
44 |         # returned name an absolute name instead of a relative one. This allows
45 |         # import_module to work without having to do additional modification to
46 |         # the name.
47 | return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".") 48 | 49 | model_dict = {} 50 | for finder, name, ispkg in iter_namespace(proteus.models): 51 | module = importlib.import_module(name) 52 | model_dict.update(module.model_dict) 53 | logger.debug(model_dict) 54 | return model_dict 55 | 56 | 57 | def generate_endpoints(model): 58 | targetfile = Path(f"{currdir}/routers/{model}.py") 59 | if not targetfile.is_file(): 60 | # file does not exist yet 61 | with open(targetfile, "w") as fh: 62 | fh.write(template.render(name=model)) 63 | 64 | 65 | def check_last_active(model): 66 | with FileReadBackwards("/logs/predictions.log") as frb: 67 | 68 | # getting lines by lines starting from the last line up 69 | for l in frb: 70 | ts, name, action = l.split("|")[0], l.split("|")[1], l.split("|")[2] 71 | if model == name and action == "LOADING": 72 | # Never trigger unload if still loading ... 73 | return 0 74 | elif model == name: 75 | last_call = datetime.datetime.strptime(ts, "%Y-%m-%d %H:%M:%S,%f") 76 | elapsed = datetime.datetime.now() - last_call 77 | return elapsed.total_seconds() / 60 78 | return 60 * 60 * 24 # some very large number, eg. 1 day 79 | -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/proteus/models/maskrcnn/helpers.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import math 3 | 4 | import cv2 5 | import numpy as np 6 | from PIL import Image 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | def read_class_names(class_file_name): 12 | """loads class name from a file""" 13 | names = {} 14 | with open(class_file_name, "r") as data: 15 | for ID, name in enumerate(data): 16 | names[ID] = name.strip("\n") 17 | return names 18 | 19 | 20 | def image_preprocess(image): 21 | # Resize 22 | ratio = 800.0 / min(image.size[0], image.size[1]) 23 | image = image.resize( 24 | (int(ratio * image.size[0]), int(ratio * image.size[1])), Image.BILINEAR 25 | ) 26 | 27 | # Convert to BGR 28 | image = np.array(image)[:, :, [2, 1, 0]].astype("float32") 29 | 30 | # HWC -> CHW 31 | image = np.transpose(image, [2, 0, 1]) 32 | 33 | # Normalize 34 | mean_vec = np.array([102.9801, 115.9465, 122.7717]) 35 | for i in range(image.shape[0]): 36 | image[i, :, :] = image[i, :, :] - mean_vec[i] 37 | 38 | # Pad to be divisible of 32 39 | padded_h = int(math.ceil(image.shape[1] / 32) * 32) 40 | padded_w = int(math.ceil(image.shape[2] / 32) * 32) 41 | 42 | padded_image = np.zeros((3, padded_h, padded_w), dtype=np.float32) 43 | padded_image[:, : image.shape[1], : image.shape[2]] = image 44 | image = padded_image 45 | 46 | return image 47 | 48 | 49 | def detection_postprocess( 50 | original_image_size, boxes, labels, scores, masks, score_threshold=0.7 51 | ): 52 | # Resize boxes 53 | logger.info(f"original_image_size {original_image_size}") 54 | ratio = 800.0 / min(original_image_size[0], original_image_size[1]) 55 | boxes /= ratio 56 | 57 | results = [] 58 | for mask, box, label, score in zip(masks, boxes, labels, scores): 59 | # Showing boxes with score > 0.7 60 | if score <= score_threshold: 61 | continue 62 | 63 | # Finding contour based on mask 64 | mask = mask[0, :, :, None] 65 | int_box = [int(i) for i in box] 66 | mask = cv2.resize( 67 | mask, (int_box[2] - int_box[0] + 1, int_box[3] - int_box[1] + 1) 68 | ) 69 | mask = mask > 0.5 70 | im_mask = np.zeros( 71 | (original_image_size[0], original_image_size[1]), dtype=np.uint8 72 | ) 73 | x_0 = max(int_box[0], 0) 74 | 
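The model discovery in helper.py above hinges on proteus.models being a native namespace package (PEP 420): every separately installed proteus.models.* distribution contributes modules to the same namespace, and iter_modules walks all of them. A minimal standalone sketch of the same pattern follows; it assumes, like the code above, that each plugin package exposes a model_dict, and it simply skips any that do not:

import importlib
import pkgutil

import proteus.models


def iter_namespace(ns_pkg):
    # The prefix argument makes iter_modules yield absolute names,
    # so importlib.import_module can load them directly.
    return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")


models = {}
for _finder, name, _ispkg in iter_namespace(proteus.models):
    module = importlib.import_module(name)
    # each proteus.models.* package is expected to expose a model_dict mapping
    models.update(getattr(module, "model_dict", {}))

print(sorted(models))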
--------------------------------------------------------------------------------
/packages/proteus.models.maskrcnn/proteus/models/maskrcnn/helpers.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import math
3 | 
4 | import cv2
5 | import numpy as np
6 | from PIL import Image
7 | 
8 | logger = logging.getLogger(__name__)
9 | 
10 | 
11 | def read_class_names(class_file_name):
12 |     """loads class names from a file"""
13 |     names = {}
14 |     with open(class_file_name, "r") as data:
15 |         for ID, name in enumerate(data):
16 |             names[ID] = name.strip("\n")
17 |     return names
18 | 
19 | 
20 | def image_preprocess(image):
21 |     # Resize
22 |     ratio = 800.0 / min(image.size[0], image.size[1])
23 |     image = image.resize(
24 |         (int(ratio * image.size[0]), int(ratio * image.size[1])), Image.BILINEAR
25 |     )
26 | 
27 |     # Convert to BGR
28 |     image = np.array(image)[:, :, [2, 1, 0]].astype("float32")
29 | 
30 |     # HWC -> CHW
31 |     image = np.transpose(image, [2, 0, 1])
32 | 
33 |     # Normalize
34 |     mean_vec = np.array([102.9801, 115.9465, 122.7717])
35 |     for i in range(image.shape[0]):
36 |         image[i, :, :] = image[i, :, :] - mean_vec[i]
37 | 
38 |     # Pad to be divisible by 32
39 |     padded_h = int(math.ceil(image.shape[1] / 32) * 32)
40 |     padded_w = int(math.ceil(image.shape[2] / 32) * 32)
41 | 
42 |     padded_image = np.zeros((3, padded_h, padded_w), dtype=np.float32)
43 |     padded_image[:, : image.shape[1], : image.shape[2]] = image
44 |     image = padded_image
45 | 
46 |     return image
47 | 
48 | 
49 | def detection_postprocess(
50 |     original_image_size, boxes, labels, scores, masks, score_threshold=0.7
51 | ):
52 |     # Resize boxes
53 |     logger.info(f"original_image_size {original_image_size}")
54 |     ratio = 800.0 / min(original_image_size[0], original_image_size[1])
55 |     boxes /= ratio
56 | 
57 |     results = []
58 |     for mask, box, label, score in zip(masks, boxes, labels, scores):
59 |         # Keep boxes with score > score_threshold (default 0.7)
60 |         if score <= score_threshold:
61 |             continue
62 | 
63 |         # Finding contour based on mask
64 |         mask = mask[0, :, :, None]
65 |         int_box = [int(i) for i in box]
66 |         mask = cv2.resize(
67 |             mask, (int_box[2] - int_box[0] + 1, int_box[3] - int_box[1] + 1)
68 |         )
69 |         mask = mask > 0.5
70 |         im_mask = np.zeros(
71 |             (original_image_size[0], original_image_size[1]), dtype=np.uint8
72 |         )
73 |         x_0 = max(int_box[0], 0)
74 |         x_1 = min(int_box[2] + 1, original_image_size[1])
75 |         y_0 = max(int_box[1], 0)
76 |         y_1 = min(int_box[3] + 1, original_image_size[0])
77 |         mask_y_0 = int(max(y_0 - box[1], 0))
78 |         mask_y_1 = int(mask_y_0 + y_1 - y_0)
79 |         mask_x_0 = int(max(x_0 - box[0], 0))
80 |         mask_x_1 = int(mask_x_0 + x_1 - x_0)
81 |         im_mask[y_0:y_1, x_0:x_1] = mask[mask_y_0:mask_y_1, mask_x_0:mask_x_1]
82 |         im_mask = im_mask[:, :, None]
83 | 
84 |         bbox = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
85 |         results.append((score, bbox, label, im_mask))
86 |     return results
87 | 
--------------------------------------------------------------------------------
/proteus_api/app/main.py:
--------------------------------------------------------------------------------
1 | import importlib.util
2 | import logging.config
3 | import os
4 | 
5 | from fastapi import FastAPI, HTTPException
6 | from fastapi_utils.tasks import repeat_every
7 | from fastapi_utils.timing import add_timing_middleware
8 | from starlette.status import (
9 |     HTTP_422_UNPROCESSABLE_ENTITY,
10 |     HTTP_500_INTERNAL_SERVER_ERROR,
11 |     HTTP_503_SERVICE_UNAVAILABLE,
12 | )
13 | 
14 | from .helper import (
15 |     check_last_active,
16 |     generate_endpoints,
17 |     get_model_dict,
18 |     get_triton_client,
19 | )
20 | 
21 | # Env vars
22 | LOGLEVEL = os.environ.get("LOGLEVEL", "DEBUG")
23 | MODEL_INACTIVITY = int(os.environ.get("MODEL_INACTIVITY", "10"))
24 | 
25 | # Setup logging
26 | logging.config.fileConfig(
27 |     "/app/logging.conf", disable_existing_loggers=False, defaults={"level": LOGLEVEL}
28 | )
29 | logger = logging.getLogger(__name__)
30 | 
31 | # Setup FastAPI
32 | app = FastAPI()
33 | add_timing_middleware(app, record=logger.info)
34 | 
35 | triton_client = get_triton_client()
36 | model_dict = get_model_dict()
37 | 
38 | 
39 | @app.on_event("startup")
40 | @repeat_every(seconds=10)
41 | async def remove_expired_models():
42 |     # Get loaded models
43 |     loaded_models = [
44 |         m.get("name")
45 |         for m in triton_client.get_model_repository_index()
46 |         if m.get("state", "UNAVAILABLE") == "READY"
47 |     ]
48 |     for model in loaded_models:
49 |         last_active = check_last_active(model)
50 |         if last_active > MODEL_INACTIVITY:
51 |             logger.warning(
52 |                 f"Model {model} was last active {last_active} minutes ago; unloading because this exceeds MODEL_INACTIVITY ({MODEL_INACTIVITY} minutes)"
53 |             )
54 |             triton_client.unload_model(model)
55 | 
56 | 
57 | @app.get("/health")
58 | async def get_server_health():
59 |     if triton_client.is_server_live():
60 |         logger.info("Server is alive")
61 |         return {"success": True}
62 |     else:
63 |         raise HTTPException(
64 |             status_code=HTTP_503_SERVICE_UNAVAILABLE,
65 |             detail="Triton server not available",
66 |         )
67 | 
68 | 
69 | @app.get("/models")
70 | async def get_models():
71 |     return {k: v.DESCRIPTION for (k, v) in model_dict.items()}
72 | 
73 | 
74 | @app.get("/models/status")
75 | async def get_model_repository():
76 |     try:
77 |         return triton_client.get_model_repository_index()
78 |     except Exception:
79 |         raise HTTPException(
80 |             status_code=HTTP_503_SERVICE_UNAVAILABLE,
81 |             detail="Triton server not available",
82 |         )
83 | 
84 | 
85 | # build model-specific routers
86 | for name, model in model_dict.items():
87 |     generate_endpoints(name)
88 |     currdir = os.path.dirname(os.path.abspath(__file__))
89 |     spec = importlib.util.spec_from_file_location(
90 |         f"routers.{name}", f"{currdir}/routers/{name}.py"
91 |     )
92 |     module = importlib.util.module_from_spec(spec)
93 |     spec.loader.exec_module(module)
94 |     router = module.router
95 |     app.include_router(
96 |         router,
97 |         prefix=f"/{name}",
98 |         tags=[f"{name}"],
99 |     )
100 | 
--------------------------------------------------------------------------------
/packages/proteus.models.superres/proteus/models/superres/client.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pathlib import Path
3 | 
4 | import numpy as np
5 | from PIL import Image
6 | from proteus.models.base import BaseModel
7 | from proteus.models.base.modelconfigs import (
8 |     BaseModelConfig,
9 |     BatchingModelConfig,
10 |     QuantizationModelConfig,
11 |     TritonOptimizationModelConfig,
12 | )
13 | from resizeimage import resizeimage
14 | 
15 | folder_path = Path(__file__).parent
16 | logger = logging.getLogger(__name__)
17 | 
18 | 
19 | class ModelConfig(
20 |     BaseModelConfig,
21 |     TritonOptimizationModelConfig,
22 |     BatchingModelConfig,
23 |     QuantizationModelConfig,  # this will require ONNX opset 11
24 | ):
25 |     pass
26 | 
27 | 
28 | class SuperResolution(BaseModel):
29 | 
30 |     DESCRIPTION = (
31 |         "Implementation of Sub-Pixel CNN (2016) - https://arxiv.org/abs/1609.05158"
32 |     )
33 |     MODEL_URL = "https://github.com/onnx/models/raw/master/vision/super_resolution/sub_pixel_cnn_2016/model/super-resolution-10.onnx"
34 |     CONFIG_PATH = f"{folder_path}/config.template"
35 |     INPUT_NAME = "input"
36 |     OUTPUT_NAMES = ["output"]
37 |     DTYPE = "FP32"
38 |     MODEL_CONFIG = ModelConfig
39 | 
40 |     @classmethod
41 |     def preprocess(cls, img):
42 |         """
43 |         Pre-process an image to meet the size, type and format
44 |         requirements specified by the parameters.
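        Note: the network upscales only the luminance (Y) channel; the
        Cb and Cr channels are stashed in extra_data and later resized
        with bicubic interpolation in postprocess before merging back
        to an RGB image.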
45 | 46 | :param img: Pillow image 47 | 48 | :returns: 49 | - model_input: input as required by the model 50 | - extra_data: dict of data that is needed by the postprocess function 51 | """ 52 | extra_data = {} 53 | 54 | img = resizeimage.resize_cover(img, [224, 224], validate=False) 55 | img_ycbcr = img.convert("YCbCr") 56 | img_y_0, img_cb, img_cr = img_ycbcr.split() 57 | img_ndarray = np.asarray(img_y_0) 58 | img_4 = np.expand_dims(img_ndarray, axis=0) 59 | model_input = img_4.astype(np.float32) / 255.0 60 | 61 | # Save some parts in the PREDICTION_DATA store for postprocess 62 | extra_data["img_cb"] = img_cb 63 | extra_data["img_cr"] = img_cr 64 | return model_input, extra_data 65 | 66 | @classmethod 67 | def postprocess(cls, results, extra_data, batch_size, batching): 68 | """ 69 | Post-process results to return valid outputs. 70 | :param results: model outputs 71 | :param extra_data: dict of data that is needed by the postprocess function 72 | :param batch_size 73 | :param batching: boolean flag indicating if batching 74 | 75 | :returns: json result 76 | """ 77 | # Fetch from the PREDICTION_DATA store 78 | img_cb = extra_data["img_cb"] 79 | img_cr = extra_data["img_cr"] 80 | 81 | output_name = cls.OUTPUT_NAMES[0] 82 | results = results.as_numpy(output_name) 83 | logger.debug(results) 84 | img_out_y = Image.fromarray( 85 | np.uint8((results[0] * 255.0).clip(0, 255)[0]), mode="L" 86 | ) 87 | final_img = Image.merge( 88 | "YCbCr", 89 | [ 90 | img_out_y, 91 | img_cb.resize(img_out_y.size, Image.BICUBIC), 92 | img_cr.resize(img_out_y.size, Image.BICUBIC), 93 | ], 94 | ).convert("RGB") 95 | logger.debug(final_img) 96 | return final_img 97 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/tests/test_efficientpose.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import tempfile 3 | import time 4 | 5 | import pytest 6 | import requests 7 | from PIL import Image 8 | from PIL.ImageOps import pad 9 | from proteus.datasets import MPIIPoseEstimation 10 | from proteus.models.efficientpose.client import ModelConfig 11 | 12 | MODEL = "EfficientPoseI" 13 | 14 | # Check liveness 15 | for i in range(10): 16 | try: 17 | response = requests.get("http://localhost/health") 18 | if response.status_code == requests.codes.ok: 19 | break 20 | except: 21 | time.sleep(25) 22 | 23 | 24 | def get_prediction(fpath, model): 25 | with open(fpath, "rb") as f: 26 | jsonfiles = {"file": f} 27 | payload = {"file_id": fpath} 28 | response = requests.post( 29 | f"http://localhost/{model}/predict", 30 | files=jsonfiles, 31 | data=payload, 32 | ) 33 | return response 34 | 35 | 36 | @pytest.fixture 37 | def model(): 38 | payload = {"triton_optimization": True} 39 | response = requests.post( 40 | f"http://localhost/{MODEL}/load", 41 | json=payload, 42 | ) 43 | assert response.json()["success"] 44 | 45 | yield MODEL 46 | response = requests.post(f"http://localhost/{MODEL}/unload") 47 | assert response.json()["success"] 48 | 49 | 50 | @pytest.fixture 51 | def dataset(): 52 | return MPIIPoseEstimation(k=100) 53 | 54 | 55 | @pytest.fixture 56 | def small_dataset(): 57 | return MPIIPoseEstimation(k=10) 58 | 59 | 60 | def test_jpg(model): 61 | with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp: 62 | Image.new("RGB", (800, 1280)).save(tmp.name) 63 | response = get_prediction(tmp.name, model) 64 | assert response.status_code == requests.codes.ok 65 | 66 | 67 | def test_png(model): 68 | with 
tempfile.NamedTemporaryFile(suffix=".png") as tmp: 69 | Image.new("RGB", (800, 1280)).save(tmp.name) 70 | response = get_prediction(tmp.name, model) 71 | assert response.status_code == requests.codes.ok 72 | 73 | 74 | def test_bmp(model): 75 | with tempfile.NamedTemporaryFile(suffix=".bmp") as tmp: 76 | Image.new("RGB", (800, 1280)).save(tmp.name) 77 | response = get_prediction(tmp.name, model) 78 | assert response.status_code == requests.codes.ok 79 | 80 | 81 | def test_modelconfig(): 82 | # Figure out which config parameters are defined 83 | schema = ModelConfig().dict() 84 | 85 | # Find all combinations that we want to test 86 | test_parameters = [] 87 | test_values = [] 88 | for k, v in schema.items(): 89 | test_parameters.append(k) 90 | if type(v) == bool: 91 | test_values.append([True, False]) 92 | elif type(v) == int: 93 | test_values.append([1, 2]) 94 | else: 95 | raise NotImplementedError( 96 | f"Config parameter of type {type(v)} not yet implemented" 97 | ) 98 | test_combinations = list(itertools.product(*test_values)) 99 | 100 | # Test load + prediction for each combination 101 | for test_config in test_combinations: 102 | mc = {k: v for k, v in zip(test_parameters, test_config)} 103 | response = requests.post( 104 | f"http://localhost/{MODEL}/load", 105 | json=mc, 106 | ) 107 | assert response.status_code == requests.codes.ok 108 | 109 | with tempfile.NamedTemporaryFile(suffix=".bmp") as tmp: 110 | Image.new("RGB", (800, 1280)).save(tmp.name) 111 | response = get_prediction(tmp.name, MODEL) 112 | assert response.status_code == requests.codes.ok 113 | 114 | response = requests.post(f"http://localhost/{MODEL}/unload") 115 | assert response.status_code == requests.codes.ok 116 | 117 | 118 | @pytest.mark.slow 119 | def test_score(dataset, model): 120 | pass 121 | -------------------------------------------------------------------------------- /packages/proteus.models/tests/test_models.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | import time 3 | 4 | import pytest 5 | import requests 6 | from PIL import Image 7 | 8 | MAX_ACTIVE_MODELS = 3 # see docker-compose.yml. can be different in production 9 | MODEL_INACTIVITY = 1 # see docker-compose.yml. 
--------------------------------------------------------------------------------
/tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/tests/test_{{cookiecutter.package_name}}.py:
-------------------------------------------------------------------------------- 1 | import itertools 2 | import json 3 | import tempfile 4 | import time 5 | 6 | import pytest 7 | import requests 8 | from PIL import Image 9 | from PIL.ImageOps import pad 10 | from proteus.datasets import {{cookiecutter.test_dataset}} 11 | from proteus.models.{{cookiecutter.package_name}}.client import ModelConfig 12 | 13 | MODEL = '{{cookiecutter.model_name}}' 14 | 15 | # Check liveness 16 | for i in range(10): 17 | try: 18 | response = requests.get("http://localhost/health") 19 | if response.status_code == requests.codes.ok: 20 | break 21 | except: 22 | time.sleep(25) 23 | 24 | def get_prediction(fpath, model): 25 | with open(fpath, "rb") as f: 26 | jsonfiles = {"file": f} 27 | payload = {"file_id": fpath} 28 | response = requests.post( 29 | f"http://localhost/{model}/predict", 30 | files=jsonfiles, 31 | data=payload, 32 | ) 33 | return response 34 | 35 | 36 | @pytest.fixture 37 | def model(): 38 | payload = {"triton_optimization": True} 39 | response = requests.post( 40 | f"http://localhost/{MODEL}/load", 41 | json=payload, 42 | ) 43 | assert response.json()["success"] 44 | 45 | yield MODEL 46 | response = requests.post(f"http://localhost/{MODEL}/unload") 47 | assert response.json()["success"] 48 | 49 | 50 | @pytest.fixture 51 | def dataset(): 52 | return {{cookiecutter.test_dataset}}(k=100) 53 | 54 | 55 | @pytest.fixture 56 | def small_dataset(): 57 | return {{cookiecutter.test_dataset}}(k=10) 58 | 59 | 60 | def test_jpg(model): 61 | with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp: 62 | Image.new("RGB", (800, 1280)).save(tmp.name) 63 | response = get_prediction(tmp.name, model) 64 | assert response.status_code == requests.codes.ok 65 | 66 | 67 | def test_png(model): 68 | with tempfile.NamedTemporaryFile(suffix=".png") as tmp: 69 | Image.new("RGB", (800, 1280)).save(tmp.name) 70 | response = get_prediction(tmp.name, model) 71 | assert response.status_code == requests.codes.ok 72 | 73 | 74 | def test_bmp(model): 75 | with tempfile.NamedTemporaryFile(suffix=".bmp") as tmp: 76 | Image.new("RGB", (800, 1280)).save(tmp.name) 77 | response = get_prediction(tmp.name, model) 78 | assert response.status_code == requests.codes.ok 79 | 80 | def test_modelconfig(): 81 | # Figure out which config parameters are defined 82 | schema = ModelConfig().dict() 83 | 84 | # Find all combinations that we want to test 85 | test_parameters = [] 86 | test_values = [] 87 | for k, v in schema.items(): 88 | test_parameters.append(k) 89 | if type(v) == bool: 90 | test_values.append([True, False]) 91 | elif type(v) == int: 92 | test_values.append([1, 2]) 93 | else: 94 | raise NotImplementedError( 95 | f"Config parameter of type {type(v)} not yet implemented" 96 | ) 97 | test_combinations = list(itertools.product(*test_values)) 98 | 99 | # Test load + prediction for each combination 100 | for test_config in test_combinations: 101 | mc = {k: v for k, v in zip(test_parameters, test_config)} 102 | response = requests.post( 103 | f"http://localhost/{MODEL}/load", 104 | json=mc, 105 | ) 106 | assert response.status_code == requests.codes.ok 107 | 108 | with tempfile.NamedTemporaryFile(suffix=".bmp") as tmp: 109 | Image.new("RGB", (800, 1280)).save(tmp.name) 110 | response = get_prediction(tmp.name, MODEL) 111 | assert response.status_code == requests.codes.ok 112 | 113 | response = requests.post(f"http://localhost/{MODEL}/unload") 114 | assert response.status_code == requests.codes.ok 115 | 116 | @pytest.mark.xfail 117 | 
@pytest.mark.slow 118 | def test_score(dataset, model): 119 | preds = [] 120 | for (fpath, img) in dataset: 121 | response = get_prediction(fpath, model) 122 | result = [box for box in response.json()[0]] 123 | preds.append(result) 124 | score = dataset.eval(preds) 125 | assert score > 0.0 -------------------------------------------------------------------------------- /packages/proteus.models.superres/tests/test_superres.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import tempfile 3 | import time 4 | from io import BytesIO 5 | 6 | import pytest 7 | import requests 8 | from PIL import Image 9 | from PIL.ImageOps import pad 10 | from proteus.datasets import BSDSSuperRes 11 | from proteus.models.superres.client import ModelConfig 12 | 13 | MODEL = "SuperResolution" 14 | 15 | # Check liveness 16 | for i in range(10): 17 | try: 18 | response = requests.get("http://localhost/health") 19 | if response.status_code == requests.codes.ok: 20 | break 21 | except: 22 | time.sleep(25) 23 | 24 | 25 | def get_prediction(fpath, model): 26 | with open(fpath, "rb") as f: 27 | jsonfiles = {"file": f} 28 | payload = {"file_id": fpath} 29 | response = requests.post( 30 | f"http://localhost/{model}/predict", 31 | files=jsonfiles, 32 | data=payload, 33 | ) 34 | return response 35 | 36 | 37 | @pytest.fixture 38 | def model(): 39 | payload = {"triton_optimization": True} 40 | response = requests.post( 41 | f"http://localhost/{MODEL}/load", 42 | json=payload, 43 | ) 44 | assert response.json()["success"] 45 | 46 | yield MODEL 47 | response = requests.post(f"http://localhost/{MODEL}/unload") 48 | assert response.json()["success"] 49 | 50 | 51 | @pytest.fixture 52 | def dataset(): 53 | return BSDSSuperRes(k=100) 54 | 55 | 56 | @pytest.fixture 57 | def small_dataset(): 58 | return BSDSSuperRes(k=10) 59 | 60 | 61 | def test_jpg(model): 62 | with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp: 63 | Image.new("RGB", (800, 1280)).save(tmp.name) 64 | response = get_prediction(tmp.name, model) 65 | assert response.status_code == requests.codes.ok 66 | 67 | 68 | def test_png(model): 69 | with tempfile.NamedTemporaryFile(suffix=".png") as tmp: 70 | Image.new("RGB", (800, 1280)).save(tmp.name) 71 | response = get_prediction(tmp.name, model) 72 | assert response.status_code == requests.codes.ok 73 | 74 | 75 | def test_bmp(model): 76 | with tempfile.NamedTemporaryFile(suffix=".bmp") as tmp: 77 | Image.new("RGB", (800, 1280)).save(tmp.name) 78 | response = get_prediction(tmp.name, model) 79 | assert response.status_code == requests.codes.ok 80 | 81 | 82 | def test_modelconfig(): 83 | # Figure out which config parameters are defined 84 | schema = ModelConfig().dict() 85 | 86 | # Find all combinations that we want to test 87 | test_parameters = [] 88 | test_values = [] 89 | for k, v in schema.items(): 90 | test_parameters.append(k) 91 | if type(v) == bool: 92 | test_values.append([True, False]) 93 | elif type(v) == int: 94 | test_values.append([1, 2]) 95 | else: 96 | raise NotImplementedError( 97 | f"Config parameter of type {type(v)} not yet implemented" 98 | ) 99 | test_combinations = list(itertools.product(*test_values)) 100 | 101 | # Test load + prediction for each combination 102 | for test_config in test_combinations: 103 | mc = {k: v for k, v in zip(test_parameters, test_config)} 104 | response = requests.post( 105 | f"http://localhost/{MODEL}/load", 106 | json=mc, 107 | ) 108 | assert response.status_code == requests.codes.ok 109 | 110 | with 
tempfile.NamedTemporaryFile(suffix=".bmp") as tmp: 111 | Image.new("RGB", (800, 1280)).save(tmp.name) 112 | response = get_prediction(tmp.name, MODEL) 113 | assert response.status_code == requests.codes.ok 114 | 115 | response = requests.post(f"http://localhost/{MODEL}/unload") 116 | assert response.status_code == requests.codes.ok 117 | 118 | 119 | @pytest.mark.xfail 120 | @pytest.mark.slow 121 | def test_score(dataset, model): 122 | preds = [] 123 | for (fpath, img) in dataset: 124 | response = get_prediction(fpath, model) 125 | img_byte_arr = BytesIO(response.content) 126 | img_byte_arr.seek(0) # important here! 127 | with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp: 128 | Image.open(img_byte_arr).save(tmp.name) 129 | preds.append(tmp.name) 130 | 131 | score = dataset.eval(preds) 132 | assert score < 100.0 133 | -------------------------------------------------------------------------------- /packages/proteus.models.maskrcnn/proteus/models/maskrcnn/client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | import cv2 5 | import numpy as np 6 | from PIL import Image 7 | from proteus.models.base import BaseModel 8 | from proteus.models.base.modelconfigs import BaseModelConfig 9 | from proteus.types import BoundingBox, Segmentation 10 | from tritonclient.utils import triton_to_np_dtype 11 | 12 | from .helpers import detection_postprocess, image_preprocess, read_class_names 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | folder_path = Path(__file__).parent 17 | 18 | 19 | class ModelConfig(BaseModelConfig): 20 | pass 21 | 22 | 23 | class MaskRCNN(BaseModel): 24 | 25 | DESCRIPTION = ( 26 | "This model is a real-time neural network for object " 27 | "instance segmentation that detects 80 different classes." 28 | "mAP of 0.36" 29 | "Taken from https://github.com/onnx/models." 30 | ) 31 | CLASSES = read_class_names(f"{folder_path}/coco_names.txt") 32 | NUM_OUTPUTS = 4 33 | MAX_BATCH_SIZE = 0 34 | MODEL_URL = "https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/mask-rcnn/model/MaskRCNN-10.onnx" 35 | CONFIG_PATH = f"{folder_path}/config.template" 36 | INPUT_NAME = "image" 37 | OUTPUT_NAMES = ["6568", "6570", "6572", "6887"] 38 | DTYPE = "FP32" 39 | MODEL_CONFIG = ModelConfig 40 | 41 | @classmethod 42 | def preprocess(cls, img): 43 | """ 44 | Pre-process an image to meet the size, type and format 45 | requirements specified by the parameters. 46 | 47 | 48 | :param img: Pillow image 49 | 50 | :returns: 51 | - model_input: input as required by the model 52 | - extra_data: dict of data that is needed by the postprocess function 53 | """ 54 | extra_data = {} 55 | # Careful, Pillow has (w,h) format but most models expect (h,w) 56 | w, h = img.size 57 | extra_data["original_image_size"] = (h, w) 58 | 59 | img = img.convert("RGB") 60 | 61 | logger.info(f"Original image size: {img.size}") 62 | 63 | img = image_preprocess(img) 64 | 65 | npdtype = triton_to_np_dtype(cls.DTYPE) 66 | img = img.astype(npdtype) 67 | 68 | return img, extra_data 69 | 70 | @classmethod 71 | def postprocess(cls, results, extra_data, batch_size, batching): 72 | """ 73 | Post-process results to show bounding boxes. 
74 |         https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/mask-rcnn
75 |         :param results: model outputs
76 |         :param extra_data: dict of data that is needed by the postprocess function
77 |         :param batch_size
78 |         :param batching: boolean flag indicating if batching
79 | 
80 |         :returns: json result
81 |         """
82 |         original_image_size = extra_data["original_image_size"]
83 | 
84 |         # get outputs
85 |         boxes = results.as_numpy(cls.OUTPUT_NAMES[0])
86 |         labels = results.as_numpy(cls.OUTPUT_NAMES[1])
87 |         scores = results.as_numpy(cls.OUTPUT_NAMES[2])
88 |         masks = results.as_numpy(cls.OUTPUT_NAMES[3])
89 | 
90 |         postprocess_results = detection_postprocess(
91 |             original_image_size, boxes, labels, scores, masks
92 |         )
93 | 
94 |         results = []
95 |         # TODO add another loop if batching
96 |         for (score, box, cat, mask) in postprocess_results:
97 |             x1, y1, x2, y2 = box
98 | 
99 |             bbox = BoundingBox(
100 |                 x1=int(x1),
101 |                 y1=int(y1),
102 |                 x2=int(x2),
103 |                 y2=int(y2),
104 |                 class_name=cls.CLASSES[int(cat)],
105 |                 score=float(score),
106 |             )
107 | 
108 |             ret, thresh = cv2.threshold(mask, 0.5, 1, cv2.THRESH_BINARY)
109 |             contours, hierarchy = cv2.findContours(
110 |                 thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
111 |             )
112 | 
113 |             polygon = contours[0].reshape(-1).tolist()
114 |             if len(polygon) <= 4:
115 |                 # not valid, create a dummy
116 |                 polygon = [0, 0, 1, 0, 1, 1]
117 | 
118 |             segmentation = Segmentation(
119 |                 segmentation=polygon,
120 |                 class_name=cls.CLASSES[int(cat)],
121 |                 score=float(score),
122 |             )
123 |             results.append({"bounding_box": bbox, "segmentation": segmentation})
124 |         return results
125 | 
--------------------------------------------------------------------------------
/packages/proteus.models.yolov4/proteus/models/yolov4/client.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pathlib import Path
3 | 
4 | import cv2
5 | import numpy as np
6 | from proteus.models.base import BaseModel
7 | from proteus.models.base.modelconfigs import (
8 |     BaseModelConfig,
9 |     BatchingModelConfig,
10 |     QuantizationModelConfig,
11 |     TritonOptimizationModelConfig,
12 | )
13 | from proteus.types import BoundingBox
14 | from tritonclient.utils import triton_to_np_dtype
15 | 
16 | # isort: skip
17 | from .helpers import (
18 |     get_anchors,
19 |     image_preprocess,
20 |     nms,
21 |     postprocess_bbbox,
22 |     postprocess_boxes,
23 |     read_class_names,
24 | )
25 | 
26 | folder_path = Path(__file__).parent
27 | logger = logging.getLogger(__name__)
28 | 
29 | 
30 | class ModelConfig(
31 |     BaseModelConfig,
32 |     TritonOptimizationModelConfig,
33 |     BatchingModelConfig,
34 |     QuantizationModelConfig,
35 | ):
36 |     pass
37 | 
38 | 
39 | class YoloV4(BaseModel):
40 | 
41 |     CHANNEL_FIRST = False
42 |     DESCRIPTION = (
43 |         "YOLOv4 optimizes the speed and accuracy of object detection. "
44 |         "It is two times faster than EfficientDet. It improves YOLOv3's "
45 |         "AP and FPS by 10% and 12%, respectively, with mAP50 of 52.32 "
46 |         "on the COCO 2017 dataset and FPS of 41.7 on a Tesla V100. "
47 |         "Taken from https://github.com/onnx/models."
48 |     )
49 |     CLASSES = read_class_names(f"{folder_path}/coco_names.txt")
50 |     ANCHORS = get_anchors(f"{folder_path}/yolov4_anchors.txt")
51 |     MODEL_URL = "https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/yolov4/model/yolov4.onnx"
52 |     CONFIG_PATH = f"{folder_path}/config.template"
53 |     INPUT_NAME = "input_1:0"
54 |     OUTPUT_NAMES = ["Identity:0", "Identity_1:0", "Identity_2:0"]
55 |     DTYPE = "FP32"
56 |     SHAPE = (416, 416, 3)
57 |     MODEL_CONFIG = ModelConfig
58 | 
59 |     @classmethod
60 |     def preprocess(cls, img):
61 |         """
62 |         Pre-process an image to meet the size, type and format
63 |         requirements specified by the parameters.
64 |         https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/yolov4
65 | 
66 |         :param img: Pillow image
67 | 
68 |         :returns:
69 |             - model_input: input as required by the model
70 |             - extra_data: dict of data that is needed by the postprocess function
71 |         """
72 |         extra_data = {}
73 |         # Careful, Pillow has (w,h) format but most models expect (h,w)
74 |         w, h = img.size
75 |         extra_data["original_image_size"] = (h, w)
76 | 
77 |         if cls.SHAPE[2] == 1:
78 |             sample_img = img.convert("L")
79 |         else:
80 |             sample_img = img.convert("RGB")
81 | 
82 |         logger.info(f"Original image size: {sample_img.size}")
83 | 
84 |         # convert to cv2
85 |         open_cv_image = np.array(sample_img)
86 |         open_cv_image = open_cv_image[:, :, ::-1].copy()
87 | 
88 |         image = image_preprocess(open_cv_image, (cls.SHAPE[0], cls.SHAPE[1]))
89 | 
90 |         npdtype = triton_to_np_dtype(cls.DTYPE)
91 |         image = image.astype(npdtype)
92 | 
93 |         # channels first if needed
94 |         if cls.CHANNEL_FIRST:
95 |             image = np.transpose(image, (2, 0, 1))
96 | 
97 |         return image, extra_data
98 | 
99 |     @classmethod
100 |     def postprocess(cls, results, extra_data, batch_size, batching):
101 |         """
102 |         Post-process results to show bounding boxes.
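        https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/yolov4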
103 |         :param results: model outputs
104 |         :param extra_data: dict of data that is needed by the postprocess function
105 |         :param batch_size
106 |         :param batching: boolean flag indicating if batching
107 | 
108 |         :returns: json result
109 |         """
110 |         original_image_size = extra_data["original_image_size"]
111 | 
112 |         logger.debug(cls.OUTPUT_NAMES)
113 |         detections = [results.as_numpy(output_name) for output_name in cls.OUTPUT_NAMES]
114 |         logger.debug(list(map(lambda detection: detection.shape, detections)))
115 | 
116 |         STRIDES = np.array([8, 16, 32])
117 |         XYSCALE = [1.2, 1.1, 1.05]
118 | 
119 |         input_size = cls.SHAPE[0]
120 | 
121 |         pred_bbox = postprocess_bbbox(detections, cls.ANCHORS, STRIDES, XYSCALE)
122 |         bboxes = postprocess_boxes(pred_bbox, original_image_size, input_size, 0.25)
123 |         bboxes = nms(bboxes, 0.213, method="nms")
124 | 
125 |         # bboxes: [x_min, y_min, x_max, y_max, probability, cls_id]
126 |         results = []
127 |         for i, bbox in enumerate(bboxes):
128 |             bbox = BoundingBox(
129 |                 x1=int(bbox[0]),
130 |                 y1=int(bbox[1]),
131 |                 x2=int(bbox[2]),
132 |                 y2=int(bbox[3]),
133 |                 class_name=cls.CLASSES[int(bbox[5])],
134 |                 score=float(bbox[4]),
135 |             )
136 |             results.append(bbox)
137 | 
138 |         return results
139 | 
--------------------------------------------------------------------------------
/packages/proteus.models.efficientdet/proteus/models/efficientdet/client.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pathlib import Path
3 | 
4 | import numpy as np
5 | from proteus.models.base import BaseModel
6 | from proteus.models.base.modelconfigs import BaseModelConfig
7 | from proteus.types import BoundingBox
8 | from tritonclient.utils import triton_to_np_dtype
9 | 
10 | from .helpers import read_class_names
11 | 
12 | logger = logging.getLogger(__name__)
13 | 
14 | folder_path = Path(__file__).parent
15 | 
16 | 
17 | class ModelConfig(BaseModelConfig):
18 |     pass
19 | 
20 | 
21 | class EfficientDetD0(BaseModel):
22 | 
23 |     CHANNEL_FIRST = False
24 |     DESCRIPTION = (
25 |         "EfficientDets are a family of object detection models, which achieve state-of-the-art "
26 |         "55.1mAP on COCO test-dev, yet being 4x - 9x smaller and using 13x - 42x fewer FLOPs than previous"
27 |         " detectors. Our models also run 2x - 4x faster on GPU, and 5x - 11x faster on CPU than other detectors. "
28 |         "Converted using https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/efficientdet.ipynb"
29 |     )
30 |     CLASSES = read_class_names(f"{folder_path}/coco_names.txt")
31 |     MODEL_URL = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/efficientdet/efficientdet-d0.onnx"
32 |     CONFIG_PATH = f"{folder_path}/config.template"
33 |     INPUT_NAME = "image_arrays:0"
34 |     OUTPUT_NAMES = ["detections:0"]
35 |     DTYPE = "UINT8"
36 |     SHAPE = (416, 416, 3)
37 |     MODEL_CONFIG = ModelConfig
38 | 
39 |     @classmethod
40 |     def preprocess(cls, img):
41 |         """
42 |         Pre-process an image to meet the size, type and format
43 |         requirements specified by the parameters.
44 |         Based on this (very little preprocessing needed):
45 |         https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/efficientdet.ipynb
46 | 
47 |         :param img: Pillow image
48 | 
49 |         :returns:
50 |             - model_input: input as required by the model
51 |             - extra_data: dict of data that is needed by the postprocess function
52 |         """
53 |         extra_data = {}
54 | 
55 |         if cls.SHAPE[2] == 1:
56 |             sample_img = img.convert("L")
57 |         else:
58 |             sample_img = img.convert("RGB")
59 | 
60 |         logger.info(f"Original image size: {sample_img.size}")
61 | 
62 |         # convert to cv2
63 |         open_cv_image = np.array(sample_img)
64 |         open_cv_image = open_cv_image[:, :, ::-1].copy()
65 | 
66 |         npdtype = triton_to_np_dtype(cls.DTYPE)
67 |         open_cv_image = open_cv_image.astype(npdtype)
68 | 
69 |         # channels first if needed
70 |         if cls.CHANNEL_FIRST:
71 |             open_cv_image = np.transpose(open_cv_image, (2, 0, 1))
72 | 
73 |         return open_cv_image, extra_data
74 | 
75 |     @classmethod
76 |     def postprocess(cls, results, extra_data, batch_size, batching):
77 |         """
78 |         Post-process results to show bounding boxes.
79 |         Based on this (very little postprocessing needed):
80 |         https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/efficientdet.ipynb
81 |         :param results: model outputs
82 |         :param extra_data: dict of data that is needed by the postprocess function
83 |         :param batch_size
84 |         :param batching: boolean flag indicating if batching
85 | 
86 |         :returns: json result
87 |         """
88 | 
89 |         logger.debug(cls.OUTPUT_NAMES)
90 |         detections = [results.as_numpy(output_name) for output_name in cls.OUTPUT_NAMES]
91 |         # only one output, so
92 |         detections = detections[0]
93 |         logger.debug(list(map(lambda detection: detection.shape, detections)))
94 | 
95 |         results = []
96 |         # first dimension is the batch; TODO: loop over it once batching is supported here
97 |         for bbox in detections[0]:
98 |             logger.debug(bbox)
99 |             # bbox[0] is the image id
100 |             # ymin, xmin, ymax, xmax = bbox[1:5]
101 |             bbox = BoundingBox(
102 |                 x1=int(bbox[2]),
103 |                 y1=int(bbox[1]),
104 |                 x2=int(bbox[4]),
105 |                 y2=int(bbox[3]),
106 |                 class_name=cls.CLASSES[int(bbox[6])],
107 |                 score=float(bbox[5]),
108 |             )
109 |             results.append(bbox)
110 |         return results
111 | 
112 | 
113 | class EfficientDetD2(EfficientDetD0):
114 | 
115 |     CHANNEL_FIRST = False
116 |     DESCRIPTION = (
117 |         "EfficientDets are a family of object detection models, which achieve state-of-the-art "
118 |         "55.1mAP on COCO test-dev, yet being 4x - 9x smaller and using 13x - 42x fewer FLOPs than previous"
119 |         " detectors. Our models also run 2x - 4x faster on GPU, and 5x - 11x faster on CPU than other detectors. "
120 | "Converted using https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/efficientdet.ipynb" 121 | ) 122 | CLASSES = read_class_names(f"{folder_path}/coco_names.txt") 123 | MODEL_URL = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/efficientdet/efficientdet-d2.onnx" 124 | -------------------------------------------------------------------------------- /packages/proteus.datasets/proteus/datasets/coco.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import random 4 | import tempfile 5 | from pathlib import Path 6 | 7 | import requests 8 | from pycocotools.coco import COCO 9 | from pycocotools.cocoeval import COCOeval 10 | 11 | from .datasets import Dataset 12 | 13 | tmpfolder = tempfile.gettempdir() 14 | # We want a good random sample from CocoVal 15 | # But we want pseudorandom to get consistent test results 16 | random.seed(42) 17 | 18 | 19 | class CocoValBBox(Dataset): 20 | def __init__(self, k=50): 21 | self.maybe_download() 22 | self.coco = COCO(f"{tmpfolder}/datasets/coco/instances_val2017.json") 23 | self.cats = { 24 | cat["name"]: cat["id"] for cat in self.coco.loadCats(self.coco.getCatIds()) 25 | } 26 | 27 | imgs = self.coco.loadImgs(self.coco.getImgIds()) 28 | random.shuffle(imgs) 29 | self.imgs = imgs[:k] 30 | 31 | def maybe_download(self): 32 | target = f"{tmpfolder}/datasets/coco/instances_val2017.json" 33 | if not os.path.isfile(target): 34 | Path(f"{tmpfolder}/datasets/coco").mkdir(parents=True, exist_ok=True) 35 | print("Downloading COCO validation json") 36 | url = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/coco/instances_val2017.json" 37 | response = requests.get(url, allow_redirects=True) 38 | open(target, "wb").write(response.content) 39 | 40 | Path(f"{tmpfolder}/coco_imgs").mkdir(parents=True, exist_ok=True) 41 | 42 | def _getfile(self, url): 43 | filename = f"{tmpfolder}/coco_imgs/" + url.split("/")[-1] 44 | if not os.path.isfile(filename): 45 | print(f"Downloading {url}") 46 | response = requests.get(url, allow_redirects=True) 47 | open(filename, "wb").write(response.content) 48 | return filename 49 | 50 | def _prepare_preds(self, preds_in): 51 | preds_out = [] 52 | 53 | for index, pred in enumerate(preds_in): 54 | img_id = self.imgs[index]["id"] 55 | for box in pred: 56 | try: 57 | result = { 58 | "image_id": img_id, 59 | "category_id": self.cats[box["class_name"]], 60 | "score": box["score"], 61 | "bbox": [ 62 | box["x1"], 63 | box["y1"], 64 | box["x2"] - box["x1"], 65 | box["y2"] - box["y1"], 66 | ], 67 | } 68 | preds_out.append(result) 69 | except Exception as e: 70 | print(e) 71 | return preds_out 72 | 73 | def __getitem__(self, index): 74 | url = self.imgs[index]["coco_url"] 75 | fpath = self._getfile(url) 76 | return fpath, self.imgs[index] 77 | 78 | def __len__(self): 79 | return len(self.imgs) 80 | 81 | def eval(self, preds): 82 | preds = self._prepare_preds(preds) 83 | with open(f"{tmpfolder}/results_coco.json", "w") as f: 84 | json.dump(preds, f) 85 | cocoDT = self.coco.loadRes(f"{tmpfolder}/results_coco.json") 86 | cocoEval = COCOeval(self.coco, cocoDT, "bbox") 87 | cocoEval.params.imgIds = [s["id"] for s in self.imgs] 88 | cocoEval.evaluate() 89 | cocoEval.accumulate() 90 | cocoEval.summarize() 91 | return cocoEval.stats[0] 92 | 93 | 94 | class CocoValMask(CocoValBBox): 95 | def _prepare_preds(self, preds_in): 96 | preds_out = [] 97 | for index, pred in enumerate(preds_in): 98 | img_id = self.imgs[index]["id"] 99 | for ann in pred: 100 | segm = 
ann["segmentation"] 101 | box = ann["bounding_box"] 102 | try: 103 | result = { 104 | "image_id": img_id, 105 | "category_id": self.cats[box["class_name"]], 106 | "score": segm["score"], 107 | "bbox": [ 108 | box["x1"], 109 | box["y1"], 110 | box["x2"] - box["x1"], 111 | box["y2"] - box["y1"], 112 | ], 113 | "segmentation": [segm["segmentation"]], 114 | } 115 | preds_out.append(result) 116 | except Exception as e: 117 | print(e) 118 | return preds_out 119 | 120 | def eval(self, preds): 121 | preds = self._prepare_preds(preds) 122 | with open(f"{tmpfolder}/results_coco.json", "w") as f: 123 | json.dump(preds, f) 124 | cocoDT = self.coco.loadRes(f"{tmpfolder}/results_coco.json") 125 | cocoEval = COCOeval(self.coco, cocoDT, "segm") 126 | cocoEval.params.imgIds = [s["id"] for s in self.imgs] 127 | cocoEval.evaluate() 128 | cocoEval.accumulate() 129 | cocoEval.summarize() 130 | return cocoEval.stats[0] 131 | -------------------------------------------------------------------------------- /packages/proteus.models.retinanet/proteus/models/retinanet/client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | import cv2 5 | import numpy as np 6 | import torch 7 | from proteus.models.base import BaseModel 8 | from proteus.models.base.modelconfigs import BaseModelConfig 9 | from proteus.types import BoundingBox 10 | from tritonclient.utils import triton_to_np_dtype 11 | 12 | from .helpers import ( 13 | decode, 14 | detection_postprocess, 15 | generate_anchors, 16 | image_preprocess, 17 | image_resize, 18 | nms, 19 | read_class_names, 20 | ) 21 | 22 | logger = logging.getLogger(__name__) 23 | 24 | 25 | folder_path = Path(__file__).parent 26 | 27 | 28 | class ModelConfig(BaseModelConfig): 29 | pass 30 | 31 | 32 | class RetinaNet(BaseModel): 33 | 34 | DESCRIPTION = ( 35 | "RetinaNet is a single-stage object detection model. " 36 | "This version uses ResNet101 backbone. mAP 0.376" 37 | "Taken from https://github.com/onnx/models." 38 | ) 39 | CLASSES = read_class_names(f"{folder_path}/coco_names.txt") 40 | SHAPE = (3, 480, 640) 41 | MODEL_URL = "https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/retinanet/model/retinanet-9.onnx" 42 | CONFIG_PATH = f"{folder_path}/config.template" 43 | INPUT_NAME = "input" 44 | OUTPUT_NAMES = [ 45 | "output1", 46 | "output2", 47 | "output3", 48 | "output4", 49 | "output5", 50 | "output6", 51 | "output7", 52 | "output8", 53 | "output9", 54 | "output10", 55 | ] 56 | DTYPE = "FP32" 57 | 58 | @classmethod 59 | def preprocess(cls, img): 60 | """ 61 | Pre-process an image to meet the size, type and format 62 | requirements specified by the parameters. 
63 | 
64 |         :param img: Pillow image
65 | 
66 |         :returns:
67 |             - model_input: input as required by the model
68 |             - extra_data: dict of data that is needed by the postprocess function
69 |         """
70 |         extra_data = {}
71 |         # Careful, Pillow has (w,h) format but most models expect (h,w)
72 |         w, h = img.size
73 |         extra_data["original_image_size"] = (h, w)
74 | 
75 |         if cls.SHAPE[0] == 1:
76 |             img = img.convert("L")
77 |         else:
78 |             img = img.convert("RGB")
79 | 
80 |         logger.info(f"Original image size: {img.size}")
81 | 
82 |         # convert to cv2
83 |         img = np.array(img)
84 |         img = img[:, :, ::-1].copy()
85 | 
86 |         img = image_resize(img, cls.SHAPE[1:])
87 |         img = image_preprocess(img)
88 | 
89 |         npdtype = triton_to_np_dtype(cls.DTYPE)
90 |         img = img.astype(npdtype)
91 | 
92 |         return img, extra_data
93 | 
94 |     @classmethod
95 |     def postprocess(cls, results, extra_data, batch_size, batching):
96 |         """
97 |         Post-process results to show bounding boxes.
98 |         https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/retinanet
99 |         :param results: model outputs
100 |         :param extra_data: dict of data that is needed by the postprocess function
101 |         :param batch_size
102 |         :param batching: boolean flag indicating if batching
103 | 
104 |         :returns: json result
105 |         """
106 |         original_image_size = extra_data["original_image_size"]
107 | 
108 |         cls_heads = [
109 |             torch.from_numpy(results.as_numpy(output_name))
110 |             for output_name in cls.OUTPUT_NAMES[:5]
111 |         ]
112 |         logger.debug(list(map(lambda detection: detection.shape, cls_heads)))
113 |         box_heads = [
114 |             torch.from_numpy(results.as_numpy(output_name))
115 |             for output_name in cls.OUTPUT_NAMES[5:]
116 |         ]
117 |         logger.debug(list(map(lambda detection: detection.shape, box_heads)))
118 | 
119 |         # Size here is the input size of the model !!
120 |         # Still postprocessing needed to invert padding and scaling.
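        # The preprocess letterboxed the input: the image was scaled by
        # scale = min(iw / w, ih / h) and centered with (dw, dh) padding.
        # The boxes below therefore live in the padded model frame; they
        # are shifted by (dw, dh) and divided by scale to map them back
        # onto the original image.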
121 | scores, boxes, labels = detection_postprocess( 122 | cls.SHAPE[1:], cls_heads, box_heads 123 | ) 124 | 125 | # scale, delta width, delta height 126 | _, ih, iw = cls.SHAPE 127 | h, w = original_image_size 128 | scale = min(iw / w, ih / h) 129 | nw, nh = int(scale * w), int(scale * h) 130 | dw, dh = (iw - nw) // 2, (ih - nh) // 2 131 | 132 | results = [] 133 | # TODO add another loop if batching 134 | for score, box, cat in zip(scores[0], boxes[0], labels[0]): 135 | x1, y1, x2, y2 = box.data.tolist() 136 | 137 | # unpad bbox 138 | x1 = max(x1 - dw, 0) 139 | x2 = min(x2 - dw, w) 140 | y1 = max(y1 - dh, 0) 141 | y2 = min(y2 - dh, h) 142 | 143 | # scale 144 | x1, x2, y1, y2 = x1 / scale, x2 / scale, y1 / scale, y2 / scale 145 | 146 | bbox = BoundingBox( 147 | x1=int(x1), 148 | y1=int(y1), 149 | x2=int(x2), 150 | y2=int(y2), 151 | class_name=cls.CLASSES[int(cat.item())], 152 | score=float(score.item()), 153 | ) 154 | results.append(bbox) 155 | return results 156 | -------------------------------------------------------------------------------- /packages/proteus.models.efficientpose/proteus/models/efficientpose/client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | import numpy as np 5 | from proteus.models.base import BaseModel 6 | from proteus.models.base.modelconfigs import ( 7 | BaseModelConfig, 8 | BatchingModelConfig, 9 | QuantizationModelConfig, 10 | TritonOptimizationModelConfig, 11 | ) 12 | from proteus.types import Coordinate 13 | 14 | from .helpers import extract_coordinates, preprocess 15 | 16 | folder_path = Path(__file__).parent 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | class ModelConfig( 21 | BaseModelConfig, 22 | TritonOptimizationModelConfig, 23 | BatchingModelConfig, 24 | QuantizationModelConfig, 25 | ): 26 | pass 27 | 28 | 29 | class EfficientPoseRT(BaseModel): 30 | 31 | DESCRIPTION = ( 32 | "EfficientPoseRT implementation from https://github.com/daniegr/EfficientPose" 33 | ) 34 | MODEL_URL = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/effpose/EfficientPoseRT.onnx" 35 | 36 | """ 37 | Note: if CONFIG_PATH is None, Triton will figure out a default configuration from the ONNX file. 38 | The EfficientPose/load endpoint will return the used configuration, which can then be 39 | used to fill the actual config.template. It is not recommended to leave CONFIG_PATH empty in production 40 | because it will not support features like batching, num_instances and TritonOptimization. 41 | """ 42 | CONFIG_PATH = f"{folder_path}/config_RT.template" 43 | 44 | INPUT_NAME = "input_res1:0" 45 | OUTPUT_NAMES = ["upscaled_confs/BiasAdd:0"] 46 | DTYPE = "FP32" 47 | MODEL_CONFIG = ModelConfig 48 | RESOLUTION = 224 49 | 50 | @classmethod 51 | def preprocess(cls, img): 52 | """ 53 | Pre-process an image to meet the size, type and format 54 | requirements specified by the parameters. 
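        The image is brought to cls.RESOLUTION (224 for the RT variant,
        up to 600 for EfficientPoseIV) by the shared preprocess helper.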
55 | 56 | :param img: Pillow image 57 | 58 | :returns: 59 | - model_input: input as required by the model 60 | - extra_data: dict of data that is needed by the postprocess function 61 | """ 62 | extra_data = {} 63 | 64 | # Load image 65 | image = np.array(img) 66 | image_height, image_width = image.shape[:2] 67 | extra_data["image_height"] = image_height 68 | extra_data["image_width"] = image_width 69 | 70 | # For simplicity so we don't have to rewrite the original code 71 | batch = np.expand_dims(image, axis=0) 72 | # Preprocess batch 73 | batch = preprocess(batch, cls.RESOLUTION) 74 | # Pull single image out of batch 75 | image = batch[0] 76 | 77 | return image, extra_data 78 | 79 | @classmethod 80 | def postprocess(cls, results, extra_data, batch_size, batching): 81 | """ 82 | Post-process results to return valid outputs. 83 | :param results: model outputs 84 | :param extra_data: dict of data that is needed by the postprocess function 85 | :param batch_size 86 | :param batching: boolean flag indicating if batching 87 | 88 | :returns: json result 89 | """ 90 | image_height = extra_data["image_height"] 91 | image_width = extra_data["image_width"] 92 | 93 | batch_outputs = results.as_numpy(cls.OUTPUT_NAMES[0]) 94 | logger.debug(f"Shape of outputs: {batch_outputs.shape}") 95 | coordinates = extract_coordinates( 96 | batch_outputs[0, ...], image_height, image_width 97 | ) 98 | logger.debug(f"Coordinates: {coordinates}") 99 | 100 | # Coordinates are normalized, so convert to real pixel values 101 | coordinates = [ 102 | (name, x * image_width, y * image_height) for (name, x, y) in coordinates 103 | ] 104 | 105 | # Convert to Proteus type for JSON response 106 | proteus_coords = [ 107 | Coordinate(name=name, x=x, y=y) for (name, x, y) in coordinates 108 | ] 109 | 110 | return proteus_coords 111 | 112 | 113 | class EfficientPoseI(EfficientPoseRT): 114 | 115 | DESCRIPTION = ( 116 | "EfficientPoseI implementation from https://github.com/daniegr/EfficientPose" 117 | ) 118 | MODEL_URL = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/effpose/EfficientPoseI.onnx" 119 | 120 | CONFIG_PATH = f"{folder_path}/config_I.template" 121 | RESOLUTION = 256 122 | 123 | 124 | class EfficientPoseII(EfficientPoseRT): 125 | 126 | DESCRIPTION = ( 127 | "EfficientPoseII implementation from https://github.com/daniegr/EfficientPose" 128 | ) 129 | MODEL_URL = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/effpose/EfficientPoseII.onnx" 130 | 131 | CONFIG_PATH = f"{folder_path}/config_II.template" 132 | RESOLUTION = 368 133 | 134 | 135 | class EfficientPoseIII(EfficientPoseRT): 136 | 137 | DESCRIPTION = ( 138 | "EfficientPoseIII implementation from https://github.com/daniegr/EfficientPose" 139 | ) 140 | MODEL_URL = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/effpose/EfficientPoseIII.onnx" 141 | 142 | CONFIG_PATH = f"{folder_path}/config_III.template" 143 | RESOLUTION = 480 144 | 145 | 146 | class EfficientPoseIV(EfficientPoseRT): 147 | 148 | DESCRIPTION = ( 149 | "EfficientPoseIV implementation from https://github.com/daniegr/EfficientPose" 150 | ) 151 | MODEL_URL = "https://pieterblomme-models.s3.us-east-2.amazonaws.com/effpose/EfficientPoseIV.onnx" 152 | 153 | CONFIG_PATH = f"{folder_path}/config_IV.template" 154 | RESOLUTION = 600 155 | --------------------------------------------------------------------------------
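Taken together, a minimal end-to-end session against a running Proteus stack looks like the sketch below. It only uses endpoints exercised in the tests above; the model name and image file are illustrative, and the API is assumed to be exposed on http://localhost as in the test setup:

import requests

BASE = "http://localhost"
MODEL = "YoloV4"  # any key returned by GET /models

# 1. Load the model into Triton (optionally with Triton optimization).
response = requests.post(f"{BASE}/{MODEL}/load", json={"triton_optimization": True})
assert response.json()["success"]

# 2. Run a prediction on a local image.
with open("example.jpg", "rb") as f:
    response = requests.post(
        f"{BASE}/{MODEL}/predict",
        files={"file": f},
        data={"file_id": "example.jpg"},
    )
print(response.json())  # e.g. a list of bounding boxes for detection models

# 3. Unload explicitly (otherwise the MODEL_INACTIVITY watchdog will do it).
requests.post(f"{BASE}/{MODEL}/unload")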