├── .flake8 ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── auto_issue_template.md │ ├── bug_report.md │ ├── compute_issue_template.md │ ├── feature_request.md │ ├── iot_issue_template.md │ ├── mobile_issue_template.md │ └── request-new-model-on-qai-hub-models.md └── actions │ ├── configure-aws-profile │ ├── action.yml │ ├── cleanup.js │ └── index.js │ └── configure-multiple-aws-roles │ └── action.yml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── .pre-commit-license-header.txt ├── .pre-commit-line-ending-check.yaml ├── .shellcheckrc ├── LICENSE ├── README.md ├── mypy.ini ├── pyrightconfig.json ├── pytest.ini ├── qai_hub_models ├── __init__.py ├── _version.py ├── asset_bases.yaml ├── configs │ ├── __init__.py │ ├── _info_yaml_enums.py │ ├── _info_yaml_llm_details.py │ ├── code_gen_yaml.py │ ├── devices_and_chipsets_yaml.py │ ├── info_yaml.py │ ├── model_disable_reasons.py │ └── perf_yaml.py ├── conftest.py ├── datasets │ ├── __init__.py │ ├── ade20k.py │ ├── bsd300.py │ ├── carvana.py │ ├── cityscapes.py │ ├── cityscapes_lowres.py │ ├── coco.py │ ├── coco91class.py │ ├── coco_face.py │ ├── coco_face_480x640.py │ ├── coco_foot_track_dataset.py │ ├── coco_panoptic_seg.py │ ├── coco_seg.py │ ├── cocobody.py │ ├── cocobody_513x257.py │ ├── cocowholebody.py │ ├── common.py │ ├── configure_dataset.py │ ├── face_attrib_dataset.py │ ├── foot_track_dataset.py │ ├── human_faces.py │ ├── imagenet.py │ ├── imagenet_256.py │ ├── imagenet_colorization.py │ ├── imagenette.py │ ├── imagenette_256.py │ ├── imagenette_colorization.py │ ├── kinetics400.py │ ├── kinetics400_224.py │ ├── mmlu.py │ ├── mmmlu.py │ ├── mpii.py │ ├── nyuv2.py │ ├── nyuv2x518.py │ ├── pascal_voc.py │ ├── ppe.py │ ├── stable_diffusion_calib.py │ ├── tiny_mmlu.py │ ├── wikitext.py │ └── wikitext_ja.py ├── devices_and_chipsets.yaml ├── evaluators │ ├── __init__.py │ ├── ade_evaluator.py │ ├── base_evaluators.py │ ├── classification_evaluator.py │ ├── coco_foot_track_evaluator.py │ ├── 
colorization_evaluator.py │ ├── depth_evaluator.py │ ├── detection_evaluator.py │ ├── face_attrib_evaluator.py │ ├── face_detection_evaluator.py │ ├── face_landmark_evaluator.py │ ├── foot_track_evaluator.py │ ├── mmlu_evaluator.py │ ├── panoptic_segmentation_evaluator.py │ ├── pose_evaluator.py │ ├── ppe_evaluator.py │ ├── ppl_evaluator.py │ ├── segmentation_evaluator.py │ ├── superres_evaluator.py │ ├── utils │ │ └── pose.py │ ├── wholebody_pose_evaluator.py │ └── yolo_segmentation_evaluator.py ├── global_requirements.txt ├── labels │ ├── cityscapes_labels.txt │ ├── coco_labels.txt │ ├── coco_labels_91.txt │ ├── facemap_3dmm_labels.jpg │ ├── imagenet_labels.txt │ ├── kinetics400_labels.txt │ ├── ppe_labels.txt │ └── voc_labels.txt ├── models │ ├── __init__.py │ ├── _shared │ │ ├── __init__.py │ │ ├── body_detection │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── cityscapes_segmentation │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── evaluator.py │ │ │ ├── model.py │ │ │ └── patches │ │ │ │ └── move_datasets.diff │ │ ├── common.py │ │ ├── deeplab │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── depth_estimation │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── detr │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── coco_label_map.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── face_attrib_net │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── face_detection │ │ │ ├── __init__.py │ │ │ ├── layers.py │ │ │ └── model.py │ │ ├── fastsam │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── ffnet │ │ │ ├── __init__.py │ │ │ ├── model.py │ │ │ └── test_utils.py │ │ ├── foot_track_net │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ └── demo.py │ │ ├── hf_whisper │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── model.py │ │ │ ├── model_adaptation.py │ │ │ └── test_utils.py │ 
│ ├── imagenet_classifier │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── model.py │ │ │ └── test_utils.py │ │ ├── llama │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── llama3 │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── export.py │ │ │ ├── model.py │ │ │ ├── model_adaptations.py │ │ │ └── split_onnx_utils │ │ │ │ ├── __init__.py │ │ │ │ ├── split_onnx.py │ │ │ │ └── utils.py │ │ ├── llama3_ao │ │ │ ├── __init__.py │ │ │ ├── _utils.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── evaluate.py │ │ │ ├── model.py │ │ │ └── quantize.py │ │ ├── llm │ │ │ ├── __init__.py │ │ │ ├── _utils.py │ │ │ └── model.py │ │ ├── mediapipe │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ └── utils.py │ │ ├── quicksrnet │ │ │ ├── __init__.py │ │ │ └── common.py │ │ ├── repaint │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── model.py │ │ │ └── utils.py │ │ ├── segmentation │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ └── demo.py │ │ ├── sesr │ │ │ ├── __init__.py │ │ │ └── common.py │ │ ├── stable_diffusion │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── model.py │ │ │ ├── model_adaptation.py │ │ │ ├── quantize.py │ │ │ ├── test_utils.py │ │ │ └── utils.py │ │ ├── super_resolution │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ └── model.py │ │ ├── swin │ │ │ ├── __init__.py │ │ │ └── swin_transformer.py │ │ ├── video_classifier │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── model.py │ │ │ └── utils.py │ │ ├── whisper │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── model.py │ │ │ └── test_utils.py │ │ └── yolo │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ ├── demo.py │ │ │ ├── model.py │ │ │ └── utils.py │ ├── allam_7b │ │ ├── README.md │ │ ├── info.yaml │ │ └── perf.yaml │ ├── aotgan │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── 
model.py │ │ ├── patches │ │ │ └── layer_norm.diff │ │ ├── perf.yaml │ │ └── test.py │ ├── baichuan2_7b │ │ ├── README.md │ │ ├── __init__.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── beit │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── bgnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── bisenet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── common.py │ ├── conditional_detr_resnet50 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── controlnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── demo.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── convnext_base │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── convnext_tiny │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── ddrnet23_slim │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── deepbox │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── 
demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── deeplabv3_plus_mobilenet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── deeplabv3_resnet50 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── densenet121 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── depth_anything │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── depth_anything_v2 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── detr_resnet101 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── detr_resnet101_dc5 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── detr_resnet50 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── detr_resnet50_dc5 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ 
├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── dla102x │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── easyocr │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── efficientnet_b0 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── efficientnet_b4 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── efficientnet_v2_s │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── efficientvit_b2_cls │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── efficientvit_l2_cls │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── efficientvit_l2_seg │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── esrgan │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ 
│ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── face_attrib_net │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── face_det_lite │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ ├── test.py │ │ └── utils.py │ ├── facemap_3dmm │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ ├── resnet_score_rgb.py │ │ ├── test.py │ │ └── utils.py │ ├── fastsam_s │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── fastsam_x │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── fcn_resnet50 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── ffnet_122ns_lowres │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── ffnet_40s │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── ffnet_54s │ │ ├── README.md │ │ ├── __init__.py │ 
│ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── ffnet_78s │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── ffnet_78s_lowres │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── fomm │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── foot_track_net │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── foot_track_net.py │ │ ├── info.yaml │ │ ├── layers.py │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── gear_guard_net │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── googlenet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── hrnet_pose │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── hrnet_w48_ocr │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── 
huggingface_wavlm_base_plus │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── ibm_granite_v3_1_8b_instruct │ │ ├── README.md │ │ ├── info.yaml │ │ └── perf.yaml │ ├── inception_v3 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── indus_1b │ │ ├── README.md │ │ ├── info.yaml │ │ └── perf.yaml │ ├── jais_6p7b_chat │ │ ├── README.md │ │ ├── info.yaml │ │ └── perf.yaml │ ├── lama_dilated │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── levit │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── litehrnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── llama_v2_7b_chat │ │ ├── README.md │ │ ├── __init__.py │ │ ├── demo.py │ │ ├── export.py │ │ ├── gen_ondevice_llama │ │ │ └── README.md │ │ ├── info.yaml │ │ ├── model.py │ │ ├── modeling_llama.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── llama_v3_1_8b_instruct │ │ ├── README.md │ │ ├── __init__.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── quantize.py │ │ ├── requirements.txt │ │ └── test.py │ ├── llama_v3_2_3b_instruct │ │ ├── README.md │ │ ├── __init__.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ 
│ ├── model.py │ │ ├── perf.yaml │ │ ├── quantize.py │ │ ├── requirements.txt │ │ └── test.py │ ├── llama_v3_8b_instruct │ │ ├── README.md │ │ ├── __init__.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── quantize.py │ │ ├── requirements.txt │ │ └── test.py │ ├── llama_v3_taide_8b_chat │ │ ├── README.md │ │ ├── __init__.py │ │ ├── demo.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── mask2former │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── patches │ │ │ └── optimize.diff │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── mediapipe_face │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── mediapipe_hand │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── mediapipe_pose │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── mediapipe_selfie │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ ├── test.py │ │ └── utils.py │ ├── midas │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── ministral_3b │ │ ├── README.md │ │ ├── info.yaml │ │ └── 
perf.yaml │ ├── mistral_3b │ │ ├── README.md │ │ ├── info.yaml │ │ └── perf.yaml │ ├── mistral_7b_instruct_v0_3 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ └── perf.yaml │ ├── mnasnet05 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── mobile_vit │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── mobilenet_v2 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── mobilenet_v3_large │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── mobilenet_v3_small │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── mobilesam │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── movenet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── nasnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── nomic_embed_text │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ 
├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── openai_clip │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── phi_3_5_mini_instruct │ │ ├── README.md │ │ ├── info.yaml │ │ └── perf.yaml │ ├── pidnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── plamo_1b │ │ ├── README.md │ │ ├── info.yaml │ │ └── perf.yaml │ ├── posenet_mobilenet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── protocols.py │ ├── quicksrnetlarge │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── quicksrnetmedium │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── quicksrnetsmall │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── qwen2_7b_instruct │ │ ├── README.md │ │ ├── __init__.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ └── perf.yaml │ ├── real_esrgan_general_x4v3 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── 
real_esrgan_x4plus │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── regnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── resnet101 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── resnet18 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── resnet50 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── resnet_2plus1d │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── resnet_3d │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── resnet_mixed │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── resnext101 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── resnext50 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── 
export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── riffusion │ │ ├── README.md │ │ ├── __init__.py │ │ ├── demo.py │ │ ├── demo_base.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── rtmdet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── rtmpose_body2d │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── segformer_base │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirement.txt │ │ └── test.py │ ├── sesr_m5 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── shufflenet_v2 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── sinet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── squeezenet1_1 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── stable_diffusion_v1_5 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── 
quantize.py │ │ └── requirements.txt │ ├── stable_diffusion_v2_1 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── quantize.py │ │ └── requirements.txt │ ├── swin_base │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── swin_small │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── swin_tiny │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── trocr │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── unet_segmentation │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── video_mae │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── vit │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── whisper_base_en │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── whisper_small_en 
│ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── whisper_small_v2 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── whisper_tiny_en │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── wideresnet50 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── xlsr │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── test.py │ ├── yamnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── yolov10_det │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── yolov11_det │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── yolov11_seg │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── 
yolov3 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ └── requirements.txt │ ├── yolov5 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── patches │ │ │ └── 5d_to_4d.diff │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── yolov6 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── yolov7 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── yolov8_det │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ ├── yolov8_seg │ │ ├── README.md │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py │ └── yolox │ │ ├── README.md │ │ ├── __init__.py │ │ ├── app.py │ │ ├── conftest.py │ │ ├── demo.py │ │ ├── evaluate.py │ │ ├── export.py │ │ ├── info.yaml │ │ ├── model.py │ │ ├── perf.yaml │ │ ├── requirements.txt │ │ └── test.py ├── requirements-dev.txt ├── requirements.txt ├── scorecard │ ├── __init__.py │ ├── device.py │ ├── execution_helpers.py │ ├── path_compile.py │ ├── path_profile.py │ ├── results │ │ ├── __init__.py │ │ ├── chipset_helpers.py │ │ ├── code_gen.py │ │ ├── performance_diff.py │ │ ├── performance_summary.py │ │ ├── scorecard_job.py │ │ ├── 
spreadsheet.py │ │ ├── test │ │ │ ├── __init__.py │ │ │ └── test_performance_diff.py │ │ └── yaml.py │ └── test │ │ └── test_parameterization.py ├── test │ ├── __init__.py │ ├── test_async_compile_jobs.py │ ├── test_models │ │ ├── __init__.py │ │ └── test_common.py │ └── test_utils │ │ ├── __init__.py │ │ ├── perf.yaml │ │ ├── test_base_model.py │ │ ├── test_dataset_util.py │ │ ├── test_evaluate.py │ │ ├── test_info_specs.py │ │ ├── test_model_cache.py │ │ ├── test_printing.py │ │ ├── test_qai_hub_helpers.py │ │ └── test_version.py └── utils │ ├── __init__.py │ ├── aimet │ ├── __init__.py │ ├── aimet_dummy_model.py │ ├── config_loader.py │ ├── default_config.json │ ├── default_config_legacy_v1.json │ ├── default_config_legacy_v2.json │ ├── default_config_legacy_v3.json │ ├── default_config_llama.json │ ├── default_config_per_channel_qnn.json │ ├── default_per_tensor_config.json │ ├── default_per_tensor_config_v69.json │ ├── default_per_tensor_config_v73.json │ ├── encodings.py │ └── repo.py │ ├── args.py │ ├── asset_loaders.py │ ├── base_config.py │ ├── base_model.py │ ├── bounding_box_processing.py │ ├── camera_capture.py │ ├── checkpoint.py │ ├── collection_model_helpers.py │ ├── compare.py │ ├── dataset_util.py │ ├── default_export_device.py │ ├── display.py │ ├── draw.py │ ├── evaluate.py │ ├── file_hash.py │ ├── huggingface.py │ ├── image_processing.py │ ├── inference.py │ ├── input_spec.py │ ├── llm_helpers.py │ ├── measurement.py │ ├── model_adapters.py │ ├── model_cache.py │ ├── onnx_helpers.py │ ├── onnx_torch_wrapper.py │ ├── path_helpers.py │ ├── printing.py │ ├── qai_hub_helpers.py │ ├── qnn_helpers.py │ ├── quantization.py │ ├── quantization_aimet_onnx.py │ ├── readme_util.py │ ├── system_info.py │ ├── test_compare.py │ ├── testing.py │ ├── testing_async_utils.py │ ├── testing_export_eval.py │ └── transpose_channel.py ├── scripts ├── build_and_test.py ├── ci │ ├── gh_askpass.sh │ ├── git-credential-helper.sh │ ├── install-aws-cli.sh │ ├── 
install-platform-deps.sh │ └── set-aihub-credential-vars.sh ├── examples │ ├── conftest.py │ ├── profile_piqaro_llama_v3_2_3b.py │ ├── profile_piqaro_stable_diffusion_v1_5.py │ └── quantize_llama_v3_2_3b_with_japanese_data.py ├── tasks │ ├── aws.py │ ├── changes.py │ ├── constants.py │ ├── github.py │ ├── plan.py │ ├── release.py │ ├── task.py │ ├── test.py │ ├── util.py │ └── venv.py └── util │ ├── common.sh │ ├── env_create.sh │ ├── env_sync.sh │ ├── extract_info_from_context_binary.py │ ├── github.sh │ ├── run_mypy.sh │ └── write_changed_files.py └── setup.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E501,W503,E203,E266,E265,F811 3 | # E501 line too long 4 | # W503 clang-formatter can produce line break before binary operator 5 | # E203 False positive "whitespaces before :" especially when slicing list, arrays etc. 6 | # E266 Too many leading '#' for block comment 7 | # E265 Block comment should start with '# ' 8 | # F811 A module has been imported twice, but seen when importing functions for fixtures in pytest files. 9 | exclude = **/*_pb2.py 10 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/ai-hub-models/37938eb779babea7dc4a6e4a96a0685822bf66ba/.gitattributes -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for QAI-Hub-Models 4 | title: "[Feature Request] New feature request" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. 
12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/request-new-model-on-qai-hub-models.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Request new model on QAI-Hub-Models 3 | about: Request new model to be added on QAI-Hub-Models 4 | title: "[MODEL REQUEST] requesting new model" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. 12 | 13 | **Details of model being requested** 14 | - Model name: 15 | - Source repo link: 16 | - Research paper link [If applicable]: 17 | - Model use case: 18 | 19 | **Additional context for requested model** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/actions/configure-aws-profile/action.yml: -------------------------------------------------------------------------------- 1 | name: Configure AWS profile 2 | 3 | description: Modifies the AWS credentials file for the supplied profile 4 | 5 | inputs: 6 | profile: 7 | description: Name of the profile 8 | required: true 9 | aws-region: 10 | description: AWS Region, e.g. us-east-2 11 | required: true 12 | aws-access-key-id: 13 | description: AWS Access Key ID 14 | required: true 15 | aws-secret-access-key: 16 | description: AWS Secret Access Key 17 | required: true 18 | aws-session-token: 19 | description: AWS Session Token. 
20 | required: true 21 | 22 | runs: 23 | using: 'node20' 24 | main: 'index.js' 25 | post: 'cleanup.js' 26 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | profile=black 3 | known_first_party=qai_hub_models 4 | -------------------------------------------------------------------------------- /.pre-commit-license-header.txt: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------------- 2 | Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | SPDX-License-Identifier: BSD-3-Clause 4 | --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /.pre-commit-line-ending-check.yaml: -------------------------------------------------------------------------------- 1 | exclude: | 2 | (?x)( 3 | /build/ 4 | ) 5 | 6 | repos: 7 | - repo: https://github.com/pre-commit/pre-commit-hooks 8 | rev: v5.0.0 9 | hooks: 10 | - id: mixed-line-ending 11 | args: ["--fix", "lf"] 12 | -------------------------------------------------------------------------------- /pyrightconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "exclude": [ 3 | "**/.mypy_cache", 4 | "**/.pytest_cache", 5 | "**/__pycache__", 6 | "**/node_modules", 7 | "build/tungsten", 8 | "src/public/staging_python/qai_hub_staging", 9 | "src/tungsten", 10 | "src/www/onnx-optimizer/third_party", 11 | "src/www/onnx-simplifier/third_party", 12 | ], 13 | 14 | "extraPaths": [ 15 | "./build/proto" 16 | ], 17 | 18 | "reportIncompatibleMethodOverride": "none", 19 | "reportMissingModuleSource": "none", 20 | "reportMissingImports": true, 21 | "reportMissingTypeStubs": false, 22 | "reportShadowedImports": false, 23 | "verboseOutput": false, 
24 | 25 | "venvPath": ".", 26 | "venv": "qaihm-dev" 27 | } 28 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = qai_hub_models 3 | norecursedirs = build 4 | python_files = tests.py test_*.py test.py 5 | filterwarnings = 6 | ignore::DeprecationWarning:torch.*: 7 | ignore::DeprecationWarning:torchvision.*: 8 | markers = 9 | slow: marks tests as slow 10 | slow_cloud: marks test as slow and cloud-dependent 11 | compile: marks tests that run compile jobs 12 | profile: marks tests that run profile jobs 13 | inference: marks tests that run inference jobs 14 | export: marks tests that run full export scripts 15 | unmarked: default marker added to any job with no markers 16 | -------------------------------------------------------------------------------- /qai_hub_models/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/_version.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | __version__ = "0.30.2" 6 | -------------------------------------------------------------------------------- /qai_hub_models/configs/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/datasets/cityscapes_lowres.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | from qai_hub_models.datasets.cityscapes import CityscapesDataset 8 | from qai_hub_models.datasets.common import DatasetSplit 9 | 10 | 11 | class CityscapesLowResDataset(CityscapesDataset): 12 | def __init__( 13 | self, 14 | split: DatasetSplit = DatasetSplit.TRAIN, 15 | input_images_zip: str | None = None, 16 | input_gt_zip: str | None = None, 17 | ): 18 | return super().__init__(split, input_images_zip, input_gt_zip, make_lowres=True) 19 | -------------------------------------------------------------------------------- /qai_hub_models/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/labels/cityscapes_labels.txt: -------------------------------------------------------------------------------- 1 | road 2 | sidewalk 3 | building 4 | wall 5 | fence 6 | pole 7 | traffic light 8 | traffic sign 9 | vegetation 10 | terrain 11 | sky 12 | person 13 | rider 14 | car 15 | truck 16 | bus 17 | train 18 | motorcycle 19 | bicycle 20 | -------------------------------------------------------------------------------- /qai_hub_models/labels/facemap_3dmm_labels.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/ai-hub-models/37938eb779babea7dc4a6e4a96a0685822bf66ba/qai_hub_models/labels/facemap_3dmm_labels.jpg -------------------------------------------------------------------------------- /qai_hub_models/labels/ppe_labels.txt: -------------------------------------------------------------------------------- 1 | helmet 2 | vest 3 | -------------------------------------------------------------------------------- /qai_hub_models/labels/voc_labels.txt: -------------------------------------------------------------------------------- 1 | BACKGROUND 2 | aeroplane 3 | bicycle 4 | bird 5 | boat 6 | bottle 7 | bus 8 | car 9 | cat 10 | chair 11 | cow 12 | diningtable 13 | dog 14 | horse 15 | motorbike 16 | person 17 | pottedplant 18 | sheep 19 | sofa 20 | train 21 | tvmonitor 22 | -------------------------------------------------------------------------------- /qai_hub_models/models/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/body_detection/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/cityscapes_segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/deeplab/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/depth_estimation/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/detr/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/face_attrib_net/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/face_detection/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/fastsam/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/ffnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/foot_track_net/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/hf_whisper/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/imagenet_classifier/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/llama/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/llama3/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/llama3/split_onnx_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/llama3_ao/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/llm/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/mediapipe/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/quicksrnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/repaint/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/sesr/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/stable_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/super_resolution/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/swin/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/video_classifier/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/whisper/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/_shared/yolo/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/allam_7b/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon X Elite CRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-x-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | ALLaM-7B-Quantized: 9 | performance_metrics: 10 | Snapdragon X Elite CRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 238.545 14 | max: 1399.168 15 | tokens_per_second: 9.5 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/aotgan/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.repaint.app import ( # noqa: F401 6 | RepaintMaskApp as App, 7 | ) 8 | 9 | from .model import AOTGAN as Model # noqa: F401 10 | from .model import MODEL_ID # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/aotgan/patches/layer_norm.diff: -------------------------------------------------------------------------------- 1 | diff --git a/src/model/aotgan.py b/src/model/aotgan.py 2 | index 518b76c..75d96c3 100644 3 | --- a/src/model/aotgan.py 4 | +++ b/src/model/aotgan.py 5 | @@ -80,7 +80,8 @@ class AOTBlock(nn.Module): 6 | 7 | def my_layer_norm(feat): 8 | mean = feat.mean((2, 3), keepdim=True) 9 | - std = feat.std((2, 3), keepdim=True) + 1e-9 10 | + num_samples = feat.shape[2] * feat.shape[3] 11 | + std = torch.sqrt(torch.sum((feat - mean) ** 2 / (num_samples - 1), (2, 3), keepdim=True)) + 1e-9 12 | feat = 2 * (feat - mean) / std - 1 13 | feat = 5 * feat 14 | return feat 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/baichuan2_7b/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.baichuan2_7b.model import MODEL_ID # noqa: F401 6 | from qai_hub_models.models.baichuan2_7b.model import ( # noqa: F401 7 | Baichuan2_7B_Quantized as Model, 8 | ) 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/baichuan2_7b/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | Baichuan2-7B: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 208.048 14 | max: 6657.536 15 | tokens_per_second: 7.72 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/baichuan2_7b/test.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | import pytest 6 | 7 | from qai_hub_models.models.baichuan2_7b import Model 8 | 9 | 10 | @pytest.mark.skip( 11 | "https://github.com/qcom-ai-hub/tetracode/issues/13710 decide how to test pre-compiled LLMs." 
12 | ) 13 | @pytest.mark.slow_cloud 14 | def test_load_model() -> None: 15 | Model.from_precompiled() 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/beit/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import Beit as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/beit/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
def main(is_test: bool = False) -> None:
    """Run the Beit ImageNet-classification demo.

    Args:
        is_test: Forwarded to the shared ``imagenet_demo`` runner.
    """
    imagenet_demo(Beit, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
def main(is_test: bool = False) -> None:
    """Run the BiseNet segmentation demo on the default input image.

    Args:
        is_test: Forwarded to the shared ``segmentation_demo`` runner.
    """
    segmentation_demo(BiseNet, MODEL_ID, INPUT_IMAGE_ADDRESS, is_test)


if __name__ == "__main__":
    main()
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.detr.app import DETRApp as App # noqa: F401 6 | from qai_hub_models.models.conditional_detr_resnet50.model import ( # noqa: F401 7 | ConditionalDETRResNet50 as Model, 8 | ) 9 | 10 | from .model import MODEL_ID # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/conditional_detr_resnet50/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/controlnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
# Every sibling model package exposes its demo app under the capitalized
# name ``App``; this package previously exported only lowercase ``app``.
# ``App`` is now the canonical name; ``app`` is kept as a deprecated alias
# so existing imports keep working.
from qai_hub_models.models.controlnet.app import ControlNetApp as App  # noqa: F401
from qai_hub_models.models.controlnet.model import MODEL_ID  # noqa: F401
from qai_hub_models.models.controlnet.model import ControlNet as Model  # noqa: F401

app = App  # noqa: F401  # deprecated lowercase alias, kept for backward compatibility
class ConvNextBase(ImagenetClassifier):
    """ConvNeXt-Base ImageNet classifier wrapping torchvision's implementation."""

    @classmethod
    def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ConvNextBase:
        """Construct the classifier from a torchvision weight-preset name."""
        network = tv_models.convnext_base(weights=weights)
        return cls(network)
def main(is_test: bool = False) -> None:
    """Run the ConvNextTiny ImageNet-classification demo.

    Args:
        is_test: Forwarded to the shared ``imagenet_demo`` runner.
    """
    imagenet_demo(ConvNextTiny, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
class ConvNextTiny(ImagenetClassifier):
    """ConvNeXt-Tiny ImageNet classifier wrapping torchvision's implementation."""

    @classmethod
    def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ConvNextTiny:
        """Construct the classifier from a torchvision weight-preset name."""
        network = tv_models.convnext_tiny(weights=weights)
        return cls(network)
def main(is_test: bool = False) -> None:
    """Run the DDRNet segmentation demo on the default input image.

    Args:
        is_test: Forwarded to the shared ``segmentation_demo`` runner.
    """
    segmentation_demo(DDRNet, MODEL_ID, INPUT_IMAGE_ADDRESS, is_test)


if __name__ == "__main__":
    main()
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.deeplab.app import DeepLabV3App as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import DeepLabV3PlusMobilenet as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/deeplabv3_resnet50/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.deeplab.app import DeepLabV3App as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import DeepLabV3_ResNet50 as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/densenet121/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
def main(is_test: bool = False) -> None:
    """Run the DenseNet ImageNet-classification demo.

    Args:
        is_test: Forwarded to the shared ``imagenet_demo`` runner.
    """
    imagenet_demo(DenseNet, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
class DenseNet(ImagenetClassifier):
    """DenseNet-121 ImageNet classifier wrapping torchvision's implementation."""

    @classmethod
    def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> DenseNet:
        """Construct the classifier from a torchvision weight-preset name."""
        network = tv_models.densenet121(weights=weights)
        return cls(network)
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.depth_estimation.app import ( # noqa: F401 6 | DepthEstimationApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import DepthAnythingV2 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/depth_anything_v2/requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib==3.7.5 2 | transformers==4.51.3 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/detr_resnet101/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.detr.app import DETRApp as App # noqa: F401 6 | from qai_hub_models.models.detr_resnet101.model import ( # noqa: F401 7 | DETRResNet101 as Model, 8 | ) 9 | 10 | from .model import MODEL_ID # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/detr_resnet101/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
class DETRResNet101(DETR):
    """Exportable DETR model (ResNet-101 backbone), end-to-end."""

    @classmethod
    def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS):
        """Load the model from the given HuggingFace checkpoint name.

        Delegates via ``super()`` rather than naming ``DETR`` explicitly:
        the previous ``DETR.from_pretrained(...)`` call pinned ``cls`` to
        the base class, so this subclass's constructor path was bypassed.
        With ``super()``, ``cls`` stays bound to the invoking class.
        """
        return super().from_pretrained(ckpt_name)
class DETRResNet101DC5(DETR):
    """Exportable DETR model (ResNet-101 DC5 backbone), end-to-end."""

    @classmethod
    def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS):
        """Load the model from the given HuggingFace checkpoint name.

        Delegates via ``super()`` rather than naming ``DETR`` explicitly:
        the previous ``DETR.from_pretrained(...)`` call pinned ``cls`` to
        the base class, so this subclass's constructor path was bypassed.
        With ``super()``, ``cls`` stays bound to the invoking class.
        """
        return super().from_pretrained(ckpt_name)
class DETRResNet50(DETR):
    """Exportable DETR model (ResNet-50 backbone), end-to-end."""

    @classmethod
    def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS):
        """Load the model from the given HuggingFace checkpoint name.

        Delegates via ``super()`` rather than naming ``DETR`` explicitly:
        the previous ``DETR.from_pretrained(...)`` call pinned ``cls`` to
        the base class, so this subclass's constructor path was bypassed.
        With ``super()``, ``cls`` stays bound to the invoking class.
        """
        return super().from_pretrained(ckpt_name)
class DETRResNet50DC5(DETR):
    """Exportable DETR model (ResNet-50 DC5 backbone), end-to-end."""

    @classmethod
    def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS):
        """Load the model from the given HuggingFace checkpoint name.

        Delegates via ``super()`` rather than naming ``DETR`` explicitly:
        the previous ``DETR.from_pretrained(...)`` call pinned ``cls`` to
        the base class, so this subclass's constructor path was bypassed.
        With ``super()``, ``cls`` stays bound to the invoking class.
        """
        return super().from_pretrained(ckpt_name)
class dla102x(ImagenetClassifier):
    """DLA-102X ImageNet classifier wrapping timm's implementation."""

    @classmethod
    def from_pretrained(cls, checkpoint_path: str | None = None):
        """Create the model with timm's pretrained weights.

        Args:
            checkpoint_path: Accepted for interface compatibility but not
                read by this implementation; weights always come from timm.
        """
        network = timm.create_model("dla102x", pretrained=True)
        network.eval()
        return cls(network)
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import EasyOCRApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import EasyOCR as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/easyocr/requirements.txt: -------------------------------------------------------------------------------- 1 | easyocr==1.7.2 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/efficientnet_b0/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import EfficientNetB0 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/efficientnet_b0/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
class EfficientNetB0(ImagenetClassifier):
    """EfficientNet-B0 ImageNet classifier wrapping torchvision's implementation."""

    @classmethod
    def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> EfficientNetB0:
        """Construct the classifier from a torchvision weight-preset name."""
        network = tv_models.efficientnet_b0(weights=weights)
        return cls(network)
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.imagenet_classifier.app import (  # noqa: F401
    ImagenetClassifierApp as App,
)

from .model import MODEL_ID  # noqa: F401
from .model import EfficientNetB4 as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientnet_b4/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo
from qai_hub_models.models.efficientnet_b4.model import MODEL_ID, EfficientNetB4


def main(is_test: bool = False) -> None:
    # Entry point: run the shared ImageNet-classifier demo with EfficientNetB4.
    imagenet_demo(EfficientNetB4, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientnet_v2_s/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.imagenet_classifier.app import (  # noqa: F401
    ImagenetClassifierApp as App,
)

from .model import MODEL_ID  # noqa: F401
from .model import EfficientNetV2s as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientnet_v2_s/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo
from qai_hub_models.models.efficientnet_v2_s.model import MODEL_ID, EfficientNetV2s


def main(is_test: bool = False) -> None:
    # Entry point: run the shared ImageNet-classifier demo with EfficientNetV2-S.
    imagenet_demo(EfficientNetV2s, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_b2_cls/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------

from .model import MODEL_ID  # noqa: F401
from .model import EfficientViT as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_b2_cls/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo
from qai_hub_models.models.efficientvit_b2_cls.model import MODEL_ID, EfficientViT


def main(is_test: bool = False) -> None:
    # Entry point: run the shared ImageNet-classifier demo with EfficientViT-B2.
    imagenet_demo(EfficientViT, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_b2_cls/requirements.txt:
--------------------------------------------------------------------------------
segment-anything==1.0
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_l2_cls/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------

from .model import MODEL_ID  # noqa: F401
from .model import EfficientViT as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_l2_cls/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo
from qai_hub_models.models.efficientvit_l2_cls.model import MODEL_ID, EfficientViT


def main(is_test: bool = False) -> None:
    # Entry point: run the shared ImageNet-classifier demo with EfficientViT-L2.
    imagenet_demo(EfficientViT, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_l2_cls/requirements.txt:
--------------------------------------------------------------------------------
segment-anything==1.0
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_l2_seg/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------

from .model import MODEL_ID  # noqa: F401
from .model import EfficientViT as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_l2_seg/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.demo import (
    cityscapes_segmentation_demo,
)
from qai_hub_models.models.efficientvit_l2_seg.model import MODEL_ID, EfficientViT


def main(is_test: bool = False) -> None:
    # Entry point: run the shared Cityscapes-segmentation demo with EfficientViT-L2.
    cityscapes_segmentation_demo(EfficientViT, MODEL_ID, is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/efficientvit_l2_seg/requirements.txt:
--------------------------------------------------------------------------------
segment-anything==1.0
--------------------------------------------------------------------------------
/qai_hub_models/models/esrgan/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import ESRGAN as Model # noqa: F401 10 | from .model import MODEL_ID # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/face_attrib_net/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.face_attrib_net.app import ( # noqa: F401 6 | FaceAttribNetApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import FaceAttribNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/face_attrib_net/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.face_attrib_net.demo import (
    face_attrib_net_demo as demo_main,
)
from qai_hub_models.models.face_attrib_net.model import FaceAttribNet


def main(is_test: bool = False) -> None:
    # Entry point: run the shared face-attribute-net demo with this model class.
    demo_main(FaceAttribNet, is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/face_det_lite/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from .app import FaceDetLiteApp as App  # noqa: F401
from .model import MODEL_ID  # noqa: F401
from .model import FaceDetLiteModel as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/face_det_lite/requirements.txt:
--------------------------------------------------------------------------------
xtcocotools==1.14.3
object-detection-metrics==0.4.post1
--------------------------------------------------------------------------------
/qai_hub_models/models/facemap_3dmm/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models.facemap_3dmm.app import FaceMap_3DMMApp as App  # noqa: F401
from qai_hub_models.models.facemap_3dmm.model import MODEL_ID  # noqa: F401
from qai_hub_models.models.facemap_3dmm.model import FaceMap_3DMM as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/facemap_3dmm/requirements.txt:
--------------------------------------------------------------------------------
scikit-image>0.21.0,<0.25
xtcocotools==1.14.3
--------------------------------------------------------------------------------
/qai_hub_models/models/fastsam_s/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.fastsam.app import FastSAMApp as App  # noqa: F401

from .model import MODEL_ID  # noqa: F401
from .model import FastSAM_S as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/fastsam_s/requirements.txt:
--------------------------------------------------------------------------------
seaborn==0.11.0
thop==0.1.1.post2209072238
matplotlib==3.7.5 # Ultralytics does not specify a matplotlib version, and the latest matplotlib breaks it.
# So we have to include the requirement manually
ultralytics==8.0.193
--------------------------------------------------------------------------------
/qai_hub_models/models/fastsam_x/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.fastsam.app import FastSAMApp as App  # noqa: F401

from .model import MODEL_ID  # noqa: F401
from .model import FastSAM_X as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/fastsam_x/requirements.txt:
--------------------------------------------------------------------------------
seaborn==0.11.0
thop==0.1.1.post2209072238
matplotlib==3.7.5 # Ultralytics does not specify a matplotlib version, and the latest matplotlib breaks it. So we have to include the requirement manually
ultralytics==8.0.193
--------------------------------------------------------------------------------
/qai_hub_models/models/fcn_resnet50/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import FCN_ResNet50App as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import FCN_ResNet50 as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/ffnet_122ns_lowres/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 6 | CityscapesSegmentationApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import FFNet122NSLowRes as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/ffnet_122ns_lowres/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.demo import (
    cityscapes_segmentation_demo,
)
from qai_hub_models.models.ffnet_122ns_lowres.model import MODEL_ID, FFNet122NSLowRes


def main(is_test: bool = False) -> None:
    # Entry point: run the shared Cityscapes-segmentation demo with FFNet-122NS (low-res).
    cityscapes_segmentation_demo(FFNet122NSLowRes, MODEL_ID, is_test=is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_122ns_lowres/model.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from __future__ import annotations

from qai_hub_models.models._shared.ffnet.model import FFNetLowRes

# Model id is derived from the package directory name (second-to-last module part).
MODEL_ID = __name__.split(".")[-2]


class FFNet122NSLowRes(FFNetLowRes):
    """Low-resolution FFNet-122NS Cityscapes segmentation variant."""

    @classmethod
    def from_pretrained(cls) -> FFNet122NSLowRes:
        """Load the pretrained FFNet-122NS low-res checkpoint by variant name."""
        return super().from_pretrained("segmentation_ffnet122NS_CCC_mobile_pre_down")
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_122ns_lowres/requirements.txt:
--------------------------------------------------------------------------------
scikit-image>0.21.0,<0.25
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_40s/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.app import (  # noqa: F401
    CityscapesSegmentationApp as App,
)

from .model import MODEL_ID  # noqa: F401
from .model import FFNet40S as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_40s/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.demo import (
    cityscapes_segmentation_demo,
)
from qai_hub_models.models.ffnet_40s.model import MODEL_ID, FFNet40S


def main(is_test: bool = False) -> None:
    # Entry point: run the shared Cityscapes-segmentation demo with FFNet-40S.
    cityscapes_segmentation_demo(FFNet40S, MODEL_ID, is_test=is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_40s/model.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from __future__ import annotations

from qai_hub_models.models._shared.ffnet.model import FFNet

# Model id is derived from the package directory name (second-to-last module part).
MODEL_ID = __name__.split(".")[-2]


class FFNet40S(FFNet):
    """FFNet-40S Cityscapes segmentation variant."""

    @classmethod
    def from_pretrained(cls) -> FFNet40S:
        """Load the pretrained FFNet-40S checkpoint by variant name."""
        return super().from_pretrained("segmentation_ffnet40S_dBBB_mobile")
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_40s/requirements.txt:
--------------------------------------------------------------------------------
scikit-image>0.21.0,<0.25
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_40s/test.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical
from qai_hub_models.models.ffnet_40s.demo import main as demo_main
from qai_hub_models.models.ffnet_40s.model import FFNet40S
from qai_hub_models.utils.testing import skip_clone_repo_check


@skip_clone_repo_check
def test_off_target_numerical() -> None:
    # Compare FFNet40S outputs against the named reference variant off target.
    run_test_off_target_numerical(FFNet40S, "segmentation_ffnet40S_dBBB_mobile")


@skip_clone_repo_check
def test_demo() -> None:
    # Smoke-test the demo entry point in test mode (no interactive output).
    demo_main(is_test=True)
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_54s/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.app import (  # noqa: F401
    CityscapesSegmentationApp as App,
)

from .model import MODEL_ID  # noqa: F401
from .model import FFNet54S as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_54s/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.demo import (
    cityscapes_segmentation_demo,
)
from qai_hub_models.models.ffnet_54s.model import MODEL_ID, FFNet54S


def main(is_test: bool = False) -> None:
    # Entry point: run the shared Cityscapes-segmentation demo with FFNet-54S.
    cityscapes_segmentation_demo(FFNet54S, MODEL_ID, is_test=is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_54s/model.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from __future__ import annotations

from qai_hub_models.models._shared.ffnet.model import FFNet

# Model id is derived from the package directory name (second-to-last module part).
MODEL_ID = __name__.split(".")[-2]


class FFNet54S(FFNet):
    """FFNet-54S Cityscapes segmentation variant."""

    @classmethod
    def from_pretrained(cls) -> FFNet54S:
        """Load the pretrained FFNet-54S checkpoint by variant name."""
        return super().from_pretrained("segmentation_ffnet54S_dBBB_mobile")
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_54s/requirements.txt:
--------------------------------------------------------------------------------
scikit-image>0.21.0,<0.25
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_54s/test.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical
from qai_hub_models.models.ffnet_54s.demo import main as demo_main
from qai_hub_models.models.ffnet_54s.model import FFNet54S
from qai_hub_models.utils.testing import skip_clone_repo_check


@skip_clone_repo_check
def test_off_target_numerical() -> None:
    # Compare FFNet54S outputs against the named reference variant off target.
    run_test_off_target_numerical(FFNet54S, "segmentation_ffnet54S_dBBB_mobile")


@skip_clone_repo_check
def test_demo() -> None:
    # Smoke-test the demo entry point in test mode (no interactive output).
    demo_main(is_test=True)
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.app import (  # noqa: F401
    CityscapesSegmentationApp as App,
)

from .model import MODEL_ID  # noqa: F401
from .model import FFNet78S as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.demo import (
    cityscapes_segmentation_demo,
)
from qai_hub_models.models.ffnet_78s.model import MODEL_ID, FFNet78S


def main(is_test: bool = False) -> None:
    # Entry point: run the shared Cityscapes-segmentation demo with FFNet-78S.
    cityscapes_segmentation_demo(FFNet78S, MODEL_ID, is_test=is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s/model.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from __future__ import annotations

from qai_hub_models.models._shared.ffnet.model import FFNet

# Model id is derived from the package directory name (second-to-last module part).
MODEL_ID = __name__.split(".")[-2]


class FFNet78S(FFNet):
    """FFNet-78S Cityscapes segmentation variant."""

    @classmethod
    def from_pretrained(cls) -> FFNet78S:
        """Load the pretrained FFNet-78S checkpoint by variant name."""
        return super().from_pretrained("segmentation_ffnet78S_dBBB_mobile")
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s/requirements.txt:
--------------------------------------------------------------------------------
scikit-image>0.21.0,<0.25
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s/test.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical
from qai_hub_models.models.ffnet_78s.demo import main as demo_main
from qai_hub_models.models.ffnet_78s.model import FFNet78S
from qai_hub_models.utils.testing import skip_clone_repo_check


@skip_clone_repo_check
def test_off_target_numerical() -> None:
    # Compare FFNet78S outputs against the named reference variant off target.
    run_test_off_target_numerical(FFNet78S, "segmentation_ffnet78S_dBBB_mobile")


@skip_clone_repo_check
def test_demo() -> None:
    # Smoke-test the demo entry point in test mode (no interactive output).
    demo_main(is_test=True)
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s_lowres/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.app import (  # noqa: F401
    CityscapesSegmentationApp as App,
)

from .model import MODEL_ID  # noqa: F401
from .model import FFNet78SLowRes as Model  # noqa: F401
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s_lowres/demo.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from qai_hub_models.models._shared.cityscapes_segmentation.demo import (
    cityscapes_segmentation_demo,
)
from qai_hub_models.models.ffnet_78s_lowres.model import MODEL_ID, FFNet78SLowRes


def main(is_test: bool = False) -> None:
    # Entry point: run the shared Cityscapes-segmentation demo with FFNet-78S (low-res).
    cityscapes_segmentation_demo(FFNet78SLowRes, MODEL_ID, is_test=is_test)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s_lowres/model.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
from __future__ import annotations

from qai_hub_models.models._shared.ffnet.model import FFNetLowRes

# Model id is derived from the package directory name (second-to-last module part).
MODEL_ID = __name__.split(".")[-2]


class FFNet78SLowRes(FFNetLowRes):
    """Low-resolution FFNet-78S Cityscapes segmentation variant."""

    @classmethod
    def from_pretrained(cls) -> FFNet78SLowRes:
        """Load the pretrained FFNet-78S low-res checkpoint by variant name."""
        return super().from_pretrained("segmentation_ffnet78S_BCC_mobile_pre_down")
--------------------------------------------------------------------------------
/qai_hub_models/models/ffnet_78s_lowres/requirements.txt:
--------------------------------------------------------------------------------
scikit-image>0.21.0,<0.25
--------------------------------------------------------------------------------
/qai_hub_models/models/fomm/__init__.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.fomm.app import FOMMApp as App # noqa: F401 6 | 7 | from .model import FOMM as Model # noqa: F401 8 | from .model import MODEL_ID # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/fomm/requirements.txt: -------------------------------------------------------------------------------- 1 | imageio[ffmpeg]==2.31.5 2 | ffmpeg==1.4 3 | scikit-image>0.21.0,<0.25 4 | scikit-learn>1.1,<1.6 5 | matplotlib==3.7.5 6 | scipy>=1.8.1,<2 # 1.8.1 is for AIMET 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/foot_track_net/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.foot_track_net.app import ( # noqa: F401 6 | FootTrackNet_App as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import FootTrackNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/foot_track_net/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.foot_track_net.demo import demo 6 | from qai_hub_models.models.foot_track_net.model import FootTrackNet 7 | 8 | 9 | def main(is_test: bool = False): 10 | demo(FootTrackNet, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/foot_track_net/requirements.txt: -------------------------------------------------------------------------------- 1 | object-detection-metrics==0.4.post1 2 | seaborn==0.11.0 3 | thop==0.1.1.post2209072238 4 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. So we have to include the requirement manually 5 | ultralytics==8.0.193 6 | shapely==2.0.3 7 | fiftyone==1.0.1 8 | object-detection-metrics==0.4.post1 9 | xtcocotools==1.14.3 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/gear_guard_net/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.body_detection.app import ( # noqa: F401 6 | BodyDetectionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import GearGuardNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/googlenet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import GoogLeNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/googlenet/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.googlenet.model import MODEL_ID, GoogLeNet 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(GoogLeNet, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/googlenet/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 13 | 14 | 15 | class GoogLeNet(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> GoogLeNet: 18 | net = tv_models.googlenet(weights=weights, transform_input=False) 19 | return cls(net, transform_input=True) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/hrnet_pose/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import HRNetPose as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/hrnet_pose/requirements.txt: -------------------------------------------------------------------------------- 1 | yacs==0.1.8 2 | chumpy==0.71 3 | mmdet==3.3.0+mmcv220 4 | mmpose==1.2.0 5 | mmcv==2.2.0 6 | xtcocotools==1.14.3 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/hrnet_w48_ocr/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | 6 | from .model import HRNET_W48_OCR as Model # noqa: F401 7 | from .model import MODEL_ID # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/hrnet_w48_ocr/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( 6 | cityscapes_segmentation_demo, 7 | ) 8 | from qai_hub_models.models.hrnet_w48_ocr.model import HRNET_W48_OCR, MODEL_ID 9 | 10 | 11 | def main(is_test: bool = False): 12 | cityscapes_segmentation_demo(HRNET_W48_OCR, MODEL_ID, is_test) 13 | 14 | 15 | if __name__ == "__main__": 16 | main() 17 | -------------------------------------------------------------------------------- /qai_hub_models/models/hrnet_w48_ocr/requirements.txt: -------------------------------------------------------------------------------- 1 | yacs==0.1.8 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/huggingface_wavlm_base_plus/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import HuggingFaceWavLMBasePlusApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import HuggingFaceWavLMBasePlus as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/huggingface_wavlm_base_plus/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | soundfile==0.12.1 3 | librosa==0.10.1 4 | -------------------------------------------------------------------------------- /qai_hub_models/models/ibm_granite_v3_1_8b_instruct/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | - Snapdragon X Elite CRD 4 | supported_chipsets: 5 | - qualcomm-snapdragon-8-elite 6 | - qualcomm-snapdragon-x-elite 7 | precisions: 8 | w4a16: 9 | components: 10 | IBM-Granite-v3.1-8B-Instruct: 11 | performance_metrics: 12 | Snapdragon 8 Elite QRD: 13 | qnn: 14 | time_to_first_token_range_milliseconds: 15 | min: 196.7925 16 | max: 6297.360 17 | tokens_per_second: 11.01293 18 | Snapdragon X Elite CRD: 19 | qnn: 20 | time_to_first_token_range_milliseconds: 21 | min: 295.3902 22 | max: 9452.4864 23 | tokens_per_second: 8.01724 24 | -------------------------------------------------------------------------------- /qai_hub_models/models/inception_v3/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import InceptionNetV3 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/inception_v3/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.inception_v3.model import MODEL_ID, InceptionNetV3 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(InceptionNetV3, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/indus_1b/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | IndusQ-1.1B: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 28.561 14 | max: 228.489 15 | tokens_per_second: 74.60 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/jais_6p7b_chat/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | 
supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | Jais-6p7b-Chat: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 238.231 14 | max: 3811.696 15 | tokens_per_second: 13.33 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/lama_dilated/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.repaint.app import ( # noqa: F401 6 | RepaintMaskApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import LamaDilated as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/lama_dilated/requirements.txt: -------------------------------------------------------------------------------- 1 | albumentations==0.5.2 2 | pytorch-lightning>2,<3 3 | webdataset==0.2.86 4 | easydict==1.13 5 | kornia==0.5.0 6 | hydra-core==1.3.0 7 | scikit-learn>1.1,<1.6 8 | tensorboard==2.13.0 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/levit/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import LeViT as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/levit/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.levit.model import MODEL_ID, LeViT 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(LeViT, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/levit/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/litehrnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import LiteHRNetApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import LiteHRNet as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/litehrnet/requirements.txt: -------------------------------------------------------------------------------- 1 | chumpy==0.71 2 | mmdet==3.3.0+mmcv220 3 | mmpose==1.2.0 4 | mmcv==2.2.0 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v2_7b_chat/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama.app import ChatApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import Llama2_Quantized as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v2_7b_chat/gen_ondevice_llama/README.md: -------------------------------------------------------------------------------- 1 | ### Tutorials Moved 2 | 3 | The tutorial for on-device deployment has been moved to 4 | [here](https://github.com/qcom-ai-hub/ai-hub-apps-internal/tree/main/tutorials/llm_on_genie). 5 | 6 | This directory will be deprecated at the end of 2024. 
7 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v2_7b_chat/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.41.1 2 | sentencepiece==0.2.0 3 | psutil 4 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_1_8b_instruct/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama.app import ChatApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import Llama3_1_8B as FP_Model # noqa: F401 9 | from .model import Llama3_1_8B_AIMETOnnx as Model # noqa: F401 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_1_8b_instruct/evaluate.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama3_ao.evaluate import llama3_evaluate 6 | from qai_hub_models.models.llama_v3_1_8b_instruct import FP_Model, Model 7 | 8 | if __name__ == "__main__": 9 | llama3_evaluate(quantized_model_cls=Model, fp_model_cls=FP_Model) 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_1_8b_instruct/quantize.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama3_ao.quantize import llama3_quantize 6 | from qai_hub_models.models.llama_v3_1_8b_instruct import MODEL_ID, Model 7 | from qai_hub_models.models.llama_v3_1_8b_instruct.model import Llama3_1_8B 8 | 9 | if __name__ == "__main__": 10 | llama3_quantize( 11 | quantized_model_cls=Model, fp_model_cls=Llama3_1_8B, model_id=MODEL_ID 12 | ) 13 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_1_8b_instruct/requirements.txt: -------------------------------------------------------------------------------- 1 | datasets==2.14.5 2 | onnx==1.16.2 3 | transformers==4.45.0 4 | huggingface_hub==0.23.2 5 | sentencepiece==0.2.0 6 | psutil 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_1_8b_instruct/test.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | import pytest 6 | 7 | from qai_hub_models.models.llama_v3_1_8b_instruct.demo import llama_3_1_chat_demo 8 | 9 | 10 | @pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.") 11 | @pytest.mark.slow_cloud 12 | def test_demo() -> None: 13 | # Run demo and verify it does not crash 14 | llama_3_1_chat_demo(is_test=True) 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_2_3b_instruct/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .model import MODEL_ID # noqa: F401 6 | from .model import Llama3_2_3B as FP_Model # noqa: F401 7 | from .model import Llama3_2_3B_AIMETOnnx as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_2_3b_instruct/evaluate.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama3_ao.evaluate import llama3_evaluate 6 | from qai_hub_models.models.llama_v3_2_3b_instruct import FP_Model, Model 7 | 8 | if __name__ == "__main__": 9 | llama3_evaluate(quantized_model_cls=Model, fp_model_cls=FP_Model) 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_2_3b_instruct/quantize.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama3_ao.quantize import llama3_quantize 6 | from qai_hub_models.models.llama_v3_2_3b_instruct import MODEL_ID, Model 7 | from qai_hub_models.models.llama_v3_2_3b_instruct.model import Llama3_2_3B 8 | 9 | if __name__ == "__main__": 10 | llama3_quantize( 11 | quantized_model_cls=Model, fp_model_cls=Llama3_2_3B, model_id=MODEL_ID 12 | ) 13 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_2_3b_instruct/requirements.txt: -------------------------------------------------------------------------------- 1 | datasets==2.14.5 2 | onnx==1.16.2 3 | transformers==4.45.0 4 | huggingface_hub==0.23.2 5 | sentencepiece==0.2.0 6 | psutil 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_2_3b_instruct/test.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | import pytest 6 | 7 | from qai_hub_models.models.llama_v3_2_3b_instruct.demo import llama_3_2_3b_chat_demo 8 | 9 | 10 | @pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.") 11 | @pytest.mark.slow_cloud 12 | def test_demo() -> None: 13 | # Run demo and verify it does not crash 14 | llama_3_2_3b_chat_demo(is_test=True) 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_8b_instruct/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama.app import ChatApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import Llama3_8B as FP_Model # noqa: F401 9 | from .model import Llama3_8B_AIMETOnnx as Model # noqa: F401 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_8b_instruct/evaluate.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama3_ao.evaluate import llama3_evaluate 6 | from qai_hub_models.models.llama_v3_8b_instruct import FP_Model, Model 7 | 8 | if __name__ == "__main__": 9 | llama3_evaluate(quantized_model_cls=Model, fp_model_cls=FP_Model) 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_8b_instruct/quantize.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama3_ao.quantize import llama3_quantize 6 | from qai_hub_models.models.llama_v3_8b_instruct import MODEL_ID, Model 7 | from qai_hub_models.models.llama_v3_8b_instruct.model import Llama3_8B 8 | 9 | if __name__ == "__main__": 10 | llama3_quantize( 11 | quantized_model_cls=Model, fp_model_cls=Llama3_8B, model_id=MODEL_ID 12 | ) 13 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_8b_instruct/requirements.txt: -------------------------------------------------------------------------------- 1 | datasets==2.14.5 2 | onnx==1.16.2 3 | transformers==4.45.0 4 | huggingface_hub==0.23.2 5 | sentencepiece==0.2.0 6 | psutil 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_8b_instruct/test.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | import pytest 6 | 7 | from qai_hub_models.models.llama_v3_8b_instruct.demo import llama_3_chat_demo 8 | 9 | 10 | @pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.") 11 | @pytest.mark.slow_cloud 12 | def test_demo() -> None: 13 | # Run demo and verify it does not crash 14 | llama_3_chat_demo(is_test=True) 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_taide_8b_chat/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.llama.app import ChatApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import Llama3_TAIDE as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_taide_8b_chat/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon X Elite CRD 3 | - Snapdragon 8 Elite QRD 4 | supported_chipsets: 5 | - qualcomm-snapdragon-8-elite 6 | - qualcomm-snapdragon-x-elite 7 | precisions: 8 | w4a16: 9 | components: 10 | Llama3-TAIDE-LX-8B-Chat-Alpha1: 11 | performance_metrics: 12 | Snapdragon 8 Elite QRD: 13 | qnn: 14 | time_to_first_token_range_milliseconds: 15 | min: 159.383 16 | max: 5100.256 17 | tokens_per_second: 12.9262 18 | Snapdragon X Elite CRD: 19 | qnn: 20 | time_to_first_token_range_milliseconds: 21 | min: 211.644 22 | max: 6772.608 23 | tokens_per_second: 10.0367 24 | 
-------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_taide_8b_chat/requirements.txt: -------------------------------------------------------------------------------- 1 | onnx==1.16.2 2 | transformers==4.45.0 3 | huggingface_hub==0.23.2 4 | sentencepiece==0.2.0 5 | psutil 6 | -------------------------------------------------------------------------------- /qai_hub_models/models/llama_v3_taide_8b_chat/test.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | import pytest 6 | 7 | from qai_hub_models.models.llama_v3_taide_8b_chat.demo import llama_3_taide_chat_demo 8 | 9 | 10 | @pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.") 11 | @pytest.mark.slow_cloud 12 | def test_demo() -> None: 13 | # Run demo and verify it does not crash 14 | llama_3_taide_chat_demo(is_test=True) 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/mask2former/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.mask2former.app import Mask2FormerApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import Mask2Former as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/mask2former/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/mediapipe_face/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import MediaPipeFaceApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import MediaPipeFace as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/mediapipe_hand/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import MediaPipeHandApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import MediaPipeHand as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/mediapipe_pose/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import MediaPipePoseApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import MediaPipePose as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/mediapipe_selfie/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .model import MODEL_ID # noqa: F401 6 | from .model import SelfieSegmentation as Model # noqa: F401 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/mediapipe_selfie/requirements.txt: -------------------------------------------------------------------------------- 1 | tflite==2.10.0 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/midas/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.depth_estimation.app import ( # noqa: F401 6 | DepthEstimationApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import Midas as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/midas/requirements.txt: -------------------------------------------------------------------------------- 1 | timm==1.0.15 2 | matplotlib==3.7.5 3 | scipy>=1.8.1,<2 # nyuv2 dataset 4 | -------------------------------------------------------------------------------- /qai_hub_models/models/ministral_3b/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w8a8: 7 | components: 8 | Ministral-3B: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 102.379 14 | max: 3276.128 15 | 
tokens_per_second: 18.79867 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/mistral_3b/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | Mistral-3B: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 92.289 14 | max: 2953.2736 15 | tokens_per_second: 21.05 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/mistral_7b_instruct_v0_3/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.mistral_7b_instruct_v0_3.model import MODEL_ID # noqa: F401 6 | from qai_hub_models.models.mistral_7b_instruct_v0_3.model import ( # noqa: F401 7 | Mistral_7B_Instruct_v0_3 as Model, 8 | ) 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/mistral_7b_instruct_v0_3/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | Mistral-7B-Instruct-v0.3: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 165.650 14 | max: 5300.800 15 | tokens_per_second: 12.56 16 | -------------------------------------------------------------------------------- 
/qai_hub_models/models/mnasnet05/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import MNASNet05 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/mnasnet05/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.mnasnet05.model import MODEL_ID, MNASNet05 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(MNASNet05, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/mnasnet05/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 13 | 14 | 15 | class MNASNet05(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> MNASNet05: 18 | net = tv_models.mnasnet0_5(weights=weights) 19 | return cls(net) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobile_vit/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import MobileVIT as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobile_vit/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import MobileNetV2 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v2/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.mobilenet_v2.model import MODEL_ID, MobileNetV2 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(MobileNetV2, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v3_large/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import MobileNetV3Large as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v3_large/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.mobilenet_v3_large.model import MODEL_ID, MobileNetV3Large 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(MobileNetV3Large, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v3_large/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 13 | 14 | 15 | class MobileNetV3Large(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> MobileNetV3Large: 18 | net = tv_models.mobilenet_v3_large(weights=weights) 19 | return cls(net) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v3_small/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import MobileNetV3Small as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v3_small/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.mobilenet_v3_small.model import MODEL_ID, MobileNetV3Small 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(MobileNetV3Small, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilenet_v3_small/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 13 | 14 | 15 | class MobileNetV3Small(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> MobileNetV3Small: 18 | net = tv_models.mobilenet_v3_small(weights=weights) 19 | return cls(net) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilesam/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.sam.app import SAMApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import MobileSAM as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/mobilesam/requirements.txt: -------------------------------------------------------------------------------- 1 | pycocotools==2.0.7 2 | matplotlib==3.7.5 3 | timm==1.0.15 4 | -------------------------------------------------------------------------------- /qai_hub_models/models/movenet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import MovenetApp # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import Movenet as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/nasnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import NASNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/nasnet/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.nasnet.model import MODEL_ID, NASNet 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(NASNet, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/nasnet/requirements.txt: -------------------------------------------------------------------------------- 1 | timm==1.0.15 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/nomic_embed_text/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .model import MODEL_ID # noqa: F401 6 | from .model import NomicEmbedText as Model # noqa: F401 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/nomic_embed_text/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | einops==0.3.2 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/openai_clip/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import ClipApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import OpenAIClip as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/openai_clip/requirements.txt: -------------------------------------------------------------------------------- 1 | ftfy==6.1.1 2 | regex==2023.10.3 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/pidnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.segmentation.app import ( # noqa: F401 6 | SegmentationApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import PidNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/pidnet/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.segmentation.demo import segmentation_demo 6 | from qai_hub_models.models.pidnet.model import INPUT_IMAGE_ADDRESS, MODEL_ID, PidNet 7 | 8 | 9 | def main(is_test: bool = False): 10 | segmentation_demo(PidNet, MODEL_ID, INPUT_IMAGE_ADDRESS, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/plamo_1b/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | PLaMo-1B: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 31.448 14 | max: 1006.336 15 | tokens_per_second: 68.21 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/posenet_mobilenet/__init__.py: -------------------------------------------------------------------------------- 1 | # 
--------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import PosenetApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import PosenetMobilenet as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/posenet_mobilenet/requirements.txt: -------------------------------------------------------------------------------- 1 | xtcocotools==1.14.3 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/quicksrnetlarge/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import QuickSRNetLarge as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/quicksrnetlarge/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo 6 | from qai_hub_models.models.quicksrnetlarge.model import MODEL_ID, QuickSRNetLarge 7 | 8 | 9 | # Run QuickSRNet end-to-end on a sample image. 10 | # The demo will display an upscaled image 11 | def main(is_test: bool = False): 12 | super_resolution_demo( 13 | model_cls=QuickSRNetLarge, 14 | model_id=MODEL_ID, 15 | is_test=is_test, 16 | ) 17 | 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /qai_hub_models/models/quicksrnetmedium/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import QuickSRNetMedium as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/quicksrnetmedium/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo 6 | from qai_hub_models.models.quicksrnetmedium.model import MODEL_ID, QuickSRNetMedium 7 | 8 | 9 | # Run QuickSRNet end-to-end on a sample image. 10 | # The demo will display an upscaled image 11 | def main(is_test: bool = False): 12 | super_resolution_demo( 13 | model_cls=QuickSRNetMedium, 14 | model_id=MODEL_ID, 15 | is_test=is_test, 16 | ) 17 | 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /qai_hub_models/models/quicksrnetsmall/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import QuickSRNetSmall as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/quicksrnetsmall/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo 6 | from qai_hub_models.models.quicksrnetsmall.model import MODEL_ID, QuickSRNetSmall 7 | 8 | 9 | # Run QuickSRNet end-to-end on a sample image. 10 | # The demo will display an upscaled image 11 | def main(is_test: bool = False): 12 | super_resolution_demo( 13 | model_cls=QuickSRNetSmall, 14 | model_id=MODEL_ID, 15 | is_test=is_test, 16 | ) 17 | 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /qai_hub_models/models/qwen2_7b_instruct/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.qwen2_7b_instruct.model import MODEL_ID # noqa: F401 6 | from qai_hub_models.models.qwen2_7b_instruct.model import ( # noqa: F401 7 | Qwen2_7B_Instruct as Model, 8 | ) 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/qwen2_7b_instruct/perf.yaml: -------------------------------------------------------------------------------- 1 | supported_devices: 2 | - Snapdragon 8 Elite QRD 3 | supported_chipsets: 4 | - qualcomm-snapdragon-8-elite 5 | precisions: 6 | w4a16: 7 | components: 8 | Qwen2-7B-Instruct: 9 | performance_metrics: 10 | Snapdragon 8 Elite QRD: 11 | qnn: 12 | time_to_first_token_range_milliseconds: 13 | min: 170.593 14 | max: 5458.976 15 | tokens_per_second: 13.65 16 | --------------------------------------------------------------------------------
/qai_hub_models/models/real_esrgan_general_x4v3/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import Real_ESRGAN_General_x4v3 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/real_esrgan_general_x4v3/requirements.txt: -------------------------------------------------------------------------------- 1 | seaborn==0.11.0 2 | basicsr==1.4.2 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/real_esrgan_x4plus/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import Real_ESRGAN_x4plus as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/real_esrgan_x4plus/requirements.txt: -------------------------------------------------------------------------------- 1 | seaborn==0.11.0 2 | basicsr==1.4.2 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/regnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import RegNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/regnet/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.regnet.model import MODEL_ID, RegNet 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(RegNet, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/regnet/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ( 10 | ImagenetClassifierWithModelBuilder, 11 | ) 12 | 13 | MODEL_ID = __name__.split(".")[-2] 14 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 15 | MODEL_ASSET_VERSION = 3 16 | 17 | 18 | class RegNet(ImagenetClassifierWithModelBuilder): 19 | model_builder = tv_models.regnet_x_3_2gf 20 | DEFAULT_WEIGHTS = DEFAULT_WEIGHTS 21 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet101/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNet101 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet101/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.resnet101.model import MODEL_ID, ResNet101 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(ResNet101, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet101/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ( 10 | ImagenetClassifierWithModelBuilder, 11 | ) 12 | 13 | MODEL_ID = __name__.split(".")[-2] 14 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 15 | 16 | 17 | class ResNet101(ImagenetClassifierWithModelBuilder): 18 | model_builder = tv_models.resnet101 19 | DEFAULT_WEIGHTS = DEFAULT_WEIGHTS 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet18/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNet18 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet18/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.resnet18.model import MODEL_ID, ResNet18 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(ResNet18, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet18/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ( 10 | ImagenetClassifierWithModelBuilder, 11 | ) 12 | 13 | MODEL_ID = __name__.split(".")[-2] 14 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 15 | 16 | 17 | class ResNet18(ImagenetClassifierWithModelBuilder): 18 | model_builder = tv_models.resnet18 19 | DEFAULT_WEIGHTS = DEFAULT_WEIGHTS 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet50/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNet50 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet50/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.resnet50.model import MODEL_ID, ResNet50 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(ResNet50, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet50/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ( 10 | ImagenetClassifierWithModelBuilder, 11 | ) 12 | 13 | MODEL_ID = __name__.split(".")[-2] 14 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 15 | 16 | 17 | class ResNet50(ImagenetClassifierWithModelBuilder): 18 | model_builder = tv_models.resnet50 19 | DEFAULT_WEIGHTS = DEFAULT_WEIGHTS 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet_2plus1d/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.video_classifier.app import ( # noqa: F401 6 | KineticsClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNet2Plus1D as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet_2plus1d/requirements.txt: -------------------------------------------------------------------------------- 1 | av==14.0.1 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet_3d/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.video_classifier.app import ( # noqa: F401 6 | KineticsClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNet3D as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet_3d/requirements.txt: -------------------------------------------------------------------------------- 1 | av==14.0.1 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet_mixed/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.video_classifier.app import ( # noqa: F401 6 | KineticsClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNetMixed as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnet_mixed/requirements.txt: -------------------------------------------------------------------------------- 1 | av==14.0.1 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnext101/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNeXt101 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnext101/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.resnext101.model import MODEL_ID, ResNeXt101 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(ResNeXt101, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnext101/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ( 10 | ImagenetClassifierWithModelBuilder, 11 | ) 12 | 13 | MODEL_ID = __name__.split(".")[-2] 14 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 15 | 16 | 17 | class ResNeXt101(ImagenetClassifierWithModelBuilder): 18 | model_builder = tv_models.resnext101_32x8d 19 | DEFAULT_WEIGHTS = DEFAULT_WEIGHTS 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnext50/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ResNeXt50 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnext50/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.resnext50.model import MODEL_ID, ResNeXt50 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(ResNeXt50, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/resnext50/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V2" 13 | 14 | 15 | class ResNeXt50(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ResNeXt50: 18 | net = tv_models.resnext50_32x4d(weights=weights) 19 | return cls(net) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/riffusion/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.riffusion.model import MODEL_ID # noqa: F401 6 | from qai_hub_models.models.riffusion.model import Riffusion as Model # noqa: F401 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/riffusion/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | diffusers[torch]==0.31.0 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/rtmdet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import RTMDetApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import RTMDet as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/rtmdet/requirements.txt: -------------------------------------------------------------------------------- 1 | chumpy==0.71 2 | mmdet==3.3.0+mmcv220 3 | mmcv==2.2.0 4 | ultralytics==8.0.193 5 | object-detection-metrics==0.4.post1 6 | fiftyone==1.0.1 7 | torchmetrics==1.4.0.post0 # uses by yolo_seg models 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/rtmpose_body2d/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import RTMPosebody2dApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import RTMPosebody2d as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/rtmpose_body2d/requirements.txt: -------------------------------------------------------------------------------- 1 | chumpy==0.71 2 | mmdet==3.3.0+mmcv220 3 | mmpose==1.2.0 4 | mmcv==2.2.0 5 | matplotlib==3.7.3 6 | -------------------------------------------------------------------------------- /qai_hub_models/models/segformer_base/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.segmentation.app import ( # noqa: F401 6 | SegmentationApp as App, 7 | ) 8 | 9 | from .model import MODEL_ASSET_VERSION, MODEL_ID # noqa: F401 10 | from .model import SegformerBase as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/segformer_base/requirement.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | -------------------------------------------------------------------------------- /qai_hub_models/models/sesr_m5/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import SESR_M5 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/sesr_m5/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo 6 | from qai_hub_models.models.sesr_m5.model import MODEL_ID, SESR_M5 7 | 8 | 9 | # Run QuickSRNet end-to-end on a sample image. 10 | # The demo will display an upscaled image 11 | def main(is_test: bool = False): 12 | super_resolution_demo( 13 | model_cls=SESR_M5, 14 | model_id=MODEL_ID, 15 | is_test=is_test, 16 | ) 17 | 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /qai_hub_models/models/shufflenet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import ShufflenetV2 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/shufflenet_v2/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.shufflenet_v2.model import MODEL_ID, ShufflenetV2 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(ShufflenetV2, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/shufflenet_v2/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 13 | 14 | 15 | class ShufflenetV2(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ShufflenetV2: 18 | net = tv_models.shufflenet_v2_x0_5(weights=weights) 19 | return cls(net) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/sinet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import SINetApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import SINet as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/squeezenet1_1/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import SqueezeNet as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/squeezenet1_1/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.squeezenet1_1.model import MODEL_ID, SqueezeNet 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(SqueezeNet, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v1_5/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | # isort: off 6 | from qai_hub_models.models.stable_diffusion_v1_5.model import ( # noqa: F401 7 | MODEL_ID, 8 | ) 9 | from qai_hub_models.models.stable_diffusion_v1_5.model import ( # noqa: F401 10 | StableDiffusionV1_5_Quantized as Model, 11 | ) 12 | 13 | # isort: on 14 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v1_5/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.stable_diffusion.demo import stable_diffusion_demo 6 | from qai_hub_models.models.stable_diffusion_v1_5 import MODEL_ID, Model 7 | 8 | if __name__ == "__main__": 9 | stable_diffusion_demo(MODEL_ID, Model) 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v1_5/quantize.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.stable_diffusion.quantize import ( 6 | stable_diffusion_quantize, 7 | ) 8 | from qai_hub_models.models.stable_diffusion_v1_5 import MODEL_ID, Model 9 | 10 | if __name__ == "__main__": 11 | stable_diffusion_quantize( 12 | model_cls=Model, 13 | model_id=MODEL_ID, 14 | default_num_steps=Model.default_num_steps, 15 | ) 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v1_5/requirements.txt: -------------------------------------------------------------------------------- 1 | aimet-onnx==2.6.0; sys_platform == 'linux' and python_version == "3.10" 2 | transformers==4.51.3 3 | diffusers[torch]==0.31.0 4 | onnxsim<=0.4.36 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v2_1/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | # isort: off 6 | from qai_hub_models.models.stable_diffusion_v2_1.model import ( # noqa: F401 7 | MODEL_ID, 8 | ) 9 | from qai_hub_models.models.stable_diffusion_v2_1.model import ( # noqa: F401 10 | StableDiffusionV2_1_Quantized as Model, 11 | ) 12 | 13 | # isort: on 14 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v2_1/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. 
All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.stable_diffusion.demo import stable_diffusion_demo 6 | from qai_hub_models.models.stable_diffusion_v2_1 import MODEL_ID, Model 7 | 8 | if __name__ == "__main__": 9 | stable_diffusion_demo(MODEL_ID, Model) 10 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v2_1/quantize.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.stable_diffusion.quantize import ( 6 | stable_diffusion_quantize, 7 | ) 8 | from qai_hub_models.models.stable_diffusion_v2_1 import MODEL_ID, Model 9 | 10 | if __name__ == "__main__": 11 | stable_diffusion_quantize( 12 | model_cls=Model, 13 | model_id=MODEL_ID, 14 | default_num_steps=Model.default_num_steps, 15 | ) 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/stable_diffusion_v2_1/requirements.txt: -------------------------------------------------------------------------------- 1 | aimet-onnx==2.6.0; sys_platform == 'linux' and python_version == "3.10" 2 | transformers==4.51.3 3 | diffusers[torch]==0.31.0 4 | onnxsim<=0.4.36 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/swin_base/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import SwinBase as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/swin_base/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.swin_base.model import MODEL_ID, SwinBase 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(SwinBase, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/swin_small/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import SwinSmall as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/swin_small/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.swin_small.model import MODEL_ID, SwinSmall 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(SwinSmall, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/swin_tiny/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import SwinTiny as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/swin_tiny/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.swin_tiny.model import MODEL_ID, SwinTiny 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(SwinTiny, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/trocr/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import TrOCRApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import TrOCR as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/trocr/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | sentencepiece==0.2.0 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/unet_segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .model import MODEL_ID # noqa: F401 6 | from .model import UNet as Model # noqa: F401 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/unet_segmentation/app.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | import torch 6 | 7 | from qai_hub_models.models._shared.segmentation.app import SegmentationApp 8 | 9 | 10 | class UNetSegmentationApp(SegmentationApp): 11 | def normalize_input(self, image: torch.Tensor) -> torch.Tensor: 12 | # Keep as [0, 1] 13 | return image 14 | -------------------------------------------------------------------------------- /qai_hub_models/models/video_mae/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.video_mae.app import VideoMAEApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import VideoMAE as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/video_mae/app.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torch 8 | 9 | from qai_hub_models.models._shared.video_classifier.app import KineticsClassifierApp 10 | from qai_hub_models.models._shared.video_classifier.utils import preprocess_video_224 11 | 12 | 13 | class VideoMAEApp(KineticsClassifierApp): 14 | def preprocess_input_tensor(self, tensor: torch.Tensor) -> torch.Tensor: 15 | return preprocess_video_224(tensor) 16 | -------------------------------------------------------------------------------- /qai_hub_models/models/video_mae/requirements.txt: -------------------------------------------------------------------------------- 1 | av==14.0.1 2 | transformers==4.51.3 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/vit/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import VIT as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/vit/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.vit.model import MODEL_ID, VIT 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(VIT, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/vit/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 13 | 14 | 15 | class VIT(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> VIT: 18 | net = tv_models.vit_b_16(weights=weights) 19 | return cls(net) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_base_en/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.whisper.app import WhisperApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import WhisperBaseEn as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_base_en/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.whisper.demo import whisper_demo 6 | from qai_hub_models.models.whisper_base_en.model import WhisperBaseEn 7 | 8 | 9 | def main(is_test: bool = False): 10 | whisper_demo(WhisperBaseEn, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_base_en/requirements.txt: -------------------------------------------------------------------------------- 1 | openai-whisper==20231117 2 | audio2numpy==0.1.2 3 | samplerate==0.2.1 4 | scipy>=1.8.1,<2 # 1.8.1 is for AIMET 5 | sounddevice==0.5.1 6 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_small_en/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.whisper.app import WhisperApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import WhisperSmallEn as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_small_en/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.whisper.demo import whisper_demo 6 | from qai_hub_models.models.whisper_small_en.model import WhisperSmallEn 7 | 8 | 9 | def main(is_test: bool = False): 10 | whisper_demo(WhisperSmallEn, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_small_en/requirements.txt: -------------------------------------------------------------------------------- 1 | openai-whisper==20231117 2 | audio2numpy==0.1.2 3 | samplerate==0.2.1 4 | scipy>=1.8.1,<2 # 1.8.1 is for AIMET 5 | sounddevice==0.5.1 6 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_small_v2/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.hf_whisper.app import ( # noqa: F401 6 | HfWhisperApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import WhisperSmallV2 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_small_v2/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.hf_whisper.demo import hf_whisper_demo # noqa 6 | from qai_hub_models.models.whisper_small_v2.model import WhisperSmallV2 # noqa 7 | 8 | 9 | def main(is_test: bool = False): 10 | hf_whisper_demo(WhisperSmallV2, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_small_v2/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.51.3 2 | audio2numpy==0.1.2 3 | samplerate==0.2.1 4 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_small_v2/test.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.hf_whisper.test_utils import ( 6 | run_test_transcribe, 7 | run_test_wrapper_numerics, 8 | ) 9 | from qai_hub_models.models.whisper_small_v2.demo import main as demo_main 10 | from qai_hub_models.models.whisper_small_v2.model import WhisperSmallV2 11 | 12 | 13 | def test_numerics(): 14 | run_test_wrapper_numerics(WhisperSmallV2) 15 | 16 | 17 | def test_transcribe(): 18 | run_test_transcribe(WhisperSmallV2) 19 | 20 | 21 | def test_demo(): 22 | demo_main(is_test=True) 23 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_tiny_en/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.whisper.app import WhisperApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import WhisperTinyEn as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_tiny_en/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.whisper.demo import whisper_demo 6 | from qai_hub_models.models.whisper_tiny_en.model import WhisperTinyEn 7 | 8 | 9 | def main(is_test: bool = False): 10 | whisper_demo(WhisperTinyEn, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/whisper_tiny_en/requirements.txt: -------------------------------------------------------------------------------- 1 | openai-whisper==20231117 2 | audio2numpy==0.1.2 3 | samplerate==0.2.1 4 | scipy>=1.8.1,<2 # 1.8.1 is for AIMET 5 | sounddevice==0.5.1 6 | -------------------------------------------------------------------------------- /qai_hub_models/models/wideresnet50/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 6 | ImagenetClassifierApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import WideResNet50 as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/wideresnet50/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo 6 | from qai_hub_models.models.wideresnet50.model import MODEL_ID, WideResNet50 7 | 8 | 9 | def main(is_test: bool = False): 10 | imagenet_demo(WideResNet50, MODEL_ID, is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/wideresnet50/model.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torchvision.models as tv_models 8 | 9 | from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier 10 | 11 | MODEL_ID = __name__.split(".")[-2] 12 | DEFAULT_WEIGHTS = "IMAGENET1K_V1" 13 | 14 | 15 | class WideResNet50(ImagenetClassifier): 16 | @classmethod 17 | def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> WideResNet50: 18 | net = tv_models.wide_resnet50_2(weights=weights) 19 | return cls(net) 20 | -------------------------------------------------------------------------------- /qai_hub_models/models/xlsr/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 6 | SuperResolutionApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import XLSR as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/xlsr/demo.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo 6 | from qai_hub_models.models.xlsr.model import MODEL_ID, XLSR 7 | 8 | 9 | def main(is_test: bool = False): 10 | super_resolution_demo(XLSR, MODEL_ID, is_test=is_test) 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /qai_hub_models/models/yamnet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import YamNetApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import YamNet as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/yamnet/requirements.txt: -------------------------------------------------------------------------------- 1 | resampy==0.4.3 2 | torchaudio>=2.1.2,<2.5.0 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov10_det/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import YoloV10DetectionApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import YoloV10Detector as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov10_det/app.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torch 8 | 9 | from qai_hub_models.models._shared.yolo.app import YoloObjectDetectionApp 10 | 11 | 12 | class YoloV10DetectionApp(YoloObjectDetectionApp): 13 | def check_image_size(self, pixel_values: torch.Tensor) -> None: 14 | """ 15 | YoloV10 does not check for spatial dim shapes for input image 16 | """ 17 | pass 18 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov10_det/requirements.txt: -------------------------------------------------------------------------------- 1 | object-detection-metrics==0.4.post1 2 | seaborn==0.11.0 3 | thop==0.1.1.post2209072238 4 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. So we have to include the requirement manually 5 | ultralytics==8.0.193 6 | shapely==2.0.3 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov11_det/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.yolov8_det.app import YoloV8DetectionApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import YoloV11Detector as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov11_det/app.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from __future__ import annotations 6 | 7 | import torch 8 | 9 | from qai_hub_models.models._shared.yolo.app import YoloObjectDetectionApp 10 | 11 | 12 | class YoloV11DetectionApp(YoloObjectDetectionApp): 13 | def check_image_size(self, pixel_values: torch.Tensor) -> None: 14 | """ 15 | YoloV11 does not check for spatial dim shapes for input image 16 | """ 17 | pass 18 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov11_det/requirements.txt: -------------------------------------------------------------------------------- 1 | object-detection-metrics==0.4.post1 2 | seaborn==0.11.0 3 | thop==0.1.1.post2209072238 4 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. 
So we have to include the requirement manually 5 | ultralytics==8.0.193 6 | shapely==2.0.3 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov11_seg/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models._shared.yolo.app import ( # noqa: F401 6 | YoloSegmentationApp as App, 7 | ) 8 | 9 | from .model import MODEL_ID # noqa: F401 10 | from .model import YoloV11Segmentor as Model # noqa: F401 11 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov11_seg/requirements.txt: -------------------------------------------------------------------------------- 1 | seaborn==0.11.0 2 | thop==0.1.1.post2209072238 3 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. So we have to include the requirement manually 4 | ultralytics==8.0.193 5 | object-detection-metrics==0.4.post1 6 | shapely==2.0.3 7 | fiftyone==1.0.1 8 | torchmetrics==1.4.0.post0 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov3/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.yolov3.app import YoloV3DetectionApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import YoloV3 as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov3/requirements.txt: -------------------------------------------------------------------------------- 1 | thop==0.1.1.post2209072238 2 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. So we have to include the requirement manually 3 | ultralytics==8.0.193 4 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov5/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.yolov5.app import YoloV5DetectionApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import YoloV5 as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov5/requirements.txt: -------------------------------------------------------------------------------- 1 | object-detection-metrics==0.4.post1 2 | shapely==2.0.3 3 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. 
So we have to include the requirement manually 4 | ultralytics==8.0.193 # used by shared YOLO app 5 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov6/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.yolov6.app import YoloV6DetectionApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import YoloV6 as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov6/requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. So we have to include the requirement manually 2 | ultralytics==8.0.193 # used by shared YOLO app 3 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov7/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.yolov7.app import YoloV7DetectionApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import YoloV7 as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov7/requirements.txt: -------------------------------------------------------------------------------- 1 | object-detection-metrics==0.4.post1 2 | seaborn==0.11.0 3 | shapely==2.0.3 4 | fiftyone==1.0.1 5 | matplotlib==3.7.5 # Ultralytics does not specify a matplotlib verion, and the latest matplotlib breaks it. So we have to include the requirement manually 6 | ultralytics==8.0.193 # used by shared YOLO app 7 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov8_det/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .app import YoloV8DetectionApp as App # noqa: F401 6 | from .model import MODEL_ID # noqa: F401 7 | from .model import YoloV8Detector as Model # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolov8_det/app.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
class YoloV8DetectionApp(YoloObjectDetectionApp):
    """Object-detection app for YoloV8, built on the shared YOLO app pipeline."""

    def check_image_size(self, pixel_values: torch.Tensor) -> None:
        """
        Intentional no-op override: YoloV8 places no constraint on the input
        image's spatial dimensions, so no size validation is performed here.
        """
        return None
matplotlib==3.7.5 # Ultralytics does not specify a matplotlib version, and the latest matplotlib breaks it. So we have to include the requirement manually
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from qai_hub_models.models.yolox.app import YoloXDetectionApp as App # noqa: F401 6 | 7 | from .model import MODEL_ID # noqa: F401 8 | from .model import YoloX as Model # noqa: F401 9 | -------------------------------------------------------------------------------- /qai_hub_models/models/yolox/requirements.txt: -------------------------------------------------------------------------------- 1 | object-detection-metrics==0.4.post1 2 | ultralytics==8.0.193 # used by shared YOLO app 3 | loguru==0.7.3 4 | shapely==2.0.3 5 | -------------------------------------------------------------------------------- /qai_hub_models/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | boto3>=1.34,<1.36 # for fetching internal test assets 2 | botocore>=1.34,<1.36 # for fetching internal test assets 3 | jinja2<3.2 # for code generation 4 | mypy==1.13.0 # for pre-commit 5 | scipy>=1.8.1,<2 # for fetching datasets; TODO, this needs to be removed 6 | pre-commit==4.0.1 7 | pytest>7,<9 8 | pytest-cov>=5,<5.2 9 | pytest-xdist>3,<4 10 | types-pillow==10.2.0.20240213 # for pre-commit 11 | types-tabulate==0.9.0.20240106 # for pre-commit 12 | types-requests==2.31.0.6 # for pre-commit 13 | wheel==0.44.0 # only used during release 14 | packaging>23,<24 # only used during release 15 | adbutils>=2.8.0 # for a script that generates LLM perf numbers 16 | boto3-stubs[s3]==1.36.9 # for pre-commit 17 | keyrings.envvars==1.1.0 # For setting pypi credentials in CI 18 | -------------------------------------------------------------------------------- /qai_hub_models/requirements.txt: -------------------------------------------------------------------------------- 1 | Pillow>10,<12 2 | gdown==4.7.1 3 | gitpython==3.1.42 4 | huggingface_hub>=0.23.1,<1.0 5 | ipython==8.12.3 6 | numpy<2 7 | onnx>=1.16.1 # 1.16.1 is allowed for 
compatibility with AIMET-ONNX
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | from .performance_diff import PerformanceDiff # noqa: F401 6 | from .performance_summary import ModelPerfSummary # noqa: F401 7 | from .scorecard_job import CompileScorecardJob, ProfileScorecardJob # noqa: F401 8 | -------------------------------------------------------------------------------- /qai_hub_models/scorecard/results/test/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/test/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/test/test_models/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
def test_version():
    """The package version string must be a valid PEP 440 version."""
    # packaging raises InvalidVersion on a malformed string, which fails
    # the test; a clean parse is the pass condition.
    Version(__version__)
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/utils/aimet/__init__.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | -------------------------------------------------------------------------------- /qai_hub_models/utils/default_export_device.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # --------------------------------------------------------------------- 5 | DEFAULT_EXPORT_DEVICE = "Samsung Galaxy S24 (Family)" 6 | -------------------------------------------------------------------------------- /qai_hub_models/utils/llm_helpers.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
def get_kv_cache_names(start: int, end: int) -> list[str]:
    """
    Return output tensor names for KV-cache entries with indices in [start, end).

    Produces "past_key_{i}_out" for every index first, then
    "past_value_{i}_out" for every index, in ascending index order.

    Parameters:
        start: first cache index (inclusive).
        end: last cache index (exclusive); an empty range yields [].

    Returns:
        Deterministically ordered list of output names.
    """
    # Iterate a tuple, not a set literal: set iteration order for strings is
    # not stable across interpreter runs (hash randomization), which made the
    # generated name order nondeterministic in the original implementation.
    return [
        f"past_{field}_{num}_out"
        for field in ("key", "value")
        for num in range(start, end)
    ]