├── aimet_zoo_torch
├── __init__.py
├── srgan
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── abpn
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── abpn_28_2x_w8a8.json
│   │   │   ├── abpn_28_3x_w8a8.json
│   │   │   ├── abpn_28_4x_w8a8.json
│   │   │   ├── abpn_32_2x_w8a8.json
│   │   │   ├── abpn_32_3x_w8a8.json
│   │   │   └── abpn_32_4x_w8a8.json
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
├── bert
│   ├── model
│   │   ├── __init__.py
│   │   ├── utils
│   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   └── __init__.py
│   │   └── baseline_models
│   │   │   ├── __init__.py
│   │   │   └── bert
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── common
│   ├── __init__.py
│   ├── utils
│   │   ├── __init__.py
│   │   └── utils.py
│   └── super_resolution
│   │   └── __init__.py
├── gpt2
│   ├── model
│   │   ├── __init__.py
│   │   ├── huggingface
│   │   │   ├── __init__.py
│   │   │   ├── baseline_models
│   │   │   │   ├── __init__.py
│   │   │   │   └── gpt2
│   │   │   │   │   └── __init__.py
│   │   │   └── elementwise_ops.py
│   │   └── model_cards
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── __init__.py
│   └── dataloader
│   │   └── __init__.py
├── salsanext
│   ├── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── models
│   │   ├── __init__.py
│   │   ├── common
│   │   ├── __init__.py
│   │   └── sync_batchnorm
│   │   │   └── __init__.py
│   │   ├── tasks
│   │   ├── __init__.py
│   │   └── semantic
│   │   │   ├── modules
│   │   │   └── __init__.py
│   │   │   ├── dataset
│   │   │   └── kitti
│   │   │   │   └── __init__.py
│   │   │   ├── __init__.py
│   │   │   └── postproc
│   │   │   └── __init__.py
│   │   └── model_cards
│   │   ├── salsanext_w8a8.json
│   │   └── salsanext_w4a8.json
├── sesr
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
├── ssd_res50
│   ├── __init__.py
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── ssd_res50_w8a8.json
│   ├── dataloader
│   │   └── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── vit
│   ├── model
│   │   ├── __init__.py
│   │   ├── huggingface
│   │   │   ├── __init__.py
│   │   │   ├── baseline_models
│   │   │   │   ├── __init__.py
│   │   │   │   └── vit
│   │   │   │   │   └── __init__.py
│   │   │   └── elementwise_ops.py
│   │   └── model_cards
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── xlsr
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── xlsr_2x_w8a8.json
│   │   │   ├── xlsr_3x_w8a8.json
│   │   │   └── xlsr_4x_w8a8.json
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
├── deeplabv3
│   ├── model
│   │   ├── __init__.py
│   │   ├── modeling
│   │   │   ├── __init__.py
│   │   │   └── sync_batchnorm
│   │   │   │   ├── __init__.py
│   │   │   │   └── unittest.py
│   │   ├── utils
│   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── dlv3_w4a8.json
│   │   │   └── dlv3_w8a8.json
│   │   ├── dataloaders
│   │   │   └── datasets
│   │   │   │   └── __init__.py
│   │   └── LICENSE
│   ├── evaluators
│   │   └── __init__.py
│   ├── __init__.py
│   └── dataloader
│   │   └── __init__.py
├── deepspeech2
│   ├── __init__.py
│   └── evaluators
│   │   ├── __init__.py
│   │   └── requirements.txt
├── gpunet0
│   ├── model
│   │   ├── __init__.py
│   │   ├── src
│   │   │   ├── __init__.py
│   │   │   ├── configs
│   │   │   │   ├── __init__.py
│   │   │   │   └── batch1
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── GV100
│   │   │   │   │   └── __init__.py
│   │   │   └── models
│   │   │   │   └── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── gpunet0_w8a8.json
│   ├── evaluator
│   │   └── __init__.py
│   ├── __init__.py
│   └── requirements.txt
├── minilm
│   ├── model
│   │   ├── __init__.py
│   │   ├── utils
│   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   └── __init__.py
│   │   └── baseline_models
│   │   │   ├── __init__.py
│   │   │   └── bert
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── mobilevit
│   ├── model
│   │   ├── __init__.py
│   │   ├── huggingface
│   │   │   ├── __init__.py
│   │   │   ├── baseline_models
│   │   │   │   ├── __init__.py
│   │   │   │   └── mobilevit
│   │   │   │   │   └── __init__.py
│   │   │   └── elementwise_ops.py
│   │   └── model_cards
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── poseestimation
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── rangenet
│   ├── models
│   │   ├── __init__.py
│   │   ├── train
│   │   │   ├── __init__.py
│   │   │   ├── common
│   │   │   │   └── __init__.py
│   │   │   ├── tasks
│   │   │   │   ├── __init__.py
│   │   │   │   └── semantic
│   │   │   │   │   ├── config
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── arch
│   │   │   │   │   │   └── __init__.py
│   │   │   │   │   └── labels
│   │   │   │   │   │   └── __init__.py
│   │   │   │   │   ├── dataset
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── kitti
│   │   │   │   │   │   └── __init__.py
│   │   │   │   │   ├── decoders
│   │   │   │   │   └── __init__.py
│   │   │   │   │   ├── modules
│   │   │   │   │   └── __init__.py
│   │   │   │   │   ├── postproc
│   │   │   │   │   └── __init__.py
│   │   │   │   │   └── __init__.py
│   │   │   ├── backbones
│   │   │   │   └── __init__.py
│   │   │   └── requirements.txt
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── rangenet_w4a8.json
│   │   │   └── rangenet_w8a8.json
│   ├── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── requirements.txt
├── regnet
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── regnet_x_3_2gf_w4a8.json
│   │   │   └── regnet_x_3_2gf_w8a8.json
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluator
│   │   └── __init__.py
│   └── __init__.py
├── resnet
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── resnet101_w8a8.json
│   │   │   ├── resnet18_w4a8.json
│   │   │   ├── resnet18_w8a8.json
│   │   │   ├── resnet50_w4a8.json
│   │   │   ├── resnet50_w8a8.json
│   │   │   └── resnet50_w8a16.json
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluator
│   │   └── __init__.py
│   └── __init__.py
├── resnext
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── resnext101_w8a8.json
│   ├── dataloader
│   │   ├── __init__.py
│   │   └── dataloaders_and_eval_func.py
│   ├── evaluator
│   │   └── __init__.py
│   └── __init__.py
├── roberta
│   ├── model
│   │   ├── __init__.py
│   │   ├── utils
│   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   └── __init__.py
│   │   └── baseline_models
│   │   │   ├── __init__.py
│   │   │   └── roberta
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── segnet
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── segnet_w8a8.json
│   │   │   └── segnet_w4a8.json
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluator
│   │   └── __init__.py
│   ├── requirements.txt
│   └── __init__.py
├── distilbert
│   ├── model
│   │   ├── __init__.py
│   │   ├── utils
│   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   └── __init__.py
│   │   └── baseline_models
│   │   │   ├── __init__.py
│   │   │   └── distilbert
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── ffnet
│   ├── evaluators
│   │   └── __init__.py
│   ├── model
│   │   ├── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── segmentation_ffnet40S_dBBB_mobile.json
│   │   │   ├── segmentation_ffnet54S_dBBB_mobile.json
│   │   │   ├── segmentation_ffnet78S_dBBB_mobile.json
│   │   │   └── segmentation_ffnet78S_BCC_mobile_pre_down.json
│   │   ├── __init__.py
│   │   ├── config.py
│   │   ├── model_registry.py
│   │   └── utils.py
│   ├── dataloader
│   │   ├── cityscapes
│   │   │   ├── __init__.py
│   │   │   ├── utils
│   │   │   │   ├── __init__.py
│   │   │   │   └── progress_bar.py
│   │   │   ├── dataloader
│   │   │   │   └── __init__.py
│   │   │   ├── attribution.txt
│   │   │   └── cityscapes.py
│   │   └── __init__.py
│   └── __init__.py
├── hrnet_posenet
│   ├── models
│   │   ├── __init__.py
│   │   ├── core
│   │   │   └── __init__.py
│   │   ├── nms
│   │   │   ├── __init__.py
│   │   │   ├── gpu_nms.hpp
│   │   │   ├── cpu_nms.cpython-38-x86_64-linux-gnu.so
│   │   │   ├── gpu_nms.cpython-38-x86_64-linux-gnu.so
│   │   │   └── gpu_nms.pyx
│   │   ├── utils
│   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── hrnet_posenet_w8a8.json
│   │   │   └── hrnet_posenet_w4a8.json
│   │   ├── config
│   │   │   └── __init__.py
│   │   ├── models
│   │   │   └── __init__.py
│   │   └── dataset
│   │   │   └── __init__.py
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluators
│   │   ├── __init__.py
│   │   └── _init_paths.py
│   └── __init__.py
├── inverseform
│   ├── model
│   │   ├── __init__.py
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   └── progress_bar.py
│   │   ├── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── ocrnet_48_if.json
│   │   │   └── hrnet_16_slim_if.json
│   │   └── models
│   │   │   ├── loss
│   │   │   └── __init__.py
│   │   │   ├── bn_helper.py
│   │   │   └── model_loader.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── data
│   │   │   ├── __init__.py
│   │   │   └── cityscapes.py
│   │   ├── datasets
│   │   │   └── __init__.py
│   │   ├── transforms
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── mobilebert
│   ├── model
│   │   ├── __init__.py
│   │   ├── utils
│   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   └── __init__.py
│   │   └── baseline_models
│   │   │   ├── __init__.py
│   │   │   └── mobilebert
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── dataloader
│   │   ├── utils
│   │   │   └── __init__.py
│   │   └── __init__.py
│   └── __init__.py
├── mobilenetv2
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── mobilenetv2_w8a8.json
│   ├── evaluators
│   │   └── __init__.py
│   ├── __init__.py
│   └── dataloader
│   │   └── __init__.py
├── quicksrnet
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   └── __init__.py
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
├── yolox
│   ├── evaluators
│   │   └── __init__.py
│   ├── model
│   │   ├── yolo_x
│   │   │   ├── __init__.py
│   │   │   ├── utils
│   │   │   │   └── __init__.py
│   │   │   └── models
│   │   │   │   └── __init__.py
│   │   ├── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── yolox_l.json
│   │   │   └── yolox_s.json
│   │   ├── __init__.py
│   │   └── yolox_model.py
│   ├── dataloader
│   │   ├── data
│   │   │   ├── datasets
│   │   │   │   └── __init__.py
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   └── dataloaders.py
│   └── __init__.py
├── efficientnetlite0
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── efficientnetlite0_w4a8.json
│   │   │   └── efficientnetlite0_w8a8.json
│   ├── evaluators
│   │   └── __init__.py
│   ├── __init__.py
│   └── dataloader
│   │   └── __init__.py
├── ssd_mobilenetv2
│   ├── model
│   │   ├── __init__.py
│   │   ├── vision
│   │   │   ├── __init__.py
│   │   │   ├── nn
│   │   │   │   └── __init__.py
│   │   │   ├── ssd
│   │   │   │   ├── __init__.py
│   │   │   │   └── config
│   │   │   │   │   └── __init__.py
│   │   │   ├── prunning
│   │   │   │   └── __init__.py
│   │   │   ├── transforms
│   │   │   │   └── __init__.py
│   │   │   └── utils
│   │   │   │   └── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── ssd_mobilenetv2_w8a8.json
│   ├── dataloader
│   │   ├── __init__.py
│   │   └── datasets
│   │   │   └── __init__.py
│   ├── evaluators
│   │   ├── __init__.py
│   │   └── voc-model-labels.txt
│   └── __init__.py
├── uniformer_classification
│   ├── model
│   │   ├── __init__.py
│   │   ├── model_cards
│   │   │   ├── __init__.py
│   │   │   └── uniformer_classification_w8a8.json
│   │   └── image_classification
│   │   │   ├── models
│   │   │   └── __init__.py
│   │   │   ├── elementwise_ops.py
│   │   │   ├── no_scaling_scaler.py
│   │   │   └── generate_tensorboard.py
│   ├── evaluators
│   │   └── __init__.py
│   ├── requirements.txt
│   ├── __init__.py
│   └── dataloader
│   │   └── __init__.py
├── hrnet_image_classification
│   ├── model
│   │   ├── __init__.py
│   │   ├── lib
│   │   │   ├── __init__.py
│   │   │   ├── core
│   │   │   │   ├── __init__.py
│   │   │   │   └── evaluate.py
│   │   │   ├── utils
│   │   │   │   └── __init__.py
│   │   │   ├── config
│   │   │   │   └── __init__.py
│   │   │   └── models
│   │   │   │   └── __init__.py
│   │   ├── experiments
│   │   │   └── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── hrnet_w32_w8a8.json
│   ├── dataloader
│   │   ├── __init__.py
│   │   └── list
│   │   │   ├── __init__.py
│   │   │   ├── lip
│   │   │   └── __init__.py
│   │   │   └── cityscapes
│   │   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
├── hrnet_semantic_segmentation
│   ├── model
│   │   ├── __init__.py
│   │   ├── core
│   │   │   └── __init__.py
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   └── distributed.py
│   │   ├── model_cards
│   │   │   ├── __init__.py
│   │   │   ├── hrnet_sem_seg_w4a8.json
│   │   │   └── hrnet_sem_seg_w8a8.json
│   │   ├── models
│   │   │   ├── sync_bn
│   │   │   │   ├── inplace_abn
│   │   │   │   │   ├── src
│   │   │   │   │   │   └── __init__.py
│   │   │   │   │   └── __init__.py
│   │   │   │   └── __init__.py
│   │   │   └── __init__.py
│   │   ├── datasets
│   │   │   └── __init__.py
│   │   └── config
│   │   │   └── __init__.py
│   ├── dataloader
│   │   ├── __init__.py
│   │   └── list
│   │   │   ├── __init__.py
│   │   │   ├── lip
│   │   │   └── __init__.py
│   │   │   └── cityscapes
│   │   │   └── __init__.py
│   ├── evaluators
│   │   ├── __init__.py
│   │   └── experiments
│   │   │   ├── __init__.py
│   │   │   ├── lip
│   │   │   └── __init__.py
│   │   │   ├── cityscapes
│   │   │   └── __init__.py
│   │   │   └── pascal_ctx
│   │   │   └── __init__.py
│   └── __init__.py
└── mmaction2
│   ├── evaluators
│   ├── __init__.py
│   └── metrics
│   │   └── __init__.py
│   ├── model
│   ├── configs
│   │   ├── __init__.py
│   │   ├── _base_
│   │   │   ├── models
│   │   │   │   ├── bmn_400x100.py
│   │   │   │   └── __init__.py
│   │   │   ├── __init__.py
│   │   │   └── default_runtime.py
│   │   └── localization
│   │   │   ├── __init__.py
│   │   │   └── bmn
│   │   │   └── __init__.py
│   ├── __init__.py
│   ├── base_model
│   │   └── __init__.py
│   └── model_cards
│   │   └── bmn_w8a8.json
│   ├── runner
│   └── __init__.py
│   └── __init__.py
├── aimet_zoo_tensorflow
├── __init__.py
├── srgan
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── common
│   ├── __init__.py
│   ├── utils
│   │   └── __init__.py
│   └── object_detection
│   │   └── __init__.py
├── resnet50
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── retinanet
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── efficientnet
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── mobilenet_v2
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── pose_estimation
│   ├── __init__.py
│   └── evaluators
│   │   └── __init__.py
├── resnet50_tf2
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   └── resnet50_w8a8.json
│   ├── evaluators
│   │   └── __init__.py
│   ├── requirements.txt
│   └── __init__.py
├── mobiledetedgetpu
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   └── __init__.py
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
├── mobilenet_v2_tf2
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   └── mobilenetv2_w8a8.json
│   ├── evaluators
│   │   └── __init__.py
│   ├── requirements.txt
│   └── __init__.py
├── mobilenetedgetpu
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── mobilenetedgetpu_w8a8.json
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
├── ssd_mobilenet_v2
│   ├── model
│   │   ├── __init__.py
│   │   └── model_cards
│   │   │   ├── __init__.py
│   │   │   └── ssd_mobilenetv2_w8a8.json
│   ├── dataloader
│   │   └── __init__.py
│   ├── evaluators
│   │   └── __init__.py
│   └── __init__.py
└── deeplabv3plus_tf2
│   ├── requirements.txt
│   ├── dataloader
│   ├── utils.py
│   └── dataloader.py
│   ├── model
│   ├── nets
│   │   ├── Xception.py
│   │   ├── deeplab.py
│   │   └── mobilenet.py
│   └── model_cards
│   │   ├── deeplabv3plus_xception_w8a8.json
│   │   └── deeplabv3plus_mbnv2_w8a8.json
│   └── evaluators
│   └── utils_metrics.py
├── packaging
├── version.txt
├── aimet_zoo_torch_pyproject.toml
└── aimet_zoo_tensorflow_pyproject.toml
├── LICENSE.pdf
├── NOTICE.txt
├── .omniscanignore
├── images
└── logo-quic-on@h68.png
├── Jenkins
├── opencv_320_python38.patch
├── jenkins_threshold_configs.json
└── Dockerfile.tf-torch-cpu
├── AcceptanceTests
├── torch
│   ├── voc-model-labels.txt
│   ├── pytest.ini
│   ├── test_yolox_quanteval.py
│   ├── staging
│   │   ├── test_hrnet_posenet_quanteval.py
│   │   ├── test_ssd_res50_quanteval.py
│   │   ├── test_resnext_quanteval.py
│   │   └── test_uniformer_classification_quanteval.py
│   ├── test_regnet_quanteval.py
│   ├── test_gpunet0_quanteval.py
│   ├── test_resnet_quanteval.py
│   ├── test_mobilenetv2_quanteval.py
│   ├── test_vit_quanteval.py
│   └── test_efficientnetlite0_quanteval.py
├── CMakeLists.txt
└── tensorflow
│   ├── pytest.ini
│   ├── staging
│   ├── test_deeplabv3plus_xception_tf2_quanteval.py
│   └── test_mobilenet_edgetpu_quanteval.py
│   ├── test_resnet50_tf2_quanteval.py
│   └── test_mobilenet_v2_tf2_quanteval.py
└── .gitignore

--------------------------------------------------------------------------------
File contents. Every __init__.py captured in this dump is an empty package
marker; only the files with non-empty content are reproduced below.
--------------------------------------------------------------------------------

/packaging/version.txt:
1.5.0
/aimet_zoo_tensorflow/resnet50_tf2/requirements.txt:
numpy==1.19.5

/aimet_zoo_tensorflow/mobilenet_v2_tf2/requirements.txt:
numpy==1.19.5

/aimet_zoo_torch/segnet/requirements.txt:
scikit-image
torchmetrics

/LICENSE.pdf:
https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/LICENSE.pdf

/NOTICE.txt:
https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/NOTICE.txt

/aimet_zoo_tensorflow/deeplabv3plus_tf2/requirements.txt:
matplotlib==3.2.1
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/dataloader/list/lip/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/evaluators/experiments/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/train/tasks/semantic/config/arch/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/train/tasks/semantic/config/labels/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/train/tasks/semantic/dataset/kitti/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.omniscanignore: -------------------------------------------------------------------------------- 1 | .githooks 2 | .gitignore 3 | .pylintrc 4 | 5 | LICENSE.pdf 6 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_image_classification/dataloader/list/cityscapes/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/dataloader/list/cityscapes/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/evaluators/experiments/lip/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilevit/model/huggingface/baseline_models/mobilevit/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/evaluators/experiments/cityscapes/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/evaluators/experiments/pascal_ctx/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/models/sync_bn/inplace_abn/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aimet_zoo_torch/gpunet0/__init__.py: -------------------------------------------------------------------------------- 1 | 
"""GPUNet-0""" 2 | from .model.model_definition import GPUNet0 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/__init__.py: -------------------------------------------------------------------------------- 1 | """ RangeNet++ """ 2 | from .models.model_definition import RangeNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ssd_mobilenetv2/model/vision/utils/__init__.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | from .misc import * 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/model/image_classification/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .uniformer import * -------------------------------------------------------------------------------- /images/logo-quic-on@h68.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/images/logo-quic-on@h68.png -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.9.0 2 | torchvision>=0.8.1 3 | timm>=0.4.12 4 | -------------------------------------------------------------------------------- /aimet_zoo_torch/abpn/__init__.py: -------------------------------------------------------------------------------- 1 | """ loading ABPN downloader class """ 2 | from .model.model_definition import ABPN 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/__init__.py: -------------------------------------------------------------------------------- 1 | """ HRNet PoseNet """ 2 | from .models.model_definition import PoseHRNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/__init__.py: -------------------------------------------------------------------------------- 1 | """ loading ffnet model downloader class""" 2 | from .model.model_definition import FFNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/mobiledetedgetpu/__init__.py: -------------------------------------------------------------------------------- 1 | """ MobileDet Edge TPU """ 2 | from .model.model_definition import MobileDet 3 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/mobilenetedgetpu/__init__.py: -------------------------------------------------------------------------------- 1 | """ MobileNet Edge TPU """ 2 | from .model.model_definition import MobileNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/ssd_mobilenet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """ SSD-MobileNetV2 """ 2 | from .model.model_definition import SSDMobileNetV2 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/models/sync_bn/__init__.py: -------------------------------------------------------------------------------- 1 | """ init file """ 2 | from .inplace_abn import bn 3 | 
-------------------------------------------------------------------------------- /aimet_zoo_torch/deeplabv3/__init__.py: -------------------------------------------------------------------------------- 1 | """ loading deeplabv3 downloader class """ 2 | from .model.model_definition import DeepLabV3_Plus 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/bert/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting bert original model and quantized model""" 2 | from .model.model_definition import Bert 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/gpt2/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting gpt2 original model and quantized model""" 2 | from .model.model_definition import gpt2 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/sesr/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting SESR original model and quantized model""" 2 | from .model.model_definition import SESR 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/vit/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting vit original model and quantized model""" 2 | from .model.model_definition import vit 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/xlsr/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting xlsr original model and quantized model""" 2 | from .model.model_definition import XLSR 3 | -------------------------------------------------------------------------------- /Jenkins/opencv_320_python38.patch: -------------------------------------------------------------------------------- 1 | 730c730 2 | < char* str = PyString_AsString(obj); 3 | --- 4 | > const char* str = PyString_AsString(obj); 5 | -------------------------------------------------------------------------------- /aimet_zoo_torch/gpt2/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders import get_dataloaders 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | """ HRNet Semantic Segmentation """ 2 | from .model.model_definition import HRNetSemSeg 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/__init__.py: -------------------------------------------------------------------------------- 1 | """ loading downloader class """ 2 | from .model.model_definition import UniformerClassification 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/efficientnetlite0/__init__.py: -------------------------------------------------------------------------------- 1 | """loading efficientnetlite0 downloader class""" 2 | from .model.model_definition import EfficientNetLite0 3 | 
-------------------------------------------------------------------------------- /aimet_zoo_torch/minilm/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting minilm original model and quantized model""" 2 | from .model.model_definition import Minilm 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/regnet/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting regnet original model and quantized model""" 2 | from .model.model_definition import RegNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/resnet/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting resnet original model and quantized model""" 2 | from .model.model_definition import ResNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/resnext/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting resnext original model and quantized model""" 2 | from .model.model_definition import ResNext 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting roberta original model and quantized model""" 2 | from .model.model_definition import Roberta 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/segnet/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting segnet original model and quantized model""" 2 | from .model.model_definition import SegNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilevit/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting mobilevit original model and quantized model""" 2 | from .model.model_definition import mobilevit 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | """ init file """ 2 | import sys 3 | 4 | TRAIN_PATH = "../models/train" 5 | sys.path.insert(0, TRAIN_PATH) 6 | -------------------------------------------------------------------------------- /aimet_zoo_torch/distilbert/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting distilbert original model and quantized model""" 2 | from .model.model_definition import DistilBert 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilebert/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting mobilebert original model and quantized model""" 2 | from .model.model_definition import MobileBert 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/quicksrnet/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting quicksrnet original model and quantized model""" 2 
| from .model.model_definition import QuickSRNet 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/roberta/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders import get_datasets, eval_function 3 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/dataloader/utils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_tensorflow/deeplabv3plus_tf2/dataloader/utils.py -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilenetv2/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting mobilenetv2 original model and quantized model""" 2 | from .model.model_definition import MobileNetV2 3 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/model/nets/Xception.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_tensorflow/deeplabv3plus_tf2/model/nets/Xception.py -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/model/nets/deeplab.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_tensorflow/deeplabv3plus_tf2/model/nets/deeplab.py -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/resnet50_tf2/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | import resnet50 model from tensorflow built-in API 3 | ''' 4 | from tensorflow.keras.applications.resnet import ResNet50 5 | -------------------------------------------------------------------------------- /aimet_zoo_torch/bert/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders import get_datasets, get_num_labels, eval_function 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/deeplabv3/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """loading dataloader and evaluation function""" 2 | from .dataloaders_and_eval_func import get_dataloaders_and_eval_func 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders_and_eval_func import get_dataloaders_and_eval_func 3 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/dataloader/dataloader.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_tensorflow/deeplabv3plus_tf2/dataloader/dataloader.py 
-------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/model/nets/mobilenet.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_tensorflow/deeplabv3plus_tf2/model/nets/mobilenet.py -------------------------------------------------------------------------------- /aimet_zoo_torch/distilbert/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders import get_datasets, get_num_labels, eval_function 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting hrnetinverseform original model and quantized model""" 2 | from .model.model_definition import HRNetInverseForm 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilebert/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders import get_datasets, get_num_labels, eval_function 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/vit/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """modules for getting dataloaders and dataset""" 2 | from .dataloaders import get_dataloaders 3 | from .dataloaders import get_dataset 4 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/evaluators/utils_metrics.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_tensorflow/deeplabv3plus_tf2/evaluators/utils_metrics.py -------------------------------------------------------------------------------- /aimet_zoo_torch/efficientnetlite0/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders_and_eval_func import eval_func, forward_pass 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders_and_eval_func import get_dataloaders_and_eval_func 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilenetv2/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders_and_eval_func import get_dataloaders_and_eval_func 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilevit/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """modules for getting dataloaders and dataset""" 2 | from .dataloaders import get_dataloaders 3 | from .dataloaders import get_dataset 4 | 
-------------------------------------------------------------------------------- /Jenkins/jenkins_threshold_configs.json: -------------------------------------------------------------------------------- 1 | { 2 | "pylint_fail_thresholds" : { 3 | "high_priority" : "0", 4 | "normal_priority" : "0", 5 | "low_priority" : "0" 6 | } 7 | } 8 | 9 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_image_classification/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for getting hrnet image classification original model and quantized model""" 2 | from .model.model_definition import HRNetImageClassification 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/minilm/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """ datasets and eval function are defined and loaded""" 2 | from .dataloaders import get_datasets, get_num_labels, eval_function 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | """loading dataloader and evaluation function""" 2 | from .dataloaders_and_eval_func import get_dataloaders_and_eval_func 3 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/mobilenet_v2_tf2/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | import mobilenetv2 model from tensorflow built-in API 3 | ''' 4 | from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 5 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/train/tasks/semantic/postproc/__init__.py: -------------------------------------------------------------------------------- 1 | """ init """ 2 | import sys 3 | 4 | TRAIN_PATH = "../" 5 | DEPLOY_PATH = "../../deploy" 6 | sys.path.insert(0, TRAIN_PATH) 7 | -------------------------------------------------------------------------------- /aimet_zoo_torch/salsanext/models/tasks/semantic/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | import sys 3 | 4 | TRAIN_PATH = "../../" 5 | DEPLOY_PATH = "../../../deploy" 6 | sys.path.insert(0, TRAIN_PATH) 7 | -------------------------------------------------------------------------------- /aimet_zoo_torch/salsanext/models/tasks/semantic/postproc/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | import sys 3 | 4 | TRAIN_PATH = "../" 5 | DEPLOY_PATH = "../../deploy" 6 | sys.path.insert(0, TRAIN_PATH) 7 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/nms/gpu_nms.hpp: -------------------------------------------------------------------------------- 1 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, 2 | int boxes_dim, float nms_overlap_thresh, int device_id); 3 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/nms/cpu_nms.cpython-38-x86_64-linux-gnu.so: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_torch/hrnet_posenet/models/nms/cpu_nms.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/nms/gpu_nms.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quic/aimet-model-zoo/HEAD/aimet_zoo_torch/hrnet_posenet/models/nms/gpu_nms.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/dataloader/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii, Inc. and its affiliates. 4 | # pylint: skip-file 5 | 6 | from .coco import COCODataset 7 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/models/sync_bn/inplace_abn/__init__.py: -------------------------------------------------------------------------------- 1 | """ init file """ 2 | from .bn import ABN, InPlaceABN, InPlaceABNSync 3 | from .functions import ACT_RELU, ACT_LEAKY_RELU, ACT_ELU, ACT_NONE 4 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/dataloader/cityscapes/attribution.txt: -------------------------------------------------------------------------------- 1 | Cityscapes evaluation code adapted from https://github.com/Qualcomm-AI-research/InverseForm, which in turn adapts the code from https://github.com/HRNet/HRNet-Semantic-Segmentation -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/model/yolo_x/utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii Inc. All rights reserved. 4 | # pylint: skip-file 5 | 6 | from .boxes import bboxes_iou, postprocess, xyxy2xywh 7 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/train/tasks/semantic/__init__.py: -------------------------------------------------------------------------------- 1 | """ init """ 2 | import sys 3 | import os 4 | import pathlib 5 | 6 | TRAIN_PATH = str(pathlib.Path(os.path.abspath(__file__)).parent.parent) # "../../" 7 | sys.path.insert(0, TRAIN_PATH) 8 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ssd_mobilenetv2/__init__.py: -------------------------------------------------------------------------------- 1 | """ package for creating and getting ssdmobilenetv2 original model and quantized model""" 2 | from .model.model_definition import ( 3 | SSDMobileNetV2, 4 | create_mobilenetv2_ssd_lite_predictor, 5 | ) 6 | -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/dataloader/data/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii, Inc. and its affiliates. 
4 | # pylint: skip-file 5 | 6 | from .data_augment import ValTransform 7 | from .datasets import COCODataset 8 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/train/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.14.0 2 | scipy==0.19.1 3 | torch==1.1.0 4 | tensorflow==1.13.1 5 | vispy==0.5.3 6 | torchvision==0.2.2.post3 7 | opencv_contrib_python==4.1.0.25 8 | matplotlib==2.2.3 9 | Pillow==6.1.0 10 | PyYAML==5.1.1 11 | -------------------------------------------------------------------------------- /AcceptanceTests/torch/voc-model-labels.txt: -------------------------------------------------------------------------------- 1 | BACKGROUND 2 | aeroplane 3 | bicycle 4 | bird 5 | boat 6 | bottle 7 | bus 8 | car 9 | cat 10 | chair 11 | cow 12 | diningtable 13 | dog 14 | horse 15 | motorbike 16 | person 17 | pottedplant 18 | sheep 19 | sofa 20 | train 21 | tvmonitor -------------------------------------------------------------------------------- /aimet_zoo_torch/ssd_mobilenetv2/evaluators/voc-model-labels.txt: -------------------------------------------------------------------------------- 1 | BACKGROUND 2 | aeroplane 3 | bicycle 4 | bird 5 | boat 6 | bottle 7 | bus 8 | car 9 | cat 10 | chair 11 | cow 12 | diningtable 13 | dog 14 | horse 15 | motorbike 16 | person 17 | pottedplant 18 | sheep 19 | sofa 20 | train 21 | tvmonitor -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore IDE files 2 | .idea 3 | .vscode 4 | 5 | # MAC OS 6 | .DS_Store 7 | 8 | # Python 9 | *__pycache__* 10 | *.cpython-26.pyc 11 | 12 | # Jupyter notebooks 13 | .ipynb_checkpoints/ 14 | 15 | # Generated artifacts 16 | *.bak* 17 | *.whl 18 | *.egg-info/ 19 | build/ 20 | packaging/results 21 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/requirements.txt: -------------------------------------------------------------------------------- 1 | # first apt install python3-tk 2 | numpy==1.14.0 3 | torchvision==0.2.2.post3 4 | matplotlib==2.2.3 5 | tensorflow==1.13.1 6 | scipy==0.19.1 7 | torch==1.1.0 8 | vispy==0.5.3 9 | opencv_python==4.1.0.25 10 | opencv_contrib_python==4.1.0.25 11 | Pillow==6.1.0 12 | PyYAML==5.1.1 13 | -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/model/yolo_x/models/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii Inc. All rights reserved. 4 | # pylint: skip-file 5 | 6 | from .darknet import CSPDarknet, Darknet 7 | from .yolo_head import YOLOXHead 8 | from .yolo_pafpn import YOLOPAFPN 9 | from .yolox import YOLOX 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/model/__init__.py: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
5 | # 6 | # @@-COPYRIGHT-END-@@ 7 | # ============================================================================= 8 | -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/dataloader/__init__.py: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 5 | # 6 | # @@-COPYRIGHT-END-@@ 7 | # ============================================================================= 8 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/model/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # Copyright (c) 2022 Qualcomm Technologies, Inc. 3 | # All Rights Reserved. 4 | 5 | from .ffnet_S_mobile import * 6 | from .ffnet_NS_mobile import * 7 | from .ffnet_gpu_large import * 8 | from .ffnet_S_gpu_large import * 9 | from .ffnet_N_gpu_large import * 10 | from .ffnet_gpu_small import * 11 | from .ffnet_S_gpu_small import * 12 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/configs/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/configs/_base_/models/bmn_400x100.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # model settings 3 | model = dict( 4 | type='BMN_AIMET', 5 | temporal_dim=100, 6 | boundary_ratio=0.5, 7 | num_samples=32, 8 | num_samples_per_bin=3, 9 | feat_dim=400, 10 | soft_nms_alpha=0.4, 11 | soft_nms_low_threshold=0.5, 12 | soft_nms_high_threshold=0.9, 13 | post_process_top_k=100) 14 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/configs/_base_/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/configs/_base_/models/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/configs/localization/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/configs/localization/bmn/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/__init__.py: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 5 | # 6 | # @@-COPYRIGHT-END-@@ 7 | # ============================================================================= 8 | """ loading yolox model downloader class""" 9 | from .model.model_definition import YOLOX 10 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/config/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # ------------------------------------------------------------------------------ 3 | # Copyright (c) Microsoft 4 | # Licensed under the MIT License. 
5 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 6 | # ------------------------------------------------------------------------------ 7 | 8 | from .default import _C as cfg 9 | from .default import update_config 10 | from .models import MODEL_EXTRAS 11 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | """import bmn libraries""" 11 | from .bmn import * 12 | -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/model/models/bn_helper.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | import torch 3 | import functools 4 | 5 | if torch.__version__.startswith('0'): 6 | from .sync_bn.inplace_abn.bn import InPlaceABNSync 7 | BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') 8 | BatchNorm2d_class = InPlaceABNSync 9 | relu_inplace = False 10 | else: 11 | BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm 12 | relu_inplace = True 13 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/runner/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | """loading AIMET test loop""" 11 | from .loops import AIMETTestLoop 12 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_image_classification/model/lib/config/__init__.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | # ------------------------------------------------------------------------------ 3 | # Copyright (c) Microsoft 4 | # Licensed under the MIT License. 5 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 6 | # ------------------------------------------------------------------------------ 7 | 8 | from .default import _C as config 9 | from .default import update_config 10 | from .models import MODEL_EXTRAS 11 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/base_model/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | """import basemodel""" 11 | from .base_model import BaseModel 12 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/models/__init__.py: -------------------------------------------------------------------------------- 1 | """ init file """ 2 | 3 | # ------------------------------------------------------------------------------ 4 | # Copyright (c) Microsoft 5 | # Licensed under the MIT License. 6 | # Written by Ke Sun (sunk@mail.ustc.edu.cn) 7 | # ------------------------------------------------------------------------------ 8 | 9 | from __future__ import absolute_import 10 | from __future__ import division 11 | from __future__ import print_function 12 | -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/model/utils/progress_bar.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 Qualcomm Technologies, Inc. 2 | 3 | # All Rights Reserved. 4 | 5 | """module for download progress bar""" 6 | import sys 7 | 8 | 9 | def printProgressBar(i, max, postText): 10 | """ print download progress bar""" 11 | #pylint:disable = redefined-builtin 12 | n_bar = 10 13 | j = i / max 14 | sys.stdout.write("\r") 15 | sys.stdout.write(f"[{'=' * int(n_bar * j):{n_bar}s}] {postText}") 16 | sys.stdout.flush() 17 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/models/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # ------------------------------------------------------------------------------ 3 | # Copyright (c) Microsoft 4 | # Licensed under the MIT License. 5 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 6 | # ------------------------------------------------------------------------------ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | 12 | from . import pose_hrnet 13 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/evaluators/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | """ Init file to import Aimet Anet Metric """ 11 | from .anet_metric import AIMETANetMetric 12 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/dataloader/cityscapes/utils/progress_bar.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 Qualcomm Technologies, Inc. 2 | 3 | # All Rights Reserved. 
4 | """ download progress bar class""" 5 | import sys 6 | 7 | 8 | def printProgressBar(i, max, postText): 9 | """function for printing progress of downloading""" 10 | #pylint:disable = redefined-builtin 11 | n_bar = 10 12 | j = i / max 13 | sys.stdout.write("\r") 14 | sys.stdout.write(f"[{'=' * int(n_bar * j):{n_bar}s}] {postText}") 15 | sys.stdout.flush() 16 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | """ init file """ 2 | # ------------------------------------------------------------------------------ 3 | # Copyright (c) Microsoft 4 | # Licensed under the MIT License. 5 | # Written by Ke Sun (sunk@mail.ustc.edu.cn) 6 | # ------------------------------------------------------------------------------ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | 12 | from .cityscapes import Cityscapes as cityscapes 13 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # ------------------------------------------------------------------------------ 3 | # Copyright (c) Microsoft 4 | # Licensed under the MIT License. 5 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 6 | # ------------------------------------------------------------------------------ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | 12 | from .mpii import MPIIDataset as mpii 13 | from .coco import COCODataset as coco 14 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_image_classification/model/lib/models/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Ke Sun (sunk@mail.ustc.edu.cn) 5 | # ------------------------------------------------------------------------------ 6 | # pylint: skip-file 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | from aimet_zoo_torch.hrnet_image_classification.model.lib.models import cls_hrnet 12 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | """ package for getting mmaction2 original model and quantized model""" 11 | from .runner.loops import AIMETTestLoop 12 | from .evaluators.metrics import AIMETANetMetric 13 | -------------------------------------------------------------------------------- /aimet_zoo_torch/deeplabv3/model/modeling/sync_batchnorm/__init__.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | # -*- coding: utf-8 -*- 3 | # File : __init__.py 4 | # Author : Jiayuan Mao 5 | # Email : maojiayuan@gmail.com 6 | # Date : 27/01/2018 7 | # 8 | # This file is part of Synchronized-BatchNorm-PyTorch. 9 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 10 | # Distributed under MIT License. 11 | 12 | from .batchnorm import ( 13 | SynchronizedBatchNorm1d, 14 | SynchronizedBatchNorm2d, 15 | SynchronizedBatchNorm3d, 16 | ) 17 | from .replicate import DataParallelWithCallback, patch_replication_callback 18 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/config/__init__.py: -------------------------------------------------------------------------------- 1 | """ init file """ 2 | 3 | # ------------------------------------------------------------------------------ 4 | # Copyright (c) Microsoft 5 | # Licensed under the MIT License. 6 | # Written by Ke Sun (sunk@mail.ustc.edu.cn) 7 | # ------------------------------------------------------------------------------ 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | 12 | from .default import _C as config 13 | from .default import update_config 14 | from .models import MODEL_EXTRAS 15 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/dataloader/cityscapes/cityscapes.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | import os 3 | import os.path as path 4 | from . import cityscapes_labels 5 | 6 | 7 | def find_directories(root): 8 | """ 9 | Find folders in the validation set (training folders are collected but not returned). 10 | """ 11 | trn_path = path.join(root, "leftImg8bit", "train") 12 | val_path = path.join(root, "leftImg8bit", "val") 13 | 14 | trn_directories = ["train/" + c for c in os.listdir(trn_path)] 15 | trn_directories = sorted(trn_directories) # sort to ensure reproducibility 16 | val_directories = ["val/" + c for c in os.listdir(val_path)] 17 | 18 | return val_directories 19 | -------------------------------------------------------------------------------- /aimet_zoo_torch/gpunet0/requirements.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | 15 | timm==0.5.4 16 | -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/dataloader/data/cityscapes.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | import os 3 | import os.path as path 4 | from aimet_zoo_torch.inverseform.model.utils.config import cfg 5 | import aimet_zoo_torch.inverseform.dataloader.data.cityscapes_labels as cityscapes_labels 6 | 7 | 8 | def find_directories(root): 9 | """ 10 | Find folders in validation set. 11 | """ 12 | trn_path = path.join(root, 'leftImg8bit', 'train') 13 | val_path = path.join(root, 'leftImg8bit', 'val') 14 | 15 | trn_directories = ['train/' + c for c in os.listdir(trn_path)] 16 | trn_directories = sorted(trn_directories) # sort to ensure reproducibility 17 | val_directories = ['val/' + c for c in os.listdir(val_path)] 18 | 19 | return val_directories -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/utils/distributed.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # ------------------------------------------------------------------------------ 3 | # Copyright (c) Microsoft 4 | # Licensed under the MIT License. 5 | # Written by Jingyi Xie (hsfzxjy@gmail.com) 6 | # ------------------------------------------------------------------------------ 7 | 8 | import torch 9 | import torch.distributed as torch_dist 10 | 11 | def is_distributed(): 12 | return torch_dist.is_initialized() 13 | 14 | def get_world_size(): 15 | if not torch_dist.is_initialized(): 16 | return 1 17 | return torch_dist.get_world_size() 18 | 19 | def get_rank(): 20 | if not torch_dist.is_initialized(): 21 | return 0 22 | return torch_dist.get_rank() -------------------------------------------------------------------------------- /packaging/aimet_zoo_torch_pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "setuptools-scm"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "aimet_zoo_torch" 7 | version = "1.5.0" 8 | description = "Collection of popular PyTorch neural network models and evaluators to quantize floating-point models using the AI Model Efficiency ToolKit (AIMET)." 9 | readme = "README.md" 10 | requires-python = ">= 3.6" 11 | license = {text = "Proprietary"} 12 | authors = [ 13 | { name="Qualcomm Innovation Center, Inc.", email="aimet.os@quicinc.com" }, 14 | ] 15 | 16 | [project.urls] 17 | "Homepage" = "https://github.com/quic/aimet-model-zoo" 18 | "Bug Tracker" = "https://github.com/quic/aimet-model-zoo/issues" 19 | 20 | [tool.setuptools] 21 | include-package-data = true 22 | packages = ["aimet_zoo_torch"] 23 | -------------------------------------------------------------------------------- /packaging/aimet_zoo_tensorflow_pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "setuptools-scm"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "aimet_zoo_tensorflow" 7 | version = "1.5.0" 8 | description = "Collection of popular TensorFlow neural network models and evaluators to quantize floating-point models using the AI Model Efficiency ToolKit (AIMET)." 
9 | readme = "README.md" 10 | requires-python = ">= 3.6" 11 | license = {text = "Proprietary"} 12 | authors = [ 13 | { name="Qualcomm Innovation Center, Inc.", email="aimet.os@quicinc.com" }, 14 | ] 15 | 16 | [project.urls] 17 | "Homepage" = "https://github.com/quic/aimet-model-zoo" 18 | "Bug Tracker" = "https://github.com/quic/aimet-model-zoo/issues" 19 | 20 | [tool.setuptools] 21 | include-package-data = true 22 | packages = ["aimet_zoo_tensorflow"] 23 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/evaluators/_init_paths.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # ------------------------------------------------------------------------------ 3 | # pose.pytorch 4 | # Copyright (c) 2018-present Microsoft 5 | # Licensed under The Apache-2.0 License [see LICENSE for details] 6 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 7 | # ------------------------------------------------------------------------------ 8 | 9 | from __future__ import absolute_import 10 | from __future__ import division 11 | from __future__ import print_function 12 | 13 | import os.path as osp 14 | import sys 15 | 16 | 17 | def add_path(path): 18 | if path not in sys.path: 19 | sys.path.insert(0, path) 20 | 21 | 22 | this_dir = osp.dirname(__file__) 23 | 24 | lib_path = osp.join(this_dir, '.', 'lib') 25 | add_path(lib_path) 26 | 27 | mm_path = osp.join(this_dir, '.', 'lib/poseeval/py-motmetrics') 28 | add_path(mm_path) 29 | -------------------------------------------------------------------------------- /aimet_zoo_torch/common/utils/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2022 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | """ util functions to get available device""" 11 | import torch 12 | 13 | 14 | def get_device(args): 15 | """ 16 | Returns 'cuda' only when use_cuda is True and a GPU is available 17 | Throws exception when user enables use_cuda but no GPU is available 18 | """ 19 | #pylint:disable = broad-exception-raised 20 | if args.use_cuda and not torch.cuda.is_available(): 21 | raise Exception("use-cuda set to True, but cuda is not available") 22 | return torch.device("cuda" if args.use_cuda else "cpu") 23 | -------------------------------------------------------------------------------- /AcceptanceTests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # ============================================================================= 3 | # @@-COPYRIGHT-START-@@ 4 | # 5 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
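A minimal usage sketch for the get_device utility above, assuming an argparse-style namespace carrying a use_cuda flag (SimpleNamespace here is only a stand-in for parsed CLI arguments):

from types import SimpleNamespace
import torch

args = SimpleNamespace(use_cuda=torch.cuda.is_available())
device = get_device(args)  # torch.device("cuda") only when requested and available
batch = torch.zeros(1, 3, 224, 224).to(device)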
6 | # 7 | # @@-COPYRIGHT-END-@@ 8 | # ============================================================================= 9 | 10 | add_custom_target(AcceptanceTests) 11 | 12 | if (ENABLE_TORCH) 13 | add_dependencies(AcceptanceTests 14 | AcceptanceTests.Torch) 15 | 16 | endif (ENABLE_TORCH) 17 | 18 | if (ENABLE_TENSORFLOW) 19 | add_dependencies(AcceptanceTests 20 | AcceptanceTests.Tensorflow) 21 | 22 | endif (ENABLE_TENSORFLOW) 23 | 24 | if (ENABLE_TORCH) 25 | message(STATUS "Torch has been enabled") 26 | add_subdirectory(torch) 27 | endif (ENABLE_TORCH) 28 | 29 | if (ENABLE_TENSORFLOW) 30 | message(STATUS "Tensorflow has been enabled") 31 | add_subdirectory(tensorflow) 32 | endif (ENABLE_TENSORFLOW) 33 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | default_scope = 'mmaction' 3 | 4 | default_hooks = dict( 5 | runtime_info=dict(type='RuntimeInfoHook'), 6 | timer=dict(type='IterTimerHook'), 7 | logger=dict(type='LoggerHook', interval=20, ignore_last=False), 8 | param_scheduler=dict(type='ParamSchedulerHook'), 9 | checkpoint=dict(type='CheckpointHook', interval=1, save_best='auto'), 10 | sampler_seed=dict(type='DistSamplerSeedHook'), 11 | sync_buffers=dict(type='SyncBuffersHook')) 12 | 13 | env_cfg = dict( 14 | cudnn_benchmark=False, 15 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), 16 | dist_cfg=dict(backend='nccl')) 17 | 18 | log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True) 19 | 20 | vis_backends = [dict(type='LocalVisBackend')] 21 | visualizer = dict(type='ActionVisualizer', vis_backends=vis_backends) 22 | 23 | log_level = 'INFO' 24 | load_from = None 25 | resume = False 26 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/model/config.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | 3 | #!/usr/bin/env python3 4 | # -*- mode: python -*- 5 | # ============================================================================= 6 | # @@-COPYRIGHT-START-@@ 7 | # 8 | # Copyright (c) 2022 of Qualcomm Innovation Center, Inc. All rights reserved. 9 | # Changes from QuIC are licensed under the terms and conditions at 10 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 11 | # 12 | # @@-COPYRIGHT-END-@@ 13 | # ============================================================================= 14 | 15 | # ---------------------------------------------- 16 | # Copyright (c) 2022 Qualcomm Technologies, Inc. 17 | # All Rights Reserved. 18 | # ---------------------------------------------- 19 | 20 | 21 | model_weights_base_path = "/workspace/ffnet_weights/" 22 | 23 | 24 | CITYSCAPES_MEAN = [0.485, 0.456, 0.406] 25 | CITYSCAPES_STD = [0.229, 0.224, 0.225] 26 | CITYSCAPES_NUM_CLASSES = 19 27 | CITYSCAPES_IGNORE_LABEL = 255 28 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/model/model_registry.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # Copyright (c) 2022 Qualcomm Technologies, Inc. 3 | # All Rights Reserved. 
4 | 5 | import sys 6 | 7 | _model_entrypoints = {} 8 | 9 | 10 | def register_model(fn): 11 | # lookup containing module 12 | mod = sys.modules[fn.__module__] 13 | # add model to __all__ in module 14 | model_name = fn.__name__ 15 | if hasattr(mod, "__all__"): 16 | mod.__all__.append(model_name) 17 | else: 18 | mod.__all__ = [model_name] 19 | 20 | # add entries to registry dict/sets 21 | _model_entrypoints[model_name] = fn 22 | return fn 23 | 24 | 25 | def model_entrypoint(model_name): 26 | """Fetch a model entrypoint for specified model name""" 27 | if model_name in _model_entrypoints: 28 | return _model_entrypoints[model_name] 29 | else: 30 | raise RuntimeError( 31 | f"Unknown model ({model_name}); known models are: " 32 | f"{_model_entrypoints.keys()}" 33 | ) 34 | -------------------------------------------------------------------------------- /AcceptanceTests/torch/pytest.ini: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 5 | # Changes from QuIC are licensed under the terms and conditions at 6 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | [pytest] 11 | markers = 12 | cuda: tests that require CUDA to be installed 13 | image_classification: tests that belong to image classification task 14 | slow: tests that run slow 15 | nlp: tests that belong to natural language processing task 16 | object_detection: tests that belong to object detection task 17 | pose_estimation: tests that belong to pose estimation task 18 | semantic_segmentation: tests that belong to semantic segmentation task 19 | super_resolution: tests that belong to super resolution task 20 | -------------------------------------------------------------------------------- /aimet_zoo_torch/deeplabv3/model/modeling/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # -*- coding: utf-8 -*- 3 | # File : unittest.py 4 | # Author : Jiayuan Mao 5 | # Email : maojiayuan@gmail.com 6 | # Date : 27/01/2018 7 | # 8 | # This file is part of Synchronized-BatchNorm-PyTorch. 9 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 10 | # Distributed under MIT License. 
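To illustrate the registry above: decorating a zero-argument constructor with register_model publishes it under its function name, and model_entrypoint fetches it back. The toy entry below is hypothetical; the real entries construct FFNet variants.

from aimet_zoo_torch.ffnet.model.model_registry import register_model, model_entrypoint

@register_model
def toy_ffnet():  # hypothetical entry name, for illustration only
    return object()  # a real entry returns a constructed torch.nn.Module

model = model_entrypoint("toy_ffnet")()  # raises RuntimeError for unknown names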
11 | 12 | import unittest 13 | 14 | import numpy as np 15 | from torch.autograd import Variable 16 | 17 | 18 | def as_numpy(v): 19 | if isinstance(v, Variable): 20 | v = v.data 21 | return v.cpu().numpy() 22 | 23 | 24 | class TorchTestCase(unittest.TestCase): 25 | def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3): 26 | npa, npb = as_numpy(a), as_numpy(b) 27 | self.assertTrue( 28 | np.allclose(npa, npb, atol=atol), 29 | 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max()) 30 | ) 31 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/resnet50_tf2/model/model_cards/resnet50_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "resnet50", 3 | "framework": "tensorflow2.x", 4 | "task": "image classification", 5 | "input_shape": [null, 256, 256, 3], 6 | "dataset": "imagenet", 7 | "optimization_config": { 8 | "quantization_configuration": 9 | { 10 | "param_bw": 8, 11 | "output_bw": 8, 12 | "input_quantization": true, 13 | "quant_scheme": "tf", 14 | "techniques": null 15 | } 16 | }, 17 | "artifacts": { 18 | "url_pre_opt_weights": null, 19 | "url_post_opt_weights": null, 20 | "url_adaround_encodings": null, 21 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/tensorflow2_resnet50/resnet50_w8a8.encodings", 22 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.25/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/mobilenet_v2_tf2/model/model_cards/mobilenetv2_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mobilenetv2", 3 | "framework": "tensorflow2.x", 4 | "task": "image classification", 5 | "input_shape": [null, 256, 256, 3], 6 | "dataset": "imagenet", 7 | "optimization_config": { 8 | "quantization_configuration": 9 | { 10 | "param_bw": 8, 11 | "output_bw": 8, 12 | "input_quantization": true, 13 | "quant_scheme": "tf", 14 | "techniques": "simple PTQ" 15 | } 16 | }, 17 | "artifacts": { 18 | "url_pre_opt_weights": null, 19 | "url_post_opt_weights": null, 20 | "url_adaround_encodings": null, 21 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/tensorflow2-mobilenetv2/mobilenetv2_w8a8.encodings", 22 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.25/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 23 | } 24 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_image_classification/model/lib/core/evaluate.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 
4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | # pylint: skip-file 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import torch 12 | 13 | 14 | def accuracy(output, target, topk=(1,)): 15 | """Computes the precision@k for the specified values of k""" 16 | with torch.no_grad(): 17 | maxk = max(topk) 18 | batch_size = target.size(0) 19 | 20 | _, pred = output.topk(maxk, 1, True, True) 21 | pred = pred.t() 22 | correct = pred.eq(target.view(1, -1).expand_as(pred)) 23 | 24 | res = [] 25 | for k in topk: 26 | correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) 27 | res.append(correct_k.mul_(100.0 / batch_size)) 28 | return res 29 | -------------------------------------------------------------------------------- /AcceptanceTests/tensorflow/pytest.ini: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 5 | # Changes from QuIC are licensed under the terms and conditions at 6 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | # content of pytest.ini 11 | [pytest] 12 | markers = 13 | cuda: tests that require CUDA to be installed 14 | image_classification: tests that belong to image classification task 15 | slow: tests that run longer than 1 minute per model config 16 | nlp: tests that belong to natural language processing task 17 | object_detection: tests that belong to object detection task 18 | pose_estimation: tests that belong to pose estimation task 19 | semantic_segmentation: tests that belong to semantic segmentation task 20 | super_resolution: tests that belong to super resolution task 21 | -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/model/yolox_model.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | #!/usr/bin/env python3 3 | # -*- coding:utf-8 -*- 4 | # Copyright (c) Megvii Inc. All rights reserved. 
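A small sketch of the accuracy helper above on random logits; the batch size and class count are illustrative:

import torch

logits = torch.randn(8, 1000)  # batch of 8 over 1000 classes
labels = torch.randint(0, 1000, (8,))  # ground-truth class indices
top1, top5 = accuracy(logits, labels, topk=(1, 5))  # each is a 1-element tensor of percent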
5 | 6 | 7 | import torch.nn as nn 8 | from aimet_zoo_torch.yolox.model.yolo_x.models import YOLOX, YOLOPAFPN, YOLOXHead 9 | 10 | 11 | def model_entrypoint(model_name): 12 | def init_yolo(M): 13 | for m in M.modules(): 14 | if isinstance(m, nn.BatchNorm2d): 15 | m.eps = 1e-3 16 | m.momentum = 0.03 17 | 18 | if model_name.endswith("s"): 19 | depth, width = 0.33, 0.5 20 | elif model_name.endswith("l"): 21 | depth, width = 1.0, 1.0 22 | else: 23 | raise ValueError("Currently only the YOLOX-s (small) and YOLOX-l (large) models are supported.") 24 | 25 | in_channels = [256, 512, 1024] 26 | backbone = YOLOPAFPN(depth, width, in_channels=in_channels, act='silu') 27 | head = YOLOXHead(80, width, in_channels=in_channels, act='silu') 28 | model = YOLOX(backbone, head) 29 | 30 | model.apply(init_yolo) 31 | model.head.initialize_biases(1e-2) 32 | model.train() 33 | return model -------------------------------------------------------------------------------- /aimet_zoo_torch/deeplabv3/model/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Pyjcsx 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/model/image_classification/elementwise_ops.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | 11 | 12 | import torch 13 | import torch.nn 14 | 15 | 16 | class FloorDivide(torch.nn.Module): 17 | """ Add module for floor divide """ 18 | # pylint:disable=arguments-differ 19 | @staticmethod 20 | def forward(x: int, y: int) -> int: 21 | """ 22 | Forward-pass routine for floor-divide op 23 | """ 24 | return x // y 25 | 26 | class SoftMax(torch.nn.Module): 27 | """ Add module for softmax """ 28 | # pylint:disable=arguments-differ 29 | @staticmethod 30 | def forward(x: torch.Tensor, dim: int) -> torch.Tensor: 31 | """ 32 | Forward-pass routine for softmax 33 | """ 34 | return x.softmax(dim=dim) -------------------------------------------------------------------------------- /aimet_zoo_torch/ssd_res50/model/model_cards/ssd_res50_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SSD Res50", 3 | "framework": "pytorch", 4 | "task": "object detection", 5 | "evaluation": { 6 | "nms_threshold": 0.5 7 | }, 8 | "input_shape": [null, 3, 300, 300], 9 | "optimization_config": { 10 | "quantization_configuration": 11 | { 12 | "param_bw": 8, 13 | "output_bw": 8, 14 | "input_quantization": true, 15 | "quant_scheme": "tf", 16 | "techniques": ["cle"] 17 | } 18 | }, 19 | "artifacts": { 20 | "url_pre_opt_weights": "https://drive.google.com/u/0/uc?id=1NGh8D7zAStasdLvLT1dRRiYFZRr0K4j3", 21 | "url_post_opt_weights": null, 22 | "url_adaround_encodings": null, 23 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_ssd_res50/SSD_Res50_torch.encodings", 24 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /aimet_zoo_torch/segnet/model/model_cards/segnet_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SegNet", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 360, 480], 7 | "training_dataset": "CamVid", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["ptq"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segnet/SegNet.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segnet/segnet_w8a8_pc_state_dict.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /aimet_zoo_torch/resnext/dataloader/dataloaders_and_eval_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | """Module providing the evaluation function used with the dataloader""" 11 | import torch 12 | 13 | 14 | def eval_func(model, dataloader, BATCH_SIZE=128, device=torch.device("cuda")): 15 | """Evaluates the model on validation dataset and returns the classification accuracy""" 16 | #pylint:disable = unused-argument 17 | # Get Dataloader 18 | model.eval() 19 | correct = 0 20 | total_samples = 0 21 | with torch.no_grad(): 22 | for data, label in dataloader: 23 | data, label = data.to(device), label.to(device) 24 | output = model(data) 25 | _, prediction = torch.max(output, 1) 26 | correct += (prediction == label).sum() 27 | total_samples += len(output) 28 | del dataloader 29 | return float(100 * correct / total_samples) 30 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilenetv2/model/model_cards/mobilenetv2_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MobileNetV2", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000, 7 | "input_size": 224, 8 | "width_mult": 1.0 9 | }, 10 | "input_shape": [null, 3, 224, 224], 11 | "training_dataset": "ImageNet", 12 | "optimization_config": { 13 | "quantization_configuration": 14 | { 15 | "param_bw": 8, 16 | "output_bw": 8, 17 | "input_quantization": true, 18 | "quant_scheme": "tf_enhanced", 19 | "techniques": ["ptq", "qat"] 20 | } 21 | }, 22 | "artifacts": { 23 | "url_pre_opt_weights": null, 24 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/torch_mobilenetv2_w8a8_state_dict.pth", 25 | "url_adaround_encodings": null, 26 | "url_aimet_encodings": null, 27 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.22.1/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config.json" 28 | } 29 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/model/model_cards/ocrnet_48_if.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HRNet W48 OCR InverseForm", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 1024, 2048], 7 | "training_dataset": "Cityscapes", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["cle", "adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": null, 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/OCRNet-48-IF_state_dict.pth", 21 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_inverseform/inverseform-w48_w8a8.encodings", 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/dataloader/dataloaders.py: -------------------------------------------------------------------------------- 1 | # 
============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 5 | # 6 | # @@-COPYRIGHT-END-@@ 7 | # ============================================================================= 8 | """ YOLOX module for building the data loader""" 9 | 10 | from torch.utils.data import SequentialSampler, DataLoader 11 | from .data import COCODataset, ValTransform 12 | 13 | 14 | def get_data_loader(dataset_path, img_size, batch_size, num_workers): 15 | """Builds a dataloader over the COCO 2017 validation set""" 16 | dataset = COCODataset( 17 | data_dir=dataset_path, 18 | json_file="instances_val2017.json", 19 | name="images/val2017", 20 | img_size=img_size, 21 | preproc=ValTransform(legacy=False), 22 | ) 23 | 24 | sampler = SequentialSampler(dataset) 25 | 26 | dataloader_kwargs = { 27 | "num_workers": num_workers, 28 | "pin_memory": True, 29 | "sampler": sampler, 30 | } 31 | dataloader_kwargs["batch_size"] = batch_size 32 | data_loader = DataLoader(dataset, **dataloader_kwargs) 33 | 34 | return data_loader 35 | -------------------------------------------------------------------------------- /AcceptanceTests/torch/test_yolox_quanteval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for yolox object detection""" 14 | 15 | import pytest 16 | import torch 17 | 18 | from aimet_zoo_torch.yolox.evaluators import yolox_quanteval 19 | 20 | @pytest.mark.object_detection 21 | @pytest.mark.cuda 22 | @pytest.mark.parametrize("model_config",["yolox_s","yolox_l"]) 23 | def test_quaneval_yolox(model_config, tiny_mscoco_validation_path): 24 | torch.cuda.empty_cache() 25 | if tiny_mscoco_validation_path is None: 26 | pytest.fail('Dataset path is not set') 27 | yolox_quanteval.main( 28 | [ 29 | "--model-config", 30 | model_config, 31 | "--dataset-path", 32 | tiny_mscoco_validation_path, 33 | ] 34 | ) 35 | -------------------------------------------------------------------------------- /aimet_zoo_torch/gpt2/model/huggingface/elementwise_ops.py: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
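A usage sketch for get_data_loader above. The dataset root is a hypothetical path laid out as the COCODataset arguments expect (annotations/instances_val2017.json plus images under images/val2017), and the 4-tuple batch layout is assumed from the usual YOLOX dataset convention:

from aimet_zoo_torch.yolox.dataloader.dataloaders import get_data_loader

data_loader = get_data_loader(
    dataset_path="/data/coco",  # hypothetical dataset root
    img_size=(640, 640),
    batch_size=8,
    num_workers=4,
)
for images, targets, info_imgs, img_ids in data_loader:  # assumed tuple layout
    break  # one batch is enough for a smoke test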
5 | # 6 | # @@-COPYRIGHT-END-@@ 7 | # ============================================================================= 8 | #pylint: skip-file 9 | from typing import Optional 10 | 11 | import torch 12 | 13 | class Interpolate(torch.nn.Module): 14 | """ Interpolate module for a functional interpolate""" 15 | 16 | def __init__(self, mode: str = "nearest", align_corners: bool = True, scale_factor: Optional[float] = None): 17 | super(Interpolate, self).__init__() 18 | self.mode = mode 19 | self.align_corners = align_corners 20 | self.scale_factor = scale_factor 21 | 22 | def forward(self, *inputs) -> torch.Tensor: 23 | """ 24 | Forward-pass routine for interpolate op 25 | """ 26 | x = inputs[0] 27 | size = inputs[1].tolist() 28 | out = torch.nn.functional.interpolate( 29 | input=x, size=size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners 30 | ) 31 | return out 32 | -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/model/model_cards/hrnet_16_slim_if.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HRNet W16 Slim InverseForm", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 1024, 2048], 7 | "training_dataset": "Cityscapes", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["cle", "adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": null, 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/hrnet_16_slim_if_state_dict.pth", 21 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_inverseform/inverseform-w16_w8a8.encodings", 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/vit/model/huggingface/elementwise_ops.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | # ============================================================================= 3 | # @@-COPYRIGHT-START-@@ 4 | # 5 | # Copyright (c) 2022 of Qualcomm Innovation Center, Inc. All rights reserved. 
6 | # 7 | # @@-COPYRIGHT-END-@@ 8 | # ============================================================================= 9 | 10 | from typing import Optional 11 | 12 | import torch 13 | 14 | class Interpolate(torch.nn.Module): 15 | """ Interpolate module for a functional interpolate""" 16 | 17 | def __init__(self, mode: str = "nearest", align_corners: bool = True, scale_factor: Optional[float] = None): 18 | super(Interpolate, self).__init__() 19 | self.mode = mode 20 | self.align_corners = align_corners 21 | self.scale_factor = scale_factor 22 | 23 | def forward(self, *inputs) -> torch.Tensor: 24 | """ 25 | Forward-pass routine for interpolate op 26 | """ 27 | x = inputs[0] 28 | size = inputs[1].tolist() 29 | out = torch.nn.functional.interpolate( 30 | input=x, size=size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners 31 | ) 32 | return out 33 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mobilevit/model/huggingface/elementwise_ops.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | # ============================================================================= 3 | # @@-COPYRIGHT-START-@@ 4 | # 5 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 6 | # 7 | # @@-COPYRIGHT-END-@@ 8 | # ============================================================================= 9 | 10 | from typing import Optional 11 | 12 | import torch 13 | 14 | class Interpolate(torch.nn.Module): 15 | """ Interpolate module for a functional interpolate""" 16 | 17 | def __init__(self, mode: str = "nearest", align_corners: bool = True, scale_factor: Optional[float] = None): 18 | super(Interpolate, self).__init__() 19 | self.mode = mode 20 | self.align_corners = align_corners 21 | self.scale_factor = scale_factor 22 | 23 | def forward(self, *inputs) -> torch.Tensor: 24 | """ 25 | Forward-pass routine for interpolate op 26 | """ 27 | x = inputs[0] 28 | size = inputs[1].tolist() 29 | out = torch.nn.functional.interpolate( 30 | input=x, size=size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners 31 | ) 32 | return out 33 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/model/utils.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # Copyright (c) 2022 Qualcomm Technologies, Inc. 3 | # All Rights Reserved. 
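A minimal sketch of the Interpolate wrapper defined above (the gpt2, vit and mobilevit copies are identical): it turns the functional interpolate into a module whose target size arrives as a tensor, which keeps the op visible to tracing and quantization. Bilinear mode is chosen here because the default nearest mode rejects an explicit align_corners; the shapes are illustrative.

import torch

interp = Interpolate(mode="bilinear", align_corners=True)
x = torch.randn(1, 3, 8, 8)
size = torch.tensor([16, 16])  # target (H, W), passed as a tensor per forward()
y = interp(x, size)
assert y.shape == (1, 3, 16, 16)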
4 | 5 | import torch 6 | from torch import nn 7 | from torch.nn import init 8 | import numpy as np 9 | 10 | 11 | def dense_kernel_initializer(tensor): 12 | _, fan_out = nn.init._calculate_fan_in_and_fan_out(tensor) 13 | init_range = 1.0 / np.sqrt(fan_out) 14 | 15 | return nn.init.uniform_(tensor, a=-init_range, b=init_range) 16 | 17 | 18 | def model_weight_initializer(m): 19 | """ 20 | Usage: 21 | model = Model() 22 | model.apply(model_weight_initializer) 23 | """ 24 | if isinstance(m, nn.Conv2d): 25 | # Yes, this non-fancy init is on purpose, 26 | # and seems to work better in practice for segmentation 27 | if hasattr(m, "weight"): 28 | nn.init.normal_(m.weight, std=0.01) 29 | if m.bias is not None: 30 | nn.init.constant_(m.bias, 0.0001) 31 | 32 | elif isinstance(m, nn.BatchNorm2d): 33 | nn.init.constant_(m.weight, 1) 34 | nn.init.constant_(m.bias, 0) 35 | 36 | elif isinstance(m, nn.Linear): 37 | dense_kernel_initializer(m.weight.data) 38 | if m.bias is not None: 39 | nn.init.zeros_(m.bias.data) 40 | -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/model/model_cards/yolox_l.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "YOLOX_l", 3 | "framework": "pytorch", 4 | "task": "Object Detection", 5 | "model_args": {}, 6 | "input_shape": [1, 3, 640, 640], 7 | "training_dataset": "MSCOCO2017", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "percentile", 15 | "techniques": ["fold_all_batch_norms"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_yolox_int8/yolox_l.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_yolox_int8/yolox_l_W8A8.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_yolox_int8/yolox_l_W8A8_torch.encodings", 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.25/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/yolox/model/model_cards/yolox_s.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "YOLOX_s", 3 | "framework": "pytorch", 4 | "task": "Object Detection", 5 | "model_args": {}, 6 | "input_shape": [1, 3, 640, 640], 7 | "training_dataset": "MSCOCO2017", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "percentile", 15 | "techniques": ["fold_all_batch_norms"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_yolox_int8/yolox_s.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_yolox_int8/yolox_s_W8A8.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_yolox_int8/yolox_s_W8A8_torch.encodings", 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.25/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | 
} 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/efficientnetlite0/model/model_cards/efficientnetlite0_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "EfficientNet Lite0", 3 | "framework": "pytorch", 4 | "task": "classification", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 224, 224], 7 | "training_dataset": "ImageNet", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 4, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["bnfold", "adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": null, 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_effnet_lite0_w8a8_pc/model_efficientnetlite0_w4a8_pc_checkpoint.pth", 21 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_effnet_lite0_w8a8_pc/efficientnetlite0_w4a8_pc.encodings", 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/efficientnetlite0/model/model_cards/efficientnetlite0_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "EfficientNet Lite0", 3 | "framework": "pytorch", 4 | "task": "classification", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 224, 224], 7 | "training_dataset": "ImageNet", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["bnfold", "adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": null, 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_effnet_lite0_w8a8_pc/model_efficientnetlite0_w8a8_pc_checkpoint.pth", 21 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_effnet_lite0_w8a8_pc/efficientnetlite0_w8a8_pc.encodings", 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/inverseform/model/models/model_loader.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | 3 | def load_model(net, pretrained): 4 | pretrained_dict = pretrained['state_dict'] 5 | model_dict = net.state_dict() 6 | updated_model_dict = {} 7 | lookup_table = {} 8 | for k_model, v_model in model_dict.items(): 9 | if k_model.startswith('backbone'): 10 | k_updated = '.'.join(k_model.split('.')[1:]) 11 | 12 | lookup_table[k_updated] = k_model 13 | updated_model_dict[k_updated] = k_model 14 | else: 15 | lookup_table[k_model] = k_model 16 | updated_model_dict[k_model] = k_model 17 | 18 | updated_pretrained_dict = {} 19 | for k, v in pretrained_dict.items(): 20 | if k.startswith('model') or k.startswith('modules') or k.startswith('module'): 21 | k = '.'.join(k.split('.')[1:]) 22 | if 
k.startswith('backbone'): 23 | k = '.'.join(k.split('.')[1:]) 24 | 25 | if k in updated_model_dict.keys() and model_dict[lookup_table[k]].shape==v.shape: 26 | updated_pretrained_dict[updated_model_dict[k]] = v 27 | 28 | 29 | model_dict.update(updated_pretrained_dict) 30 | net.load_state_dict(model_dict) 31 | return net -------------------------------------------------------------------------------- /aimet_zoo_torch/resnext/model/model_cards/resnext101_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNeXt101", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["autoquant"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_resnext101/resnext101_w8a8_state_dict.pth", 23 | "url_adaround_encodings": null, 24 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_resnext101/resnext101_w8a8.encodings", 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.24/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config.json" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/model/model_cards/deeplabv3plus_xception_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "deeplabv3plus_xception_tf2", 3 | "framework": "tensorflow2.x", 4 | "task": "semantic segmentation", 5 | "input_shape": [512, 512, 3], 6 | "dataset": "PascalVOC2012", 7 | "optimization_config": { 8 | "quantization_configuration": 9 | { 10 | "param_bw": 8, 11 | "output_bw": 8, 12 | "input_quantization": true, 13 | "quant_scheme": "percentile", 14 | "techniques": ["bn_fold"] 15 | } 16 | }, 17 | "artifacts": { 18 | "url_pre_opt_weights": "https://github.qualcomm.com/qualcomm-ai/aimet-model-zoo/releases/download/tensorflow2-deeplabv3plus_xception/deeplabv3_xception.h5", 19 | "url_post_opt_weights": null, 20 | "url_adaround_encodings": null, 21 | "url_aimet_encodings": "https://github.qualcomm.com/qualcomm-ai/aimet-model-zoo/releases/download/tensorflow2-deeplabv3plus_xception/deeplabv3_xception_w8a8.encodings", 22 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.25/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 23 | } 24 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/resnet/model/model_cards/resnet101_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet101", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["autoquant"] 18 | } 19 | }, 20 | "artifacts": { 21 | 
"url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_resnet101_w8a8/resnet101_w8a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_resnet101_w8a8/resnet101_w8a8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.24/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config.json" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /Jenkins/Dockerfile.tf-torch-cpu: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2022 of Qualcomm Innovation Center, Inc. All rights reserved. 5 | # Changes from QuIC are licensed under the terms and conditions at 6 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | 11 | # ---------------------------------------------- 12 | # Copyright (c) 2022 Qualcomm Technologies, Inc. 13 | # All Rights Reserved. 14 | # ---------------------------------------------- 15 | 16 | # Docker image file to build and test AIMET for both Tensorflow and PyTorch in a CPU environment 17 | 18 | FROM artifacts.codelinaro.org/codelinaro-aimet/aimet:latest.tf-torch-cpu 19 | 20 | ARG DEBIAN_FRONTEND=noninteractive 21 | 22 | RUN apt-get update -y > /dev/null && \ 23 | apt-get install --no-install-recommends -y \ 24 | python3-pip && \ 25 | rm -rf /var/lib/apt/lists/* 26 | 27 | # Upgrade Python3 pip and install some more packages 28 | RUN python3 -m pip --no-cache-dir install --upgrade pip && \ 29 | pip install pylint==2.17.2 && \ 30 | pip install astroid==2.15.2 31 | -------------------------------------------------------------------------------- /aimet_zoo_torch/segnet/model/model_cards/segnet_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SegNet", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 360, 480], 7 | "training_dataset": "CamVid", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 4, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segnet/SegNet.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segnet/segnet_w4a8_pc_state_dict.pth", 21 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segnet/segnet_w4a8_pc.encodings", 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/model/image_classification/no_scaling_scaler.py: -------------------------------------------------------------------------------- 1 | # 
============================================================================= 2 | # @@-COPYRIGHT-START-@@ 3 | # 4 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 5 | # Changes from QuIC are licensed under the terms and conditions at 6 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | 11 | import torch 12 | from timm.utils import dispatch_clip_grad  # assumed timm dependency; __call__ below relies on it 13 | class NoScalingScaler: 14 | state_dict_key = "amp_scaler" #"noscaling_scaler" 15 | 16 | def __init__(self): 17 | self._scaler = torch.cuda.amp.GradScaler() 18 | 19 | def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): 20 | loss.backward(create_graph=create_graph) 21 | if clip_grad is not None: 22 | assert parameters is not None 23 | dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) 24 | optimizer.step() 25 | 26 | 27 | def state_dict(self): 28 | return self._scaler.state_dict() 29 | 30 | def load_state_dict(self, state_dict): 31 | self._scaler.load_state_dict(state_dict) 32 | -------------------------------------------------------------------------------- /AcceptanceTests/torch/staging/test_hrnet_posenet_quanteval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for hrnet posenet pose estimation""" 14 | 15 | import pytest 16 | import torch 17 | from aimet_zoo_torch.hrnet_posenet.evaluators import hrnet_posenet_quanteval 18 | 19 | @pytest.mark.pose_estimation 20 | @pytest.mark.cuda 21 | @pytest.mark.parametrize("model_config",["hrnet_posenet_w4a8","hrnet_posenet_w8a8"]) 22 | def test_quaneval_hrnet_posenet(model_config, tiny_mscoco_validation_path): 23 | """hrnet_posenet pose estimation test""" 24 | torch.cuda.empty_cache() 25 | 26 | accuracy = hrnet_posenet_quanteval.main( 27 | [ 28 | "--model-config", 29 | model_config, 30 | "--dataset-path", 31 | tiny_mscoco_validation_path, 32 | ] 33 | ) 34 | -------------------------------------------------------------------------------- /aimet_zoo_torch/salsanext/models/model_cards/salsanext_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SalsaNext", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [1, 5, 64, 2048], 7 | "training_dataset": "SemanticKitti", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "percentile", 15 | "techniques": ["batch_norm_folding", "adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_salsanext/SalsaNext", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_salsanext/SalsaNext_optimized_model.pth", 21 | "url_aimet_encodings": 
"https://github.com/quic/aimet-model-zoo/releases/download/torch_salsanext/SalsaNext_optimized_encoding.encodings", 22 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.24/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config.json" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /aimet_zoo_torch/resnet/model/model_cards/resnet18_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet18", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 4, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/resnet18_w4a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torchvision_classification_INT4%2F8/resnet18_W4A8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/resnet/model/model_cards/resnet18_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet18", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/resnet18_w8a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torchvision_classification_INT4%2F8/resnet18_W8A8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/resnet/model/model_cards/resnet50_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet50", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 4, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 
21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/resnet50_w4a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torchvision_classification_INT4%2F8/resnet50_W4A8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/resnet/model/model_cards/resnet50_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet50", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/resnet50_w8a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torchvision_classification_INT4%2F8/resnet50_W8A8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/resnet/model/model_cards/resnet50_w8a16.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet50", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 16, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["bnfold"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_resnet50_w8a16/resnet50_w8a16_state_dict.pth", 23 | "url_adaround_encodings": null, 24 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_resnet50_w8a16/resnet50_w8a16_torch.encodings", 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/50cfafe353b530d81c52188151c418ba16e92261/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /aimet_zoo_torch/deepspeech2/evaluators/requirements.txt: -------------------------------------------------------------------------------- 1 | apex==0.9.10.dev0 2 | astor==0.8.1 3 | cffi==1.15.1 4 | codes==0.1.5 5 | colorama==0.4.5 6 | Cython==0.29.32 7 | dataloaders==0.0.1 8 | datasets==2.4.0 9 | decorator==5.1.1 10 | Flask==2.2.2 11 | gdown==4.5.1 12 | 
geffnet==1.0.2 13 | genapi==0.0.8 14 | graphviz==0.20.1 15 | Hydra==2.5 16 | hydra-core==1.2.0 17 | hypothesis==6.54.3 18 | importlib_metadata==4.12.0 19 | importlib_resources==5.9.0 20 | jsonschema==4.9.0 21 | librosa==0.9.2 22 | mad==0.2.2 23 | mmcv==1.6.1 24 | mock==4.0.3 25 | mypy==0.971 26 | nets==0.0.3.1 27 | nose==1.3.7 28 | numpy==1.22.0 29 | object_detection==0.0.3 30 | omegaconf==2.2.3 31 | paramiko==2.11.0 32 | pexpect==4.8.0 33 | pickle5 34 | preprocessing==0.1.13 35 | progressbar33==2.4 36 | protobuf==3.19.0 37 | psutil==5.9.1 38 | pycocotools==2.0.4 39 | pyelftools==0.29 40 | Pygments==2.12.0 41 | pytest==4.6.5 42 | python-levenshtein 43 | pytz==2022.1 44 | PyYAML==6.0 45 | r2pipe==1.7.1 46 | requests==2.28.1 47 | samplerate==0.1.0 48 | scikit_learn==1.1.2 49 | scikit-image==0.15.0 50 | scipy==1.3.2 51 | simplejson==3.17.6 52 | six==1.16.0 53 | sox 54 | soxr==0.3.1 55 | torch==1.9.1+cu111 56 | torchvision==0.10.1+cu111 57 | tornado==6.2 58 | tqdm==4.64.0 59 | visdom==0.1.8.9 60 | warpctc_pytorch==0.2.1 61 | wget==3.2 62 | xxhash==3.0.0 63 | attrs==22.1.0 64 | -------------------------------------------------------------------------------- /aimet_zoo_torch/salsanext/models/model_cards/salsanext_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SalsaNext", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [1, 5, 64, 2048], 7 | "training_dataset": "SemanticKitti", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 4, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf", 15 | "techniques": ["batch_norm_folding"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_salsanext/SalsaNext", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_salsanext_models_update/SalsaNext_optimized_w4A8_model.pth", 21 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_salsanext_models_update/SalsaNext_optimized_w4A8_encoding.encodings", 22 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.24/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /aimet_zoo_torch/gpunet0/model/model_cards/gpunet0_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "GPUNet-0", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": {}, 6 | "input_shape": [1, 3, 320, 320], 7 | "training_dataset": "ImageNet", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "percentile", 15 | "techniques": ["adaround", "fold_all_batch_norms_to_scale"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_gpunet0_w8a8/0.65ms.pth.tar", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_gpunet0_w8a8/GPUNet0_w8a8_state_dict.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_gpunet0_w8a8/GPUNet0_w8a8_torch.encodings", 23 | "url_aimet_config": 
"https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /aimet_zoo_torch/regnet/model/model_cards/regnet_x_3_2gf_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet18", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 4, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/regnet_x_3_2gf_w4a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torchvision_classification_INT4%2F8/regnet_x_3_2gf_W4A8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/regnet/model/model_cards/regnet_x_3_2gf_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ResNet18", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/regnet_x_3_2gf_w8a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torchvision_classification_INT4%2F8/regnet_x_3_2gf_W8A8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/model_cards/hrnet_sem_seg_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HRNet W48", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": { 6 | "num_classes": 21 7 | }, 8 | "input_shape": [null, 3, 1024, 2048], 9 | "training_dataset": "PascalVOC2012", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 4, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | 
"url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/hrnet_w4a8_pc_state_dict.pth", 23 | "url_adaround_encodings": null, 24 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_hrnet_w8a8_pc/hrnet_w4a8_pc.encodings", 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_semantic_segmentation/model/model_cards/hrnet_sem_seg_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HRNet W48", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": { 6 | "num_classes": 21 7 | }, 8 | "input_shape": [null, 3, 1024, 2048], 9 | "training_dataset": "PascalVOC2012", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": null, 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/hrnet_w8a8_pc_state_dict.pth", 23 | "url_adaround_encodings": null, 24 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_hrnet_w8a8_pc/hrnet_w8a8_pc.encodings", 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /aimet_zoo_torch/mmaction2/model/model_cards/bmn_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bmn", 3 | "framework": "pytorch", 4 | "task": "action localization", 5 | "input_shape": [null, 400, 100], 6 | "evaluation": { 7 | "config": "/path/to/aimet-model-zoo/aimet_zoo_torch/mmaction2/model/configs/localization/bmn/bmn_2xb8-400x100-9e_activitynet-feature.py" 8 | }, 9 | "optimization_config": { 10 | "quantization_configuration": 11 | { 12 | "param_bw": 8, 13 | "output_bw": 8, 14 | "input_quantization": true, 15 | "quant_scheme": "tf_enhanced", 16 | "techniques": null 17 | } 18 | }, 19 | "artifacts": { 20 | "url_pre_opt_weights": "https://download.openmmlab.com/mmaction/v1.0/localization/bmn/bmn_2xb8-400x100-9e_activitynet-feature_20220908-79f92857.pth", 21 | "url_post_opt_weights": null, 22 | "url_adaround_encodings": null, 23 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_mmaction2/bmn_w8a8_torch.encodings", 24 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.24/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config.json" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /AcceptanceTests/tensorflow/staging/test_deeplabv3plus_xception_tf2_quanteval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 
2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # 8 | # @@-COPYRIGHT-END-@@ 9 | # ============================================================================= 10 | 11 | """ acceptance test for deeplabv3plus xception""" 12 | 13 | import pytest 14 | from aimet_zoo_tensorflow.deeplabv3plus_tf2.evaluators import deeplabv3plus_tf2_quanteval 15 | 16 | @pytest.mark.slow 17 | @pytest.mark.cuda 18 | @pytest.mark.semantic_segmentation 19 | # pylint:disable = redefined-outer-name 20 | @pytest.mark.parametrize("model_config", ["deeplabv3plus_xception_w8a8"]) 21 | def test_quanteval_deeplabv3plus_xception_tf2(model_config, PascalVOC_segmentation_test_data_path): 22 | """deeplabv3plus xception semantic segmentation test""" 23 | 24 | if PascalVOC_segmentation_test_data_path is None: 25 | pytest.xfail('Dataset path is not set') 26 | 27 | deeplabv3plus_tf2_quanteval.main( 28 | [ 29 | "--model-config", 30 | model_config, 31 | "--dataset-path", 32 | PascalVOC_segmentation_test_data_path 33 | ] 34 | ) 35 | 36 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/model_cards/hrnet_posenet_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HRNet PoseNet", 3 | "framework": "pytorch", 4 | "task": "pose estimation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 256, 192], 7 | "training_dataset": "COCO 2014", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf", 15 | "techniques": ["bn_fold"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/hrnet_posenet_FP32_state_dict.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/hrnet_posenet_W8A8_state_dict.pth", 21 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/hrnet-posenet/hrnet_posenet_W8A8.encodings", 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.22.1/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /AcceptanceTests/torch/test_regnet_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for regnet""" 14 | 15 | import pytest 16 | import torch 17 | from aimet_zoo_torch.regnet.evaluator import regnet_quanteval 18 | 19 | @pytest.mark.cuda 20 | @pytest.mark.image_classification 21 | # pylint:disable = redefined-outer-name 22 | @pytest.mark.parametrize("model_config", ["regnet_x_3_2gf_w8a8"]) 23 | def test_quanteval_regnet(model_config, tiny_imageNet_validation_path): 24 | """regnet image classification test""" 25 | if tiny_imageNet_validation_path is None: 26 | pytest.fail('Dataset path is not set') 27 | 28 | torch.cuda.empty_cache() 29 | regnet_quanteval.main( 30 | [ 31 | "--model-config", 32 | model_config, 33 | "--dataset-path", 34 | tiny_imageNet_validation_path, 35 | ] 36 | ) 37 | 38 | -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/model/image_classification/generate_tensorboard.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from tensorboardX import SummaryWriter 4 | 5 | exp_path_list = ['exp']  # experiment root directories to scan 6 | log_keys = ['train_lr', 'train_loss', 'test_loss', 'test_acc1', 'test_acc5']  # per-epoch metrics to export 7 | ignore_exp = [] 8 | 9 | for path in exp_path_list: 10 | for exp in os.listdir(path): 11 | log_path = os.path.join('.', path, exp, 'ckpt', 'log.txt') 12 | if os.path.exists(log_path): 13 | tensorboard_path = os.path.join('.', path, exp, 'events') 14 | if os.path.exists(tensorboard_path): 15 | for old_exp in os.listdir(tensorboard_path):  # clear stale event files before re-exporting 16 | delete_path = os.path.join(tensorboard_path, old_exp) 17 | print('delete:', delete_path) 18 | os.remove(delete_path) 19 | tb_logger = SummaryWriter(tensorboard_path) 20 | if exp not in ignore_exp: 21 | with open(log_path, 'r') as f: 22 | lines = f.readlines() 23 | for line in lines: 24 | log = json.loads(line.rstrip()) 25 | for k in log_keys: 26 | tb_logger.add_scalar(k, log[k], log['epoch']) 27 | print("load ok in:", tensorboard_path) 28 | tb_logger.close() 29 | -------------------------------------------------------------------------------- /AcceptanceTests/torch/staging/test_ssd_res50_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for ssd_res50 object detection""" 14 | 15 | import pytest 16 | import torch 17 | from aimet_zoo_torch.ssd_res50.evaluators import ssd_res50_quanteval 18 | 19 | # Note: this test has known issues; it failed in SIT test results 20 | @pytest.mark.cuda 21 | @pytest.mark.object_detection 22 | @pytest.mark.parametrize("model_config",["ssd_res50_w8a8"]) 23 | def test_quanteval_ssd_res50(model_config, tiny_mscoco_validation_path): 24 | torch.cuda.empty_cache() 25 | if tiny_mscoco_validation_path is None: 26 | pytest.fail('Dataset not set') 27 | ssd_res50_quanteval.main( 28 | [ 29 | "--model-config", 30 | model_config, 31 | "--dataset-path", 32 | tiny_mscoco_validation_path, 33 | "--use-cuda" 34 | ] 35 | ) 36 | 37 | 38 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/model_cards/hrnet_posenet_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HRNet PoseNet", 3 | "framework": "pytorch", 4 | "task": "pose estimation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 256, 192], 7 | "training_dataset": "COCO 2014", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 4, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf", 15 | "techniques": ["bn_fold"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/hrnet_posenet_FP32_state_dict.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/hrnet_posenet_W4A8_state_dict.pth", 21 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/hrnet-posenet/hrnet_posenet_W4A8.encodings", 22 | "url_aimet_encodings": null, 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.22.1/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ssd_mobilenetv2/model/model_cards/ssd_mobilenetv2_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SSD MobileNet V2", 3 | "framework": "pytorch", 4 | "task": "object detection", 5 | "model_args": { 6 | "num_classes": 21, 7 | "width_mult": 1, 8 | "is_test": true 9 | }, 10 | "input_shape": [null, 3, 300, 300], 11 | "training_dataset": "Pascal VOC 2007", 12 | "optimization_config": { 13 | "quantization_configuration": 14 | { 15 | "param_bw": 8, 16 | "output_bw": 8, 17 | "input_quantization": true, 18 | "quant_scheme": "tf_enhanced", 19 | "techniques": ["cle", "adaround"] 20 | } 21 | }, 22 | "artifacts": { 23 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/ssd_mobilenetv2_fp32_state_dict.pth", 24 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/ssd_mobilenetv2_w8a8_state_dict.pth", 25 | "url_adaround_encodings": null, 26 | "url_aimet_encodings": null, 27 | "url_aimet_config": 
"https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 28 | } 29 | } -------------------------------------------------------------------------------- /AcceptanceTests/torch/test_gpunet0_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for image classification""" 14 | 15 | import pytest 16 | import torch 17 | from aimet_zoo_torch.gpunet0.evaluator import gpunet0_quanteval 18 | 19 | @pytest.mark.cuda 20 | @pytest.mark.image_classification 21 | # pylint:disable = redefined-outer-name 22 | @pytest.mark.parametrize("model_config", ["gpunet0_w8a8"]) 23 | def test_quanteval_gpunet0_image_classification(model_config, tiny_imageNet_root_path): 24 | """gpunet0 image classification test""" 25 | 26 | if tiny_imageNet_root_path is None: 27 | pytest.fail(f'Dataset path is not set') 28 | 29 | torch.cuda.empty_cache() 30 | gpunet0_quanteval.main( 31 | [ 32 | "--model-config", 33 | model_config, 34 | "--dataset-path", 35 | tiny_imageNet_root_path, 36 | ] 37 | ) 38 | -------------------------------------------------------------------------------- /AcceptanceTests/torch/test_resnet_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | """ acceptance test for resnet""" 13 | import pytest 14 | import torch 15 | from aimet_zoo_torch.resnet.evaluator import resnet_quanteval 16 | 17 | @pytest.mark.cuda 18 | @pytest.mark.image_classification 19 | # pylint:disable = redefined-outer-name 20 | @pytest.mark.parametrize("model_config", ["resnet18_w8a8","resnet50_w8a8","resnet101_w8a8"]) 21 | def test_quanteval_resnet(model_config, tiny_imageNet_validation_path): 22 | """resnet image classification test""" 23 | 24 | if tiny_imageNet_validation_path is None: 25 | pytest.fail('Dataset path is not set') 26 | 27 | torch.cuda.empty_cache() 28 | resnet_quanteval.main( 29 | [ 30 | "--model-config", 31 | model_config, 32 | "--dataset-path", 33 | tiny_imageNet_validation_path, 34 | ] 35 | ) 36 | -------------------------------------------------------------------------------- /aimet_zoo_torch/deeplabv3/model/model_cards/dlv3_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DeepLabV3+", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": { 6 | "num_classes": 21 7 | }, 8 | "input_shape": [null, 3, 513, 513], 9 | "training_dataset": "PascalVOC2012", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 4, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["qat", "cle", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/deeplab-mobilenet.pth.tar", 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/dlv3_w4a8.pth", 23 | "url_adaround_encodings": null, 24 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/dlv3_w4a8_torch.encodings", 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/mobilenetedgetpu/model/model_cards/mobilenetedgetpu_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MobileNet Edge TPU", 3 | "framework": "tensorflow", 4 | "task": "classification", 5 | "model_args": { 6 | "num_classes": 1001, 7 | "starting_op_names": ["IteratorGetNext"], 8 | "output_op_names": ["MobilenetEdgeTPU/Logits/output"] 9 | }, 10 | "input_shape": [null, 224, 224, 3], 11 | "training_dataset": "ImageNet", 12 | "optimization_config": { 13 | "quantization_configuration": 14 | { 15 | "param_bw": 8, 16 | "output_bw": 8, 17 | "input_quantization": true, 18 | "quant_scheme": "tf_enhanced", 19 | "techniques": ["bn_fold"] 20 | } 21 | }, 22 | "artifacts": { 23 | "url_pre_opt_weights": null, 24 | "url_post_opt_weights": null, 25 | "url_adaround_encodings": null, 26 | "url_aimet_encodings": null, 27 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json", 28 | 
"url_zipped_checkpoint": "https://github.com/quic/aimet-model-zoo/releases/download/mobilenetedgetpu_tf1/mobileNetEdgeTPU_ckpt.zip" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/ssd_mobilenet_v2/model/model_cards/ssd_mobilenetv2_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SSD-MobileNetV2", 3 | "framework": "tensorflow", 4 | "task": "object detection", 5 | "model_args": { 6 | "num_classes": 81, 7 | "starting_op_names": ["FeatureExtractor/MobilenetV2/MobilenetV2/input"], 8 | "output_op_names": ["concat", "concat_1"] 9 | }, 10 | "input_shape": [null, 3, 640, 480], 11 | "trainig_dataset": "MSCOCO", 12 | "optimization_config": { 13 | "quantization_configuration": 14 | { 15 | "param_bw": 8, 16 | "output_bw": 8, 17 | "input_quantization": true, 18 | "quant_scheme": "tf", 19 | "techniques": ["bnfold", "adaround"] 20 | } 21 | }, 22 | "artifacts": { 23 | "url_pre_opt_weights": null, 24 | "url_post_opt_weights": null, 25 | "url_adaround_encodings": null, 26 | "url_aimet_encodings": null, 27 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.19/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config.json", 28 | "url_zipped_checkpoint": "https://github.com/quic/aimet-model-zoo/releases/download/ssd_mobilenet_v2_tf/ssd_mobilenet_v2.tar.gz" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /AcceptanceTests/torch/staging/test_resnext_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for resnext image classification""" 14 | 15 | import pytest 16 | import torch 17 | from aimet_zoo_torch.resnext.evaluator import resnext_quanteval 18 | 19 | 20 | @pytest.mark.image_classification 21 | @pytest.mark.cuda 22 | @pytest.mark.parametrize("model_config",["resnext101_w8a8"]) 23 | # pylint:disable = redefined-outer-name 24 | def test_quanteval_resnext(model_config, tiny_imageNet_validation_path): 25 | """resnext image classification test""" 26 | if tiny_imageNet_validation_path is None: 27 | pytest.fail('Dataset is not set') 28 | 29 | torch.cuda.empty_cache() 30 | resnext_quanteval.main( 31 | [ 32 | "--model-config", 33 | model_config, 34 | "--dataset-path", 35 | tiny_imageNet_validation_path, 36 | ] 37 | ) 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /AcceptanceTests/tensorflow/test_resnet50_tf2_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for resnet50_tf2_quanteval image classification""" 14 | 15 | import pytest 16 | from aimet_zoo_tensorflow.resnet50_tf2.evaluators import resnet50_tf2_quanteval 17 | 18 | @pytest.mark.cuda 19 | @pytest.mark.image_classification 20 | # pylint:disable = redefined-outer-name 21 | @pytest.mark.parametrize("model_config", ["resnet50_w8a8"]) 22 | def test_quanteval_resnet50_tf2(model_config, tiny_imageNet_root_path): 23 | """resnet50_tf2 image classification acceptance test""" 24 | 25 | if tiny_imageNet_root_path is None: 26 | pytest.fail('Dataset path is not set') 27 | 28 | resnet50_tf2_quanteval.main( 29 | [ 30 | "--model-config", 31 | model_config, 32 | "--dataset-path", 33 | tiny_imageNet_root_path, 34 | ] 35 | ) 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /aimet_zoo_torch/xlsr/model/model_cards/xlsr_2x_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "XLSR 2x", 3 | "framework": "pytorch", 4 | "task": "super resolution", 5 | "model_args": { 6 | "scaling_factor": 2 7 | }, 8 | "input_shape": [null, 3, 256, 512], 9 | "training_dataset": "DIV2k", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["autoquant"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_2x_checkpoint_float32.pth.tar", 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_2x_checkpoint_int8.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_2x_checkpoint_int8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/xlsr/model/model_cards/xlsr_3x_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "XLSR 3x", 3 | "framework": "pytorch", 4 | "task": "super resolution", 5 | "model_args": { 6 | "scaling_factor": 3 7 | }, 8 | "input_shape": [null, 3, 256, 512], 9 | "training_dataset": "DIV2k", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["autoquant"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_3x_checkpoint_float32.pth.tar", 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_3x_checkpoint_int8.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_3x_checkpoint_int8.encodings", 24 | 
"url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/xlsr/model/model_cards/xlsr_4x_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "XLSR 4x", 3 | "framework": "pytorch", 4 | "task": "super resolution", 5 | "model_args": { 6 | "scaling_factor": 4 7 | }, 8 | "input_shape": [null, 3, 256, 512], 9 | "training_dataset": "DIV2k", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["autoquant"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_4x_checkpoint_float32.pth.tar", 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_4x_checkpoint_int8.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_4x_checkpoint_int8.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /AcceptanceTests/torch/test_mobilenetv2_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for mobilenetv2 image classification""" 14 | 15 | import pytest 16 | import torch 17 | from aimet_zoo_torch.mobilenetv2.evaluators import mobilenetv2_quanteval 18 | 19 | @pytest.mark.cuda 20 | @pytest.mark.image_classification 21 | # pylint:disable = redefined-outer-name 22 | @pytest.mark.parametrize("model_config", ["mobilenetv2_w8a8"]) 23 | def test_quanteval_mobilenetv2(model_config, tiny_imageNet_validation_path): 24 | """mobilenetv2 image classification test""" 25 | 26 | if tiny_imageNet_validation_path is None: 27 | pytest.fail('Dataset path is not set') 28 | 29 | torch.cuda.empty_cache() 30 | mobilenetv2_quanteval.main( 31 | [ 32 | "--model-config", 33 | model_config, 34 | "--dataset-path", 35 | tiny_imageNet_validation_path, 36 | ] 37 | ) 38 | -------------------------------------------------------------------------------- /aimet_zoo_tensorflow/deeplabv3plus_tf2/model/model_cards/deeplabv3plus_mbnv2_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "deeplabv3plus_mbnv2_tf2", 3 | "framework": "tensorflow2.x", 4 | "task": "semantic segmentation", 5 | "input_shape": [512, 512, 3], 6 | "dataset": "PascalVOC2012", 7 | "optimization_config": { 8 | "quantization_configuration": 9 | { 10 | "param_bw": 8, 11 | "output_bw": 8, 12 | "input_quantization": true, 13 | "quant_scheme": "tf_enhanced", 14 | "techniques": ["bn_fold", "qat"] 15 | } 16 | }, 17 | "artifacts": { 18 | "url_pre_opt_weights": "https://github.qualcomm.com/qualcomm-ai/aimet-model-zoo/releases/download/tensorflow2-deeplabv3plus_mbnv2/deeplabv3_mobilenetv2.h5", 19 | "url_post_opt_weights": "https://github.qualcomm.com/qualcomm-ai/aimet-model-zoo/releases/download/tensorflow2-deeplabv3plus_mbnv2/deeplabv3_mbnv2_w8a8.h5", 20 | "url_adaround_encodings": null, 21 | "url_aimet_encodings": "https://github.qualcomm.com/qualcomm-ai/aimet-model-zoo/releases/download/tensorflow2-deeplabv3plus_mbnv2/deeplabv3_mbnv2_w8a8.encodings", 22 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.25/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 23 | } 24 | } -------------------------------------------------------------------------------- /AcceptanceTests/tensorflow/staging/test_mobilenet_edgetpu_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for mobilenet edgetpu image classification""" 14 | import pytest 15 | from aimet_zoo_tensorflow.mobilenetedgetpu.evaluators import mobilenet_edgetpu_quanteval 16 | 17 | @pytest.mark.cuda 18 | @pytest.mark.image_classification 19 | # pylint:disable = redefined-outer-name 20 | @pytest.mark.parametrize("model_config", ["mobilenetedgetpu_w8a8"]) 21 | def test_quanteval_mobilenet_edgetpu(model_config, tiny_imageNet_tfrecords): 22 | """mobilenet edgetpu image classification test""" 23 | 24 | if tiny_imageNet_tfrecords is None: 25 | pytest.xfail('failed since dataset path is not set') 26 | 27 | mobilenet_edgetpu_quanteval.main( 28 | [ 29 | "--model-config", 30 | model_config, 31 | "--dataset-path", 32 | tiny_imageNet_tfrecords, 33 | ] 34 | ) -------------------------------------------------------------------------------- /aimet_zoo_torch/deeplabv3/model/model_cards/dlv3_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DeepLabV3+", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": { 6 | "num_classes": 21 7 | }, 8 | "input_shape": [null, 3, 513, 513], 9 | "training_dataset": "PascalVOC2012", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["cle", "bn_fold", "adaround"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/deeplab-mobilenet.pth.tar", 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/dlv3_w8a8_state_dict.pth", 23 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_dlv3_w8a8_pc/deeplabv3+w8a8_tfe_perchannel_param.encodings", 24 | "url_aimet_encodings": null, 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.22.1/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 26 | } 27 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/model_cards/rangenet_w4a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "RangeNet++", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [1, 5, 64, 2048], 7 | "training_dataset": "SemanticKitti", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 4, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "percentile", 15 | "techniques": ["cle", "adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": null, 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/rangenet_w4a8_state_dict.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/rangenet_w4a8_torch.encodings", 23 | "url_aimet_config": 
"https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json", 24 | "url_zipped_checkpoint": "https://github.com/quic/aimet-model-zoo/releases/download/torch_rangenet_plus_w8a8/rangeNet_plus_FP32.tar.gz" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /aimet_zoo_torch/rangenet/models/model_cards/rangenet_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "RangeNet++", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [1, 5, 64, 2048], 7 | "training_dataset": "SemanticKitti", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "percentile", 15 | "techniques": ["cle", "adaround"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": null, 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/rangenet_w4a8_state_dict.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/rangenet_w4a8_torch.encodings", 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json", 24 | "url_zipped_checkpoint": "https://github.com/quic/aimet-model-zoo/releases/download/torch_rangenet_plus_w8a8/rangeNet_plus_FP32.tar.gz" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /aimet_zoo_torch/uniformer_classification/model/model_cards/uniformer_classification_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Uniformer Small", 3 | "framework": "pytorch", 4 | "task": "classification", 5 | "model_args": { 6 | "num_classes": 1000 7 | }, 8 | "input_shape": [null, 3, 224, 224], 9 | "trainig_dataset": "Imagenet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["qat", "bn_fold"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_uniformer_classification/uniformer_classification_fp32.pth", 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_uniformer_classification/uniformer_classification_w8a8.pth", 23 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_uniformer_classification/uniformer_classification_w8a8.encodings", 24 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.22.1/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /AcceptanceTests/tensorflow/test_mobilenet_v2_tf2_quanteval.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | # -*- mode: python -*- 3 | # ============================================================================= 4 | # @@-COPYRIGHT-START-@@ 5 | # 6 | # Copyright (c) 2023 of 
Qualcomm Innovation Center, Inc. All rights reserved. 7 | # Changes from QuIC are licensed under the terms and conditions at 8 | # https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf 9 | # 10 | # @@-COPYRIGHT-END-@@ 11 | # ============================================================================= 12 | 13 | """ acceptance test for mobilenet_v2_tf2_quanteval image classification""" 14 | 15 | import pytest 16 | from aimet_zoo_tensorflow.mobilenet_v2_tf2.evaluators import mobilenet_v2_tf2_quanteval 17 | 18 | @pytest.mark.cuda 19 | @pytest.mark.image_classification 20 | # pylint:disable = redefined-outer-name 21 | @pytest.mark.parametrize("model_config", ["mobilenetv2_w8a8"]) 22 | def test_quanteval_mobilenet_v2(model_config, tiny_imageNet_root_path): 23 | """mobilenet_v2_tf2 image classification acceptance test""" 24 | 25 | if tiny_imageNet_root_path is None: 26 | pytest.fail('Dataset path is not set') 27 | 28 | mobilenet_v2_tf2_quanteval.main( 29 | [ 30 | "--model-config", 31 | model_config, 32 | "--dataset-path", 33 | tiny_imageNet_root_path, 34 | ] 35 | ) 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/model/model_cards/segmentation_ffnet40S_dBBB_mobile.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "FFNet-40S dBBB mobile", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 1024, 2048], 7 | "training_dataset": "Cityscapes", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["cle"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/prepared_segmentation_ffnet40S_dBBB_mobile.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/segmentation_ffnet40S_dBBB_mobile_W8A8_CLE_tfe_perchannel.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/segmentation_ffnet40S_dBBB_mobile_torch.encodings", 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/model/model_cards/segmentation_ffnet54S_dBBB_mobile.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "FFNet-54S dBBB mobile", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 1024, 2048], 7 | "training_dataset": "Cityscapes", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["cle"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/prepared_segmentation_ffnet54S_dBBB_mobile.pth", 20 | "url_post_opt_weights": 
"https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/segmentation_ffnet54S_dBBB_mobile_W8A8_CLE_tfe_perchannel.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/segmentation_ffnet54S_dBBB_mobile_torch.encodings", 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/ffnet/model/model_cards/segmentation_ffnet78S_dBBB_mobile.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DeepLabV3+", 3 | "framework": "pytorch", 4 | "task": "semantic segmentation", 5 | "model_args": {}, 6 | "input_shape": [null, 3, 1024, 2048], 7 | "trainig_dataset": "PascalVOC2012", 8 | "optimization_config": { 9 | "quantization_configuration": 10 | { 11 | "param_bw": 8, 12 | "output_bw": 8, 13 | "input_quantization": true, 14 | "quant_scheme": "tf_enhanced", 15 | "techniques": ["cle"] 16 | } 17 | }, 18 | "artifacts": { 19 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/prepared_segmentation_ffnet78S_dBBB_mobile.pth", 20 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/segmentation_ffnet78S_dBBB_mobile_W8A8_CLE_tfe_perchannel.pth", 21 | "url_adaround_encodings": null, 22 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/segmentation_ffnet78S_dBBB_mobile_torch.encodings", 23 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 24 | } 25 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_image_classification/model/model_cards/hrnet_w32_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HRNet W32", 3 | "framework": "pytorch", 4 | "task": "image classification", 5 | "model_args": { 6 | "model_definition":"cls_hrnet_w32_sgd_lr5e-2_wd1e-4_bs32_x100.yaml" 7 | }, 8 | "input_shape": [1, 3, 224, 224], 9 | "training_dataset": "ImageNet", 10 | "optimization_config": { 11 | "quantization_configuration": 12 | { 13 | "param_bw": 8, 14 | "output_bw": 8, 15 | "input_quantization": true, 16 | "quant_scheme": "tf_enhanced", 17 | "techniques": ["autoquant"] 18 | } 19 | }, 20 | "artifacts": { 21 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_hrnet_w32_w8a8/hrnetv2_w32_imagenet_pretrained.pth", 22 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_hrnet_w32_w8a8/hrnet_w32_w8a8_state_dict.pth", 23 | "url_adaround_encodings": null, 24 | "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/torch_hrnet_w32_w8a8/hrnet_w32_w8a8.encodings", 25 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.24/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config.json" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /aimet_zoo_torch/hrnet_posenet/models/nms/gpu_nms.pyx: 
-------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import numpy as np 12 | cimport numpy as np 13 | 14 | assert sizeof(int) == sizeof(np.int32_t) 15 | 16 | cdef extern from "gpu_nms.hpp": 17 | void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) 18 | 19 | def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, 20 | np.int32_t device_id=0): 21 | cdef int boxes_num = dets.shape[0] 22 | cdef int boxes_dim = dets.shape[1] 23 | cdef int num_out 24 | cdef np.ndarray[np.int32_t, ndim=1] \ 25 | keep = np.zeros(boxes_num, dtype=np.int32) 26 | cdef np.ndarray[np.float32_t, ndim=1] \ 27 | scores = dets[:, 4] 28 | cdef np.ndarray[np.int32_t, ndim=1] \ 29 | order = scores.argsort()[::-1].astype(np.int32) 30 | cdef np.ndarray[np.float32_t, ndim=2] \ 31 | sorted_dets = dets[order, :] 32 | _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) 33 | keep = keep[:num_out] 34 | return list(order[keep]) 35 | -------------------------------------------------------------------------------- /aimet_zoo_torch/abpn/model/model_cards/abpn_28_2x_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ABPN 28 2x", 3 | "framework": "pytorch", 4 | "task": "super resolution", 5 | "model_args": { 6 | "scaling_factor": 2, 7 | "num_channels": 28 8 | }, 9 | "input_shape": [null, 3, 256, 512], 10 | "training_dataset": "DIV2k", 11 | "optimization_config": { 12 | "quantization_configuration": 13 | { 14 | "param_bw": 8, 15 | "output_bw": 8, 16 | "input_quantization": true, 17 | "quant_scheme": "tf_enhanced", 18 | "techniques": ["autoquant"] 19 | } 20 | }, 21 | "artifacts": { 22 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_2x_checkpoint_float32.pth.tar", 23 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_2x_checkpoint_int8.pth", 24 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_2x_checkpoint_int8.encodings", 25 | "url_aimet_encodings": null, 26 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 27 | } 28 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/abpn/model/model_cards/abpn_28_3x_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ABPN 28 3x", 3 | "framework": "pytorch", 4 | "task": "super resolution", 5 | "model_args": { 6 | "scaling_factor": 3, 7 | "num_channels": 28 8 | }, 9 | "input_shape": [null, 3, 256, 512], 10 | "training_dataset": "DIV2k", 11 | "optimization_config": { 12 | "quantization_configuration": 13 | { 14 | "param_bw": 8, 15 | "output_bw": 8, 16 | "input_quantization": true, 17 | "quant_scheme": "tf_enhanced", 18 | "techniques": ["autoquant"] 19 | } 20 | }, 21 | "artifacts": { 22 | 
"url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_3x_checkpoint_float32.pth.tar", 23 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_3x_checkpoint_int8.pth", 24 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_3x_checkpoint_int8.encodings", 25 | "url_aimet_encodings": null, 26 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 27 | } 28 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/abpn/model/model_cards/abpn_28_4x_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ABPN 28 4x", 3 | "framework": "pytorch", 4 | "task": "super resolution", 5 | "model_args": { 6 | "scaling_factor": 4, 7 | "num_channels": 28 8 | }, 9 | "input_shape": [null, 3, 256, 512], 10 | "training_dataset": "DIV2k", 11 | "optimization_config": { 12 | "quantization_configuration": 13 | { 14 | "param_bw": 8, 15 | "output_bw": 8, 16 | "input_quantization": true, 17 | "quant_scheme": "tf_enhanced", 18 | "techniques": ["autoquant"] 19 | } 20 | }, 21 | "artifacts": { 22 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_4x_checkpoint_float32.pth.tar", 23 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_4x_checkpoint_int8.pth", 24 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_28_4x_checkpoint_int8.encodings", 25 | "url_aimet_encodings": null, 26 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 27 | } 28 | } -------------------------------------------------------------------------------- /aimet_zoo_torch/abpn/model/model_cards/abpn_32_2x_w8a8.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ABPN 32 2x", 3 | "framework": "pytorch", 4 | "task": "super resolution", 5 | "model_args": { 6 | "scaling_factor": 2, 7 | "num_channels": 32 8 | }, 9 | "input_shape": [null, 3, 256, 512], 10 | "training_dataset": "DIV2k", 11 | "optimization_config": { 12 | "quantization_configuration": 13 | { 14 | "param_bw": 8, 15 | "output_bw": 8, 16 | "input_quantization": true, 17 | "quant_scheme": "tf_enhanced", 18 | "techniques": ["autoquant"] 19 | } 20 | }, 21 | "artifacts": { 22 | "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_2x_checkpoint_float32.pth.tar", 23 | "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_2x_checkpoint_int8.pth", 24 | "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_2x_checkpoint_int8.encodings", 25 | "url_aimet_encodings": null, 26 | "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json" 27 | } 28 | } 
--------------------------------------------------------------------------------
/aimet_zoo_torch/abpn/model/model_cards/abpn_32_3x_w8a8.json:
--------------------------------------------------------------------------------
{
  "name": "ABPN 32 3x",
  "framework": "pytorch",
  "task": "super resolution",
  "model_args": {
    "scaling_factor": 3,
    "num_channels": 32
  },
  "input_shape": [null, 3, 256, 512],
  "training_dataset": "DIV2K",
  "optimization_config": {
    "quantization_configuration":
    {
      "param_bw": 8,
      "output_bw": 8,
      "input_quantization": true,
      "quant_scheme": "tf_enhanced",
      "techniques": ["autoquant"]
    }
  },
  "artifacts": {
    "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_3x_checkpoint_float32.pth.tar",
    "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_3x_checkpoint_int8.pth",
    "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_3x_checkpoint_int8.encodings",
    "url_aimet_encodings": null,
    "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json"
  }
}
--------------------------------------------------------------------------------
/aimet_zoo_torch/abpn/model/model_cards/abpn_32_4x_w8a8.json:
--------------------------------------------------------------------------------
{
  "name": "ABPN 32 4x",
  "framework": "pytorch",
  "task": "super resolution",
  "model_args": {
    "scaling_factor": 4,
    "num_channels": 32
  },
  "input_shape": [null, 3, 256, 512],
  "training_dataset": "DIV2K",
  "optimization_config": {
    "quantization_configuration":
    {
      "param_bw": 8,
      "output_bw": 8,
      "input_quantization": true,
      "quant_scheme": "tf_enhanced",
      "techniques": ["autoquant"]
    }
  },
  "artifacts": {
    "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_4x_checkpoint_float32.pth.tar",
    "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_4x_checkpoint_int8.pth",
    "url_adaround_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/abpn_32_4x_checkpoint_int8.encodings",
    "url_aimet_encodings": null,
    "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json"
  }
}
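The six ABPN cards above are identical except for model_args (scaling_factor 2/3/4 crossed with num_channels 28/32) and the artifact URLs, so their consistency is easier to verify programmatically than by eye. A minimal sketch, assuming the files sit at the paths shown in the headers:

import json
from pathlib import Path

CARD_DIR = Path("aimet_zoo_torch/abpn/model/model_cards")

for card_path in sorted(CARD_DIR.glob("abpn_*_w8a8.json")):
    card = json.loads(card_path.read_text())
    quant = card["optimization_config"]["quantization_configuration"]
    # Every variant shares the same W8A8 autoquant recipe; only the
    # architecture knobs and checkpoint URLs differ between cards.
    assert (quant["param_bw"], quant["output_bw"]) == (8, 8)
    assert quant["techniques"] == ["autoquant"]
    args = card["model_args"]
    print(card_path.name, args["num_channels"], args["scaling_factor"])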
--------------------------------------------------------------------------------
/AcceptanceTests/torch/test_vit_quanteval.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
# Changes from QuIC are licensed under the terms and conditions at
# https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf
#
# @@-COPYRIGHT-END-@@
# =============================================================================

"""Acceptance test for vit image classification"""

import pytest
import torch
from aimet_zoo_torch.vit.evaluators import vit_quanteval

@pytest.mark.image_classification
@pytest.mark.cuda
@pytest.mark.parametrize("model_config", ["vit_w8a8"])
# pylint:disable = redefined-outer-name
def test_quanteval_vit_image_classification(model_config, tiny_imageNet_validation_path, tiny_imageNet_train_path):
    """vit image classification test"""
    torch.cuda.empty_cache()
    if tiny_imageNet_validation_path is None:
        pytest.fail("Dataset not set")
    vit_quanteval.main(
        [
            "--model_config",
            model_config,
            "--train_dir",
            tiny_imageNet_train_path,
            "--validation_dir",
            tiny_imageNet_validation_path,
        ]
    )
--------------------------------------------------------------------------------
/aimet_zoo_torch/ffnet/model/model_cards/segmentation_ffnet78S_BCC_mobile_pre_down.json:
--------------------------------------------------------------------------------
{
  "name": "FFNet 78S BCC mobile pre-down",
  "framework": "pytorch",
  "task": "semantic segmentation",
  "model_args": {},
  "input_shape": [null, 3, 1024, 2048],
  "training_dataset": "Cityscapes",
  "optimization_config": {
    "quantization_configuration":
    {
      "param_bw": 8,
      "output_bw": 8,
      "input_quantization": true,
      "quant_scheme": "tf_enhanced",
      "techniques": ["cle"]
    }
  },
  "artifacts": {
    "url_pre_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/prepared_segmentation_ffnet78S_BCC_mobile_pre_down.pth",
    "url_post_opt_weights": "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/segmentation_ffnet78S_BCC_mobile_pre_down_W8A8_CLE_tfe_perchannel.pth",
    "url_adaround_encodings": null,
    "url_aimet_encodings": "https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/segmentation_ffnet78S_BCC_mobile_pre_down_torch.encodings",
    "url_aimet_config": "https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json"
  }
}
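The acceptance tests in this listing receive dataset locations through fixtures (tiny_imageNet_train_path, tiny_imageNet_validation_path, tiny_imageNet_root_path) rather than hard-coded paths. The repository's actual conftest.py is not included here; the sketch below is one plausible wiring via environment variables, for illustration only:

# Illustrative conftest.py sketch -- not the repository's actual fixture code.
import os
import pytest

@pytest.fixture
def tiny_imageNet_root_path():
    """Root of a Tiny ImageNet copy, or None when the env var is unset."""
    return os.environ.get("TINY_IMAGENET_ROOT")

@pytest.fixture
def tiny_imageNet_train_path(tiny_imageNet_root_path):
    if tiny_imageNet_root_path is None:
        return None
    return os.path.join(tiny_imageNet_root_path, "train")

@pytest.fixture
def tiny_imageNet_validation_path(tiny_imageNet_root_path):
    if tiny_imageNet_root_path is None:
        return None
    return os.path.join(tiny_imageNet_root_path, "val")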
--------------------------------------------------------------------------------
/AcceptanceTests/torch/test_efficientnetlite0_quanteval.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
# Changes from QuIC are licensed under the terms and conditions at
# https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf
#
# @@-COPYRIGHT-END-@@
# =============================================================================

"""Acceptance test for image classification"""

import pytest
import torch
from aimet_zoo_torch.efficientnetlite0.evaluators import efficientnetlite0_quanteval

@pytest.mark.cuda
@pytest.mark.image_classification
# pylint:disable = redefined-outer-name
@pytest.mark.parametrize("model_config", ["efficientnetlite0_w8a8"])
def test_quanteval_efficientnetlite0_image_classification(model_config, tiny_imageNet_validation_path):
    """efficientnetlite0 image classification test"""
    if tiny_imageNet_validation_path is None:
        pytest.fail("Dataset is not set")
    torch.cuda.empty_cache()
    efficientnetlite0_quanteval.main(
        [
            "--model-config",
            model_config,
            "--dataset-path",
            tiny_imageNet_validation_path,
        ]
    )
--------------------------------------------------------------------------------
/AcceptanceTests/torch/staging/test_uniformer_classification_quanteval.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
# Changes from QuIC are licensed under the terms and conditions at
# https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf
#
# @@-COPYRIGHT-END-@@
# =============================================================================

"""Acceptance test for uniformer image classification"""

import pytest
import torch
from aimet_zoo_torch.uniformer_classification.evaluators import uniformer_classification_quanteval

@pytest.mark.cuda
@pytest.mark.image_classification
# pylint:disable = redefined-outer-name
@pytest.mark.parametrize("model_config", ["uniformer_classification_w8a8"])
def test_quanteval_uniformer_classification(model_config, tiny_imageNet_root_path):
    """uniformer image classification test"""

    if tiny_imageNet_root_path is None:
        pytest.fail("Dataset path is not set")

    torch.cuda.empty_cache()
    uniformer_classification_quanteval.main(
        [
            "--model-config",
            model_config,
            "--dataset-path",
            tiny_imageNet_root_path,
        ]
    )
--------------------------------------------------------------------------------
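Every test above carries @pytest.mark.cuda plus a task marker, so subsets can be selected with -m expressions. A short runner sketch, assuming it is invoked from the repository root:

import sys
import pytest

# Select only the CUDA image-classification acceptance tests.
sys.exit(pytest.main(["-m", "cuda and image_classification", "AcceptanceTests/torch"]))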