├── .gitignore ├── LICENSE ├── README.md ├── checkpoints └── README.txt ├── images ├── demo.png └── workflow.png ├── ootd ├── inference_ootd.py ├── inference_ootd_dc.py ├── inference_ootd_hd.py └── pipelines_ootd │ ├── attention_garm.py │ ├── attention_vton.py │ ├── pipeline_ootd.py │ ├── transformer_garm_2d.py │ ├── transformer_vton_2d.py │ ├── unet_garm_2d_blocks.py │ ├── unet_garm_2d_condition.py │ ├── unet_vton_2d_blocks.py │ └── unet_vton_2d_condition.py ├── preprocess ├── humanparsing │ ├── datasets │ │ ├── __init__.py │ │ ├── datasets.py │ │ ├── simple_extractor_dataset.py │ │ └── target_generation.py │ ├── mhp_extension │ │ ├── coco_style_annotation_creator │ │ │ ├── human_to_coco.py │ │ │ ├── pycococreatortools.py │ │ │ └── test_human2coco_format.py │ │ ├── detectron2 │ │ │ ├── .circleci │ │ │ │ └── config.yml │ │ │ ├── .clang-format │ │ │ ├── .flake8 │ │ │ ├── .github │ │ │ │ ├── CODE_OF_CONDUCT.md │ │ │ │ ├── CONTRIBUTING.md │ │ │ │ ├── Detectron2-Logo-Horz.svg │ │ │ │ ├── ISSUE_TEMPLATE.md │ │ │ │ ├── ISSUE_TEMPLATE │ │ │ │ │ ├── bugs.md │ │ │ │ │ ├── config.yml │ │ │ │ │ ├── feature-request.md │ │ │ │ │ ├── questions-help-support.md │ │ │ │ │ └── unexpected-problems-bugs.md │ │ │ │ └── pull_request_template.md │ │ │ ├── .gitignore │ │ │ ├── GETTING_STARTED.md │ │ │ ├── INSTALL.md │ │ │ ├── LICENSE │ │ │ ├── MODEL_ZOO.md │ │ │ ├── README.md │ │ │ ├── configs │ │ │ │ ├── Base-RCNN-C4.yaml │ │ │ │ ├── Base-RCNN-DilatedC5.yaml │ │ │ │ ├── Base-RCNN-FPN.yaml │ │ │ │ ├── Base-RetinaNet.yaml │ │ │ │ ├── COCO-Detection │ │ │ │ │ ├── fast_rcnn_R_50_FPN_1x.yaml │ │ │ │ │ ├── faster_rcnn_R_101_C4_3x.yaml │ │ │ │ │ ├── faster_rcnn_R_101_DC5_3x.yaml │ │ │ │ │ ├── faster_rcnn_R_101_FPN_3x.yaml │ │ │ │ │ ├── faster_rcnn_R_50_C4_1x.yaml │ │ │ │ │ ├── faster_rcnn_R_50_C4_3x.yaml │ │ │ │ │ ├── faster_rcnn_R_50_DC5_1x.yaml │ │ │ │ │ ├── faster_rcnn_R_50_DC5_3x.yaml │ │ │ │ │ ├── faster_rcnn_R_50_FPN_1x.yaml │ │ │ │ │ ├── faster_rcnn_R_50_FPN_3x.yaml │ │ │ │ │ ├── faster_rcnn_X_101_32x8d_FPN_3x.yaml │ │ │ │ │ ├── retinanet_R_101_FPN_3x.yaml │ │ │ │ │ ├── retinanet_R_50_FPN_1x.yaml │ │ │ │ │ ├── retinanet_R_50_FPN_3x.yaml │ │ │ │ │ ├── rpn_R_50_C4_1x.yaml │ │ │ │ │ └── rpn_R_50_FPN_1x.yaml │ │ │ │ ├── COCO-InstanceSegmentation │ │ │ │ │ ├── mask_rcnn_R_101_C4_3x.yaml │ │ │ │ │ ├── mask_rcnn_R_101_DC5_3x.yaml │ │ │ │ │ ├── mask_rcnn_R_101_FPN_3x.yaml │ │ │ │ │ ├── mask_rcnn_R_50_C4_1x.yaml │ │ │ │ │ ├── mask_rcnn_R_50_C4_3x.yaml │ │ │ │ │ ├── mask_rcnn_R_50_DC5_1x.yaml │ │ │ │ │ ├── mask_rcnn_R_50_DC5_3x.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_1x.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_3x.yaml │ │ │ │ │ └── mask_rcnn_X_101_32x8d_FPN_3x.yaml │ │ │ │ ├── COCO-Keypoints │ │ │ │ │ ├── Base-Keypoint-RCNN-FPN.yaml │ │ │ │ │ ├── keypoint_rcnn_R_101_FPN_3x.yaml │ │ │ │ │ ├── keypoint_rcnn_R_50_FPN_1x.yaml │ │ │ │ │ ├── keypoint_rcnn_R_50_FPN_3x.yaml │ │ │ │ │ └── keypoint_rcnn_X_101_32x8d_FPN_3x.yaml │ │ │ │ ├── COCO-PanopticSegmentation │ │ │ │ │ ├── Base-Panoptic-FPN.yaml │ │ │ │ │ ├── panoptic_fpn_R_101_3x.yaml │ │ │ │ │ ├── panoptic_fpn_R_50_1x.yaml │ │ │ │ │ └── panoptic_fpn_R_50_3x.yaml │ │ │ │ ├── Cityscapes │ │ │ │ │ └── mask_rcnn_R_50_FPN.yaml │ │ │ │ ├── Detectron1-Comparisons │ │ │ │ │ ├── README.md │ │ │ │ │ ├── faster_rcnn_R_50_FPN_noaug_1x.yaml │ │ │ │ │ ├── keypoint_rcnn_R_50_FPN_1x.yaml │ │ │ │ │ └── mask_rcnn_R_50_FPN_noaug_1x.yaml │ │ │ │ ├── LVIS-InstanceSegmentation │ │ │ │ │ ├── mask_rcnn_R_101_FPN_1x.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_1x.yaml │ │ │ │ │ └── 
mask_rcnn_X_101_32x8d_FPN_1x.yaml │ │ │ │ ├── Misc │ │ │ │ │ ├── cascade_mask_rcnn_R_50_FPN_1x.yaml │ │ │ │ │ ├── cascade_mask_rcnn_R_50_FPN_3x.yaml │ │ │ │ │ ├── cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml │ │ │ │ │ ├── cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv_parsing.yaml │ │ │ │ │ ├── demo.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_3x_gn.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_3x_syncbn.yaml │ │ │ │ │ ├── panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml │ │ │ │ │ ├── parsing_finetune_cihp.yaml │ │ │ │ │ ├── parsing_inference.yaml │ │ │ │ │ ├── scratch_mask_rcnn_R_50_FPN_3x_gn.yaml │ │ │ │ │ ├── scratch_mask_rcnn_R_50_FPN_9x_gn.yaml │ │ │ │ │ ├── scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml │ │ │ │ │ └── semantic_R_50_FPN_1x.yaml │ │ │ │ ├── PascalVOC-Detection │ │ │ │ │ ├── faster_rcnn_R_50_C4.yaml │ │ │ │ │ └── faster_rcnn_R_50_FPN.yaml │ │ │ │ ├── my_Base-RCNN-FPN.yaml │ │ │ │ └── quick_schedules │ │ │ │ │ ├── README.md │ │ │ │ │ ├── cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ ├── cascade_mask_rcnn_R_50_FPN_instant_test.yaml │ │ │ │ │ ├── fast_rcnn_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ ├── fast_rcnn_R_50_FPN_instant_test.yaml │ │ │ │ │ ├── keypoint_rcnn_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ ├── keypoint_rcnn_R_50_FPN_instant_test.yaml │ │ │ │ │ ├── keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml │ │ │ │ │ ├── keypoint_rcnn_R_50_FPN_training_acc_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_C4_GCV_instant_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_C4_inference_acc_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_C4_instant_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_C4_training_acc_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_DC5_inference_acc_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_instant_test.yaml │ │ │ │ │ ├── mask_rcnn_R_50_FPN_training_acc_test.yaml │ │ │ │ │ ├── panoptic_fpn_R_50_inference_acc_test.yaml │ │ │ │ │ ├── panoptic_fpn_R_50_instant_test.yaml │ │ │ │ │ ├── panoptic_fpn_R_50_training_acc_test.yaml │ │ │ │ │ ├── retinanet_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ ├── retinanet_R_50_FPN_instant_test.yaml │ │ │ │ │ ├── rpn_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ ├── rpn_R_50_FPN_instant_test.yaml │ │ │ │ │ ├── semantic_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ ├── semantic_R_50_FPN_instant_test.yaml │ │ │ │ │ └── semantic_R_50_FPN_training_acc_test.yaml │ │ │ ├── demo │ │ │ │ ├── README.md │ │ │ │ ├── demo.py │ │ │ │ └── predictor.py │ │ │ ├── detectron2 │ │ │ │ ├── __init__.py │ │ │ │ ├── checkpoint │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── c2_model_loading.py │ │ │ │ │ ├── catalog.py │ │ │ │ │ └── detection_checkpoint.py │ │ │ │ ├── config │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── compat.py │ │ │ │ │ ├── config.py │ │ │ │ │ └── defaults.py │ │ │ │ ├── data │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── build.py │ │ │ │ │ ├── catalog.py │ │ │ │ │ ├── common.py │ │ │ │ │ ├── dataset_mapper.py │ │ │ │ │ ├── datasets │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── builtin.py │ │ │ │ │ │ ├── builtin_meta.py │ │ │ │ │ │ ├── cityscapes.py │ │ │ │ │ │ ├── coco.py │ │ │ │ │ │ ├── lvis.py │ │ │ │ │ │ ├── lvis_v0_5_categories.py │ │ │ │ │ │ ├── pascal_voc.py │ │ │ │ │ │ └── register_coco.py │ │ │ │ │ ├── detection_utils.py │ │ │ │ │ ├── samplers │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── distributed_sampler.py │ │ │ │ │ │ └── 
grouped_batch_sampler.py │ │ │ │ │ └── transforms │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── transform.py │ │ │ │ │ │ └── transform_gen.py │ │ │ │ ├── engine │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── defaults.py │ │ │ │ │ ├── hooks.py │ │ │ │ │ ├── launch.py │ │ │ │ │ └── train_loop.py │ │ │ │ ├── evaluation │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── cityscapes_evaluation.py │ │ │ │ │ ├── coco_evaluation.py │ │ │ │ │ ├── evaluator.py │ │ │ │ │ ├── lvis_evaluation.py │ │ │ │ │ ├── panoptic_evaluation.py │ │ │ │ │ ├── pascal_voc_evaluation.py │ │ │ │ │ ├── rotated_coco_evaluation.py │ │ │ │ │ ├── sem_seg_evaluation.py │ │ │ │ │ └── testing.py │ │ │ │ ├── export │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── api.py │ │ │ │ │ ├── c10.py │ │ │ │ │ ├── caffe2_export.py │ │ │ │ │ ├── caffe2_inference.py │ │ │ │ │ ├── caffe2_modeling.py │ │ │ │ │ ├── patcher.py │ │ │ │ │ └── shared.py │ │ │ │ ├── layers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── batch_norm.py │ │ │ │ │ ├── blocks.py │ │ │ │ │ ├── csrc │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── ROIAlign │ │ │ │ │ │ │ ├── ROIAlign.h │ │ │ │ │ │ │ ├── ROIAlign_cpu.cpp │ │ │ │ │ │ │ └── ROIAlign_cuda.cu │ │ │ │ │ │ ├── ROIAlignRotated │ │ │ │ │ │ │ ├── ROIAlignRotated.h │ │ │ │ │ │ │ ├── ROIAlignRotated_cpu.cpp │ │ │ │ │ │ │ └── ROIAlignRotated_cuda.cu │ │ │ │ │ │ ├── box_iou_rotated │ │ │ │ │ │ │ ├── box_iou_rotated.h │ │ │ │ │ │ │ ├── box_iou_rotated_cpu.cpp │ │ │ │ │ │ │ ├── box_iou_rotated_cuda.cu │ │ │ │ │ │ │ └── box_iou_rotated_utils.h │ │ │ │ │ │ ├── cuda_version.cu │ │ │ │ │ │ ├── deformable │ │ │ │ │ │ │ ├── deform_conv.h │ │ │ │ │ │ │ ├── deform_conv_cuda.cu │ │ │ │ │ │ │ └── deform_conv_cuda_kernel.cu │ │ │ │ │ │ ├── nms_rotated │ │ │ │ │ │ │ ├── nms_rotated.h │ │ │ │ │ │ │ ├── nms_rotated_cpu.cpp │ │ │ │ │ │ │ └── nms_rotated_cuda.cu │ │ │ │ │ │ └── vision.cpp │ │ │ │ │ ├── deform_conv.py │ │ │ │ │ ├── mask_ops.py │ │ │ │ │ ├── nms.py │ │ │ │ │ ├── roi_align.py │ │ │ │ │ ├── roi_align_rotated.py │ │ │ │ │ ├── rotated_boxes.py │ │ │ │ │ ├── shape_spec.py │ │ │ │ │ └── wrappers.py │ │ │ │ ├── model_zoo │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── model_zoo.py │ │ │ │ ├── modeling │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── anchor_generator.py │ │ │ │ │ ├── backbone │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── backbone.py │ │ │ │ │ │ ├── build.py │ │ │ │ │ │ ├── fpn.py │ │ │ │ │ │ └── resnet.py │ │ │ │ │ ├── box_regression.py │ │ │ │ │ ├── matcher.py │ │ │ │ │ ├── meta_arch │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── build.py │ │ │ │ │ │ ├── panoptic_fpn.py │ │ │ │ │ │ ├── rcnn.py │ │ │ │ │ │ ├── retinanet.py │ │ │ │ │ │ └── semantic_seg.py │ │ │ │ │ ├── poolers.py │ │ │ │ │ ├── postprocessing.py │ │ │ │ │ ├── proposal_generator │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── build.py │ │ │ │ │ │ ├── proposal_utils.py │ │ │ │ │ │ ├── rpn.py │ │ │ │ │ │ ├── rpn_outputs.py │ │ │ │ │ │ └── rrpn.py │ │ │ │ │ ├── roi_heads │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── box_head.py │ │ │ │ │ │ ├── cascade_rcnn.py │ │ │ │ │ │ ├── fast_rcnn.py │ │ │ │ │ │ ├── keypoint_head.py │ │ │ │ │ │ ├── mask_head.py │ │ │ │ │ │ ├── roi_heads.py │ │ │ │ │ │ └── rotated_fast_rcnn.py │ │ │ │ │ ├── sampling.py │ │ │ │ │ └── test_time_augmentation.py │ │ │ │ ├── solver │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── build.py │ │ │ │ │ └── lr_scheduler.py │ │ │ │ ├── structures │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── boxes.py │ │ │ │ │ ├── image_list.py │ │ │ │ │ ├── instances.py │ │ │ │ │ ├── keypoints.py │ │ │ │ │ ├── masks.py │ │ │ │ │ └── rotated_boxes.py │ │ │ │ └── utils │ │ │ 
│ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── analysis.py │ │ │ │ │ ├── collect_env.py │ │ │ │ │ ├── colormap.py │ │ │ │ │ ├── comm.py │ │ │ │ │ ├── env.py │ │ │ │ │ ├── events.py │ │ │ │ │ ├── logger.py │ │ │ │ │ ├── memory.py │ │ │ │ │ ├── registry.py │ │ │ │ │ ├── serialize.py │ │ │ │ │ ├── video_visualizer.py │ │ │ │ │ └── visualizer.py │ │ │ ├── dev │ │ │ │ ├── README.md │ │ │ │ ├── linter.sh │ │ │ │ ├── packaging │ │ │ │ │ ├── README.md │ │ │ │ │ ├── build_all_wheels.sh │ │ │ │ │ ├── build_wheel.sh │ │ │ │ │ ├── gen_wheel_index.sh │ │ │ │ │ └── pkg_helpers.bash │ │ │ │ ├── parse_results.sh │ │ │ │ ├── run_inference_tests.sh │ │ │ │ └── run_instant_tests.sh │ │ │ ├── docker │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile-circleci │ │ │ │ ├── README.md │ │ │ │ └── docker-compose.yml │ │ │ ├── docs │ │ │ │ ├── .gitignore │ │ │ │ ├── Makefile │ │ │ │ ├── README.md │ │ │ │ ├── conf.py │ │ │ │ ├── index.rst │ │ │ │ ├── modules │ │ │ │ │ ├── checkpoint.rst │ │ │ │ │ ├── config.rst │ │ │ │ │ ├── data.rst │ │ │ │ │ ├── engine.rst │ │ │ │ │ ├── evaluation.rst │ │ │ │ │ ├── export.rst │ │ │ │ │ ├── index.rst │ │ │ │ │ ├── layers.rst │ │ │ │ │ ├── model_zoo.rst │ │ │ │ │ ├── modeling.rst │ │ │ │ │ ├── solver.rst │ │ │ │ │ ├── structures.rst │ │ │ │ │ └── utils.rst │ │ │ │ ├── notes │ │ │ │ │ ├── benchmarks.md │ │ │ │ │ ├── changelog.md │ │ │ │ │ ├── compatibility.md │ │ │ │ │ ├── contributing.md │ │ │ │ │ └── index.rst │ │ │ │ └── tutorials │ │ │ │ │ ├── README.md │ │ │ │ │ ├── builtin_datasets.md │ │ │ │ │ ├── configs.md │ │ │ │ │ ├── data_loading.md │ │ │ │ │ ├── datasets.md │ │ │ │ │ ├── deployment.md │ │ │ │ │ ├── evaluation.md │ │ │ │ │ ├── extend.md │ │ │ │ │ ├── getting_started.md │ │ │ │ │ ├── index.rst │ │ │ │ │ ├── install.md │ │ │ │ │ ├── models.md │ │ │ │ │ ├── training.md │ │ │ │ │ └── write-models.md │ │ │ ├── projects │ │ │ │ ├── DensePose │ │ │ │ │ ├── README.md │ │ │ │ │ ├── apply_net.py │ │ │ │ │ ├── configs │ │ │ │ │ │ ├── Base-DensePose-RCNN-FPN.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_101_FPN_DL_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_101_FPN_WC1_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_101_FPN_WC2_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_101_FPN_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_101_FPN_s1x_legacy.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_DL_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_WC1_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_WC2_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_s1x.yaml │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_s1x_legacy.yaml │ │ │ │ │ │ ├── evolution │ │ │ │ │ │ │ ├── Base-RCNN-FPN-MC.yaml │ │ │ │ │ │ │ └── faster_rcnn_R_50_FPN_1x_MC.yaml │ │ │ │ │ │ └── quick_schedules │ │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_DL_instant_test.yaml │ │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml │ │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_WC1_instant_test.yaml │ │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_WC2_instant_test.yaml │ │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_inference_acc_test.yaml │ │ │ │ │ │ │ ├── densepose_rcnn_R_50_FPN_instant_test.yaml │ │ │ │ │ │ │ └── densepose_rcnn_R_50_FPN_training_acc_test.yaml │ │ │ │ │ ├── densepose │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── config.py │ │ │ │ │ │ ├── data │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ ├── build.py │ │ │ │ │ │ │ ├── 
dataset_mapper.py │ │ │ │ │ │ │ ├── datasets │ │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ │ ├── builtin.py │ │ │ │ │ │ │ │ └── coco.py │ │ │ │ │ │ │ └── structures.py │ │ │ │ │ │ ├── densepose_coco_evaluation.py │ │ │ │ │ │ ├── densepose_head.py │ │ │ │ │ │ ├── evaluator.py │ │ │ │ │ │ ├── modeling │ │ │ │ │ │ │ └── test_time_augmentation.py │ │ │ │ │ │ ├── roi_head.py │ │ │ │ │ │ ├── utils │ │ │ │ │ │ │ ├── dbhelper.py │ │ │ │ │ │ │ ├── logger.py │ │ │ │ │ │ │ └── transform.py │ │ │ │ │ │ └── vis │ │ │ │ │ │ │ ├── base.py │ │ │ │ │ │ │ ├── bounding_box.py │ │ │ │ │ │ │ ├── densepose.py │ │ │ │ │ │ │ └── extractor.py │ │ │ │ │ ├── dev │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── run_inference_tests.sh │ │ │ │ │ │ └── run_instant_tests.sh │ │ │ │ │ ├── doc │ │ │ │ │ │ ├── GETTING_STARTED.md │ │ │ │ │ │ ├── MODEL_ZOO.md │ │ │ │ │ │ ├── TOOL_APPLY_NET.md │ │ │ │ │ │ └── TOOL_QUERY_DB.md │ │ │ │ │ ├── query_db.py │ │ │ │ │ ├── tests │ │ │ │ │ │ ├── common.py │ │ │ │ │ │ ├── test_model_e2e.py │ │ │ │ │ │ ├── test_setup.py │ │ │ │ │ │ └── test_structures.py │ │ │ │ │ └── train_net.py │ │ │ │ ├── PointRend │ │ │ │ │ ├── README.md │ │ │ │ │ ├── configs │ │ │ │ │ │ ├── InstanceSegmentation │ │ │ │ │ │ │ ├── Base-PointRend-RCNN-FPN.yaml │ │ │ │ │ │ │ ├── pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml │ │ │ │ │ │ │ ├── pointrend_rcnn_R_50_FPN_1x_coco.yaml │ │ │ │ │ │ │ ├── pointrend_rcnn_R_50_FPN_3x_coco.yaml │ │ │ │ │ │ │ ├── pointrend_rcnn_R_50_FPN_3x_parsing.yaml │ │ │ │ │ │ │ └── pointrend_rcnn_X_101_32x8d_FPN_3x_parsing.yaml │ │ │ │ │ │ └── SemanticSegmentation │ │ │ │ │ │ │ ├── Base-PointRend-Semantic-FPN.yaml │ │ │ │ │ │ │ ├── pointrend_semantic_R_101_FPN_1x_cityscapes.yaml │ │ │ │ │ │ │ └── pointrend_semantic_R_50_FPN_1x_coco.yaml │ │ │ │ │ ├── finetune_net.py │ │ │ │ │ ├── logs │ │ │ │ │ │ └── hadoop.kylin.libdfs.log │ │ │ │ │ ├── point_rend │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── coarse_mask_head.py │ │ │ │ │ │ ├── color_augmentation.py │ │ │ │ │ │ ├── config.py │ │ │ │ │ │ ├── dataset_mapper.py │ │ │ │ │ │ ├── point_features.py │ │ │ │ │ │ ├── point_head.py │ │ │ │ │ │ ├── roi_heads.py │ │ │ │ │ │ └── semantic_seg.py │ │ │ │ │ ├── run.sh │ │ │ │ │ └── train_net.py │ │ │ │ ├── README.md │ │ │ │ ├── TensorMask │ │ │ │ │ ├── README.md │ │ │ │ │ ├── configs │ │ │ │ │ │ ├── Base-TensorMask.yaml │ │ │ │ │ │ ├── tensormask_R_50_FPN_1x.yaml │ │ │ │ │ │ └── tensormask_R_50_FPN_6x.yaml │ │ │ │ │ ├── setup.py │ │ │ │ │ ├── tensormask │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── arch.py │ │ │ │ │ │ ├── config.py │ │ │ │ │ │ └── layers │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ ├── csrc │ │ │ │ │ │ │ ├── SwapAlign2Nat │ │ │ │ │ │ │ │ ├── SwapAlign2Nat.h │ │ │ │ │ │ │ │ └── SwapAlign2Nat_cuda.cu │ │ │ │ │ │ │ └── vision.cpp │ │ │ │ │ │ │ └── swap_align2nat.py │ │ │ │ │ ├── tests │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── test_swap_align2nat.py │ │ │ │ │ └── train_net.py │ │ │ │ └── TridentNet │ │ │ │ │ ├── README.md │ │ │ │ │ ├── configs │ │ │ │ │ ├── Base-TridentNet-Fast-C4.yaml │ │ │ │ │ ├── tridentnet_fast_R_101_C4_3x.yaml │ │ │ │ │ ├── tridentnet_fast_R_50_C4_1x.yaml │ │ │ │ │ └── tridentnet_fast_R_50_C4_3x.yaml │ │ │ │ │ ├── train_net.py │ │ │ │ │ └── tridentnet │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── config.py │ │ │ │ │ ├── trident_backbone.py │ │ │ │ │ ├── trident_conv.py │ │ │ │ │ ├── trident_rcnn.py │ │ │ │ │ └── trident_rpn.py │ │ │ ├── setup.cfg │ │ │ ├── setup.py │ │ │ ├── tests │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── data │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_coco.py 
│ │ │ │ │ ├── test_detection_utils.py │ │ │ │ │ ├── test_rotation_transform.py │ │ │ │ │ ├── test_sampler.py │ │ │ │ │ └── test_transforms.py │ │ │ │ ├── layers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_mask_ops.py │ │ │ │ │ ├── test_nms_rotated.py │ │ │ │ │ ├── test_roi_align.py │ │ │ │ │ └── test_roi_align_rotated.py │ │ │ │ ├── modeling │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_anchor_generator.py │ │ │ │ │ ├── test_box2box_transform.py │ │ │ │ │ ├── test_fast_rcnn.py │ │ │ │ │ ├── test_model_e2e.py │ │ │ │ │ ├── test_roi_heads.py │ │ │ │ │ ├── test_roi_pooler.py │ │ │ │ │ └── test_rpn.py │ │ │ │ ├── structures │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_boxes.py │ │ │ │ │ ├── test_imagelist.py │ │ │ │ │ ├── test_instances.py │ │ │ │ │ └── test_rotated_boxes.py │ │ │ │ ├── test_checkpoint.py │ │ │ │ ├── test_config.py │ │ │ │ ├── test_export_caffe2.py │ │ │ │ ├── test_model_analysis.py │ │ │ │ ├── test_model_zoo.py │ │ │ │ └── test_visualizer.py │ │ │ └── tools │ │ │ │ ├── README.md │ │ │ │ ├── analyze_model.py │ │ │ │ ├── benchmark.py │ │ │ │ ├── convert-torchvision-to-d2.py │ │ │ │ ├── deploy │ │ │ │ ├── README.md │ │ │ │ ├── caffe2_converter.py │ │ │ │ ├── caffe2_mask_rcnn.cpp │ │ │ │ └── torchscript_traced_mask_rcnn.cpp │ │ │ │ ├── finetune_net.py │ │ │ │ ├── inference.sh │ │ │ │ ├── plain_train_net.py │ │ │ │ ├── run.sh │ │ │ │ ├── train_net.py │ │ │ │ ├── visualize_data.py │ │ │ │ └── visualize_json_results.py │ │ ├── global_local_parsing │ │ │ ├── global_local_datasets.py │ │ │ ├── global_local_evaluate.py │ │ │ ├── global_local_train.py │ │ │ └── make_id_list.py │ │ ├── logits_fusion.py │ │ ├── make_crop_and_mask_w_mask_nms.py │ │ └── scripts │ │ │ ├── make_coco_style_annotation.sh │ │ │ ├── make_crop.sh │ │ │ └── parsing_fusion.sh │ ├── modules │ │ ├── __init__.py │ │ ├── bn.py │ │ ├── deeplab.py │ │ ├── dense.py │ │ ├── functions.py │ │ ├── misc.py │ │ ├── residual.py │ │ └── src │ │ │ ├── checks.h │ │ │ ├── inplace_abn.cpp │ │ │ ├── inplace_abn.h │ │ │ ├── inplace_abn_cpu.cpp │ │ │ ├── inplace_abn_cuda.cu │ │ │ ├── inplace_abn_cuda_half.cu │ │ │ └── utils │ │ │ ├── checks.h │ │ │ ├── common.h │ │ │ └── cuda.cuh │ ├── networks │ │ ├── AugmentCE2P.py │ │ ├── __init__.py │ │ ├── backbone │ │ │ ├── mobilenetv2.py │ │ │ ├── resnet.py │ │ │ └── resnext.py │ │ └── context_encoding │ │ │ ├── aspp.py │ │ │ ├── ocnet.py │ │ │ └── psp.py │ ├── parsing_api.py │ ├── run_parsing.py │ └── utils │ │ ├── __init__.py │ │ ├── consistency_loss.py │ │ ├── criterion.py │ │ ├── encoding.py │ │ ├── kl_loss.py │ │ ├── lovasz_softmax.py │ │ ├── miou.py │ │ ├── schp.py │ │ ├── soft_dice_loss.py │ │ ├── transforms.py │ │ └── warmup_scheduler.py └── openpose │ ├── annotator │ ├── openpose │ │ ├── LICENSE │ │ ├── __init__.py │ │ ├── body.py │ │ ├── face.py │ │ ├── hand.py │ │ ├── model.py │ │ └── util.py │ └── util.py │ └── run_openpose.py ├── requirements.txt └── run ├── examples ├── garment │ ├── 00055_00.jpg │ ├── 00126_00.jpg │ ├── 00151_00.jpg │ ├── 00470_00.jpg │ ├── 02015_00.jpg │ ├── 02305_00.jpg │ ├── 03032_00.jpg │ ├── 03244_00.jpg │ ├── 04825_00.jpg │ ├── 048554_1.jpg │ ├── 048769_1.jpg │ ├── 049805_1.jpg │ ├── 049920_1.jpg │ ├── 049940_1.jpg │ ├── 049947_1.jpg │ ├── 049949_1.jpg │ ├── 049965_1.jpg │ ├── 050002_1.jpg │ ├── 050105_1.jpg │ ├── 050181_1.jpg │ ├── 050191_1.jpg │ ├── 051412_1.jpg │ ├── 051473_1.jpg │ ├── 051515_1.jpg │ ├── 051517_1.jpg │ ├── 051827_1.jpg │ ├── 051946_1.jpg │ ├── 051988_1.jpg │ ├── 051991_1.jpg │ ├── 051998_1.jpg │ ├── 052234_1.jpg │ ├── 053290_1.jpg │ ├── 
053319_1.jpg │ ├── 053742_1.jpg │ ├── 053744_1.jpg │ ├── 053786_1.jpg │ ├── 053790_1.jpg │ ├── 06123_00.jpg │ ├── 07382_00.jpg │ ├── 07764_00.jpg │ ├── 10297_00.jpg │ └── 12562_00.jpg └── model │ ├── 01008_00.jpg │ ├── 01861_00.jpg │ ├── 02849_00.jpg │ ├── 049205_0.jpg │ ├── 049447_0.jpg │ ├── 049713_0.jpg │ ├── 051482_0.jpg │ ├── 051918_0.jpg │ ├── 051962_0.jpg │ ├── 052472_0.jpg │ ├── 052767_0.jpg │ ├── 052964_0.jpg │ ├── 053228_0.jpg │ ├── 053514_0.jpg │ ├── 053700_0.jpg │ ├── 05997_00.jpg │ ├── 07966_00.jpg │ ├── 09597_00.jpg │ ├── 14627_00.jpg │ ├── model_1.png │ ├── model_2.png │ ├── model_3.png │ ├── model_4.png │ ├── model_5.png │ ├── model_6.png │ ├── model_7.png │ ├── model_8.png │ └── model_9.png ├── gradio_ootd.py ├── images_output ├── mask.jpg ├── out_hd_0.png ├── out_hd_1.png ├── out_hd_2.png └── out_hd_3.png ├── run_ootd.py └── utils_ootd.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.ckpt 2 | __pycache__/ 3 | .vscode/ 4 | *.pyc 5 | .uuid -------------------------------------------------------------------------------- /checkpoints/README.txt: -------------------------------------------------------------------------------- 1 | Put checkpoints here, including ootd, humanparsing, openpose and clip-vit-large-patch14 -------------------------------------------------------------------------------- /images/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/images/demo.png -------------------------------------------------------------------------------- /images/workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/images/workflow.png -------------------------------------------------------------------------------- /preprocess/humanparsing/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/preprocess/humanparsing/datasets/__init__.py -------------------------------------------------------------------------------- /preprocess/humanparsing/datasets/target_generation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | 4 | 5 | def generate_edge_tensor(label, edge_width=3): 6 | label = label.type(torch.cuda.FloatTensor) 7 | if len(label.shape) == 2: 8 | label = label.unsqueeze(0) 9 | n, h, w = label.shape 10 | edge = torch.zeros(label.shape, dtype=torch.float).cuda() 11 | # right 12 | edge_right = edge[:, 1:h, :] 13 | edge_right[(label[:, 1:h, :] != label[:, :h - 1, :]) & (label[:, 1:h, :] != 255) 14 | & (label[:, :h - 1, :] != 255)] = 1 15 | 16 | # up 17 | edge_up = edge[:, :, :w - 1] 18 | edge_up[(label[:, :, :w - 1] != label[:, :, 1:w]) 19 | & (label[:, :, :w - 1] != 255) 20 | & (label[:, :, 1:w] != 255)] = 1 21 | 22 | # upright 23 | edge_upright = edge[:, :h - 1, :w - 1] 24 | edge_upright[(label[:, :h - 1, :w - 1] != label[:, 1:h, 1:w]) 25 | & (label[:, :h - 1, :w - 1] != 255) 26 | & (label[:, 1:h, 1:w] != 255)] = 1 27 | 28 | # bottomright 29 | edge_bottomright = edge[:, :h - 1, 1:w] 30 | edge_bottomright[(label[:, :h - 1, 1:w] != label[:, 1:h, :w - 1]) 31 | & (label[:, :h - 1, 1:w] != 255) 32 | & (label[:, 1:h, :w - 1] != 255)] = 1 33 | 34 | # dilate the 1-pixel edges with an edge_width x edge_width ones kernel 35 | kernel = torch.ones((1, 1, edge_width, edge_width), dtype=torch.float).cuda() 36 | with torch.no_grad(): 37 | edge = edge.unsqueeze(1) 38 | # padding was hard-coded to 1, which only preserves the spatial size when edge_width == 3 39 | edge = F.conv2d(edge, kernel, stride=1, padding=edge_width // 2) 40 | edge[edge!=0] = 1 41 | edge = edge.squeeze() 42 | return edge 43 | --------------------------------------------------------------------------------
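[Editor's note: not part of the repository dump. A minimal usage sketch for `generate_edge_tensor` above. The dotted import path and input size are assumptions (run from the repository root), and a CUDA device is required because the function calls `.cuda()` internally.]

```python
import torch

# Assumed import path when running from the OOTDiffusion repository root.
from preprocess.humanparsing.datasets.target_generation import generate_edge_tensor

# Hypothetical 2-D human-parsing label map: one part index per pixel, 255 = ignore.
label = torch.randint(0, 20, (473, 473)).cuda()

edge = generate_edge_tensor(label, edge_width=3)
# `edge` is binary: 1 where neighboring pixels carry different part labels,
# thickened to about edge_width pixels by the ones-kernel convolution.
print(edge.shape)  # torch.Size([473, 473]) after the final squeeze()
```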
/preprocess/humanparsing/mhp_extension/detectron2/.flake8: -------------------------------------------------------------------------------- 1 | # This is an example .flake8 config, used when developing *Black* itself. 2 | # Keep in sync with setup.cfg which is used for source packages. 3 | 4 | [flake8] 5 | ignore = W503, E203, E221, C901, C408, E741 6 | max-line-length = 100 7 | max-complexity = 18 8 | select = B,C,E,F,W,T4,B9 9 | exclude = build,__init__.py 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Facebook has adopted a Code of Conduct that we expect project participants to adhere to. 4 | Please read the [full text](https://code.fb.com/codeofconduct/) 5 | so that you can understand what actions will and will not be tolerated. 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | Please select an issue template from 3 | https://github.com/facebookresearch/detectron2/issues/new/choose . 4 | 5 | Otherwise your issue will be closed. 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "🐛 Bugs" 3 | about: Report bugs in detectron2 4 | title: Please read & provide the following 5 | 6 | --- 7 | 8 | ## Instructions To Reproduce the 🐛 Bug: 9 | 10 | 1. what changes you made (`git diff`) or what code you wrote 11 | ``` 12 | 13 | ``` 14 | 2. what exact command you ran: 15 | 3. what you observed (including __full logs__): 16 | ``` 17 | 18 | ``` 19 | 4. please simplify the steps as much as possible so they do not require additional resources to 20 | run, such as a private dataset. 21 | 22 | ## Expected behavior: 23 | 24 | If there are no obvious errors in "what you observed" provided above, 25 | please tell us the expected behavior. 26 | 27 | ## Environment: 28 | 29 | Provide your environment information using the following command: 30 | ``` 31 | wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py 32 | ``` 33 | 34 | If your issue looks like an installation issue / environment issue, 35 | please first try to solve it yourself with the instructions in 36 | https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues 37 | --------------------------------------------------------------------------------
/preprocess/humanparsing/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | # require an issue template to be chosen 2 | blank_issues_enabled: false 3 | 4 | # Unexpected behaviors & bugs are split into two templates. 5 | # When they are one template, users think "it's not a bug" and don't choose the template. 6 | # 7 | # But the file name is still "unexpected-problems-bugs.md" so that old references 8 | # to this issue template still work. 9 | # It's OK since this template should be a superset of "bugs.md" (unexpected behaviors are a superset of bugs). 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F680Feature Request" 3 | about: Submit a proposal/request for a new detectron2 feature 4 | 5 | --- 6 | 7 | ## 🚀 Feature 8 | A clear and concise description of the feature proposal. 9 | 10 | 11 | ## Motivation & Examples 12 | 13 | Tell us why the feature is useful. 14 | 15 | Describe what the feature would look like if it were implemented. 16 | This is best demonstrated using **code examples** in addition to words. 17 | 18 | ## Note 19 | 20 | We only consider adding new features if they are relevant to many users. 21 | 22 | If you request the implementation of a research paper -- 23 | we only consider papers that have enough significance and prevalence in the object detection field. 24 | 25 | We do not take requests for most projects in the `projects/` directory, 26 | because they are research code releases that are mainly for other researchers to reproduce results. 27 | 28 | Instead of adding features inside detectron2, 29 | you can implement many features by [extending detectron2](https://detectron2.readthedocs.io/tutorials/extend.html). 30 | The [projects/](https://github.com/facebookresearch/detectron2/tree/master/projects/) directory contains many such examples. 31 | 32 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "❓How to do something?" 3 | about: How to do something using detectron2? What does an API do? 4 | 5 | --- 6 | 7 | ## ❓ How to do something using detectron2 8 | 9 | Describe what you want to do, including: 10 | 1. what inputs you will provide, if any: 11 | 2. what outputs you are expecting: 12 | 13 | ## ❓ What does an API do and how to use it? 14 | Please link to the API or documentation you're asking about from 15 | https://detectron2.readthedocs.io/ 16 | 17 | 18 | NOTE: 19 | 20 | 1. Only general answers are provided. 21 | If you want to ask about "why X did not work", please use the 22 | [Unexpected behaviors](https://github.com/facebookresearch/detectron2/issues/new/choose) issue template. 23 | 24 | 2. About how to implement new models / new dataloaders / new training logic, etc., check the documentation first. 25 | 26 | 3. We do not answer general machine learning / computer vision questions that are not specific to detectron2, such as how a model works, how to improve your training/make it converge, or what algorithm/methods can be used to achieve X.
27 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Unexpected behaviors" 3 | about: Run into unexpected behaviors when using detectron2 4 | title: Please read & provide the following 5 | 6 | --- 7 | 8 | If you do not know the root cause of the problem and wish someone to help you, please 9 | post according to this template: 10 | 11 | ## Instructions To Reproduce the Issue: 12 | 13 | 1. what changes you made (`git diff`) or what code you wrote 14 | ``` 15 | 16 | ``` 17 | 2. what exact command you ran: 18 | 3. what you observed (including __full logs__): 19 | ``` 20 | 21 | ``` 22 | 4. please simplify the steps as much as possible so they do not require additional resources to 23 | run, such as a private dataset. 24 | 25 | ## Expected behavior: 26 | 27 | If there are no obvious errors in "what you observed" provided above, 28 | please tell us the expected behavior. 29 | 30 | If you expect the model to converge / work better, note that we do not give suggestions 31 | on how to train a new model. 32 | We will help with it only in one of these two conditions: 33 | (1) You're unable to reproduce the results in the detectron2 model zoo. 34 | (2) It indicates a detectron2 bug. 35 | 36 | ## Environment: 37 | 38 | Provide your environment information using the following command: 39 | ``` 40 | wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py 41 | ``` 42 | 43 | If your issue looks like an installation issue / environment issue, 44 | please first try to solve it yourself with the instructions in 45 | https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues 46 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | Thanks for your contribution! 2 | 3 | If you're sending a large PR (e.g., >50 lines), 4 | please open an issue first about the feature / bug, and indicate how you want to contribute. 5 | 6 | Before submitting a PR, please run `dev/linter.sh` to lint the code. 7 | 8 | See https://detectron2.readthedocs.io/notes/contributing.html#pull-requests 9 | about how we handle PRs.
10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/.gitignore: -------------------------------------------------------------------------------- 1 | # output dir 2 | output 3 | instant_test_output 4 | inference_test_output 5 | 6 | 7 | *.jpg 8 | *.png 9 | *.txt 10 | *.json 11 | *.diff 12 | 13 | # compilation and distribution 14 | __pycache__ 15 | _ext 16 | *.pyc 17 | *.so 18 | detectron2.egg-info/ 19 | build/ 20 | dist/ 21 | wheels/ 22 | 23 | # pytorch/python/numpy formats 24 | *.pth 25 | *.pkl 26 | *.npy 27 | 28 | # ipython/jupyter notebooks 29 | *.ipynb 30 | **/.ipynb_checkpoints/ 31 | 32 | # Editor temporaries 33 | *.swn 34 | *.swo 35 | *.swp 36 | *~ 37 | 38 | # editor settings 39 | .idea 40 | .vscode 41 | 42 | # project dirs 43 | /detectron2/model_zoo/configs 44 | /datasets 45 | /projects/*/datasets 46 | /models 47 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Base-RCNN-C4.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | RPN: 4 | PRE_NMS_TOPK_TEST: 6000 5 | POST_NMS_TOPK_TEST: 1000 6 | ROI_HEADS: 7 | NAME: "Res5ROIHeads" 8 | DATASETS: 9 | TRAIN: ("coco_2017_train",) 10 | TEST: ("coco_2017_val",) 11 | SOLVER: 12 | IMS_PER_BATCH: 16 13 | BASE_LR: 0.02 14 | STEPS: (60000, 80000) 15 | MAX_ITER: 90000 16 | INPUT: 17 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 18 | VERSION: 2 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Base-RCNN-DilatedC5.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | RESNETS: 4 | OUT_FEATURES: ["res5"] 5 | RES5_DILATION: 2 6 | RPN: 7 | IN_FEATURES: ["res5"] 8 | PRE_NMS_TOPK_TEST: 6000 9 | POST_NMS_TOPK_TEST: 1000 10 | ROI_HEADS: 11 | NAME: "StandardROIHeads" 12 | IN_FEATURES: ["res5"] 13 | ROI_BOX_HEAD: 14 | NAME: "FastRCNNConvFCHead" 15 | NUM_FC: 2 16 | POOLER_RESOLUTION: 7 17 | ROI_MASK_HEAD: 18 | NAME: "MaskRCNNConvUpsampleHead" 19 | NUM_CONV: 4 20 | POOLER_RESOLUTION: 14 21 | DATASETS: 22 | TRAIN: ("coco_2017_train",) 23 | TEST: ("coco_2017_val",) 24 | SOLVER: 25 | IMS_PER_BATCH: 16 26 | BASE_LR: 0.02 27 | STEPS: (60000, 80000) 28 | MAX_ITER: 90000 29 | INPUT: 30 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 31 | VERSION: 2 32 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Base-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | BACKBONE: 4 | NAME: "build_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 7 | FPN: 8 | IN_FEATURES: ["res2", "res3", "res4", "res5"] 9 | ANCHOR_GENERATOR: 10 | SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map 11 | ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) 12 | RPN: 13 | IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] 14 | PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level 15 | PRE_NMS_TOPK_TEST: 1000 # Per FPN level 16 | # Detectron1 uses 2000 proposals per-batch, 17 | # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) 18 | # which is approximately 1000 proposals per-image 
since the default batch size for FPN is 2. 19 | POST_NMS_TOPK_TRAIN: 1000 20 | POST_NMS_TOPK_TEST: 1000 21 | ROI_HEADS: 22 | NAME: "StandardROIHeads" 23 | IN_FEATURES: ["p2", "p3", "p4", "p5"] 24 | ROI_BOX_HEAD: 25 | NAME: "FastRCNNConvFCHead" 26 | NUM_FC: 2 27 | POOLER_RESOLUTION: 7 28 | ROI_MASK_HEAD: 29 | NAME: "MaskRCNNConvUpsampleHead" 30 | NUM_CONV: 4 31 | POOLER_RESOLUTION: 14 32 | DATASETS: 33 | TRAIN: ("coco_2017_train",) 34 | TEST: ("coco_2017_val",) 35 | SOLVER: 36 | IMS_PER_BATCH: 16 37 | BASE_LR: 0.02 38 | STEPS: (60000, 80000) 39 | MAX_ITER: 90000 40 | INPUT: 41 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 42 | VERSION: 2 43 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Base-RetinaNet.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "RetinaNet" 3 | BACKBONE: 4 | NAME: "build_retinanet_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res3", "res4", "res5"] 7 | ANCHOR_GENERATOR: 8 | SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"] 9 | FPN: 10 | IN_FEATURES: ["res3", "res4", "res5"] 11 | RETINANET: 12 | IOU_THRESHOLDS: [0.4, 0.5] 13 | IOU_LABELS: [0, -1, 1] 14 | DATASETS: 15 | TRAIN: ("coco_2017_train",) 16 | TEST: ("coco_2017_val",) 17 | SOLVER: 18 | IMS_PER_BATCH: 16 19 | BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate 20 | STEPS: (60000, 80000) 21 | MAX_ITER: 90000 22 | INPUT: 23 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 24 | VERSION: 2 25 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | LOAD_PROPOSALS: True 6 | RESNETS: 7 | DEPTH: 50 8 | PROPOSAL_GENERATOR: 9 | NAME: "PrecomputedProposals" 10 | DATASETS: 11 | TRAIN: ("coco_2017_train",) 12 | PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_train_box_proposals_21bc3a.pkl", ) 13 | TEST: ("coco_2017_val",) 14 | PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) 15 | DATALOADER: 16 | # proposals are part of the dataset_dicts, and take a lot of RAM 17 | NUM_WORKERS: 2 18 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | 
MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | 
MODEL: 3 | MASK_ON: False 4 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 5 | PIXEL_STD: [57.375, 57.120, 58.395] 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | SOLVER: 12 | STEPS: (210000, 250000) 13 | MAX_ITER: 270000 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RetinaNet.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RetinaNet.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RetinaNet.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "ProposalNetwork" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: False 6 | RESNETS: 7 | DEPTH: 50 8 | RPN: 9 | PRE_NMS_TOPK_TEST: 12000 10 | POST_NMS_TOPK_TEST: 2000 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "ProposalNetwork" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: False 6 | RESNETS: 7 | DEPTH: 50 8 | RPN: 9 | POST_NMS_TOPK_TEST: 2000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- 
/preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 5 | PIXEL_STD: [57.375, 57.120, 58.395] 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | SOLVER: 12 | STEPS: (210000, 250000) 13 | MAX_ITER: 270000 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | KEYPOINT_ON: True 4 | ROI_HEADS: 5 | NUM_CLASSES: 1 6 | ROI_BOX_HEAD: 7 | SMOOTH_L1_BETA: 0.5 # Keypoint AP degrades (though box AP improves) when using plain L1 loss 8 | RPN: 9 | # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. 10 | # 1000 proposals per-image is found to hurt box AP. 11 | # Therefore we increase it to 1500 per-image. 12 | POST_NMS_TOPK_TRAIN: 1500 13 | DATASETS: 14 | TRAIN: ("keypoints_coco_2017_train",) 15 | TEST: ("keypoints_coco_2017_val",) 16 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 4 | PIXEL_STD: [57.375, 57.120, 58.395] 5 | RESNETS: 6 | STRIDE_IN_1X1: False # this is a C2 model 7 | NUM_GROUPS: 32 8 | WIDTH_PER_GROUP: 8 9 | DEPTH: 101 10 | SOLVER: 11 | STEPS: (210000, 250000) 12 | MAX_ITER: 270000 13 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml: 
-------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "PanopticFPN" 4 | MASK_ON: True 5 | SEM_SEG_HEAD: 6 | LOSS_WEIGHT: 0.5 7 | DATASETS: 8 | TRAIN: ("coco_2017_train_panoptic_separated",) 9 | TEST: ("coco_2017_val_panoptic_separated",) 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Panoptic-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Panoptic-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Panoptic-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | # WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | # For better, more stable performance initialize from COCO 5 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" 6 | MASK_ON: True 7 | ROI_HEADS: 8 | NUM_CLASSES: 8 9 | # This is similar to the setting used in Mask R-CNN paper, Appendix A 10 | # But there are some differences, e.g., we did not initialize the output 11 | # layer using the corresponding classes from COCO 12 | INPUT: 13 | MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) 14 | MIN_SIZE_TRAIN_SAMPLING: "choice" 15 | MIN_SIZE_TEST: 1024 16 | MAX_SIZE_TRAIN: 2048 17 | MAX_SIZE_TEST: 2048 18 | DATASETS: 19 | TRAIN: ("cityscapes_fine_instance_seg_train",) 20 | TEST: ("cityscapes_fine_instance_seg_val",) 21 | SOLVER: 22 | BASE_LR: 0.01 23 | STEPS: (18000,) 24 | MAX_ITER: 24000 25 | IMS_PER_BATCH: 8 26 | TEST: 27 | EVAL_PERIOD: 8000 28 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | # Detectron1 uses smooth L1 loss with some magic beta values. 8 | # The defaults are changed to L1 loss in Detectron2. 
9 | RPN: 10 | SMOOTH_L1_BETA: 0.1111 11 | ROI_BOX_HEAD: 12 | SMOOTH_L1_BETA: 1.0 13 | POOLER_SAMPLING_RATIO: 2 14 | POOLER_TYPE: "ROIAlign" 15 | INPUT: 16 | # no scale augmentation 17 | MIN_SIZE_TRAIN: (800, ) 18 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 1 9 | ROI_KEYPOINT_HEAD: 10 | POOLER_RESOLUTION: 14 11 | POOLER_SAMPLING_RATIO: 2 12 | POOLER_TYPE: "ROIAlign" 13 | # Detectron1 uses smooth L1 loss with some magic beta values. 14 | # The defaults are changed to L1 loss in Detectron2. 15 | ROI_BOX_HEAD: 16 | SMOOTH_L1_BETA: 1.0 17 | POOLER_SAMPLING_RATIO: 2 18 | POOLER_TYPE: "ROIAlign" 19 | RPN: 20 | SMOOTH_L1_BETA: 0.1111 21 | # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2 22 | # 1000 proposals per-image is found to hurt box AP. 23 | # Therefore we increase it to 1500 per-image. 24 | POST_NMS_TOPK_TRAIN: 1500 25 | DATASETS: 26 | TRAIN: ("keypoints_coco_2017_train",) 27 | TEST: ("keypoints_coco_2017_val",) 28 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | # Detectron1 uses smooth L1 loss with some magic beta values. 8 | # The defaults are changed to L1 loss in Detectron2. 
9 | RPN: 10 | SMOOTH_L1_BETA: 0.1111 11 | ROI_BOX_HEAD: 12 | SMOOTH_L1_BETA: 1.0 13 | POOLER_SAMPLING_RATIO: 2 14 | POOLER_TYPE: "ROIAlign" 15 | ROI_MASK_HEAD: 16 | POOLER_SAMPLING_RATIO: 2 17 | POOLER_TYPE: "ROIAlign" 18 | INPUT: 19 | # no scale augmentation 20 | MIN_SIZE_TRAIN: (800, ) 21 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | ROI_HEADS: 8 | NUM_CLASSES: 1230 9 | SCORE_THRESH_TEST: 0.0001 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | DATASETS: 13 | TRAIN: ("lvis_v0.5_train",) 14 | TEST: ("lvis_v0.5_val",) 15 | TEST: 16 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 17 | DATALOADER: 18 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 19 | REPEAT_THRESHOLD: 0.001 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 1230 9 | SCORE_THRESH_TEST: 0.0001 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | DATASETS: 13 | TRAIN: ("lvis_v0.5_train",) 14 | TEST: ("lvis_v0.5_val",) 15 | TEST: 16 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 17 | DATALOADER: 18 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 19 | REPEAT_THRESHOLD: 0.001 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 4 | PIXEL_STD: [57.375, 57.120, 58.395] 5 | MASK_ON: True 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | ROI_HEADS: 12 | NUM_CLASSES: 1230 13 | SCORE_THRESH_TEST: 0.0001 14 | INPUT: 15 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 16 | DATASETS: 17 | TRAIN: ("lvis_v0.5_train",) 18 | TEST: ("lvis_v0.5_val",) 19 | TEST: 20 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 21 | DATALOADER: 22 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 23 | REPEAT_THRESHOLD: 0.001 24 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NAME: CascadeROIHeads 9 | ROI_BOX_HEAD: 10 | CLS_AGNOSTIC_BBOX_REG: True 11 | RPN: 12 | POST_NMS_TOPK_TRAIN: 2000 13 | -------------------------------------------------------------------------------- 
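Editor's note: every YAML in this tree composes through the `_BASE_` key — a file inherits all values from the config it names (resolved recursively) and then applies its own keys as overrides, which is why most files above are only a few lines long. A minimal sketch of how such a file is consumed via detectron2's standard config API; the YAML path below is illustrative:

```python
from detectron2.config import get_cfg

cfg = get_cfg()  # start from the library-wide defaults
# merge_from_file() follows the _BASE_ chain first, then applies this
# file's own keys on top of the inherited values.
cfg.merge_from_file("configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml")
print(cfg.MODEL.ROI_HEADS.NAME)  # -> "CascadeROIHeads"
```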
/preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NAME: CascadeROIHeads 9 | ROI_BOX_HEAD: 10 | CLS_AGNOSTIC_BBOX_REG: True 11 | RPN: 12 | POST_NMS_TOPK_TRAIN: 2000 13 | SOLVER: 14 | STEPS: (210000, 250000) 15 | MAX_ITER: 270000 16 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | WEIGHTS: "catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k" 5 | RESNETS: 6 | STRIDE_IN_1X1: False # this is a C2 model 7 | NUM_GROUPS: 32 8 | WIDTH_PER_GROUP: 8 9 | DEPTH: 152 10 | DEFORM_ON_PER_STAGE: [False, True, True, True] 11 | ROI_HEADS: 12 | NAME: "CascadeROIHeads" 13 | ROI_BOX_HEAD: 14 | NAME: "FastRCNNConvFCHead" 15 | NUM_CONV: 4 16 | NUM_FC: 1 17 | NORM: "GN" 18 | CLS_AGNOSTIC_BBOX_REG: True 19 | ROI_MASK_HEAD: 20 | NUM_CONV: 8 21 | NORM: "GN" 22 | RPN: 23 | POST_NMS_TOPK_TRAIN: 2000 24 | SOLVER: 25 | IMS_PER_BATCH: 128 26 | STEPS: (35000, 45000) 27 | MAX_ITER: 50000 28 | BASE_LR: 0.16 29 | INPUT: 30 | MIN_SIZE_TRAIN: (640, 864) 31 | MIN_SIZE_TRAIN_SAMPLING: "range" 32 | MAX_SIZE_TRAIN: 1440 33 | CROP: 34 | ENABLED: True 35 | TEST: 36 | EVAL_PERIOD: 2500 37 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv_parsing.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | # WEIGHTS: "catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k" 5 | WEIGHTS: "model_0039999_e76410.pkl" 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 152 11 | DEFORM_ON_PER_STAGE: [False, True, True, True] 12 | ROI_HEADS: 13 | NAME: "CascadeROIHeads" 14 | NUM_CLASSES: 1 15 | ROI_BOX_HEAD: 16 | NAME: "FastRCNNConvFCHead" 17 | NUM_CONV: 4 18 | NUM_FC: 1 19 | NORM: "GN" 20 | CLS_AGNOSTIC_BBOX_REG: True 21 | ROI_MASK_HEAD: 22 | NUM_CONV: 8 23 | NORM: "GN" 24 | RPN: 25 | POST_NMS_TOPK_TRAIN: 2000 26 | SOLVER: 27 | # IMS_PER_BATCH: 128 28 | IMS_PER_BATCH: 1 29 | STEPS: (35000, 45000) 30 | MAX_ITER: 50000 31 | BASE_LR: 0.16 32 | INPUT: 33 | MIN_SIZE_TRAIN: (640, 864) 34 | MIN_SIZE_TRAIN_SAMPLING: "range" 35 | MAX_SIZE_TRAIN: 1440 36 | CROP: 37 | ENABLED: True 38 | TEST: 39 | EVAL_PERIOD: 2500 40 | DATASETS: 41 | TRAIN: ("CIHP_train","VIP_trainval") 42 | TEST: ("CIHP_val",) 43 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/demo.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | ROI_HEADS: 5 | NMS_THRESH_TEST: 0.95 6 | SCORE_THRESH_TEST: 0.5 7 | NUM_CLASSES: 1 8 | SOLVER: 9 | IMS_PER_BATCH: 1 10 | STEPS: (30000, 45000) 11 | MAX_ITER: 50000 12 | BASE_LR: 0.02 13 | INPUT: 14 | MIN_SIZE_TRAIN: (640, 864) 15 | 
MIN_SIZE_TRAIN_SAMPLING: "range" 16 | MAX_SIZE_TRAIN: 1440 17 | CROP: 18 | ENABLED: True 19 | TEST: 20 | AUG: 21 | ENABLED: True 22 | DATASETS: 23 | TRAIN: ("demo_train",) 24 | TEST: ("demo_val",) 25 | OUTPUT_DIR: "../../data/DemoDataset/detectron2_prediction" 26 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_BOX_HEAD: 8 | CLS_AGNOSTIC_BBOX_REG: True 9 | ROI_MASK_HEAD: 10 | CLS_AGNOSTIC_MASK: True 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 8 | DEFORM_MODULATED: False 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 8 | DEFORM_MODULATED: False 9 | SOLVER: 10 | STEPS: (210000, 250000) 11 | MAX_ITER: 270000 12 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-50-GN" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | NORM: "GN" 8 | STRIDE_IN_1X1: False 9 | FPN: 10 | NORM: "GN" 11 | ROI_BOX_HEAD: 12 | NAME: "FastRCNNConvFCHead" 13 | NUM_CONV: 4 14 | NUM_FC: 1 15 | NORM: "GN" 16 | ROI_MASK_HEAD: 17 | NORM: "GN" 18 | SOLVER: 19 | # 3x schedule 20 | STEPS: (210000, 250000) 21 | MAX_ITER: 270000 22 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | NORM: "SyncBN" 8 | STRIDE_IN_1X1: True 9 | FPN: 10 | NORM: "SyncBN" 11 | ROI_BOX_HEAD: 12 | NAME: "FastRCNNConvFCHead" 13 | NUM_CONV: 4 14 | NUM_FC: 1 15 | NORM: "SyncBN" 16 | ROI_MASK_HEAD: 17 | NORM: "SyncBN" 18 | SOLVER: 19 | # 3x schedule 20 | STEPS: (210000, 250000) 21 | MAX_ITER: 270000 22 | TEST: 23 | PRECISE_BN: 24 | ENABLED: True 25 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml: 
-------------------------------------------------------------------------------- 1 | # A large PanopticFPN for demo purposes. 2 | # Use GN on backbone to support semantic seg. 3 | # Use Cascade + Deform Conv to improve localization. 4 | _BASE_: "../COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml" 5 | MODEL: 6 | WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-101-GN" 7 | RESNETS: 8 | DEPTH: 101 9 | NORM: "GN" 10 | DEFORM_ON_PER_STAGE: [False, True, True, True] 11 | STRIDE_IN_1X1: False 12 | FPN: 13 | NORM: "GN" 14 | ROI_HEADS: 15 | NAME: CascadeROIHeads 16 | ROI_BOX_HEAD: 17 | CLS_AGNOSTIC_BBOX_REG: True 18 | ROI_MASK_HEAD: 19 | NORM: "GN" 20 | RPN: 21 | POST_NMS_TOPK_TRAIN: 2000 22 | SOLVER: 23 | STEPS: (105000, 125000) 24 | MAX_ITER: 135000 25 | IMS_PER_BATCH: 32 26 | BASE_LR: 0.04 27 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/parsing_finetune_cihp.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | WEIGHTS: "model_0039999_e76410.pkl" 5 | ROI_HEADS: 6 | NUM_CLASSES: 1 7 | SOLVER: 8 | IMS_PER_BATCH: 16 9 | STEPS: (140000, 180000) 10 | MAX_ITER: 200000 11 | BASE_LR: 0.02 12 | INPUT: 13 | MIN_SIZE_TRAIN: (640, 864) 14 | MIN_SIZE_TRAIN_SAMPLING: "range" 15 | MAX_SIZE_TRAIN: 1440 16 | CROP: 17 | ENABLED: True 18 | TEST: 19 | EVAL_PERIOD: 0 20 | DATASETS: 21 | TRAIN: ("CIHP_train",) 22 | TEST: ("CIHP_val",) 23 | OUTPUT_DIR: "./finetune_output" 24 | 25 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/parsing_inference.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | WEIGHTS: "./finetune_output/model_final.pth" 5 | ROI_HEADS: 6 | NMS_THRESH_TEST: 0.95 7 | SCORE_THRESH_TEST: 0.5 8 | NUM_CLASSES: 1 9 | SOLVER: 10 | IMS_PER_BATCH: 1 11 | STEPS: (30000, 45000) 12 | MAX_ITER: 50000 13 | BASE_LR: 0.02 14 | INPUT: 15 | MIN_SIZE_TRAIN: (640, 864) 16 | MIN_SIZE_TRAIN_SAMPLING: "range" 17 | MAX_SIZE_TRAIN: 1440 18 | CROP: 19 | ENABLED: True 20 | TEST: 21 | AUG: 22 | ENABLED: True 23 | DATASETS: 24 | TRAIN: ("CIHP_trainval",) 25 | TEST: ("CIHP_test",) 26 | OUTPUT_DIR: "./inference_output" 27 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" 2 | MODEL: 3 | # Train from random initialization. 4 | WEIGHTS: "" 5 | # It makes sense to divide by STD when training from scratch 6 | # But it seems to make no difference on the results and C2's models didn't do this. 7 | # So we keep things consistent with C2. 8 | # PIXEL_STD: [57.375, 57.12, 58.395] 9 | MASK_ON: True 10 | BACKBONE: 11 | FREEZE_AT: 0 12 | # NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 13 | # to learn what you need for training from scratch.
14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" 2 | MODEL: 3 | PIXEL_STD: [57.375, 57.12, 58.395] 4 | WEIGHTS: "" 5 | MASK_ON: True 6 | RESNETS: 7 | STRIDE_IN_1X1: False 8 | BACKBONE: 9 | FREEZE_AT: 0 10 | SOLVER: 11 | # 9x schedule 12 | IMS_PER_BATCH: 64 # 4x the standard 13 | STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k 14 | MAX_ITER: 202500 # 90k * 9 / 4 15 | BASE_LR: 0.08 16 | TEST: 17 | EVAL_PERIOD: 2500 18 | # NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 19 | # to learn what you need for training from scratch. 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "mask_rcnn_R_50_FPN_3x_syncbn.yaml" 2 | MODEL: 3 | PIXEL_STD: [57.375, 57.12, 58.395] 4 | WEIGHTS: "" 5 | MASK_ON: True 6 | RESNETS: 7 | STRIDE_IN_1X1: False 8 | BACKBONE: 9 | FREEZE_AT: 0 10 | SOLVER: 11 | # 9x schedule 12 | IMS_PER_BATCH: 64 # 4x the standard 13 | STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k 14 | MAX_ITER: 202500 # 90k * 9 / 4 15 | BASE_LR: 0.08 16 | TEST: 17 | EVAL_PERIOD: 2500 18 | # NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 19 | # to learn what you need for training from scratch. 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/Misc/semantic_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TRAIN: ("coco_2017_train_panoptic_stuffonly",) 9 | TEST: ("coco_2017_val_panoptic_stuffonly",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 20 9 | INPUT: 10 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 11 | MIN_SIZE_TEST: 800 12 | DATASETS: 13 | TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') 14 | TEST: ('voc_2007_test',) 15 | SOLVER: 16 | STEPS: (12000, 16000) 17 | MAX_ITER: 18000 # 17.4 epochs 18 | WARMUP_ITERS: 100 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 20 9 | INPUT: 10 | 
MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 11 | MIN_SIZE_TEST: 800 12 | DATASETS: 13 | TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') 14 | TEST: ('voc_2007_test',) 15 | SOLVER: 16 | STEPS: (12000, 16000) 17 | MAX_ITER: 18000 # 17.4 epochs 18 | WARMUP_ITERS: 100 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/my_Base-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | BACKBONE: 4 | NAME: "build_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 7 | FPN: 8 | IN_FEATURES: ["res2", "res3", "res4", "res5"] 9 | ANCHOR_GENERATOR: 10 | SIZES: [[32], [64], [128], [256], [512]] # One size for each input feature map 11 | ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all input feature maps) 12 | RPN: 13 | IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] 14 | PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level 15 | PRE_NMS_TOPK_TEST: 1000 # Per FPN level 16 | # Detectron1 uses 2000 proposals per-batch, 17 | # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) 18 | # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 19 | POST_NMS_TOPK_TRAIN: 1000 20 | POST_NMS_TOPK_TEST: 1000 21 | ROI_HEADS: 22 | NAME: "StandardROIHeads" 23 | IN_FEATURES: ["p2", "p3", "p4", "p5"] 24 | ROI_BOX_HEAD: 25 | NAME: "FastRCNNConvFCHead" 26 | NUM_FC: 2 27 | POOLER_RESOLUTION: 7 28 | ROI_MASK_HEAD: 29 | NAME: "MaskRCNNConvUpsampleHead" 30 | NUM_CONV: 4 31 | POOLER_RESOLUTION: 14 32 | DATASETS: 33 | TRAIN: ("coco_2017_train",) 34 | TEST: ("coco_2017_val",) 35 | SOLVER: 36 | IMS_PER_BATCH: 2 37 | BASE_LR: 0.02 38 | STEPS: (60000, 80000) 39 | MAX_ITER: 90000 40 | INPUT: 41 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 42 | VERSION: 2 43 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/README.md: -------------------------------------------------------------------------------- 1 | These are quick configs for performance or accuracy regression tracking purposes.
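Editor's note: in the `*_inference_acc_test.yaml` and `*_training_acc_test.yaml` configs that follow, the pass criterion is encoded in `TEST.EXPECTED_RESULTS`, where each entry reads `[task, metric, expected_value, tolerance]`. A hedged sketch of the comparison these entries drive (the actual check lives in `detectron2.evaluation.testing.verify_results`; the numbers below are illustrative):

```python
# Each EXPECTED_RESULTS entry is [task, metric, expected_value, tolerance].
expected = [["bbox", "AP", 50.18, 0.02], ["segm", "AP", 43.87, 0.02]]
# "actual" stands in for the nested results dict an evaluator returns.
actual = {"bbox": {"AP": 50.19}, "segm": {"AP": 43.88}}

ok = all(abs(actual[task][metric] - value) <= tol
         for task, metric, value, tol in expected)
assert ok, "regression: a metric drifted beyond its tolerance"
```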
2 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://Misc/cascade_mask_rcnn_R_50_FPN_3x/144998488/model_final_480dd8.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 50.18, 0.02], ["segm", "AP", 43.87, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" 2 | DATASETS: 3 | TRAIN: ("coco_2017_val_100",) 4 | TEST: ("coco_2017_val_100",) 5 | SOLVER: 6 | BASE_LR: 0.005 7 | STEPS: (30,) 8 | MAX_ITER: 40 9 | IMS_PER_BATCH: 4 10 | DATALOADER: 11 | NUM_WORKERS: 2 12 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/model_final_e5f7ce.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 45.70, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | DATASETS: 5 | TRAIN: ("coco_2017_val_100",) 6 | PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) 7 | TEST: ("coco_2017_val_100",) 8 | PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) 9 | SOLVER: 10 | BASE_LR: 0.005 11 | STEPS: (30,) 12 | MAX_ITER: 40 13 | IMS_PER_BATCH: 4 14 | DATALOADER: 15 | NUM_WORKERS: 2 16 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl" 4 | DATASETS: 5 | TEST: ("keypoints_coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 52.47, 0.02], ["keypoints", "AP", 67.36, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | DATASETS: 6 | TRAIN: ("keypoints_coco_2017_val_100",) 7 | TEST: ("keypoints_coco_2017_val_100",) 8 | SOLVER: 9 | BASE_LR: 0.005 10 | STEPS: (30,) 11 | MAX_ITER: 40 12 | IMS_PER_BATCH: 4 13 | DATALOADER: 14 | NUM_WORKERS: 2 15 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | BATCH_SIZE_PER_IMAGE: 256 9 | NUM_CLASSES: 1 10 | ROI_KEYPOINT_HEAD: 11 | POOLER_RESOLUTION: 14 12 | POOLER_SAMPLING_RATIO: 2 13 | NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: False 14 | LOSS_WEIGHT: 4.0 15 | ROI_BOX_HEAD: 16 | SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss 17 | RPN: 18 | SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss 19 | DATASETS: 20 | TRAIN: ("keypoints_coco_2017_val",) 21 | TEST: ("keypoints_coco_2017_val",) 22 | INPUT: 23 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 24 | SOLVER: 25 | WARMUP_FACTOR: 0.33333333 26 | WARMUP_ITERS: 100 27 | STEPS: (5500, 5800) 28 | MAX_ITER: 6000 29 | TEST: 30 | EXPECTED_RESULTS: [["bbox", "AP", 55.35, 1.0], ["keypoints", "AP", 76.91, 1.0]] 31 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | BATCH_SIZE_PER_IMAGE: 256 9 | NUM_CLASSES: 1 10 | ROI_KEYPOINT_HEAD: 11 | POOLER_RESOLUTION: 14 12 | POOLER_SAMPLING_RATIO: 2 13 | ROI_BOX_HEAD: 14 | SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss 15 | RPN: 16 | SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss 17 | DATASETS: 18 | TRAIN: ("keypoints_coco_2017_val",) 19 | TEST: ("keypoints_coco_2017_val",) 20 | INPUT: 21 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 22 | SOLVER: 23 | WARMUP_FACTOR: 0.33333333 24 | WARMUP_ITERS: 100 25 | STEPS: (5500, 5800) 26 | MAX_ITER: 6000 27 | TEST: 28 | EXPECTED_RESULTS: [["bbox", "AP", 53.5, 1.0], ["keypoints", "AP", 72.4, 1.0]] 29 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | DATASETS: 6 | TRAIN: ("coco_2017_val_100",) 7 | TEST: ("coco_2017_val_100",) 8 | SOLVER: 9 | BASE_LR: 0.001 10 | STEPS: (30,) 11 | MAX_ITER: 40 12 | IMS_PER_BATCH: 4 13 | CLIP_GRADIENTS: 14 | ENABLED: True 15 | CLIP_TYPE: "value" 16 | CLIP_VALUE: 1.0 17 | DATALOADER: 18 | NUM_WORKERS: 2 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml: 
-------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/model_final_4ce675.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 47.37, 0.02], ["segm", "AP", 40.99, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | DATASETS: 6 | TRAIN: ("coco_2017_val_100",) 7 | TEST: ("coco_2017_val_100",) 8 | SOLVER: 9 | BASE_LR: 0.001 10 | STEPS: (30,) 11 | MAX_ITER: 40 12 | IMS_PER_BATCH: 4 13 | DATALOADER: 14 | NUM_WORKERS: 2 15 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | ROI_HEADS: 5 | BATCH_SIZE_PER_IMAGE: 256 6 | MASK_ON: True 7 | DATASETS: 8 | TRAIN: ("coco_2017_val",) 9 | TEST: ("coco_2017_val",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (600,) 12 | MAX_SIZE_TRAIN: 1000 13 | MIN_SIZE_TEST: 800 14 | MAX_SIZE_TEST: 1000 15 | SOLVER: 16 | IMS_PER_BATCH: 8 # base uses 16 17 | WARMUP_FACTOR: 0.33333 18 | WARMUP_ITERS: 100 19 | STEPS: (11000, 11600) 20 | MAX_ITER: 12000 21 | TEST: 22 | EXPECTED_RESULTS: [["bbox", "AP", 41.88, 0.7], ["segm", "AP", 33.79, 0.5]] 23 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 47.44, 0.02], ["segm", "AP", 42.94, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 47.34, 0.02], ["segm", "AP", 42.67, 0.02], ["bbox_TTA", "AP", 49.11, 0.02], ["segm_TTA", "AP", 45.04, 0.02]] 8 | AUG: 9 | ENABLED: True 10 | MIN_SIZES: (700, 800) # to save some time 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: 
"../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | DATASETS: 6 | TRAIN: ("coco_2017_val_100",) 7 | TEST: ("coco_2017_val_100",) 8 | SOLVER: 9 | BASE_LR: 0.005 10 | STEPS: (30,) 11 | MAX_ITER: 40 12 | IMS_PER_BATCH: 4 13 | DATALOADER: 14 | NUM_WORKERS: 2 15 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | ROI_HEADS: 5 | BATCH_SIZE_PER_IMAGE: 256 6 | MASK_ON: True 7 | DATASETS: 8 | TRAIN: ("coco_2017_val",) 9 | TEST: ("coco_2017_val",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (600,) 12 | MAX_SIZE_TRAIN: 1000 13 | MIN_SIZE_TEST: 800 14 | MAX_SIZE_TEST: 1000 15 | SOLVER: 16 | WARMUP_FACTOR: 0.3333333 17 | WARMUP_ITERS: 100 18 | STEPS: (5500, 5800) 19 | MAX_ITER: 6000 20 | TEST: 21 | EXPECTED_RESULTS: [["bbox", "AP", 42.0, 1.6], ["segm", "AP", 35.4, 1.25]] 22 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100_panoptic_separated",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 46.47, 0.02], ["segm", "AP", 43.39, 0.02], ["sem_seg", "mIoU", 42.55, 0.02], ["panoptic_seg", "PQ", 38.99, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "PanopticFPN" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: True 6 | RESNETS: 7 | DEPTH: 50 8 | SEM_SEG_HEAD: 9 | LOSS_WEIGHT: 0.5 10 | DATASETS: 11 | TRAIN: ("coco_2017_val_100_panoptic_separated",) 12 | TEST: ("coco_2017_val_100_panoptic_separated",) 13 | SOLVER: 14 | BASE_LR: 0.005 15 | STEPS: (30,) 16 | MAX_ITER: 40 17 | IMS_PER_BATCH: 4 18 | DATALOADER: 19 | NUM_WORKERS: 1 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "PanopticFPN" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: True 6 | RESNETS: 7 | DEPTH: 50 8 | SEM_SEG_HEAD: 9 | LOSS_WEIGHT: 0.5 10 | DATASETS: 11 | TRAIN: ("coco_2017_val_panoptic_separated",) 12 | TEST: ("coco_2017_val_panoptic_separated",) 13 | SOLVER: 14 | BASE_LR: 0.01 15 | WARMUP_FACTOR: 0.001 16 | WARMUP_ITERS: 500 17 | STEPS: (5500,) 18 | MAX_ITER: 7000 19 | TEST: 20 | EXPECTED_RESULTS: [["bbox", "AP", 46.70, 1.1], ["segm", "AP", 38.73, 0.7], ["sem_seg", "mIoU", 64.73, 1.2], ["panoptic_seg", "PQ", 48.13, 0.8]] 21 | 
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/retinanet_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Detection/retinanet_R_50_FPN_3x/137849486/model_final_4cafe0.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 44.36, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/retinanet_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | DATASETS: 5 | TRAIN: ("coco_2017_val_100",) 6 | TEST: ("coco_2017_val_100",) 7 | SOLVER: 8 | BASE_LR: 0.005 9 | STEPS: (30,) 10 | MAX_ITER: 40 11 | IMS_PER_BATCH: 4 12 | DATALOADER: 13 | NUM_WORKERS: 2 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["box_proposals", "AR@1000", 58.16, 0.02]] 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | DATASETS: 5 | TRAIN: ("coco_2017_val_100",) 6 | TEST: ("coco_2017_val_100",) 7 | SOLVER: 8 | STEPS: (30,) 9 | MAX_ITER: 40 10 | BASE_LR: 0.005 11 | IMS_PER_BATCH: 4 12 | DATALOADER: 13 | NUM_WORKERS: 2 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://semantic_R_50_FPN_1x/111802073/model_final_c18079783c55a94968edc28b7101c5f0.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TEST: ("coco_2017_val_100_panoptic_stuffonly",) 9 | TEST: 10 | EXPECTED_RESULTS: [["sem_seg", "mIoU", 39.53, 0.02], ["sem_seg", "mACC", 51.50, 0.02]] 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TRAIN: ("coco_2017_val_100_panoptic_stuffonly",) 9 | TEST: 
("coco_2017_val_100_panoptic_stuffonly",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | SOLVER: 13 | BASE_LR: 0.005 14 | STEPS: (30,) 15 | MAX_ITER: 40 16 | IMS_PER_BATCH: 4 17 | DATALOADER: 18 | NUM_WORKERS: 2 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TRAIN: ("coco_2017_val_panoptic_stuffonly",) 9 | TEST: ("coco_2017_val_panoptic_stuffonly",) 10 | SOLVER: 11 | BASE_LR: 0.01 12 | WARMUP_FACTOR: 0.001 13 | WARMUP_ITERS: 300 14 | STEPS: (5500,) 15 | MAX_ITER: 7000 16 | TEST: 17 | EXPECTED_RESULTS: [["sem_seg", "mIoU", 76.51, 1.0], ["sem_seg", "mACC", 83.25, 1.0]] 18 | INPUT: 19 | # no scale augmentation 20 | MIN_SIZE_TRAIN: (800, ) 21 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/demo/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Detectron2 Demo 3 | 4 | We provide a command line tool to run a simple demo of builtin models. 5 | The usage is explained in [GETTING_STARTED.md](../GETTING_STARTED.md). 6 | 7 | See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-) 8 | for a high-quality demo generated with this tool. 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | 3 | from .utils.env import setup_environment 4 | 5 | setup_environment() 6 | 7 | 8 | # This line will be programmatically read and written by setup.py. 9 | # Leave it at the bottom of this file and don't touch it. 10 | __version__ = "0.1.3" 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/checkpoint/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | # File: 4 | 5 | 6 | from . import catalog as _UNUSED # register the handler 7 | from .detection_checkpoint import DetectionCheckpointer 8 | from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer 9 | 10 | __all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"] 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/config/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved 2 | from .compat import downgrade_config, upgrade_config 3 | from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable 4 | 5 | __all__ = [ 6 | "CfgNode", 7 | "get_cfg", 8 | "global_cfg", 9 | "set_global_cfg", 10 | "downgrade_config", 11 | "upgrade_config", 12 | "configurable", 13 | ] 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from . import transforms # isort:skip 3 | 4 | from .build import ( 5 | build_detection_test_loader, 6 | build_detection_train_loader, 7 | get_detection_dataset_dicts, 8 | load_proposals_into_dataset, 9 | print_instances_class_histogram, 10 | ) 11 | from .catalog import DatasetCatalog, MetadataCatalog 12 | from .common import DatasetFromList, MapDataset 13 | from .dataset_mapper import DatasetMapper 14 | 15 | # ensure the builtin data are registered 16 | from . import datasets, samplers # isort:skip 17 | 18 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/data/datasets/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Common Datasets 4 | 5 | The datasets implemented here do not need to load the data into the final format. 6 | Each should provide the minimal data structure needed to use the dataset, so it can be very efficient. 7 | 8 | For example, for an image dataset, just provide the file names and labels, but don't read the images. 9 | Let the downstream code decide how to read them. 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .cityscapes import load_cityscapes_instances 3 | from .coco import load_coco_json, load_sem_seg 4 | from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta 5 | from .register_coco import register_coco_instances, register_coco_panoptic_separated 6 | from . import builtin # ensure the builtin data are registered 7 | 8 | 9 | __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved 2 | from .distributed_sampler import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler 3 | from .grouped_batch_sampler import GroupedBatchSampler 4 | 5 | __all__ = [ 6 | "GroupedBatchSampler", 7 | "TrainingSampler", 8 | "InferenceSampler", 9 | "RepeatFactorTrainingSampler", 10 | ] 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .transform import * 3 | from fvcore.transforms.transform import * 4 | from .transform_gen import * 5 | 6 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | 3 | from .launch import * 4 | from .train_loop import * 5 | 6 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 7 | 8 | 9 | # prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) 10 | # but still make them available here 11 | from .hooks import * 12 | from .defaults import * 13 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator 3 | from .coco_evaluation import COCOEvaluator 4 | from .rotated_coco_evaluation import RotatedCOCOEvaluator 5 | from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset 6 | from .lvis_evaluation import LVISEvaluator 7 | from .panoptic_evaluation import COCOPanopticEvaluator 8 | from .pascal_voc_evaluation import PascalVOCDetectionEvaluator 9 | from .sem_seg_evaluation import SemSegEvaluator 10 | from .testing import print_csv_format, verify_results 11 | 12 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 13 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/export/README.md: -------------------------------------------------------------------------------- 1 | 2 | This directory contains code to prepare a detectron2 model for deployment. 3 | Currently it supports exporting a detectron2 model to Caffe2 format through ONNX. 4 | 5 | Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. 6 | 7 | 8 | ### Acknowledgements 9 | 10 | Thanks to Mobile Vision team at Facebook for developing the conversion tools. 
11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/export/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .api import * 4 | 5 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm 3 | from .deform_conv import DeformConv, ModulatedDeformConv 4 | from .mask_ops import paste_masks_in_image 5 | from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated 6 | from .roi_align import ROIAlign, roi_align 7 | from .roi_align_rotated import ROIAlignRotated, roi_align_rotated 8 | from .shape_spec import ShapeSpec 9 | from .wrappers import BatchNorm2d, Conv2d, ConvTranspose2d, cat, interpolate, Linear 10 | from .blocks import CNNBlockBase 11 | 12 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 13 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/blocks.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | from torch import nn 5 | 6 | from .batch_norm import FrozenBatchNorm2d 7 | 8 | 9 | class CNNBlockBase(nn.Module): 10 | """ 11 | A CNN block is assumed to have input channels, output channels and a stride. 12 | The input and output of the `forward()` method must be NCHW tensors. 13 | The method can perform arbitrary computation but must match the given 14 | channels and stride specification. 15 | 16 | Attributes: 17 | in_channels (int): 18 | out_channels (int): 19 | stride (int): 20 | """ 21 | 22 | def __init__(self, in_channels, out_channels, stride): 23 | """ 24 | The `__init__` method of any subclass should also contain these arguments. 25 | 26 | Args: 27 | in_channels (int): 28 | out_channels (int): 29 | stride (int): 30 | """ 31 | super().__init__() 32 | self.in_channels = in_channels 33 | self.out_channels = out_channels 34 | self.stride = stride 35 | 36 | def freeze(self): 37 | """ 38 | Make this block not trainable. 39 | This method sets all parameters to `requires_grad=False`, 40 | and converts all BatchNorm layers to FrozenBatchNorm2d. 41 | 42 | Returns: 43 | the block itself 44 | """ 45 | for p in self.parameters(): 46 | p.requires_grad = False 47 | FrozenBatchNorm2d.convert_frozen_batchnorm(self) 48 | return self 49 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/csrc/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | To add a new Op: 4 | 5 | 1. Create a new directory 6 | 2. Implement new ops there 7 | 3. Declare its Python interface in `vision.cpp`.
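Editor's note: a minimal sketch of how `CNNBlockBase` from `blocks.py` above is meant to be subclassed; the block below is not part of this repo and its names are illustrative. The subclass declares its channel/stride contract to the base class, after which `freeze()` works as documented:

```python
import torch.nn.functional as F
from detectron2.layers import CNNBlockBase, Conv2d, get_norm

class ToyBlock(CNNBlockBase):
    """A single 3x3 conv block that honors the declared channels/stride."""
    def __init__(self, in_channels, out_channels, stride=1, norm="BN"):
        super().__init__(in_channels, out_channels, stride)
        self.conv = Conv2d(in_channels, out_channels, kernel_size=3,
                           stride=stride, padding=1, bias=False,
                           norm=get_norm(norm, out_channels))

    def forward(self, x):  # NCHW in, NCHW out
        return F.relu(self.conv(x))

block = ToyBlock(64, 64).freeze()  # params frozen, BN -> FrozenBatchNorm2d
```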
8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | #pragma once 3 | #include <ATen/ATen.h> 4 | 5 | namespace detectron2 { 6 | 7 | at::Tensor box_iou_rotated_cpu( 8 | const at::Tensor& boxes1, 9 | const at::Tensor& boxes2); 10 | 11 | #ifdef WITH_CUDA 12 | at::Tensor box_iou_rotated_cuda( 13 | const at::Tensor& boxes1, 14 | const at::Tensor& boxes2); 15 | #endif 16 | 17 | // Interface for Python 18 | // inline is needed to prevent multiple function definitions when this header is 19 | // included by different cpps 20 | inline at::Tensor box_iou_rotated( 21 | const at::Tensor& boxes1, 22 | const at::Tensor& boxes2) { 23 | assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); 24 | if (boxes1.device().is_cuda()) { 25 | #ifdef WITH_CUDA 26 | return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); 27 | #else 28 | AT_ERROR("Not compiled with GPU support"); 29 | #endif 30 | } 31 | 32 | return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); 33 | } 34 | 35 | } // namespace detectron2 36 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | #include "box_iou_rotated.h" 3 | #include "box_iou_rotated_utils.h" 4 | 5 | namespace detectron2 { 6 | 7 | template <typename T> 8 | void box_iou_rotated_cpu_kernel( 9 | const at::Tensor& boxes1, 10 | const at::Tensor& boxes2, 11 | at::Tensor& ious) { 12 | auto num_boxes1 = boxes1.size(0); 13 | auto num_boxes2 = boxes2.size(0); 14 | 15 | for (int i = 0; i < num_boxes1; i++) { 16 | for (int j = 0; j < num_boxes2; j++) { 17 | ious[i * num_boxes2 + j] = single_box_iou_rotated<T>( 18 | boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>()); 19 | } 20 | } 21 | } 22 | 23 | at::Tensor box_iou_rotated_cpu( 24 | // input must be contiguous: 25 | const at::Tensor& boxes1, 26 | const at::Tensor& boxes2) { 27 | auto num_boxes1 = boxes1.size(0); 28 | auto num_boxes2 = boxes2.size(0); 29 | at::Tensor ious = 30 | at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); 31 | 32 | box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious); 33 | 34 | // reshape from 1d array to 2d array 35 | auto shape = std::vector<int64_t>{num_boxes1, num_boxes2}; 36 | return ious.reshape(shape); 37 | } 38 | 39 | } // namespace detectron2 40 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/csrc/cuda_version.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | 3 | #include <cuda_runtime_api.h> 4 | 5 | namespace detectron2 { 6 | int get_cudart_version() { 7 | return CUDART_VERSION; 8 | } 9 | } // namespace detectron2 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc.
and its affiliates. All Rights Reserved 2 | #pragma once 3 | #include <ATen/ATen.h> 4 | 5 | namespace detectron2 { 6 | 7 | at::Tensor nms_rotated_cpu( 8 | const at::Tensor& dets, 9 | const at::Tensor& scores, 10 | const float iou_threshold); 11 | 12 | #ifdef WITH_CUDA 13 | at::Tensor nms_rotated_cuda( 14 | const at::Tensor& dets, 15 | const at::Tensor& scores, 16 | const float iou_threshold); 17 | #endif 18 | 19 | // Interface for Python 20 | // inline is needed to prevent multiple function definitions when this header is 21 | // included by different cpps 22 | inline at::Tensor nms_rotated( 23 | const at::Tensor& dets, 24 | const at::Tensor& scores, 25 | const float iou_threshold) { 26 | assert(dets.device().is_cuda() == scores.device().is_cuda()); 27 | if (dets.device().is_cuda()) { 28 | #ifdef WITH_CUDA 29 | return nms_rotated_cuda( 30 | dets.contiguous(), scores.contiguous(), iou_threshold); 31 | #else 32 | AT_ERROR("Not compiled with GPU support"); 33 | #endif 34 | } 35 | 36 | return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold); 37 | } 38 | 39 | } // namespace detectron2 40 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/rotated_boxes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from __future__ import absolute_import, division, print_function, unicode_literals 3 | 4 | from detectron2 import _C 5 | 6 | 7 | def pairwise_iou_rotated(boxes1, boxes2): 8 | """ 9 | Return intersection-over-union (Jaccard index) of boxes. 10 | 11 | Both sets of boxes are expected to be in 12 | (x_center, y_center, width, height, angle) format. 13 | 14 | Arguments: 15 | boxes1 (Tensor[N, 5]) 16 | boxes2 (Tensor[M, 5]) 17 | 18 | Returns: 19 | iou (Tensor[N, M]): the NxM matrix containing the pairwise 20 | IoU values for every element in boxes1 and boxes2 21 | """ 22 | return _C.box_iou_rotated(boxes1, boxes2) 23 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/layers/shape_spec.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | from collections import namedtuple 4 | 5 | 6 | class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): 7 | """ 8 | A simple structure that contains basic shape specification about a tensor. 9 | It is often used as the auxiliary inputs/outputs of models, 10 | to obtain the shape inference ability among pytorch modules. 11 | 12 | Attributes: 13 | channels: 14 | height: 15 | width: 16 | stride: 17 | """ 18 | 19 | def __new__(cls, *, channels=None, height=None, width=None, stride=None): 20 | return super().__new__(cls, channels, height, width, stride) 21 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/model_zoo/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Model Zoo API for Detectron2: a collection of functions to create common model architectures and 4 | optionally load pre-trained weights as released in 5 | `MODEL_ZOO.md <https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md>`_.
6 | """ 7 | from .model_zoo import get, get_config_file, get_checkpoint_url 8 | 9 | __all__ = ["get_checkpoint_url", "get", "get_config_file"] 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import torch 3 | 4 | from detectron2.layers import ShapeSpec 5 | 6 | from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY 7 | from .backbone import ( 8 | BACKBONE_REGISTRY, 9 | FPN, 10 | Backbone, 11 | ResNet, 12 | ResNetBlockBase, 13 | build_backbone, 14 | build_resnet_backbone, 15 | make_stage, 16 | ) 17 | from .meta_arch import ( 18 | META_ARCH_REGISTRY, 19 | SEM_SEG_HEADS_REGISTRY, 20 | GeneralizedRCNN, 21 | PanopticFPN, 22 | ProposalNetwork, 23 | RetinaNet, 24 | SemanticSegmentor, 25 | build_model, 26 | build_sem_seg_head, 27 | ) 28 | from .postprocessing import detector_postprocess 29 | from .proposal_generator import ( 30 | PROPOSAL_GENERATOR_REGISTRY, 31 | build_proposal_generator, 32 | RPN_HEAD_REGISTRY, 33 | build_rpn_head, 34 | ) 35 | from .roi_heads import ( 36 | ROI_BOX_HEAD_REGISTRY, 37 | ROI_HEADS_REGISTRY, 38 | ROI_KEYPOINT_HEAD_REGISTRY, 39 | ROI_MASK_HEAD_REGISTRY, 40 | ROIHeads, 41 | StandardROIHeads, 42 | BaseMaskRCNNHead, 43 | BaseKeypointRCNNHead, 44 | build_box_head, 45 | build_keypoint_head, 46 | build_mask_head, 47 | build_roi_heads, 48 | ) 49 | from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA 50 | 51 | _EXCLUDE = {"torch", "ShapeSpec"} 52 | __all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] 53 | 54 | assert ( 55 | torch.Tensor([1]) == torch.Tensor([2]) 56 | ).dtype == torch.bool, "Your Pytorch is too old. Please update to contain https://github.com/pytorch/pytorch/pull/21113" 57 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip 3 | 4 | from .backbone import Backbone 5 | from .fpn import FPN 6 | from .resnet import ResNet, ResNetBlockBase, build_resnet_backbone, make_stage 7 | 8 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 9 | # TODO can expose more resnet blocks after careful consideration 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/backbone/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from detectron2.layers import ShapeSpec 3 | from detectron2.utils.registry import Registry 4 | 5 | from .backbone import Backbone 6 | 7 | BACKBONE_REGISTRY = Registry("BACKBONE") 8 | BACKBONE_REGISTRY.__doc__ = """ 9 | Registry for backbones, which extract feature maps from images 10 | 11 | The registered object must be a callable that accepts two arguments: 12 | 13 | 1. A :class:`detectron2.config.CfgNode` 14 | 2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. 
15 | 16 | It must return an instance of :class:`Backbone`. 17 | """ 18 | 19 | 20 | def build_backbone(cfg, input_shape=None): 21 | """ 22 | Build a backbone from `cfg.MODEL.BACKBONE.NAME`. 23 | 24 | Returns: 25 | an instance of :class:`Backbone` 26 | """ 27 | if input_shape is None: 28 | input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) 29 | 30 | backbone_name = cfg.MODEL.BACKBONE.NAME 31 | backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) 32 | assert isinstance(backbone, Backbone) 33 | return backbone 34 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/meta_arch/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | from .build import META_ARCH_REGISTRY, build_model # isort:skip 5 | 6 | from .panoptic_fpn import PanopticFPN 7 | 8 | # import all the meta_arch, so they will be registered 9 | from .rcnn import GeneralizedRCNN, ProposalNetwork 10 | from .retinanet import RetinaNet 11 | from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head 12 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/meta_arch/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import torch 3 | 4 | from detectron2.utils.registry import Registry 5 | 6 | META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip 7 | META_ARCH_REGISTRY.__doc__ = """ 8 | Registry for meta-architectures, i.e. the whole model. 9 | 10 | The registered object will be called with `obj(cfg)` 11 | and expected to return a `nn.Module` object. 12 | """ 13 | 14 | 15 | def build_model(cfg): 16 | """ 17 | Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. 18 | Note that it does not load any weights from ``cfg``. 19 | """ 20 | meta_arch = cfg.MODEL.META_ARCHITECTURE 21 | model = META_ARCH_REGISTRY.get(meta_arch)(cfg) 22 | model.to(torch.device(cfg.MODEL.DEVICE)) 23 | return model 24 | --------------------------------------------------------------------------------
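Since `build_model` deliberately returns an architecture with no weights loaded, a typical caller pairs it with detectron2's `DetectionCheckpointer`. A minimal sketch (not part of the repository; the config path assumes the vendored detectron2 root as the working directory, and `cpu` is only an assumption for machines without a GPU):

```python
from detectron2.config import get_cfg
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.merge_from_file("configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
cfg.MODEL.DEVICE = "cpu"  # assumption: run without a GPU

model = build_model(cfg)                              # architecture only, random weights
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)  # weights are loaded separately
```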
/preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/proposal_generator/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator 3 | from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN 4 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/proposal_generator/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from detectron2.utils.registry import Registry 3 | 4 | PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") 5 | PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ 6 | Registry for proposal generator, which produces object proposals from feature maps. 7 | 8 | The registered object will be called with `obj(cfg, input_shape)`. 9 | The call should return a `nn.Module` object. 10 | """ 11 | 12 | from . import rpn, rrpn # noqa F401 isort:skip 13 | 14 | 15 | def build_proposal_generator(cfg, input_shape): 16 | """ 17 | Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. 18 | The name can be "PrecomputedProposals" to use no proposal generator. 19 | """ 20 | name = cfg.MODEL.PROPOSAL_GENERATOR.NAME 21 | if name == "PrecomputedProposals": 22 | return None 23 | 24 | return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape) 25 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/modeling/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head 3 | from .keypoint_head import ROI_KEYPOINT_HEAD_REGISTRY, build_keypoint_head, BaseKeypointRCNNHead 4 | from .mask_head import ROI_MASK_HEAD_REGISTRY, build_mask_head, BaseMaskRCNNHead 5 | from .roi_heads import ( 6 | ROI_HEADS_REGISTRY, 7 | ROIHeads, 8 | Res5ROIHeads, 9 | StandardROIHeads, 10 | build_roi_heads, 11 | select_foreground_proposals, 12 | ) 13 | from .rotated_fast_rcnn import RROIHeads 14 | from .fast_rcnn import FastRCNNOutputLayers 15 | 16 | from . import cascade_rcnn # isort:skip 17 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/solver/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .build import build_lr_scheduler, build_optimizer 3 | from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR 4 | 5 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 6 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/structures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .boxes import Boxes, BoxMode, pairwise_iou 3 | from .image_list import ImageList 4 | 5 | from .instances import Instances 6 | from .keypoints import Keypoints, heatmaps_to_keypoints 7 | from .masks import BitMasks, PolygonMasks, rasterize_polygons_within_box, polygons_to_bitmask 8 | from .rotated_boxes import RotatedBoxes 9 | from .rotated_boxes import pairwise_iou as pairwise_iou_rotated 10 | 11 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 12 | --------------------------------------------------------------------------------
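The `structures` package re-exported above provides the core geometric containers. A quick sketch of `Boxes` together with `pairwise_iou` (not from the repository; values are illustrative):

```python
import torch
from detectron2.structures import Boxes, pairwise_iou

# Axis-aligned boxes in (x1, y1, x2, y2) format.
a = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0],
                        [5.0, 5.0, 15.0, 15.0]]))  # N = 2
b = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))  # M = 1

iou = pairwise_iou(a, b)  # Tensor[N, M]
print(iou)  # [[1.0000], [0.1429]] -- second pair: 25 intersection / 175 union
```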
/preprocess/humanparsing/mhp_extension/detectron2/detectron2/utils/README.md: -------------------------------------------------------------------------------- 1 | # Utility functions 2 | 3 | This folder contains utility functions that are not used in the 4 | core library, but are useful for building models or training 5 | code using the config system. 6 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/utils/registry.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | # Keep this module for backward compatibility. 4 | from fvcore.common.registry import Registry # noqa 5 | 6 | __all__ = ["Registry"] 7 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/detectron2/utils/serialize.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import cloudpickle 3 | 4 | 5 | class PicklableWrapper(object): 6 | """ 7 | Wrap an object to make it more picklable. Note that it uses 8 | heavyweight serialization libraries that are slower than pickle. 9 | It's best to use it only on closures (which are usually not picklable). 10 | 11 | This is a simplified version of 12 | https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py 13 | """ 14 | 15 | def __init__(self, obj): 16 | self._obj = obj 17 | 18 | def __reduce__(self): 19 | s = cloudpickle.dumps(self._obj) 20 | return cloudpickle.loads, (s,) 21 | 22 | def __call__(self, *args, **kwargs): 23 | return self._obj(*args, **kwargs) 24 | 25 | def __getattr__(self, attr): 26 | # Ensure that the wrapped object can be used seamlessly as the previous object. 27 | if attr not in ["_obj"]: 28 | return getattr(self._obj, attr) 29 | return getattr(self, attr) 30 | --------------------------------------------------------------------------------
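A short illustrative sketch of `PicklableWrapper` in use: plain `pickle` rejects a lambda, but the wrapper's `__reduce__` defers to `cloudpickle` (not from the repository; the round trip returns the underlying function):

```python
import pickle
from detectron2.utils.serialize import PicklableWrapper

scale = 2.0
f = lambda x: x * scale        # closures/lambdas are not picklable by default
wrapped = PicklableWrapper(f)  # cloudpickle handles the closure

restored = pickle.loads(pickle.dumps(wrapped))  # unpickles via cloudpickle.loads
print(restored(21))  # 42.0
```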
/preprocess/humanparsing/mhp_extension/detectron2/dev/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Some scripts for developers to use include: 3 | 4 | - `linter.sh`: lint the codebase before commit 5 | - `run_{inference,instant}_tests.sh`: run inference/training for a few iterations. 6 | Note that these tests require 2 GPUs. 7 | - `parse_results.sh`: parse results from a log file. 8 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/dev/linter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | # Run this script at project root by "./dev/linter.sh" before you commit 5 | 6 | vergte() { 7 | [ "$2" = "$(echo -e "$1\\n$2" | sort -V | head -n1)" ] 8 | } 9 | 10 | { 11 | black --version | grep -E "(19.3b0.*6733274)|(19.3b0\\+8)" > /dev/null 12 | } || { 13 | echo "Linter requires 'black @ git+https://github.com/psf/black@673327449f86fce558adde153bb6cbe54bfebad2' !" 14 | exit 1 15 | } 16 | 17 | ISORT_TARGET_VERSION="4.3.21" 18 | ISORT_VERSION=$(isort -v | grep VERSION | awk '{print $2}') 19 | vergte "$ISORT_VERSION" "$ISORT_TARGET_VERSION" || { 20 | echo "Linter requires isort>=${ISORT_TARGET_VERSION} !" 21 | exit 1 22 | } 23 | 24 | set -v 25 | 26 | echo "Running isort ..." 27 | isort -y -sp . --atomic 28 | 29 | echo "Running black ..." 30 | black -l 100 . 31 | 32 | echo "Running flake8 ..." 33 | if [ -x "$(command -v flake8-3)" ]; then 34 | flake8-3 . 35 | else 36 | python3 -m flake8 . 37 | fi 38 | 39 | # echo "Running mypy ..." 40 | # PyTorch does not have enough type annotations 41 | # mypy detectron2/solver detectron2/structures detectron2/config 42 | 43 | echo "Running clang-format ..." 44 | find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i 45 | 46 | command -v arc > /dev/null && arc lint 47 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/dev/packaging/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## To build a cu101 wheel for release: 3 | 4 | ``` 5 | $ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101 6 | # inside the container: 7 | # git clone https://github.com/facebookresearch/detectron2/ 8 | # cd detectron2 9 | # export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.4 10 | # ./dev/packaging/build_wheel.sh 11 | ``` 12 | 13 | ## To build all wheels for `CUDA {9.2,10.0,10.1}` x `Python {3.6,3.7,3.8}`: 14 | ``` 15 | ./dev/packaging/build_all_wheels.sh 16 | ./dev/packaging/gen_wheel_index.sh /path/to/wheels 17 | ``` 18 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/dev/packaging/build_all_wheels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | PYTORCH_VERSION=1.5 5 | 6 | build_for_one_cuda() { 7 | cu=$1 8 | 9 | case "$cu" in 10 | cu*) 11 | container_name=manylinux-cuda${cu/cu/} 12 | ;; 13 | cpu) 14 | container_name=manylinux-cuda101 15 | ;; 16 | *) 17 | echo "Unrecognized cu=$cu" 18 | exit 1 19 | ;; 20 | esac 21 | 22 | echo "Launching container $container_name ..." 23 | 24 | for py in 3.6 3.7 3.8; do 25 | docker run -itd \ 26 | --name $container_name \ 27 | --mount type=bind,source="$(pwd)",target=/detectron2 \ 28 | pytorch/$container_name 29 | 30 | cat <<EOF | docker exec -i $container_name sh
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/dev/packaging/build_wheel.sh: -------------------------------------------------------------------------------- 7 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 8 | . "$script_dir/pkg_helpers.bash" 9 | 10 | echo "Build Settings:" 11 | echo "CU_VERSION: $CU_VERSION" # e.g. cu101 12 | echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or "" 13 | echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.6 14 | echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 1.4 15 | 16 | setup_cuda 17 | setup_wheel_python 18 | yum install ninja-build -y && ln -sv /usr/bin/ninja-build /usr/bin/ninja 19 | 20 | export TORCH_VERSION_SUFFIX="+$CU_VERSION" 21 | if [[ "$CU_VERSION" == "cu102" ]]; then 22 | export TORCH_VERSION_SUFFIX="" 23 | fi 24 | pip_install pip numpy -U 25 | pip_install "torch==$PYTORCH_VERSION$TORCH_VERSION_SUFFIX" \ 26 | -f https://download.pytorch.org/whl/$CU_VERSION/torch_stable.html 27 | 28 | # use separate directories to allow parallel build 29 | BASE_BUILD_DIR=build/$CU_VERSION/$PYTHON_VERSION 30 | python setup.py \ 31 | build -b $BASE_BUILD_DIR \ 32 | bdist_wheel -b $BASE_BUILD_DIR/build_dist -d wheels/$CU_VERSION 33 | --------------------------------------------------------------------------------
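The suffix logic in `build_wheel.sh` pins torch to the wheel matching the container's CUDA version; `cu102` carries no suffix because it is the default CUDA build on PyTorch's wheel index. Restated as a small illustrative Python helper (the authoritative logic is the shell above):

```python
def torch_pin(pytorch_version: str, cu_version: str) -> str:
    # cu102 wheels are published without a local version suffix.
    suffix = "" if cu_version == "cu102" else f"+{cu_version}"
    return f"torch=={pytorch_version}{suffix}"

print(torch_pin("1.5", "cu101"))  # torch==1.5+cu101
print(torch_pin("1.5", "cu102"))  # torch==1.5
```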
/preprocess/humanparsing/mhp_extension/detectron2/dev/packaging/gen_wheel_index.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | 5 | root=$1 6 | if [[ -z "$root" ]]; then 7 | echo "Usage: ./gen_wheel_index.sh /path/to/wheels" 8 | exit 9 | fi 10 | 11 | index=$root/index.html 12 | 13 | cd "$root" 14 | for cu in cpu cu92 cu100 cu101 cu102; do 15 | cd $cu 16 | echo "Creating $PWD/index.html ..." 17 | for whl in *.whl; do 18 | echo "<a href=\"${whl/+/%2B}\">$whl</a><br>" 19 | done > index.html 20 | cd "$root" 21 | done 22 | 23 | echo "Creating $index ..." 24 | for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort); do 25 | echo "<a href=\"${whl/+/%2B}\">$whl</a><br>" 26 | done > "$index" 27 | 28 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/dev/run_inference_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | BIN="python tools/train_net.py" 5 | OUTPUT="inference_test_output" 6 | NUM_GPUS=2 7 | 8 | CFG_LIST=( "${@:1}" ) 9 | 10 | if [ ${#CFG_LIST[@]} -eq 0 ]; then 11 | CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) 12 | fi 13 | 14 | echo "========================================================================" 15 | echo "Configs to run:" 16 | echo "${CFG_LIST[@]}" 17 | echo "========================================================================" 18 | 19 | 20 | for cfg in "${CFG_LIST[@]}"; do 21 | echo "========================================================================" 22 | echo "Running $cfg ..." 23 | echo "========================================================================" 24 | $BIN \ 25 | --eval-only \ 26 | --num-gpus $NUM_GPUS \ 27 | --config-file "$cfg" \ 28 | OUTPUT_DIR $OUTPUT 29 | rm -rf $OUTPUT 30 | done 31 | 32 | 33 | echo "========================================================================" 34 | echo "Running demo.py ..." 35 | echo "========================================================================" 36 | DEMO_BIN="python demo/demo.py" 37 | COCO_DIR=datasets/coco/val2014 38 | mkdir -pv $OUTPUT 39 | 40 | set -v 41 | 42 | $DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \ 43 | --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT 44 | rm -rf $OUTPUT 45 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/dev/run_instant_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | BIN="python tools/train_net.py" 5 | OUTPUT="instant_test_output" 6 | NUM_GPUS=2 7 | 8 | CFG_LIST=( "${@:1}" ) 9 | if [ ${#CFG_LIST[@]} -eq 0 ]; then 10 | CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) 11 | fi 12 | 13 | echo "========================================================================" 14 | echo "Configs to run:" 15 | echo "${CFG_LIST[@]}" 16 | echo "========================================================================" 17 | 18 | for cfg in "${CFG_LIST[@]}"; do 19 | echo "========================================================================" 20 | echo "Running $cfg ..."
21 | echo "========================================================================" 22 | $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ 23 | SOLVER.IMS_PER_BATCH $(($NUM_GPUS * 2)) \ 24 | OUTPUT_DIR "$OUTPUT" 25 | rm -rf "$OUTPUT" 26 | done 27 | 28 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docker/Dockerfile-circleci: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:10.1-cudnn7-devel 2 | # This dockerfile only aims to provide an environment for unittest on CircleCI 3 | 4 | ENV DEBIAN_FRONTEND noninteractive 5 | RUN apt-get update && apt-get install -y \ 6 | python3-opencv ca-certificates python3-dev git wget sudo ninja-build && \ 7 | rm -rf /var/lib/apt/lists/* 8 | 9 | RUN wget -q https://bootstrap.pypa.io/get-pip.py && \ 10 | python3 get-pip.py && \ 11 | rm get-pip.py 12 | 13 | # install dependencies 14 | # See https://pytorch.org/ for other options if you use a different version of CUDA 15 | RUN pip install tensorboard cython 16 | RUN pip install torch==1.5+cu101 torchvision==0.6+cu101 -f https://download.pytorch.org/whl/torch_stable.html 17 | RUN pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' 18 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docker/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Use the container (with docker ≥ 19.03) 3 | 4 | ``` 5 | cd docker/ 6 | # Build: 7 | docker build --build-arg USER_ID=$UID -t detectron2:v0 . 8 | # Run: 9 | docker run --gpus all -it \ 10 | --shm-size=8gb --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \ 11 | --name=detectron2 detectron2:v0 12 | 13 | # Grant docker access to host X server to show images 14 | xhost +local:`docker inspect --format='{{ .Config.Hostname }}' detectron2` 15 | ``` 16 | 17 | ## Use the container (with docker < 19.03) 18 | 19 | Install docker-compose and nvidia-docker2, then run: 20 | ``` 21 | cd docker && USER_ID=$UID docker-compose run detectron2 22 | ``` 23 | 24 | #### Using a persistent cache directory 25 | 26 | You can prevent models from being re-downloaded on every run, 27 | by storing them in a cache directory. 28 | 29 | To do this, add `--volume=$HOME/.torch/fvcore_cache:/tmp:rw` in the run command. 30 | 31 | ## Install new dependencies 32 | Add the following to `Dockerfile` to make persistent changes. 33 | ``` 34 | RUN sudo apt-get update && sudo apt-get install -y vim 35 | ``` 36 | Or run them in the container to make temporary changes. 37 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | detectron2: 4 | build: 5 | context: . 6 | dockerfile: Dockerfile 7 | args: 8 | USER_ID: ${USER_ID:-1000} 9 | runtime: nvidia # TODO: Exchange with "gpu: all" in the future (see https://github.com/facebookresearch/detectron2/pull/197/commits/00545e1f376918db4a8ce264d427a07c1e896c5a). 
10 | shm_size: "8gb" 11 | ulimits: 12 | memlock: -1 13 | stack: 67108864 14 | volumes: 15 | - /tmp/.X11-unix:/tmp/.X11-unix:ro 16 | environment: 17 | - DISPLAY=$DISPLAY 18 | - NVIDIA_VISIBLE_DEVICES=all 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/README.md: -------------------------------------------------------------------------------- 1 | # Read the docs: 2 | 3 | The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). 4 | Documents in this directory are not meant to be read on github. 5 | 6 | # Build the docs: 7 | 8 | 1. Install detectron2 according to [INSTALL.md](INSTALL.md). 9 | 2. Install additional libraries required to build docs: 10 | - docutils==0.16 11 | - Sphinx==3.0.0 12 | - recommonmark==0.6.0 13 | - sphinx_rtd_theme 14 | - mock 15 | 16 | 3. Run `make html` from this directory. 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/index.rst: -------------------------------------------------------------------------------- 1 | .. detectron2 documentation master file, created by 2 | sphinx-quickstart on Sat Sep 21 13:46:45 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to detectron2's documentation! 7 | ====================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | tutorials/index 13 | notes/index 14 | modules/index 15 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/checkpoint.rst: -------------------------------------------------------------------------------- 1 | detectron2.checkpoint package 2 | ============================= 3 | 4 | .. automodule:: detectron2.checkpoint 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/config.rst: -------------------------------------------------------------------------------- 1 | detectron2.config package 2 | ========================= 3 | 4 | .. 
automodule:: detectron2.config 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | :inherited-members: 9 | 10 | 11 | Config References 12 | ----------------- 13 | 14 | .. literalinclude:: ../../detectron2/config/defaults.py 15 | :language: python 16 | :linenos: 17 | :lines: 4- 18 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/data.rst: -------------------------------------------------------------------------------- 1 | detectron2.data package 2 | ======================= 3 | 4 | .. automodule:: detectron2.data 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | detectron2.data.detection\_utils module 10 | --------------------------------------- 11 | 12 | .. automodule:: detectron2.data.detection_utils 13 | :members: 14 | :undoc-members: 15 | :show-inheritance: 16 | 17 | detectron2.data.datasets module 18 | --------------------------------------- 19 | 20 | .. automodule:: detectron2.data.datasets 21 | :members: 22 | :undoc-members: 23 | :show-inheritance: 24 | 25 | detectron2.data.samplers module 26 | --------------------------------------- 27 | 28 | .. automodule:: detectron2.data.samplers 29 | :members: 30 | :undoc-members: 31 | :show-inheritance: 32 | 33 | 34 | detectron2.data.transforms module 35 | --------------------------------------- 36 | 37 | .. automodule:: detectron2.data.transforms 38 | :members: 39 | :undoc-members: 40 | :show-inheritance: 41 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/engine.rst: -------------------------------------------------------------------------------- 1 | detectron2.engine package 2 | ========================= 3 | 4 | 5 | .. automodule:: detectron2.engine 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | 10 | 11 | detectron2.engine.defaults module 12 | --------------------------------- 13 | 14 | .. automodule:: detectron2.engine.defaults 15 | :members: 16 | :undoc-members: 17 | :show-inheritance: 18 | 19 | detectron2.engine.hooks module 20 | --------------------------------- 21 | 22 | .. automodule:: detectron2.engine.hooks 23 | :members: 24 | :undoc-members: 25 | :show-inheritance: 26 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/evaluation.rst: -------------------------------------------------------------------------------- 1 | detectron2.evaluation package 2 | ============================= 3 | 4 | .. automodule:: detectron2.evaluation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/export.rst: -------------------------------------------------------------------------------- 1 | detectron2.export package 2 | ========================= 3 | 4 | .. automodule:: detectron2.export 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/index.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================== 3 | 4 | .. 
toctree:: 5 | 6 | checkpoint 7 | config 8 | data 9 | engine 10 | evaluation 11 | layers 12 | model_zoo 13 | modeling 14 | solver 15 | structures 16 | utils 17 | export 18 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/layers.rst: -------------------------------------------------------------------------------- 1 | detectron2.layers package 2 | ========================= 3 | 4 | .. automodule:: detectron2.layers 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/model_zoo.rst: -------------------------------------------------------------------------------- 1 | detectron2.model_zoo package 2 | ============================ 3 | 4 | .. automodule:: detectron2.model_zoo 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/solver.rst: -------------------------------------------------------------------------------- 1 | detectron2.solver package 2 | ========================= 3 | 4 | .. automodule:: detectron2.solver 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/modules/structures.rst: -------------------------------------------------------------------------------- 1 | detectron2.structures package 2 | ============================= 3 | 4 | .. automodule:: detectron2.structures 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/notes/changelog.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | ### Releases 4 | See release log at 5 | [https://github.com/facebookresearch/detectron2/releases](https://github.com/facebookresearch/detectron2/releases). 6 | 7 | ### Notable Backward Incompatible Changes: 8 | 9 | * 03/30/2020: Custom box head's `output_size` changed to `output_shape`. 10 | * 02/14/2020,02/18/2020: Mask head and keypoint head now include logic for losses & inference. Custom heads 11 | should overwrite the feature computation by `layers()` method. 12 | * 11/11/2019: `detectron2.data.detection_utils.read_image` transposes images with exif information. 13 | 14 | ### Config Version Change Log 15 | 16 | * v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`. 17 | * v2: A batch of rename of many configurations before release. 18 | 19 | ### Silent Regression in Historical Versions: 20 | 21 | We list a few silent regressions since they may silently produce incorrect results and will be hard to debug. 22 | 23 | * 04/01/2020 - 05/11/2020: Bad accuracy if `TRAIN_ON_PRED_BOXES` is set to True. 24 | * 03/30/2020 - 04/01/2020: ResNets are not correctly built. 25 | * 12/19/2019 - 12/26/2019: Using aspect ratio grouping causes a drop in accuracy. 26 | * release - 11/9/2019: Test time augmentation does not predict the last category. 
27 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/notes/index.rst: -------------------------------------------------------------------------------- 1 | Notes 2 | ====================================== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | benchmarks 8 | compatibility 9 | contributing 10 | changelog 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/tutorials/README.md: -------------------------------------------------------------------------------- 1 | # Read the docs: 2 | 3 | The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). 4 | Documents in this directory are not meant to be read on github. 5 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/docs/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ====================================== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | install 8 | getting_started 9 | builtin_datasets 10 | extend 11 | datasets 12 | data_loading 13 | models 14 | write-models 15 | training 16 | evaluation 17 | configs 18 | deployment 19 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | BACKBONE: 4 | NAME: "build_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 7 | FPN: 8 | IN_FEATURES: ["res2", "res3", "res4", "res5"] 9 | ANCHOR_GENERATOR: 10 | SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map 11 | ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) 12 | RPN: 13 | IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] 14 | PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level 15 | PRE_NMS_TOPK_TEST: 1000 # Per FPN level 16 | # Detectron1 uses 2000 proposals per-batch, 17 | # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) 18 | # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
19 | POST_NMS_TOPK_TRAIN: 1000 20 | POST_NMS_TOPK_TEST: 1000 21 | 22 | DENSEPOSE_ON: True 23 | ROI_HEADS: 24 | NAME: "DensePoseROIHeads" 25 | IN_FEATURES: ["p2", "p3", "p4", "p5"] 26 | NUM_CLASSES: 1 27 | ROI_BOX_HEAD: 28 | NAME: "FastRCNNConvFCHead" 29 | NUM_FC: 2 30 | POOLER_RESOLUTION: 7 31 | POOLER_SAMPLING_RATIO: 2 32 | POOLER_TYPE: "ROIAlign" 33 | ROI_DENSEPOSE_HEAD: 34 | NAME: "DensePoseV1ConvXHead" 35 | POOLER_TYPE: "ROIAlign" 36 | NUM_COARSE_SEGM_CHANNELS: 2 37 | DATASETS: 38 | TRAIN: ("densepose_coco_2014_train", "densepose_coco_2014_valminusminival") 39 | TEST: ("densepose_coco_2014_minival",) 40 | SOLVER: 41 | IMS_PER_BATCH: 16 42 | BASE_LR: 0.01 43 | STEPS: (60000, 80000) 44 | MAX_ITER: 90000 45 | WARMUP_FACTOR: 0.1 46 | INPUT: 47 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 48 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | ROI_DENSEPOSE_HEAD: 7 | NAME: "DensePoseDeepLabHead" 8 | UV_CONFIDENCE: 9 | ENABLED: True 10 | TYPE: "iid_iso" 11 | POINT_REGRESSION_WEIGHTS: 0.0005 12 | SOLVER: 13 | CLIP_GRADIENTS: 14 | ENABLED: True 15 | MAX_ITER: 130000 16 | STEPS: (100000, 120000) 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | ROI_DENSEPOSE_HEAD: 7 | NAME: "DensePoseDeepLabHead" 8 | UV_CONFIDENCE: 9 | ENABLED: True 10 | TYPE: "indep_aniso" 11 | POINT_REGRESSION_WEIGHTS: 0.0005 12 | SOLVER: 13 | CLIP_GRADIENTS: 14 | ENABLED: True 15 | MAX_ITER: 130000 16 | STEPS: (100000, 120000) 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | ROI_DENSEPOSE_HEAD: 7 | NAME: "DensePoseDeepLabHead" 8 | SOLVER: 9 | MAX_ITER: 130000 10 | STEPS: (100000, 120000) 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | ROI_DENSEPOSE_HEAD: 7 | UV_CONFIDENCE: 8 | ENABLED: True 9 | TYPE: "iid_iso" 10 | POINT_REGRESSION_WEIGHTS: 0.0005 11 | SOLVER: 12 | CLIP_GRADIENTS: 13 | ENABLED: True 14 | MAX_ITER: 130000 15 | STEPS: (100000, 120000) 16 | WARMUP_FACTOR: 0.025 17 | -------------------------------------------------------------------------------- 
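The DensePose variants above all inherit from `Base-DensePose-RCNN-FPN.yaml` via `_BASE_` and override only the deltas. A minimal sketch of resolving one of them in Python (not part of the repository; it assumes the DensePose project directory is importable and that the working directory is the vendored detectron2 root):

```python
from detectron2.config import get_cfg
from densepose import add_densepose_config  # registers the DensePose config keys

cfg = get_cfg()
add_densepose_config(cfg)  # must run before merging, or the extra keys are unknown
cfg.merge_from_file("projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml")

print(cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE)  # "iid_iso"
print(cfg.SOLVER.MAX_ITER)  # 130000 -- overrides the base config's 90000
```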
/preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | ROI_DENSEPOSE_HEAD: 7 | UV_CONFIDENCE: 8 | ENABLED: True 9 | TYPE: "indep_aniso" 10 | POINT_REGRESSION_WEIGHTS: 0.0005 11 | SOLVER: 12 | CLIP_GRADIENTS: 13 | ENABLED: True 14 | MAX_ITER: 130000 15 | STEPS: (100000, 120000) 16 | WARMUP_FACTOR: 0.025 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | SOLVER: 7 | MAX_ITER: 130000 8 | STEPS: (100000, 120000) 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | ROI_DENSEPOSE_HEAD: 7 | NUM_COARSE_SEGM_CHANNELS: 15 8 | POOLER_RESOLUTION: 14 9 | HEATMAP_SIZE: 56 10 | INDEX_WEIGHTS: 2.0 11 | PART_WEIGHTS: 0.3 12 | POINT_REGRESSION_WEIGHTS: 0.1 13 | DECODER_ON: False 14 | SOLVER: 15 | BASE_LR: 0.002 16 | MAX_ITER: 130000 17 | STEPS: (100000, 120000) 18 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | NAME: "DensePoseDeepLabHead" 8 | UV_CONFIDENCE: 9 | ENABLED: True 10 | TYPE: "iid_iso" 11 | POINT_REGRESSION_WEIGHTS: 0.0005 12 | SOLVER: 13 | CLIP_GRADIENTS: 14 | ENABLED: True 15 | MAX_ITER: 130000 16 | STEPS: (100000, 120000) 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | NAME: "DensePoseDeepLabHead" 8 | UV_CONFIDENCE: 9 | ENABLED: True 10 | TYPE: "indep_aniso" 11 | POINT_REGRESSION_WEIGHTS: 0.0005 12 | SOLVER: 13 | CLIP_GRADIENTS: 14 | ENABLED: True 15 | MAX_ITER: 130000 16 | STEPS: (100000, 120000) 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | NAME: "DensePoseDeepLabHead" 8 | SOLVER: 9 | MAX_ITER: 130000 10 | STEPS: (100000, 120000) 11 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | UV_CONFIDENCE: 8 | ENABLED: True 9 | TYPE: "iid_iso" 10 | POINT_REGRESSION_WEIGHTS: 0.0005 11 | SOLVER: 12 | CLIP_GRADIENTS: 13 | ENABLED: True 14 | MAX_ITER: 130000 15 | STEPS: (100000, 120000) 16 | WARMUP_FACTOR: 0.025 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | UV_CONFIDENCE: 8 | ENABLED: True 9 | TYPE: "indep_aniso" 10 | POINT_REGRESSION_WEIGHTS: 0.0005 11 | SOLVER: 12 | CLIP_GRADIENTS: 13 | ENABLED: True 14 | MAX_ITER: 130000 15 | STEPS: (100000, 120000) 16 | WARMUP_FACTOR: 0.025 17 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | MAX_ITER: 130000 8 | STEPS: (100000, 120000) 9 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | NUM_COARSE_SEGM_CHANNELS: 15 8 | POOLER_RESOLUTION: 14 9 | HEATMAP_SIZE: 56 10 | INDEX_WEIGHTS: 2.0 11 | PART_WEIGHTS: 0.3 12 | POINT_REGRESSION_WEIGHTS: 0.1 13 | DECODER_ON: False 14 | SOLVER: 15 | BASE_LR: 0.002 16 | MAX_ITER: 130000 17 | STEPS: (100000, 120000) 18 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/evolution/faster_rcnn_R_50_FPN_1x_MC.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-RCNN-FPN-MC.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | DENSEPOSE_ON: False 6 | RESNETS: 7 | DEPTH: 50 8 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: 
"../Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | ROI_DENSEPOSE_HEAD: 5 | NAME: "DensePoseDeepLabHead" 6 | DATASETS: 7 | TRAIN: ("densepose_coco_2014_minival_100",) 8 | TEST: ("densepose_coco_2014_minival_100",) 9 | SOLVER: 10 | MAX_ITER: 40 11 | STEPS: (30,) 12 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml" 2 | MODEL: 3 | WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl" 4 | DATASETS: 5 | TRAIN: () 6 | TEST: ("densepose_coco_2014_minival_100",) 7 | TEST: 8 | AUG: 9 | ENABLED: True 10 | MIN_SIZES: (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) 11 | MAX_SIZE: 4000 12 | FLIP: True 13 | EXPECTED_RESULTS: [["bbox_TTA", "AP", 61.74, 0.03], ["densepose_gps_TTA", "AP", 60.22, 0.03], ["densepose_gpsm_TTA", "AP", 63.85, 0.03]] 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | UV_CONFIDENCE: 8 | ENABLED: True 9 | TYPE: "iid_iso" 10 | POINT_REGRESSION_WEIGHTS: 0.0005 11 | DATASETS: 12 | TRAIN: ("densepose_coco_2014_minival_100",) 13 | TEST: ("densepose_coco_2014_minival_100",) 14 | SOLVER: 15 | CLIP_GRADIENTS: 16 | ENABLED: True 17 | MAX_ITER: 40 18 | STEPS: (30,) 19 | WARMUP_FACTOR: 0.025 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | ROI_DENSEPOSE_HEAD: 7 | UV_CONFIDENCE: 8 | ENABLED: True 9 | TYPE: "indep_aniso" 10 | POINT_REGRESSION_WEIGHTS: 0.0005 11 | DATASETS: 12 | TRAIN: ("densepose_coco_2014_minival_100",) 13 | TEST: ("densepose_coco_2014_minival_100",) 14 | SOLVER: 15 | CLIP_GRADIENTS: 16 | ENABLED: True 17 | MAX_ITER: 40 18 | STEPS: (30,) 19 | WARMUP_FACTOR: 0.025 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml" 2 | MODEL: 3 | WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl" 4 | DATASETS: 5 | TRAIN: () 6 | TEST: ("densepose_coco_2014_minival_100",) 7 | TEST: 8 | EXPECTED_RESULTS: [["bbox", "AP", 59.27, 0.025], ["densepose_gps", "AP", 60.11, 0.02], ["densepose_gpsm", "AP", 64.20, 0.02]] 9 | -------------------------------------------------------------------------------- 
/preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | DATASETS: 5 | TRAIN: ("densepose_coco_2014_minival_100",) 6 | TEST: ("densepose_coco_2014_minival_100",) 7 | SOLVER: 8 | MAX_ITER: 40 9 | STEPS: (30,) 10 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-DensePose-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | ROI_HEADS: 5 | NUM_CLASSES: 1 6 | DATASETS: 7 | TRAIN: ("densepose_coco_2014_minival",) 8 | TEST: ("densepose_coco_2014_minival",) 9 | SOLVER: 10 | MAX_ITER: 6000 11 | STEPS: (5500, 5800) 12 | TEST: 13 | EXPECTED_RESULTS: [["bbox", "AP", 58.27, 1.0], ["densepose_gps", "AP", 42.47, 1.5], ["densepose_gpsm", "AP", 49.20, 1.5]] 14 | 15 | --------------------------------------------------------------------------------
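Each `TEST.EXPECTED_RESULTS` entry in these quick-schedule configs is a `[task, metric, expected value, tolerance]` quadruple that the test harness compares against the evaluator's output (in detectron2 this check lives in `detectron2.evaluation.testing.verify_results`). A standalone illustrative re-statement with made-up actual numbers:

```python
# Mirrors the EXPECTED_RESULTS check; the 'results' dict here is invented.
expected = [["bbox", "AP", 58.27, 1.0], ["densepose_gps", "AP", 42.47, 1.5]]
results = {"bbox": {"AP": 58.30}, "densepose_gps": {"AP": 42.00}}

for task, metric, value, tolerance in expected:
    actual = results[task][metric]
    ok = abs(actual - value) < tolerance
    print(f"{task}/{metric}: got {actual}, expected {value} +/- {tolerance} -> "
          f"{'OK' if ok else 'FAIL'}")
```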
/preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/densepose/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .data.datasets import builtin # just to register data 3 | from .config import add_densepose_config, add_dataset_category_config 4 | from .densepose_head import ROI_DENSEPOSE_HEAD_REGISTRY 5 | from .evaluator import DensePoseCOCOEvaluator 6 | from .roi_head import DensePoseROIHeads 7 | from .data.structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData 8 | from .modeling.test_time_augmentation import DensePoseGeneralizedRCNNWithTTA 9 | from .utils.transform import load_from_cfg 10 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/densepose/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | from .build import build_detection_test_loader, build_detection_train_loader 4 | from .dataset_mapper import DatasetMapper 5 | 6 | # ensure the builtin data are registered 7 | from . import datasets 8 | 9 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 10 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/densepose/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | from . import builtin # ensure the builtin data are registered 4 | 5 | __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] 6 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/densepose/data/datasets/builtin.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .coco import BASE_DATASETS as BASE_COCO_DATASETS 3 | from .coco import DATASETS as COCO_DATASETS 4 | from .coco import register_datasets as register_coco_datasets 5 | 6 | DEFAULT_DATASETS_ROOT = "data" 7 | 8 | 9 | register_coco_datasets(COCO_DATASETS, DEFAULT_DATASETS_ROOT) 10 | register_coco_datasets(BASE_COCO_DATASETS, DEFAULT_DATASETS_ROOT) 11 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/densepose/utils/logger.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import logging 3 | 4 | 5 | def verbosity_to_level(verbosity): 6 | if verbosity is not None: 7 | if verbosity == 0: 8 | return logging.WARNING 9 | elif verbosity == 1: 10 | return logging.INFO 11 | elif verbosity >= 2: 12 | return logging.DEBUG 13 | return logging.WARNING 14 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/densepose/utils/transform.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from fvcore.common.file_io import PathManager 3 | 4 | from detectron2.data import MetadataCatalog 5 | 6 | from densepose import DensePoseTransformData 7 | 8 | 9 | def load_for_dataset(dataset_name): 10 | path = MetadataCatalog.get(dataset_name).densepose_transform_src 11 | densepose_transform_data_fpath = PathManager.get_local_path(path) 12 | return DensePoseTransformData.load(densepose_transform_data_fpath) 13 | 14 | 15 | def load_from_cfg(cfg): 16 | return load_for_dataset(cfg.DATASETS.TEST[0]) 17 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/densepose/vis/bounding_box.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved 2 | from .base import RectangleVisualizer, TextVisualizer 3 | 4 | 5 | class BoundingBoxVisualizer(object): 6 | def __init__(self): 7 | self.rectangle_visualizer = RectangleVisualizer() 8 | 9 | def visualize(self, image_bgr, boxes_xywh): 10 | for bbox_xywh in boxes_xywh: 11 | image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh) 12 | return image_bgr 13 | 14 | 15 | class ScoredBoundingBoxVisualizer(object): 16 | def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None): 17 | if bbox_visualizer_params is None: 18 | bbox_visualizer_params = {} 19 | if score_visualizer_params is None: 20 | score_visualizer_params = {} 21 | self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params) 22 | self.visualizer_score = TextVisualizer(**score_visualizer_params) 23 | 24 | def visualize(self, image_bgr, scored_bboxes): 25 | boxes_xywh, box_scores = scored_bboxes 26 | assert len(boxes_xywh) == len( 27 | box_scores 28 | ), "Number of bounding boxes {} should be equal to the number of scores {}".format( 29 | len(boxes_xywh), len(box_scores) 30 | ) 31 | for i, box_xywh in enumerate(boxes_xywh): 32 | score_i = box_scores[i] 33 | image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh) 34 | score_txt = "{0:6.4f}".format(score_i) 35 | topleft_xy = box_xywh[0], box_xywh[1] 36 | image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy) 37 | return image_bgr 38 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/dev/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Some scripts for developers to use include: 3 | 4 | - `run_instant_tests.sh`: run training for a few iterations. 5 | - `run_inference_tests.sh`: run inference on a small dataset. 6 | - `../../dev/linter.sh`: lint the codebase before commit 7 | - `../../dev/parse_results.sh`: parse results from a log file. 8 |
-------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/dev/run_inference_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | BIN="python train_net.py" 5 | OUTPUT="inference_test_output" 6 | NUM_GPUS=2 7 | IMS_PER_GPU=2 8 | IMS_PER_BATCH=$(( NUM_GPUS * IMS_PER_GPU )) 9 | 10 | CFG_LIST=( "${@:1}" ) 11 | 12 | if [ ${#CFG_LIST[@]} -eq 0 ]; then 13 | CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) 14 | fi 15 | 16 | echo "========================================================================" 17 | echo "Configs to run:" 18 | echo "${CFG_LIST[@]}" 19 | echo "========================================================================" 20 | 21 | for cfg in "${CFG_LIST[@]}"; do 22 | echo "========================================================================" 23 | echo "Running $cfg ..."
24 | echo "========================================================================" 25 | $BIN \ 26 | --eval-only \ 27 | --num-gpus $NUM_GPUS \ 28 | --config-file "$cfg" \ 29 | OUTPUT_DIR "$OUTPUT" \ 30 | SOLVER.IMS_PER_BATCH $IMS_PER_BATCH 31 | rm -rf $OUTPUT 32 | done 33 | 34 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/dev/run_instant_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | BIN="python train_net.py" 5 | OUTPUT="instant_test_output" 6 | NUM_GPUS=2 7 | SOLVER_IMS_PER_BATCH=$((NUM_GPUS * 2)) 8 | 9 | CFG_LIST=( "${@:1}" ) 10 | if [ ${#CFG_LIST[@]} -eq 0 ]; then 11 | CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) 12 | fi 13 | 14 | echo "========================================================================" 15 | echo "Configs to run:" 16 | echo "${CFG_LIST[@]}" 17 | echo "========================================================================" 18 | 19 | for cfg in "${CFG_LIST[@]}"; do 20 | echo "========================================================================" 21 | echo "Running $cfg ..." 22 | echo "========================================================================" 23 | $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ 24 | SOLVER.IMS_PER_BATCH $SOLVER_IMS_PER_BATCH \ 25 | OUTPUT_DIR "$OUTPUT" 26 | rm -rf "$OUTPUT" 27 | done 28 | 29 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/tests/test_model_e2e.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | 3 | import unittest 4 | import torch 5 | 6 | from detectron2.structures import BitMasks, Boxes, Instances 7 | 8 | from .common import get_model 9 | 10 | 11 | # TODO(plabatut): Modularize detectron2 tests and re-use 12 | def make_model_inputs(image, instances=None): 13 | if instances is None: 14 | return {"image": image} 15 | 16 | return {"image": image, "instances": instances} 17 | 18 | 19 | def make_empty_instances(h, w): 20 | instances = Instances((h, w)) 21 | instances.gt_boxes = Boxes(torch.rand(0, 4)) 22 | instances.gt_classes = torch.tensor([]).to(dtype=torch.int64) 23 | instances.gt_masks = BitMasks(torch.rand(0, h, w)) 24 | return instances 25 | 26 | 27 | class ModelE2ETest(unittest.TestCase): 28 | CONFIG_PATH = "" 29 | 30 | def setUp(self): 31 | self.model = get_model(self.CONFIG_PATH) 32 | 33 | def _test_eval(self, sizes): 34 | inputs = [make_model_inputs(torch.rand(3, size[0], size[1])) for size in sizes] 35 | self.model.eval() 36 | self.model(inputs) 37 | 38 | 39 | class DensePoseRCNNE2ETest(ModelE2ETest): 40 | CONFIG_PATH = "densepose_rcnn_R_101_FPN_s1x.yaml" 41 | 42 | def test_empty_data(self): 43 | self._test_eval([(200, 250), (200, 249)]) 44 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/tests/test_setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
2 | 3 | import unittest 4 | 5 | from .common import ( 6 | get_config_files, 7 | get_evolution_config_files, 8 | get_quick_schedules_config_files, 9 | setup, 10 | ) 11 | 12 | 13 | class TestSetup(unittest.TestCase): 14 | def _test_setup(self, config_file): 15 | setup(config_file) 16 | 17 | def test_setup_configs(self): 18 | config_files = get_config_files() 19 | for config_file in config_files: 20 | self._test_setup(config_file) 21 | 22 | def test_setup_evolution_configs(self): 23 | config_files = get_evolution_config_files() 24 | for config_file in config_files: 25 | self._test_setup(config_file) 26 | 27 | def test_setup_quick_schedules_configs(self): 28 | config_files = get_quick_schedules_config_files() 29 | for config_file in config_files: 30 | self._test_setup(config_file) 31 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/DensePose/tests/test_structures.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | 3 | import unittest 4 | 5 | from densepose.data.structures import normalized_coords_transform 6 | 7 | 8 | class TestStructures(unittest.TestCase): 9 | def test_normalized_coords_transform(self): 10 | bbox = (32, 24, 288, 216) 11 | x0, y0, w, h = bbox 12 | xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h 13 | f = normalized_coords_transform(*bbox) 14 | # Top-left 15 | expected_p, actual_p = (-1, -1), f((xmin, ymin)) 16 | self.assertEqual(expected_p, actual_p) 17 | # Top-right 18 | expected_p, actual_p = (1, -1), f((xmax, ymin)) 19 | self.assertEqual(expected_p, actual_p) 20 | # Bottom-left 21 | expected_p, actual_p = (-1, 1), f((xmin, ymax)) 22 | self.assertEqual(expected_p, actual_p) 23 | # Bottom-right 24 | expected_p, actual_p = (1, 1), f((xmax, ymax)) 25 | self.assertEqual(expected_p, actual_p) 26 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-PointRend-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../../../configs/Base-RCNN-FPN.yaml" 2 | MODEL: 3 | ROI_HEADS: 4 | NAME: "PointRendROIHeads" 5 | IN_FEATURES: ["p2", "p3", "p4", "p5"] 6 | ROI_BOX_HEAD: 7 | TRAIN_ON_PRED_BOXES: True 8 | ROI_MASK_HEAD: 9 | NAME: "CoarseMaskHead" 10 | FC_DIM: 1024 11 | NUM_FC: 2 12 | OUTPUT_SIDE_RESOLUTION: 7 13 | IN_FEATURES: ["p2"] 14 | POINT_HEAD_ON: True 15 | POINT_HEAD: 16 | FC_DIM: 256 17 | NUM_FC: 3 18 | IN_FEATURES: ["p2"] 19 | INPUT: 20 | # PointRend for instance segmentation does not work with "polygon" mask_format.
21 | MASK_FORMAT: "bitmask" 22 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-PointRend-RCNN-FPN.yaml 2 | MODEL: 3 | WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl 4 | MASK_ON: true 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 8 9 | POINT_HEAD: 10 | NUM_CLASSES: 8 11 | DATASETS: 12 | TEST: ("cityscapes_fine_instance_seg_val",) 13 | TRAIN: ("cityscapes_fine_instance_seg_train",) 14 | SOLVER: 15 | BASE_LR: 0.01 16 | IMS_PER_BATCH: 8 17 | MAX_ITER: 24000 18 | STEPS: (18000,) 19 | INPUT: 20 | MAX_SIZE_TEST: 2048 21 | MAX_SIZE_TRAIN: 2048 22 | MIN_SIZE_TEST: 1024 23 | MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) 24 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-PointRend-RCNN-FPN.yaml 2 | MODEL: 3 | WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl 4 | MASK_ON: true 5 | RESNETS: 6 | DEPTH: 50 7 | # To add COCO AP evaluation against the higher-quality LVIS annotations. 8 | # DATASETS: 9 | # TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-PointRend-RCNN-FPN.yaml 2 | MODEL: 3 | WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl 4 | MASK_ON: true 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | # To add COCO AP evaluation against the higher-quality LVIS annotations. 11 | # DATASETS: 12 | # TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") 13 | 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_parsing.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-PointRend-RCNN-FPN.yaml 2 | MODEL: 3 | WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl 4 | MASK_ON: true 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 1 9 | POINT_HEAD: 10 | NUM_CLASSES: 1 11 | SOLVER: 12 | STEPS: (210000, 250000) 13 | MAX_ITER: 270000 14 | IMS_PER_BATCH: 1 15 | # To add COCO AP evaluation against the higher-quality LVIS annotations. 
16 | # DATASETS: 17 | # TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") 18 | DATASETS: 19 | TRAIN: ("CIHP_train",) 20 | TEST: ("CIHP_val",) 21 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_parsing.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-PointRend-RCNN-FPN.yaml 2 | MODEL: 3 | WEIGHTS: "./X-101-32x8d.pkl" 4 | PIXEL_STD: [57.375, 57.120, 58.395] 5 | MASK_ON: true 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | ROI_HEADS: 12 | NUM_CLASSES: 1 13 | POINT_HEAD: 14 | NUM_CLASSES: 1 15 | SOLVER: 16 | STEPS: (210000, 250000) 17 | MAX_ITER: 270000 18 | IMS_PER_BATCH: 1 19 | # To add COCO AP evaluation against the higher-quality LVIS annotations. 20 | # DATASETS: 21 | # TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") 22 | INPUT: 23 | MIN_SIZE_TRAIN: (640, 864) 24 | MIN_SIZE_TRAIN_SAMPLING: "range" 25 | MAX_SIZE_TRAIN: 1440 26 | DATASETS: 27 | TRAIN: ("CIHP_train",) 28 | TEST: ("CIHP_val",) 29 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/Base-PointRend-Semantic-FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../../../configs/Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | BACKBONE: 5 | FREEZE_AT: 0 6 | SEM_SEG_HEAD: 7 | NAME: "PointRendSemSegHead" 8 | POINT_HEAD: 9 | NUM_CLASSES: 54 10 | FC_DIM: 256 11 | NUM_FC: 3 12 | IN_FEATURES: ["p2"] 13 | TRAIN_NUM_POINTS: 1024 14 | SUBDIVISION_STEPS: 2 15 | SUBDIVISION_NUM_POINTS: 8192 16 | COARSE_SEM_SEG_HEAD_NAME: "SemSegFPNHead" 17 | DATASETS: 18 | TRAIN: ("coco_2017_train_panoptic_stuffonly",) 19 | TEST: ("coco_2017_val_panoptic_stuffonly",) 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-PointRend-Semantic-FPN.yaml 2 | MODEL: 3 | WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-101.pkl 4 | RESNETS: 5 | DEPTH: 101 6 | SEM_SEG_HEAD: 7 | NUM_CLASSES: 19 8 | POINT_HEAD: 9 | NUM_CLASSES: 19 10 | TRAIN_NUM_POINTS: 2048 11 | SUBDIVISION_NUM_POINTS: 8192 12 | DATASETS: 13 | TRAIN: ("cityscapes_fine_sem_seg_train",) 14 | TEST: ("cityscapes_fine_sem_seg_val",) 15 | SOLVER: 16 | BASE_LR: 0.01 17 | STEPS: (40000, 55000) 18 | MAX_ITER: 65000 19 | IMS_PER_BATCH: 32 20 | INPUT: 21 | MIN_SIZE_TRAIN: (512, 768, 1024, 1280, 1536, 1792, 2048) 22 | MIN_SIZE_TRAIN_SAMPLING: "choice" 23 | MIN_SIZE_TEST: 1024 24 | MAX_SIZE_TRAIN: 4096 25 | MAX_SIZE_TEST: 2048 26 | CROP: 27 | ENABLED: True 28 | TYPE: "absolute" 29 | SIZE: (512, 1024) 30 | SINGLE_CATEGORY_MAX_AREA: 0.75 31 | COLOR_AUG_SSD: True 32 | DATALOADER: 33 | NUM_WORKERS: 16 34 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_50_FPN_1x_coco.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-PointRend-Semantic-FPN.yaml 2 | 
MODEL: 3 | WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/logs/hadoop.kylin.libdfs.log: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/logs/hadoop.kylin.libdfs.log -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/point_rend/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .config import add_pointrend_config 3 | from .coarse_mask_head import CoarseMaskHead 4 | from .roi_heads import PointRendROIHeads 5 | from .dataset_mapper import SemSegDatasetMapper 6 | from .semantic_seg import PointRendSemSegHead 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/PointRend/run.sh: -------------------------------------------------------------------------------- 1 | python finetune_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_parsing.yaml --num-gpus 1 2 | #python finetune_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_parsing.yaml --num-gpus 1 3 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/README.md: -------------------------------------------------------------------------------- 1 | 2 | Here are a few projects that are built on detectron2. 3 | They are examples of how to use detectron2 as a library, to make your projects more 4 | maintainable. 5 | 6 | ## Projects by Facebook 7 | 8 | Note that these are research projects, and therefore may not have the same level 9 | of support or stability as detectron2. 10 | 11 | + [DensePose: Dense Human Pose Estimation In The Wild](DensePose) 12 | + [Scale-Aware Trident Networks for Object Detection](TridentNet) 13 | + [TensorMask: A Foundation for Dense Object Segmentation](TensorMask) 14 | + [Mesh R-CNN](https://github.com/facebookresearch/meshrcnn) 15 | + [PointRend: Image Segmentation as Rendering](PointRend) 16 | + [Momentum Contrast for Unsupervised Visual Representation Learning](https://github.com/facebookresearch/moco/tree/master/detection) 17 | 18 | 19 | ## External Projects 20 | 21 | External projects in the community that use detectron2: 22 | 23 | 28 | 29 | + [VoVNet backbones](https://github.com/youngwanLEE/vovnet-detectron2). 30 | + [AdelaiDet](https://github.com/aim-uofa/adet), a detection toolbox from the University of Adelaide.
31 | + [CenterMask : Real-Time Anchor-Free Instance Segmentation](https://github.com/youngwanLEE/centermask2) 32 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/configs/Base-TensorMask.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "TensorMask" 3 | MASK_ON: True 4 | BACKBONE: 5 | NAME: "build_retinanet_resnet_fpn_backbone" 6 | RESNETS: 7 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 8 | ANCHOR_GENERATOR: 9 | SIZES: [[44, 60], [88, 120], [176, 240], [352, 480], [704, 960], [1408, 1920]] 10 | ASPECT_RATIOS: [[1.0]] 11 | FPN: 12 | IN_FEATURES: ["res2", "res3", "res4", "res5"] 13 | FUSE_TYPE: "avg" 14 | TENSOR_MASK: 15 | ALIGNED_ON: True 16 | BIPYRAMID_ON: True 17 | DATASETS: 18 | TRAIN: ("coco_2017_train",) 19 | TEST: ("coco_2017_val",) 20 | SOLVER: 21 | IMS_PER_BATCH: 16 22 | BASE_LR: 0.02 23 | STEPS: (60000, 80000) 24 | MAX_ITER: 90000 25 | VERSION: 2 26 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-TensorMask.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_6x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-TensorMask.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | STEPS: (480000, 520000) 8 | MAX_ITER: 540000 9 | INPUT: 10 | MIN_SIZE_TRAIN_SAMPLING: "range" 11 | MIN_SIZE_TRAIN: (640, 800) 12 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/tensormask/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .config import add_tensormask_config 3 | from .arch import TensorMask 4 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .swap_align2nat import SwapAlign2Nat, swap_align2nat 3 | 4 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 5 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | #pragma once 3 | #include <ATen/ATen.h> 4 | 5 | namespace tensormask { 6 | 7 | #ifdef WITH_CUDA 8 | at::Tensor SwapAlign2Nat_forward_cuda( 9 | const at::Tensor& X, 10 | const int lambda_val, 11 | const float pad_val); 12 | 13 | at::Tensor SwapAlign2Nat_backward_cuda( 14 | const at::Tensor& gY, 15 | const int lambda_val, 16 | const int batch_size, 17 | const int channel, 18 | const int height, 19 | const int width); 20 | #endif 21 | 22 | inline at::Tensor SwapAlign2Nat_forward( 23 | const at::Tensor& X, 24 | const int lambda_val, 25 | const float pad_val) { 26 | if (X.type().is_cuda()) { 27 | #ifdef WITH_CUDA 28 | return SwapAlign2Nat_forward_cuda(X, lambda_val, pad_val); 29 | #else 30 | AT_ERROR("Not compiled with GPU support"); 31 | #endif 32 | } 33 | AT_ERROR("Not implemented on the CPU"); 34 | } 35 | 36 | inline at::Tensor SwapAlign2Nat_backward( 37 | const at::Tensor& gY, 38 | const int lambda_val, 39 | const int batch_size, 40 | const int channel, 41 | const int height, 42 | const int width) { 43 | if (gY.type().is_cuda()) { 44 | #ifdef WITH_CUDA 45 | return SwapAlign2Nat_backward_cuda( 46 | gY, lambda_val, batch_size, channel, height, width); 47 | #else 48 | AT_ERROR("Not compiled with GPU support"); 49 | #endif 50 | } 51 | AT_ERROR("Not implemented on the CPU"); 52 | } 53 | 54 | } // namespace tensormask 55 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/vision.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | #include <torch/extension.h> 4 | #include "SwapAlign2Nat/SwapAlign2Nat.h" 5 | 6 | namespace tensormask { 7 | 8 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 9 | m.def( 10 | "swap_align2nat_forward", 11 | &SwapAlign2Nat_forward, 12 | "SwapAlign2Nat_forward"); 13 | m.def( 14 | "swap_align2nat_backward", 15 | &SwapAlign2Nat_backward, 16 | "SwapAlign2Nat_backward"); 17 | } 18 | 19 | } // namespace tensormask 20 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TensorMask/tests/test_swap_align2nat.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved 3 | 4 | import unittest 5 | import torch 6 | from torch.autograd import gradcheck 7 | 8 | from tensormask.layers.swap_align2nat import SwapAlign2Nat 9 | 10 | 11 | class SwapAlign2NatTest(unittest.TestCase): 12 | @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") 13 | def test_swap_align2nat_gradcheck_cuda(self): 14 | dtype = torch.float64 15 | device = torch.device("cuda") 16 | m = SwapAlign2Nat(2).to(dtype=dtype, device=device) 17 | x = torch.rand(2, 4, 10, 10, dtype=dtype, device=device, requires_grad=True) 18 | 19 | self.assertTrue(gradcheck(m, x), "gradcheck failed for SwapAlign2Nat CUDA") 20 | 21 | def _swap_align2nat(self, tensor, lambda_val): 22 | """ 23 | The basic setup for testing Swap_Align 24 | """ 25 | op = SwapAlign2Nat(lambda_val, pad_val=0.0) 26 | input = torch.from_numpy(tensor[None, :, :, :].astype("float32")) 27 | output = op.forward(input.cuda()).cpu().numpy() 28 | return output[0] 29 | 30 | 31 | if __name__ == "__main__": 32 | unittest.main() 33 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | BACKBONE: 4 | NAME: "build_trident_resnet_backbone" 5 | ROI_HEADS: 6 | NAME: "TridentRes5ROIHeads" 7 | POSITIVE_FRACTION: 0.5 8 | BATCH_SIZE_PER_IMAGE: 128 9 | PROPOSAL_APPEND_GT: False 10 | PROPOSAL_GENERATOR: 11 | NAME: "TridentRPN" 12 | RPN: 13 | POST_NMS_TOPK_TRAIN: 500 14 | TRIDENT: 15 | NUM_BRANCH: 3 16 | BRANCH_DILATIONS: [1, 2, 3] 17 | TEST_BRANCH_IDX: 1 18 | TRIDENT_STAGE: "res4" 19 | DATASETS: 20 | TRAIN: ("coco_2017_train",) 21 | TEST: ("coco_2017_val",) 22 | SOLVER: 23 | IMS_PER_BATCH: 16 24 | BASE_LR: 0.02 25 | STEPS: (60000, 80000) 26 | MAX_ITER: 90000 27 | INPUT: 28 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 29 | VERSION: 2 30 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-TridentNet-Fast-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-TridentNet-Fast-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-TridentNet-Fast-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- 
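The three TridentNet configs above all derive from `Base-TridentNet-Fast-C4.yaml` and are consumed through the standard detectron2 launcher. As a minimal sketch (assuming the project's own `train_net.py` entry point from upstream detectron2's `projects/TridentNet/`, which is not shown in this tree), a training launch would look like:

```bash
# Train TridentNet-Fast R-50 C4 on the 1x schedule defined above.
# --num-gpus is the standard detectron2 launch flag; if you change it,
# rescale SOLVER.IMS_PER_BATCH and BASE_LR to match.
python train_net.py \
    --config-file configs/tridentnet_fast_R_50_C4_1x.yaml \
    --num-gpus 8
```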
/preprocess/humanparsing/mhp_extension/detectron2/projects/TridentNet/tridentnet/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .config import add_tridentnet_config 3 | from .trident_backbone import ( 4 | TridentBottleneckBlock, 5 | build_trident_resnet_backbone, 6 | make_trident_stage, 7 | ) 8 | from .trident_rpn import TridentRPN 9 | from .trident_rcnn import TridentRes5ROIHeads, TridentStandardROIHeads 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TridentNet/tridentnet/config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | from detectron2.config import CfgNode as CN 5 | 6 | 7 | def add_tridentnet_config(cfg): 8 | """ 9 | Add config for tridentnet. 10 | """ 11 | _C = cfg 12 | 13 | _C.MODEL.TRIDENT = CN() 14 | 15 | # Number of branches for TridentNet. 16 | _C.MODEL.TRIDENT.NUM_BRANCH = 3 17 | # Specify the dilations for each branch. 18 | _C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3] 19 | # Specify the stage for applying trident blocks. Default stage is Res4 according to the 20 | # TridentNet paper. 21 | _C.MODEL.TRIDENT.TRIDENT_STAGE = "res4" 22 | # Specify the test branch index for TridentNet Fast inference: 23 | # - use -1 to aggregate results of all branches during inference. 24 | # - otherwise, only the specified branch is used for fast inference. Recommended setting is 25 | # to use the middle branch. 26 | _C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1 27 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_rpn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import torch 3 | 4 | from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY 5 | from detectron2.modeling.proposal_generator.rpn import RPN 6 | from detectron2.structures import ImageList 7 | 8 | 9 | @PROPOSAL_GENERATOR_REGISTRY.register() 10 | class TridentRPN(RPN): 11 | """ 12 | Trident RPN subnetwork. 13 | """ 14 | 15 | def __init__(self, cfg, input_shape): 16 | super(TridentRPN, self).__init__(cfg, input_shape) 17 | 18 | self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH 19 | self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 20 | 21 | def forward(self, images, features, gt_instances=None): 22 | """ 23 | See :class:`RPN.forward`. 24 | """ 25 | num_branch = self.num_branch if self.training or not self.trident_fast else 1 26 | # Duplicate images and gt_instances for all branches in TridentNet.
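# (The image tensors and ground-truth instances are tiled num_branch times along the batch dimension so they line up with the branch-expanded features produced by the trident backbone; the parent RPN then emits proposals per branch, which the Trident ROI heads merge downstream.)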
27 | all_images = ImageList( 28 | torch.cat([images.tensor] * num_branch), images.image_sizes * num_branch 29 | ) 30 | all_gt_instances = gt_instances * num_branch if gt_instances is not None else None 31 | 32 | return super(TridentRPN, self).forward(all_images, features, all_gt_instances) 33 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length=100 3 | multi_line_output=3 4 | include_trailing_comma=True 5 | known_standard_library=numpy,setuptools,mock 6 | skip=./datasets,docs 7 | skip_glob=*/__init__.py 8 | known_myself=detectron2 9 | known_third_party=fvcore,matplotlib,cv2,torch,torchvision,PIL,pycocotools,yacs,termcolor,cityscapesscripts,tabulate,tqdm,scipy,lvis,psutil,pkg_resources,caffe2,onnx 10 | no_lines_before=STDLIB,THIRDPARTY 11 | sections=FUTURE,STDLIB,THIRDPARTY,myself,FIRSTPARTY,LOCALFOLDER 12 | default_section=FIRSTPARTY 13 | 14 | [mypy] 15 | python_version=3.6 16 | ignore_missing_imports = True 17 | warn_unused_configs = True 18 | disallow_untyped_defs = True 19 | check_untyped_defs = True 20 | warn_unused_ignores = True 21 | warn_redundant_casts = True 22 | show_column_numbers = True 23 | follow_imports = silent 24 | allow_redefinition = True 25 | ; Require all functions to be annotated 26 | disallow_incomplete_defs = True 27 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/README.md: -------------------------------------------------------------------------------- 1 | ## Unit Tests 2 | 3 | To run the unittests, do: 4 | ``` 5 | cd detectron2 6 | python -m unittest discover -v -s ./tests 7 | ``` 8 | 9 | There are also end-to-end inference & training tests, in [dev/run_*_tests.sh](../dev). 10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/preprocess/humanparsing/mhp_extension/detectron2/tests/data/__init__.py -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/data/test_sampler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
2 | import unittest 3 | from torch.utils.data.sampler import SequentialSampler 4 | 5 | from detectron2.data.samplers import GroupedBatchSampler 6 | 7 | 8 | class TestGroupedBatchSampler(unittest.TestCase): 9 | def test_missing_group_id(self): 10 | sampler = SequentialSampler(list(range(100))) 11 | group_ids = [1] * 100 12 | samples = GroupedBatchSampler(sampler, group_ids, 2) 13 | 14 | for mini_batch in samples: 15 | self.assertEqual(len(mini_batch), 2) 16 | 17 | def test_groups(self): 18 | sampler = SequentialSampler(list(range(100))) 19 | group_ids = [1, 0] * 50 20 | samples = GroupedBatchSampler(sampler, group_ids, 2) 21 | 22 | for mini_batch in samples: 23 | self.assertEqual((mini_batch[0] + mini_batch[1]) % 2, 0) 24 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/layers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/preprocess/humanparsing/mhp_extension/detectron2/tests/layers/__init__.py -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/modeling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/preprocess/humanparsing/mhp_extension/detectron2/tests/modeling/__init__.py -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/structures/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/preprocess/humanparsing/mhp_extension/detectron2/tests/structures/__init__.py -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/structures/test_imagelist.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | 3 | import unittest 4 | from typing import Sequence 5 | import torch 6 | 7 | from detectron2.structures import ImageList 8 | 9 | 10 | class TestImageList(unittest.TestCase): 11 | def test_imagelist_padding_shape(self): 12 | class TensorToImageList(torch.nn.Module): 13 | def forward(self, tensors: Sequence[torch.Tensor]): 14 | return ImageList.from_tensors(tensors, 4).tensor 15 | 16 | func = torch.jit.trace( 17 | TensorToImageList(), ([torch.ones((3, 10, 10), dtype=torch.float32)],) 18 | ) 19 | ret = func([torch.ones((3, 15, 20), dtype=torch.float32)]) 20 | self.assertEqual(list(ret.shape), [1, 3, 16, 20], str(ret.shape)) 21 | 22 | func = torch.jit.trace( 23 | TensorToImageList(), 24 | ( 25 | [ 26 | torch.ones((3, 16, 10), dtype=torch.float32), 27 | torch.ones((3, 13, 11), dtype=torch.float32), 28 | ], 29 | ), 30 | ) 31 | ret = func( 32 | [ 33 | torch.ones((3, 25, 20), dtype=torch.float32), 34 | torch.ones((3, 10, 10), dtype=torch.float32), 35 | ] 36 | ) 37 | # does not support calling with different #images 38 | self.assertEqual(list(ret.shape), [2, 3, 28, 20], str(ret.shape)) 39 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/structures/test_instances.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import unittest 3 | import torch 4 | 5 | from detectron2.structures import Instances 6 | 7 | 8 | class TestInstancesIndexing(unittest.TestCase): 9 | def test_int_indexing(self): 10 | attr1 = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 0.5], [0.0, 0.0, 1.0], [0.0, 0.5, 0.5]]) 11 | attr2 = torch.tensor([0.1, 0.2, 0.3, 0.4]) 12 | instances = Instances((100, 100)) 13 | instances.attr1 = attr1 14 | instances.attr2 = attr2 15 | for i in range(-len(instances), len(instances)): 16 | inst = instances[i] 17 | self.assertEqual((inst.attr1 == attr1[i]).all(), True) 18 | self.assertEqual((inst.attr2 == attr2[i]).all(), True) 19 | 20 | self.assertRaises(IndexError, lambda: instances[len(instances)]) 21 | self.assertRaises(IndexError, lambda: instances[-len(instances) - 1]) 22 | 23 | 24 | if __name__ == "__main__": 25 | unittest.main() 26 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tests/test_model_zoo.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | import logging 3 | import unittest 4 | 5 | from detectron2 import model_zoo 6 | from detectron2.modeling import FPN, GeneralizedRCNN 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class TestModelZoo(unittest.TestCase): 12 | def test_get_returns_model(self): 13 | model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False) 14 | self.assertIsInstance(model, GeneralizedRCNN) 15 | self.assertIsInstance(model.backbone, FPN) 16 | 17 | def test_get_invalid_model(self): 18 | self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml") 19 | 20 | def test_get_url(self): 21 | url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml") 22 | self.assertEqual( 23 | url, 24 | "https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl", # noqa 25 | ) 26 | 27 | 28 | if __name__ == "__main__": 29 | unittest.main() 30 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tools/README.md: -------------------------------------------------------------------------------- 1 | 2 | This directory contains a few scripts that use detectron2. 3 | 4 | 5 | * `train_net.py` 6 | 7 | An example training script that's made to train builtin models of detectron2. 8 | 9 | For usage, see [GETTING_STARTED.md](../GETTING_STARTED.md). 10 | 11 | * `plain_train_net.py` 12 | 13 | Similar to `train_net.py`, but implements a training loop instead of using `Trainer`. 14 | This script includes fewer features but it may be more friendly to hackers. 15 | 16 | * `benchmark.py` 17 | 18 | Benchmark the training speed, inference speed or data loading speed of a given config. 19 | 20 | Usage: 21 | ``` 22 | python benchmark.py --config-file config.yaml --task train/eval/data [optional DDP flags] 23 | ``` 24 | 25 | * `visualize_json_results.py` 26 | 27 | Visualize the JSON instance detection/segmentation results dumped by `COCOEvaluator` or `LVISEvaluator`. 28 | 29 | Usage: 30 | ``` 31 | python visualize_json_results.py --input x.json --output dir/ --dataset coco_2017_val 32 | ``` 33 | If not using a builtin dataset, you'll need your own script or modify this script. 34 | 35 | * `visualize_data.py` 36 | 37 | Visualize ground truth raw annotations or training data (after preprocessing/augmentations). 38 | 39 | Usage: 40 | ``` 41 | python visualize_data.py --config-file config.yaml --source annotation/dataloader --output-dir dir/ [--show] 42 | ``` 43 | 44 | NOTE: the script does not stop by itself when using `--source dataloader` because a training 45 | dataloader is usually infinite. 46 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tools/deploy/README.md: -------------------------------------------------------------------------------- 1 | 2 | This directory contains: 3 | 4 | 1. A script that converts a detectron2 model to caffe2 format. 5 | 6 | 2. An example that loads a Mask R-CNN model in caffe2 format and runs inference. 7 | 8 | See [tutorial](https://detectron2.readthedocs.io/tutorials/deployment.html) 9 | for their usage.
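As a minimal sketch of the conversion step in item 1 (assuming the `caffe2_converter.py` script documented in this detectron2 snapshot's deployment tutorial; the config path and model-zoo weights below are illustrative placeholders):

```bash
# Convert a detectron2 model to caffe2 format and write it to ./caffe2_model.
# MODEL.WEIGHTS and MODEL.DEVICE are ordinary detectron2 config overrides.
python caffe2_converter.py \
    --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
    --output ./caffe2_model \
    MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \
    MODEL.DEVICE cpu
```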
10 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tools/inference.sh: -------------------------------------------------------------------------------- 1 | python finetune_net.py \ 2 | --num-gpus 1 \ 3 | --config-file ../configs/Misc/parsing_inference.yaml \ 4 | --eval-only MODEL.WEIGHTS ./model_final.pth TEST.AUG.ENABLED False 5 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/detectron2/tools/run.sh: -------------------------------------------------------------------------------- 1 | python finetune_net.py \ 2 | --config-file ../configs/Misc/parsing_finetune_cihp+vip.yaml \ 3 | --num-gpus 8 4 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/global_local_parsing/make_id_list.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | DATASET = 'VIP' # DATASET: MHPv2 or CIHP or VIP 4 | TYPE = 'crop_pic' # crop_pic or DemoDataset 5 | IMG_DIR = '../demo/cropped_img/crop_pic' 6 | SAVE_DIR = '../demo/cropped_img' 7 | 8 | if not os.path.exists(SAVE_DIR): 9 | os.makedirs(SAVE_DIR) 10 | 11 | with open(os.path.join(SAVE_DIR, TYPE + '.txt'), "w") as f: 12 | for img_name in os.listdir(IMG_DIR): 13 | f.write(img_name[:-4] + '\n') 14 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/scripts/make_coco_style_annotation.sh: -------------------------------------------------------------------------------- 1 | python ./coco_style_annotation_creator/human_to_coco.py \ 2 | --dataset 'CIHP' \ 3 | --json_save_dir './data/CIHP/annotations' \ 4 | --train_img_dir './data/CIHP/Training/Images' \ 5 | --train_anno_dir './data/CIHP/Training/Human_ids' \ 6 | --val_img_dir './data/CIHP/Validation/Images' \ 7 | --val_anno_dir './data/CIHP/Validation/Human_ids' 8 | 9 | 10 | python ./coco_style_annotation_creator/test_human2coco_format.py \ 11 | --dataset 'CIHP' \ 12 | --json_save_dir './data/CIHP/annotations' \ 13 | --test_img_dir './data/CIHP/Testing/Images' 14 | 15 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/scripts/make_crop.sh: -------------------------------------------------------------------------------- 1 | python make_crop_and_mask_w_mask_nms.py \ 2 | --img_dir './data/CIHP/Testing/Images' \ 3 | --save_dir './data/CIHP/' \ 4 | --img_list './data/CIHP/annotations/CIHP_val.json' \ 5 | --det_res './data/CIHP/detectron2_prediction/inference/instances_predictions.pth' 6 | 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/mhp_extension/scripts/parsing_fusion.sh: -------------------------------------------------------------------------------- 1 | python logits_fusion.py \ 2 | --test_json_path ./data/CIHP/crop.json \ 3 | --global_output_dir ./data/CIHP/global_pic_parsing \ 4 | --msrcnn_output_dir ./data/CIHP/crop_pic_parsing \ 5 | --gt_output_dir ./data/CIHP/crop_pic_parsing \ 6 | --save_dir ./data/CIHP/mhp_fusion_parsing 7 | -------------------------------------------------------------------------------- /preprocess/humanparsing/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .bn import ABN, InPlaceABN, InPlaceABNSync 2 | from .functions import ACT_RELU, ACT_LEAKY_RELU, ACT_ELU, 
ACT_NONE 3 | from .misc import GlobalAvgPool2d, SingleGPU 4 | from .residual import IdentityResidualBlock 5 | from .dense import DenseModule 6 | -------------------------------------------------------------------------------- /preprocess/humanparsing/modules/dense.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from .bn import ABN 7 | 8 | 9 | class DenseModule(nn.Module): 10 | def __init__(self, in_channels, growth, layers, bottleneck_factor=4, norm_act=ABN, dilation=1): 11 | super(DenseModule, self).__init__() 12 | self.in_channels = in_channels 13 | self.growth = growth 14 | self.layers = layers 15 | 16 | self.convs1 = nn.ModuleList() 17 | self.convs3 = nn.ModuleList() 18 | for i in range(self.layers): 19 | self.convs1.append(nn.Sequential(OrderedDict([ 20 | ("bn", norm_act(in_channels)), 21 | ("conv", nn.Conv2d(in_channels, self.growth * bottleneck_factor, 1, bias=False)) 22 | ]))) 23 | self.convs3.append(nn.Sequential(OrderedDict([ 24 | ("bn", norm_act(self.growth * bottleneck_factor)), 25 | ("conv", nn.Conv2d(self.growth * bottleneck_factor, self.growth, 3, padding=dilation, bias=False, 26 | dilation=dilation)) 27 | ]))) 28 | in_channels += self.growth 29 | 30 | @property 31 | def out_channels(self): 32 | return self.in_channels + self.growth * self.layers 33 | 34 | def forward(self, x): 35 | inputs = [x] 36 | for i in range(self.layers): 37 | x = torch.cat(inputs, dim=1) 38 | x = self.convs1[i](x) 39 | x = self.convs3[i](x) 40 | inputs += [x] 41 | 42 | return torch.cat(inputs, dim=1) 43 | -------------------------------------------------------------------------------- /preprocess/humanparsing/modules/misc.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | import torch.distributed as dist 4 | 5 | class GlobalAvgPool2d(nn.Module): 6 | def __init__(self): 7 | """Global average pooling over the input's spatial dimensions""" 8 | super(GlobalAvgPool2d, self).__init__() 9 | 10 | def forward(self, inputs): 11 | in_size = inputs.size() 12 | return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) 13 | 14 | class SingleGPU(nn.Module): 15 | def __init__(self, module): 16 | super(SingleGPU, self).__init__() 17 | self.module=module 18 | 19 | def forward(self, input): 20 | return self.module(input.cuda(non_blocking=True)) 21 | 22 | -------------------------------------------------------------------------------- /preprocess/humanparsing/modules/src/checks.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <ATen/ATen.h> 4 | 5 | // Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT 6 | #ifndef AT_CHECK 7 | #define AT_CHECK AT_ASSERT 8 | #endif 9 | 10 | #define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") 11 | #define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") 12 | #define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") 13 | 14 | #define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 15 | #define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) -------------------------------------------------------------------------------- /preprocess/humanparsing/modules/src/utils/checks.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <ATen/ATen.h> 4 | 5 | // Define
AT_CHECK for old version of ATen where the same function was called AT_ASSERT 6 | #ifndef AT_CHECK 7 | #define AT_CHECK AT_ASSERT 8 | #endif 9 | 10 | #define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") 11 | #define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") 12 | #define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") 13 | 14 | #define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 15 | #define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) -------------------------------------------------------------------------------- /preprocess/humanparsing/modules/src/utils/common.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <ATen/ATen.h> 4 | 5 | /* 6 | * Functions to share code between CPU and GPU 7 | */ 8 | 9 | #ifdef __CUDACC__ 10 | // CUDA versions 11 | 12 | #define HOST_DEVICE __host__ __device__ 13 | #define INLINE_HOST_DEVICE __host__ __device__ inline 14 | #define FLOOR(x) floor(x) 15 | 16 | #if __CUDA_ARCH__ >= 600 17 | // Recent compute capabilities have block-level atomicAdd for all data types, so we use that 18 | #define ACCUM(x,y) atomicAdd_block(&(x),(y)) 19 | #else 20 | // Older architectures don't have block-level atomicAdd, nor atomicAdd for doubles, so we defer to atomicAdd for float 21 | // and use the known atomicCAS-based implementation for double 22 | template<typename data_t> 23 | __device__ inline data_t atomic_add(data_t *address, data_t val) { 24 | return atomicAdd(address, val); 25 | } 26 | 27 | template<> 28 | __device__ inline double atomic_add(double *address, double val) { 29 | unsigned long long int* address_as_ull = (unsigned long long int*)address; 30 | unsigned long long int old = *address_as_ull, assumed; 31 | do { 32 | assumed = old; 33 | old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); 34 | } while (assumed != old); 35 | return __longlong_as_double(old); 36 | } 37 | 38 | #define ACCUM(x,y) atomic_add(&(x),(y)) 39 | #endif // #if __CUDA_ARCH__ >= 600 40 | 41 | #else 42 | // CPU versions 43 | 44 | #define HOST_DEVICE 45 | #define INLINE_HOST_DEVICE inline 46 | #define FLOOR(x) std::floor(x) 47 | #define ACCUM(x,y) (x) += (y) 48 | 49 | #endif // #ifdef __CUDACC__ -------------------------------------------------------------------------------- /preprocess/humanparsing/networks/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from networks.AugmentCE2P import resnet101 3 | 4 | __factory = { 5 | 'resnet101': resnet101, 6 | } 7 | 8 | 9 | def init_model(name, *args, **kwargs): 10 | if name not in __factory.keys(): 11 | raise KeyError("Unknown model arch: {}".format(name)) 12 | return __factory[name](*args, **kwargs) -------------------------------------------------------------------------------- /preprocess/humanparsing/run_parsing.py: -------------------------------------------------------------------------------- 1 | import pdb 2 | from pathlib import Path 3 | import sys 4 | import os 5 | import onnxruntime as ort 6 | PROJECT_ROOT = Path(__file__).absolute().parents[0].absolute() 7 | sys.path.insert(0, str(PROJECT_ROOT)) 8 | from parsing_api import onnx_inference 9 | import torch 10 | 11 | 12 | class Parsing: 13 | def __init__(self, gpu_id: int): 14 | self.gpu_id = gpu_id 15 | torch.cuda.set_device(gpu_id) 16 | session_options = ort.SessionOptions() 17 |
session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL 18 | session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL 19 | session_options.add_session_config_entry('gpu_id', str(gpu_id)) 20 | self.session = ort.InferenceSession(os.path.join(Path(__file__).absolute().parents[2].absolute(), 'checkpoints/humanparsing/parsing_atr.onnx'), 21 | sess_options=session_options, providers=['CPUExecutionProvider']) 22 | self.lip_session = ort.InferenceSession(os.path.join(Path(__file__).absolute().parents[2].absolute(), 'checkpoints/humanparsing/parsing_lip.onnx'), 23 | sess_options=session_options, providers=['CPUExecutionProvider']) 24 | 25 | 26 | def __call__(self, input_image): 27 | torch.cuda.set_device(self.gpu_id) 28 | parsed_image, face_mask = onnx_inference(self.session, self.lip_session, input_image) 29 | return parsed_image, face_mask 30 | -------------------------------------------------------------------------------- /preprocess/humanparsing/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/preprocess/humanparsing/utils/__init__.py -------------------------------------------------------------------------------- /preprocess/humanparsing/utils/consistency_loss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | 4 | """ 5 | @Author : Peike Li 6 | @Contact : peike.li@yahoo.com 7 | @File : consistency_loss.py 8 | @Time : 7/23/19 4:02 PM 9 | @Desc : 10 | @License : This source code is licensed under the license found in the 11 | LICENSE file in the root directory of this source tree. 12 | """ 13 | import torch 14 | import torch.nn.functional as F 15 | from torch import nn 16 | from datasets.target_generation import generate_edge_tensor 17 | 18 | 19 | class ConsistencyLoss(nn.Module): 20 | def __init__(self, ignore_index=255): 21 | super(ConsistencyLoss, self).__init__() 22 | self.ignore_index=ignore_index 23 | 24 | def forward(self, parsing, edge, label): 25 | parsing_pre = torch.argmax(parsing, dim=1) 26 | parsing_pre[label==self.ignore_index]=self.ignore_index 27 | generated_edge = generate_edge_tensor(parsing_pre) 28 | edge_pre = torch.argmax(edge, dim=1) 29 | v_generate_edge = generated_edge[label!=255] 30 | v_edge_pre = edge_pre[label!=255] 31 | v_edge_pre = v_edge_pre.type(torch.cuda.FloatTensor) 32 | positive_union = (v_generate_edge==1)&(v_edge_pre==1) # only the positive values count 33 | return F.smooth_l1_loss(v_generate_edge[positive_union].squeeze(0), v_edge_pre[positive_union].squeeze(0)) 34 | -------------------------------------------------------------------------------- /preprocess/humanparsing/utils/kl_loss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | 4 | """ 5 | @Author : Peike Li 6 | @Contact : peike.li@yahoo.com 7 | @File : kl_loss.py 8 | @Time : 7/23/19 4:02 PM 9 | @Desc : 10 | @License : This source code is licensed under the license found in the 11 | LICENSE file in the root directory of this source tree. 12 | """ 13 | import torch 14 | import torch.nn.functional as F 15 | from torch import nn 16 | 17 | 18 | def flatten_probas(input, target, labels, ignore=255): 19 | """ 20 | Flattens predictions in the batch.
21 | """ 22 | B, C, H, W = input.size() 23 | input = input.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C 24 | target = target.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C 25 | labels = labels.view(-1) 26 | if ignore is None: 27 | return input, target 28 | valid = (labels != ignore) 29 | vinput = input[valid.nonzero().squeeze()] 30 | vtarget = target[valid.nonzero().squeeze()] 31 | return vinput, vtarget 32 | 33 | 34 | class KLDivergenceLoss(nn.Module): 35 | def __init__(self, ignore_index=255, T=1): 36 | super(KLDivergenceLoss, self).__init__() 37 | self.ignore_index=ignore_index 38 | self.T = T 39 | 40 | def forward(self, input, target, label): 41 | log_input_prob = F.log_softmax(input / self.T, dim=1) 42 | target_prob = F.softmax(target / self.T, dim=1) 43 | loss = F.kl_div(*flatten_probas(log_input_prob, target_prob, label, ignore=self.ignore_index)) 44 | return self.T*self.T*loss # scale by T^2 to keep gradient magnitudes balanced across temperatures 45 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.24.4 2 | scipy==1.10.1 3 | scikit-image==0.21.0 4 | opencv-python==4.7.0.72 5 | pillow==9.4.0 6 | diffusers==0.24.0 7 | transformers==4.36.2 8 | accelerate==0.26.1 9 | matplotlib==3.7.4 10 | tqdm==4.64.1 11 | gradio==4.16.0 12 | config==0.5.1 13 | einops==0.7.0 14 | onnxruntime==1.16.2 -------------------------------------------------------------------------------- /run/examples/garment/00055_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/00055_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/00126_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/00126_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/00151_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/00151_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/00470_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/00470_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/02015_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/02015_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/02305_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/02305_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/03032_00.jpg:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/03032_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/03244_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/03244_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/04825_00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/04825_00.jpg -------------------------------------------------------------------------------- /run/examples/garment/048554_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/048554_1.jpg -------------------------------------------------------------------------------- /run/examples/garment/048769_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/048769_1.jpg -------------------------------------------------------------------------------- /run/examples/garment/049805_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/049805_1.jpg -------------------------------------------------------------------------------- /run/examples/garment/049920_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/049920_1.jpg -------------------------------------------------------------------------------- /run/examples/garment/049940_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/049940_1.jpg -------------------------------------------------------------------------------- /run/examples/garment/049947_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/049947_1.jpg -------------------------------------------------------------------------------- /run/examples/garment/049949_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/049949_1.jpg -------------------------------------------------------------------------------- /run/examples/garment/049965_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/049965_1.jpg 
--------------------------------------------------------------------------------
/run/examples/garment/050002_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/050002_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/050105_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/050105_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/050181_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/050181_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/050191_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/050191_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051412_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051412_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051473_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051473_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051515_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051515_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051517_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051517_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051827_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051827_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051946_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051946_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051988_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051988_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051991_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051991_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/051998_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/051998_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/052234_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/052234_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/053290_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/053290_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/053319_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/053319_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/053742_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/053742_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/053744_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/053744_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/053786_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/053786_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/053790_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/053790_1.jpg
--------------------------------------------------------------------------------
/run/examples/garment/06123_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/06123_00.jpg
--------------------------------------------------------------------------------
/run/examples/garment/07382_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/07382_00.jpg
--------------------------------------------------------------------------------
/run/examples/garment/07764_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/07764_00.jpg
--------------------------------------------------------------------------------
/run/examples/garment/10297_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/10297_00.jpg
--------------------------------------------------------------------------------
/run/examples/garment/12562_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/garment/12562_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/01008_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/01008_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/01861_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/01861_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/02849_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/02849_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/049205_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/049205_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/049447_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/049447_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/049713_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/049713_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/051482_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/051482_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/051918_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/051918_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/051962_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/051962_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/052472_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/052472_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/052767_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/052767_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/052964_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/052964_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/053228_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/053228_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/053514_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/053514_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/053700_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/053700_0.jpg
--------------------------------------------------------------------------------
/run/examples/model/05997_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/05997_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/07966_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/07966_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/09597_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/09597_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/14627_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/14627_00.jpg
--------------------------------------------------------------------------------
/run/examples/model/model_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_1.png
--------------------------------------------------------------------------------
/run/examples/model/model_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_2.png
--------------------------------------------------------------------------------
/run/examples/model/model_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_3.png
--------------------------------------------------------------------------------
/run/examples/model/model_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_4.png
--------------------------------------------------------------------------------
/run/examples/model/model_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_5.png
--------------------------------------------------------------------------------
/run/examples/model/model_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_6.png
--------------------------------------------------------------------------------
/run/examples/model/model_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_7.png
--------------------------------------------------------------------------------
/run/examples/model/model_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_8.png
--------------------------------------------------------------------------------
/run/examples/model/model_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/examples/model/model_9.png
--------------------------------------------------------------------------------
/run/images_output/mask.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/images_output/mask.jpg
--------------------------------------------------------------------------------
/run/images_output/out_hd_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/images_output/out_hd_0.png
--------------------------------------------------------------------------------
/run/images_output/out_hd_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/images_output/out_hd_1.png
--------------------------------------------------------------------------------
/run/images_output/out_hd_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/images_output/out_hd_2.png
--------------------------------------------------------------------------------
/run/images_output/out_hd_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/levihsu/OOTDiffusion/13ef0faba266cdde9febc8ad39be2395bbb89d9c/run/images_output/out_hd_3.png
--------------------------------------------------------------------------------
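
Note: a minimal usage sketch for the KLDivergenceLoss defined above. This is not part of the repository; the shapes, temperature value, and ignored region are illustrative assumptions, and it presumes the loss file's definitions (torch, nn, F, flatten_probas, KLDivergenceLoss) are already in scope.

import torch

# Hypothetical teacher/student score maps of shape (B, C, H, W), plus a
# (B, H, W) annotation map in which ignore_index (255) marks unlabeled pixels.
criterion = KLDivergenceLoss(ignore_index=255, T=2)
student_logits = torch.randn(2, 20, 64, 64, requires_grad=True)  # e.g. 20 parsing classes
teacher_logits = torch.randn(2, 20, 64, 64)
label = torch.randint(0, 20, (2, 64, 64))
label[:, :8, :] = 255  # pretend the top band is unlabeled; those pixels are dropped

loss = criterion(student_logits, teacher_logits, label)
loss.backward()  # gradients flow only through the student logits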