├── README.md ├── data ├── myloader.py └── rasampler.py ├── datasets.py ├── engine.py ├── mlp_based ├── hire_mlp.py └── wave_mlp.py ├── mmcv-1.3.0 ├── CONTRIBUTING.md ├── Dockerfile ├── Jenkinsfile ├── LICENSE ├── MANIFEST.in ├── README.md ├── README_zh-CN.md ├── docs │ ├── Makefile │ ├── _static │ │ ├── flow_img2toimg1.png │ │ ├── flow_raw_images.png │ │ ├── flow_visualization.png │ │ ├── flow_warp.png │ │ ├── flow_warp_diff.png │ │ ├── parallel_progress.gif │ │ └── progress.gif │ ├── api.rst │ ├── build.md │ ├── cnn.md │ ├── conf.py │ ├── image.md │ ├── index.rst │ ├── io.md │ ├── make.bat │ ├── mmcv-logo.png │ ├── onnx.md │ ├── onnxruntime_op.md │ ├── ops.md │ ├── readme.md │ ├── registry.md │ ├── runner.md │ ├── tensorrt_plugin.md │ ├── trouble_shooting.md │ ├── utils.md │ ├── video.md │ └── visualization.md ├── examples │ ├── config_cifar10.py │ ├── dist_train_cifar10.sh │ ├── resnet_cifar.py │ └── train_cifar10.py ├── mmcv │ ├── __init__.py │ ├── arraymisc │ │ ├── __init__.py │ │ └── quantization.py │ ├── cnn │ │ ├── __init__.py │ │ ├── alexnet.py │ │ ├── bricks │ │ │ ├── __init__.py │ │ │ ├── activation.py │ │ │ ├── context_block.py │ │ │ ├── conv.py │ │ │ ├── conv2d_adaptive_padding.py │ │ │ ├── conv_module.py │ │ │ ├── conv_ws.py │ │ │ ├── depthwise_separable_conv_module.py │ │ │ ├── generalized_attention.py │ │ │ ├── hsigmoid.py │ │ │ ├── hswish.py │ │ │ ├── non_local.py │ │ │ ├── norm.py │ │ │ ├── padding.py │ │ │ ├── plugin.py │ │ │ ├── registry.py │ │ │ ├── scale.py │ │ │ ├── swish.py │ │ │ ├── transformer.py │ │ │ ├── upsample.py │ │ │ └── wrappers.py │ │ ├── resnet.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── flops_counter.py │ │ │ ├── fuse_conv_bn.py │ │ │ └── weight_init.py │ │ └── vgg.py │ ├── fileio │ │ ├── __init__.py │ │ ├── file_client.py │ │ ├── handlers │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── json_handler.py │ │ │ ├── pickle_handler.py │ │ │ └── yaml_handler.py │ │ ├── io.py │ │ └── parse.py │ ├── image │ │ ├── __init__.py │ │ ├── colorspace.py │ │ ├── geometric.py │ │ ├── io.py │ │ ├── misc.py │ │ └── photometric.py │ ├── model_zoo │ │ ├── deprecated.json │ │ ├── mmcls.json │ │ └── open_mmlab.json │ ├── onnx │ │ ├── __init__.py │ │ ├── info.py │ │ ├── onnx_utils │ │ │ ├── __init__.py │ │ │ └── symbolic_helper.py │ │ ├── simplify │ │ │ ├── __init__.py │ │ │ ├── common.py │ │ │ └── core.py │ │ └── symbolic.py │ ├── ops │ │ ├── __init__.py │ │ ├── bbox.py │ │ ├── box_iou_rotated.py │ │ ├── carafe.py │ │ ├── cc_attention.py │ │ ├── corner_pool.py │ │ ├── csrc │ │ │ ├── bbox_overlaps_cuda_kernel.cuh │ │ │ ├── box_iou_rotated_cuda.cuh │ │ │ ├── box_iou_rotated_utils.hpp │ │ │ ├── carafe_cuda_kernel.cuh │ │ │ ├── carafe_naive_cuda_kernel.cuh │ │ │ ├── cc_attention_cuda_kernel.cuh │ │ │ ├── common_cuda_helper.hpp │ │ │ ├── deform_conv_cuda_kernel.cuh │ │ │ ├── deform_roi_pool_cuda_kernel.cuh │ │ │ ├── masked_conv2d_cuda_kernel.cuh │ │ │ ├── modulated_deform_conv_cuda_kernel.cuh │ │ │ ├── nms_cuda_kernel.cuh │ │ │ ├── nms_rotated_cuda.cuh │ │ │ ├── onnxruntime │ │ │ │ ├── cpu │ │ │ │ │ ├── nms.cpp │ │ │ │ │ ├── onnxruntime_register.cpp │ │ │ │ │ ├── roi_align.cpp │ │ │ │ │ └── soft_nms.cpp │ │ │ │ ├── nms.h │ │ │ │ ├── onnxruntime_register.h │ │ │ │ ├── onnxruntime_session_options_config_keys.h │ │ │ │ ├── ort_mmcv_utils.h │ │ │ │ ├── roi_align.h │ │ │ │ └── soft_nms.h │ │ │ ├── parrots │ │ │ │ ├── bbox_overlaps.cpp │ │ │ │ ├── bbox_overlaps_cuda.cu │ │ │ │ ├── bbox_overlaps_parrots.cpp │ │ │ │ ├── bbox_overlaps_pytorch.h │ │ │ │ ├── box_iou_rotated.cpp │ │ │ 
│ ├── box_iou_rotated_cpu.cpp │ │ │ │ ├── box_iou_rotated_cuda.cu │ │ │ │ ├── box_iou_rotated_parrots.cpp │ │ │ │ ├── box_iou_rotated_pytorch.h │ │ │ │ ├── carafe.cpp │ │ │ │ ├── carafe_cuda.cu │ │ │ │ ├── carafe_naive.cpp │ │ │ │ ├── carafe_naive_cuda.cu │ │ │ │ ├── carafe_naive_parrots.cpp │ │ │ │ ├── carafe_naive_pytorch.h │ │ │ │ ├── carafe_parrots.cpp │ │ │ │ ├── carafe_pytorch.h │ │ │ │ ├── cc_attention.cpp │ │ │ │ ├── cc_attention_cuda.cu │ │ │ │ ├── cc_attention_parrots.cpp │ │ │ │ ├── cc_attention_pytorch.h │ │ │ │ ├── corner_pool.cpp │ │ │ │ ├── corner_pool_parrots.cpp │ │ │ │ ├── corner_pool_pytorch.h │ │ │ │ ├── deform_conv.cpp │ │ │ │ ├── deform_conv_cuda.cu │ │ │ │ ├── deform_conv_parrots.cpp │ │ │ │ ├── deform_conv_pytorch.h │ │ │ │ ├── deform_roi_pool.cpp │ │ │ │ ├── deform_roi_pool_cuda.cu │ │ │ │ ├── deform_roi_pool_parrots.cpp │ │ │ │ ├── deform_roi_pool_pytorch.h │ │ │ │ ├── focal_loss.cpp │ │ │ │ ├── focal_loss_cuda.cu │ │ │ │ ├── focal_loss_parrots.cpp │ │ │ │ ├── focal_loss_pytorch.h │ │ │ │ ├── masked_conv2d.cpp │ │ │ │ ├── masked_conv2d_cuda.cu │ │ │ │ ├── masked_conv2d_parrots.cpp │ │ │ │ ├── masked_conv2d_pytorch.h │ │ │ │ ├── modulated_deform_conv.cpp │ │ │ │ ├── modulated_deform_conv_cuda.cu │ │ │ │ ├── modulated_deform_conv_parrots.cpp │ │ │ │ ├── modulated_deform_conv_pytorch.h │ │ │ │ ├── nms.cpp │ │ │ │ ├── nms_cuda.cu │ │ │ │ ├── nms_parrots.cpp │ │ │ │ ├── nms_pytorch.h │ │ │ │ ├── nms_rotated.cpp │ │ │ │ ├── nms_rotated_cpu.cpp │ │ │ │ ├── nms_rotated_cuda.cu │ │ │ │ ├── psamask.cpp │ │ │ │ ├── psamask_cuda.cu │ │ │ │ ├── psamask_parrots.cpp │ │ │ │ ├── psamask_pytorch.h │ │ │ │ ├── roi_align.cpp │ │ │ │ ├── roi_align_cpu.cpp │ │ │ │ ├── roi_align_cuda.cu │ │ │ │ ├── roi_align_parrots.cpp │ │ │ │ ├── roi_align_pytorch.h │ │ │ │ ├── roi_pool.cpp │ │ │ │ ├── roi_pool_cuda.cu │ │ │ │ ├── roi_pool_parrots.cpp │ │ │ │ ├── roi_pool_pytorch.h │ │ │ │ ├── sync_bn.cpp │ │ │ │ ├── sync_bn_cuda.cu │ │ │ │ ├── sync_bn_parrots.cpp │ │ │ │ ├── sync_bn_pytorch.h │ │ │ │ ├── tin_shift.cpp │ │ │ │ ├── tin_shift_cuda.cu │ │ │ │ ├── tin_shift_parrots.cpp │ │ │ │ └── tin_shift_pytorch.h │ │ │ ├── parrots_cpp_helper.hpp │ │ │ ├── parrots_cuda_helper.hpp │ │ │ ├── parrots_cudawarpfunction.cuh │ │ │ ├── psamask_cuda_kernel.cuh │ │ │ ├── pytorch │ │ │ │ ├── bbox_overlaps.cpp │ │ │ │ ├── bbox_overlaps_cuda.cu │ │ │ │ ├── box_iou_rotated.cpp │ │ │ │ ├── box_iou_rotated_cpu.cpp │ │ │ │ ├── box_iou_rotated_cuda.cu │ │ │ │ ├── carafe.cpp │ │ │ │ ├── carafe_cuda.cu │ │ │ │ ├── carafe_naive.cpp │ │ │ │ ├── carafe_naive_cuda.cu │ │ │ │ ├── cc_attention.cpp │ │ │ │ ├── cc_attention_cuda.cu │ │ │ │ ├── corner_pool.cpp │ │ │ │ ├── deform_conv.cpp │ │ │ │ ├── deform_conv_cuda.cu │ │ │ │ ├── deform_roi_pool.cpp │ │ │ │ ├── deform_roi_pool_cuda.cu │ │ │ │ ├── focal_loss.cpp │ │ │ │ ├── focal_loss_cuda.cu │ │ │ │ ├── fused_bias_leakyrelu.cpp │ │ │ │ ├── fused_bias_leakyrelu_cuda.cu │ │ │ │ ├── info.cpp │ │ │ │ ├── masked_conv2d.cpp │ │ │ │ ├── masked_conv2d_cuda.cu │ │ │ │ ├── modulated_deform_conv.cpp │ │ │ │ ├── modulated_deform_conv_cuda.cu │ │ │ │ ├── nms.cpp │ │ │ │ ├── nms_cuda.cu │ │ │ │ ├── nms_rotated.cpp │ │ │ │ ├── nms_rotated_cpu.cpp │ │ │ │ ├── nms_rotated_cuda.cu │ │ │ │ ├── psamask.cpp │ │ │ │ ├── psamask_cuda.cu │ │ │ │ ├── pybind.cpp │ │ │ │ ├── roi_align.cpp │ │ │ │ ├── roi_align_cpu.cpp │ │ │ │ ├── roi_align_cuda.cu │ │ │ │ ├── roi_pool.cpp │ │ │ │ ├── roi_pool_cuda.cu │ │ │ │ ├── sync_bn.cpp │ │ │ │ ├── sync_bn_cuda.cu │ │ │ │ ├── tin_shift.cpp │ │ │ │ ├── 
tin_shift_cuda.cu │ │ │ │ ├── upfirdn2d.cpp │ │ │ │ └── upfirdn2d_kernel.cu │ │ │ ├── pytorch_cpp_helper.hpp │ │ │ ├── pytorch_cuda_helper.hpp │ │ │ ├── roi_align_cuda_kernel.cuh │ │ │ ├── roi_pool_cuda_kernel.cuh │ │ │ ├── sigmoid_focal_loss_cuda_kernel.cuh │ │ │ ├── softmax_focal_loss_cuda_kernel.cuh │ │ │ ├── sync_bn_cuda_kernel.cuh │ │ │ ├── tensorrt │ │ │ │ ├── plugins │ │ │ │ │ ├── trt_cuda_helper.cu │ │ │ │ │ ├── trt_deform_conv.cpp │ │ │ │ │ ├── trt_deform_conv_kernel.cu │ │ │ │ │ ├── trt_nms.cpp │ │ │ │ │ ├── trt_nms_kernel.cu │ │ │ │ │ ├── trt_plugin.cpp │ │ │ │ │ ├── trt_roi_align.cpp │ │ │ │ │ ├── trt_roi_align_kernel.cu │ │ │ │ │ ├── trt_scatternd.cpp │ │ │ │ │ └── trt_scatternd_kernel.cu │ │ │ │ ├── trt_cuda_helper.cuh │ │ │ │ ├── trt_deform_conv.hpp │ │ │ │ ├── trt_nms.hpp │ │ │ │ ├── trt_plugin.hpp │ │ │ │ ├── trt_plugin_helper.hpp │ │ │ │ ├── trt_roi_align.hpp │ │ │ │ ├── trt_scatternd.hpp │ │ │ │ └── trt_serialize.hpp │ │ │ └── tin_shift_cuda_kernel.cuh │ │ ├── deform_conv.py │ │ ├── deform_roi_pool.py │ │ ├── deprecated_wrappers.py │ │ ├── focal_loss.py │ │ ├── fused_bias_leakyrelu.py │ │ ├── info.py │ │ ├── masked_conv.py │ │ ├── merge_cells.py │ │ ├── modulated_deform_conv.py │ │ ├── nms.py │ │ ├── point_sample.py │ │ ├── psa_mask.py │ │ ├── roi_align.py │ │ ├── roi_pool.py │ │ ├── saconv.py │ │ ├── sync_bn.py │ │ ├── tin_shift.py │ │ └── upfirdn2d.py │ ├── parallel │ │ ├── __init__.py │ │ ├── _functions.py │ │ ├── collate.py │ │ ├── data_container.py │ │ ├── data_parallel.py │ │ ├── distributed.py │ │ ├── distributed_deprecated.py │ │ ├── registry.py │ │ ├── scatter_gather.py │ │ └── utils.py │ ├── runner │ │ ├── __init__.py │ │ ├── base_module.py │ │ ├── base_runner.py │ │ ├── builder.py │ │ ├── checkpoint.py │ │ ├── dist_utils.py │ │ ├── epoch_based_runner.py │ │ ├── fp16_utils.py │ │ ├── hooks │ │ │ ├── __init__.py │ │ │ ├── checkpoint.py │ │ │ ├── closure.py │ │ │ ├── ema.py │ │ │ ├── hook.py │ │ │ ├── iter_timer.py │ │ │ ├── logger │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── mlflow.py │ │ │ │ ├── pavi.py │ │ │ │ ├── tensorboard.py │ │ │ │ ├── text.py │ │ │ │ └── wandb.py │ │ │ ├── lr_updater.py │ │ │ ├── memory.py │ │ │ ├── momentum_updater.py │ │ │ ├── optimizer.py │ │ │ ├── sampler_seed.py │ │ │ └── sync_buffer.py │ │ ├── iter_based_runner.py │ │ ├── log_buffer.py │ │ ├── optimizer │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ └── default_constructor.py │ │ ├── priority.py │ │ └── utils.py │ ├── tensorrt │ │ ├── __init__.py │ │ ├── init_plugins.py │ │ └── tensorrt_utils.py │ ├── utils │ │ ├── __init__.py │ │ ├── config.py │ │ ├── env.py │ │ ├── ext_loader.py │ │ ├── logging.py │ │ ├── misc.py │ │ ├── parrots_jit.py │ │ ├── parrots_wrapper.py │ │ ├── path.py │ │ ├── progressbar.py │ │ ├── registry.py │ │ ├── testing.py │ │ ├── timer.py │ │ └── version_utils.py │ ├── version.py │ ├── video │ │ ├── __init__.py │ │ ├── io.py │ │ ├── optflow.py │ │ └── processing.py │ └── visualization │ │ ├── __init__.py │ │ ├── color.py │ │ ├── image.py │ │ └── optflow.py ├── requirements.txt ├── requirements │ ├── docs.txt │ ├── runtime.txt │ └── test.txt ├── setup.cfg ├── setup.py └── tests │ ├── data │ ├── batched_nms_data.pkl │ ├── color.jpg │ ├── color_exif.jpg │ ├── config │ │ ├── a.b.py │ │ ├── a.py │ │ ├── b.json │ │ ├── base.py │ │ ├── c.yaml │ │ ├── code.py │ │ ├── d.py │ │ ├── delete.py │ │ ├── e.py │ │ ├── f.py │ │ ├── g.py │ │ ├── h.py │ │ ├── i_base.py │ │ ├── i_child.py │ │ ├── l.py │ │ ├── l1.py │ │ ├── l2.yaml │ │ ├── l3.json │ │ ├── l4.py │ │ ├── 
m.py │ │ ├── n.py │ │ ├── o.json │ │ ├── p.yaml │ │ ├── q.py │ │ ├── r.py │ │ └── s.py │ ├── demo.lmdb │ │ ├── data.mdb │ │ └── lock.mdb │ ├── filelist.txt │ ├── for_ccattention │ │ ├── ccattention_input.bin │ │ └── ccattention_output.bin │ ├── for_psa_mask │ │ ├── psa_input.bin │ │ ├── psa_output_collect.bin │ │ └── psa_output_distribute.bin │ ├── for_scan │ │ ├── 1.json │ │ ├── 1.txt │ │ ├── 2.json │ │ ├── 2.txt │ │ ├── a.bin │ │ └── sub │ │ │ ├── 1.json │ │ │ └── 1.txt │ ├── gray_alpha.png │ ├── grayscale.jpg │ ├── grayscale_dim3.jpg │ ├── mapping.txt │ ├── model_zoo │ │ ├── deprecated.json │ │ ├── mmcv_home │ │ │ ├── open_mmlab.json │ │ │ ├── test.pth │ │ │ └── val.pth │ │ └── open_mmlab.json │ ├── optflow.flo │ ├── optflow_concat0.jpg │ ├── optflow_concat1.jpg │ ├── palette.gif │ ├── patches │ │ ├── 0.npy │ │ ├── 1.npy │ │ ├── 2.npy │ │ ├── 3.npy │ │ ├── 4.npy │ │ ├── pad0_0.npy │ │ ├── pad0_1.npy │ │ ├── pad0_2.npy │ │ ├── pad0_3.npy │ │ ├── pad0_4.npy │ │ ├── pad_0.npy │ │ ├── pad_1.npy │ │ ├── pad_2.npy │ │ ├── pad_3.npy │ │ ├── pad_4.npy │ │ ├── scale_0.npy │ │ ├── scale_1.npy │ │ ├── scale_2.npy │ │ ├── scale_3.npy │ │ └── scale_4.npy │ ├── test.mp4 │ └── uint16-5channel.tif │ ├── test_arraymisc.py │ ├── test_cnn │ ├── test_build_layers.py │ ├── test_context_block.py │ ├── test_conv2d_adaptive_padding.py │ ├── test_conv_module.py │ ├── test_depthwise_seperable_conv_module.py │ ├── test_flops_counter.py │ ├── test_fuse_conv_bn.py │ ├── test_generalized_attention.py │ ├── test_hsigmoid.py │ ├── test_hswish.py │ ├── test_non_local.py │ ├── test_scale.py │ ├── test_swish.py │ ├── test_weight_init.py │ └── test_wrappers.py │ ├── test_fileclient.py │ ├── test_fileio.py │ ├── test_image │ ├── test_colorspace.py │ ├── test_geometric.py │ ├── test_image_misc.py │ ├── test_io.py │ └── test_photometric.py │ ├── test_load_model_zoo.py │ ├── test_ops │ ├── test_bbox.py │ ├── test_box_iou_rotated.py │ ├── test_carafe.py │ ├── test_cc_attention.py │ ├── test_corner_pool.py │ ├── test_deform_conv.py │ ├── test_deform_roi_pool.py │ ├── test_focal_loss.py │ ├── test_fused_bias_leakyrelu.py │ ├── test_info.py │ ├── test_masked_conv2d.py │ ├── test_merge_cells.py │ ├── test_modulated_deform_conv.py │ ├── test_nms.py │ ├── test_nms_rotated.py │ ├── test_onnx.py │ ├── test_psa_mask.py │ ├── test_roi_align.py │ ├── test_roi_pool.py │ ├── test_saconv.py │ ├── test_syncbn.py │ ├── test_tensorrt.py │ ├── test_tin_shift.py │ └── test_upfirdn2d.py │ ├── test_parallel.py │ ├── test_runner │ ├── test_basemodule.py │ ├── test_checkpoint.py │ ├── test_dist_utils.py │ ├── test_fp16.py │ ├── test_hooks.py │ ├── test_optimizer.py │ ├── test_runner.py │ └── test_utils.py │ ├── test_utils │ ├── test_config.py │ ├── test_env.py │ ├── test_logging.py │ ├── test_misc.py │ ├── test_parrots_jit.py │ ├── test_path.py │ ├── test_progressbar.py │ ├── test_registry.py │ ├── test_testing.py │ ├── test_timer.py │ └── test_version_utils.py │ ├── test_video │ ├── test_optflow.py │ ├── test_processing.py │ └── test_reader.py │ └── test_visualization.py ├── mmseg-v0.11 ├── LICENSE ├── README.md ├── configs │ ├── _base_ │ │ ├── datasets │ │ │ ├── ade20k.py │ │ │ ├── chase_db1.py │ │ │ ├── cityscapes.py │ │ │ ├── cityscapes_769x769.py │ │ │ ├── drive.py │ │ │ ├── hrf.py │ │ │ ├── pascal_context.py │ │ │ ├── pascal_voc12.py │ │ │ ├── pascal_voc12_aug.py │ │ │ └── stare.py │ │ ├── default_runtime.py │ │ ├── models │ │ │ ├── ann_r50-d8.py │ │ │ ├── apcnet_r50-d8.py │ │ │ ├── ccnet_r50-d8.py │ │ │ ├── cgnet.py │ │ │ ├── danet_r50-d8.py 
│ │ │ ├── deeplabv3_r50-d8.py │ │ │ ├── deeplabv3_unet_s5-d16.py │ │ │ ├── deeplabv3plus_r50-d8.py │ │ │ ├── dmnet_r50-d8.py │ │ │ ├── dnl_r50-d8.py │ │ │ ├── emanet_r50-d8.py │ │ │ ├── encnet_r50-d8.py │ │ │ ├── fast_scnn.py │ │ │ ├── fcn_hr18.py │ │ │ ├── fcn_r50-d8.py │ │ │ ├── fcn_unet_s5-d16.py │ │ │ ├── fpn_r50.py │ │ │ ├── gcnet_r50-d8.py │ │ │ ├── lraspp_m-v3-d8.py │ │ │ ├── nonlocal_r50-d8.py │ │ │ ├── ocrnet_hr18.py │ │ │ ├── ocrnet_r50-d8.py │ │ │ ├── pointrend_r50.py │ │ │ ├── psanet_r50-d8.py │ │ │ ├── pspnet_r50-d8.py │ │ │ ├── pspnet_unet_s5-d16.py │ │ │ └── upernet_r50.py │ │ └── schedules │ │ │ ├── schedule_160k.py │ │ │ ├── schedule_20k.py │ │ │ ├── schedule_40k.py │ │ │ └── schedule_80k.py │ ├── sem_fpn │ │ └── hire_mlp_small_512x512_ade20k.py │ └── upernet │ │ └── hire_mlp_small_512x512_ade20k.py ├── mmseg │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── test.py │ │ └── train.py │ ├── core │ │ ├── __init__.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── class_names.py │ │ │ ├── eval_hooks.py │ │ │ └── metrics.py │ │ ├── seg │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ └── sampler │ │ │ │ ├── __init__.py │ │ │ │ ├── base_pixel_sampler.py │ │ │ │ └── ohem_pixel_sampler.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── misc.py │ ├── datasets │ │ ├── __init__.py │ │ ├── ade.py │ │ ├── builder.py │ │ ├── chase_db1.py │ │ ├── cityscapes.py │ │ ├── custom.py │ │ ├── dataset_wrappers.py │ │ ├── drive.py │ │ ├── hrf.py │ │ ├── pascal_context.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── compose.py │ │ │ ├── formating.py │ │ │ ├── loading.py │ │ │ ├── test_time_aug.py │ │ │ └── transforms.py │ │ ├── stare.py │ │ └── voc.py │ ├── models │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── cgnet.py │ │ │ ├── fast_scnn.py │ │ │ ├── hire_mlp.py │ │ │ ├── hrnet.py │ │ │ ├── mobilenet_v2.py │ │ │ ├── mobilenet_v3.py │ │ │ ├── resnest.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ └── unet.py │ │ ├── builder.py │ │ ├── decode_heads │ │ │ ├── __init__.py │ │ │ ├── ann_head.py │ │ │ ├── apc_head.py │ │ │ ├── aspp_head.py │ │ │ ├── cascade_decode_head.py │ │ │ ├── cc_head.py │ │ │ ├── da_head.py │ │ │ ├── decode_head.py │ │ │ ├── dm_head.py │ │ │ ├── dnl_head.py │ │ │ ├── ema_head.py │ │ │ ├── enc_head.py │ │ │ ├── fcn_head.py │ │ │ ├── fpn_head.py │ │ │ ├── gc_head.py │ │ │ ├── lraspp_head.py │ │ │ ├── nl_head.py │ │ │ ├── ocr_head.py │ │ │ ├── point_head.py │ │ │ ├── psa_head.py │ │ │ ├── psp_head.py │ │ │ ├── sep_aspp_head.py │ │ │ ├── sep_fcn_head.py │ │ │ └── uper_head.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── accuracy.py │ │ │ ├── cross_entropy_loss.py │ │ │ ├── lovasz_loss.py │ │ │ └── utils.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ └── fpn.py │ │ ├── segmentors │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── cascade_encoder_decoder.py │ │ │ └── encoder_decoder.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── inverted_residual.py │ │ │ ├── make_divisible.py │ │ │ ├── res_layer.py │ │ │ ├── se_layer.py │ │ │ ├── self_attention_block.py │ │ │ └── up_conv_block.py │ ├── ops │ │ ├── __init__.py │ │ ├── encoding.py │ │ └── wrappers.py │ ├── utils │ │ ├── __init__.py │ │ ├── collect_env.py │ │ └── logger.py │ └── version.py ├── pytest.ini ├── requirements.txt ├── requirements │ ├── docs.txt │ ├── optional.txt │ ├── readthedocs.txt │ ├── runtime.txt │ └── tests.txt ├── resources │ ├── mmseg-logo.png │ └── seg_demo.gif ├── setup.cfg ├── setup.py ├── tests │ ├── data │ │ ├── color.jpg │ │ ├── gray.jpg │ │ ├── 
pseudo_dataset │ │ │ ├── gts │ │ │ │ ├── 00000_gt.png │ │ │ │ ├── 00001_gt.png │ │ │ │ ├── 00002_gt.png │ │ │ │ ├── 00003_gt.png │ │ │ │ └── 00004_gt.png │ │ │ ├── imgs │ │ │ │ ├── 00000_img.jpg │ │ │ │ ├── 00001_img.jpg │ │ │ │ ├── 00002_img.jpg │ │ │ │ ├── 00003_img.jpg │ │ │ │ └── 00004_img.jpg │ │ │ └── splits │ │ │ │ ├── train.txt │ │ │ │ └── val.txt │ │ └── seg.png │ ├── test_config.py │ ├── test_data │ │ ├── test_dataset.py │ │ ├── test_dataset_builder.py │ │ ├── test_loading.py │ │ ├── test_transform.py │ │ └── test_tta.py │ ├── test_eval_hook.py │ ├── test_inference.py │ ├── test_metrics.py │ ├── test_models │ │ ├── test_backbone.py │ │ ├── test_forward.py │ │ ├── test_heads.py │ │ ├── test_losses.py │ │ ├── test_necks.py │ │ ├── test_segmentor.py │ │ └── test_unet.py │ ├── test_sampler.py │ └── test_utils │ │ ├── test_inverted_residual_module.py │ │ ├── test_make_divisible.py │ │ └── test_se_layer.py └── tools │ ├── benchmark.py │ ├── convert_datasets │ ├── chase_db1.py │ ├── cityscapes.py │ ├── drive.py │ ├── hrf.py │ ├── pascal_context.py │ ├── stare.py │ └── voc_aug.py │ ├── dist_test.sh │ ├── dist_train.sh │ ├── get_flops.py │ ├── print_config.py │ ├── publish_model.py │ ├── pytorch2onnx.py │ ├── slurm_test.sh │ ├── slurm_train.sh │ ├── test.py │ └── train.py ├── models.py ├── samplers.py ├── timm ├── __init__.py ├── data │ ├── __init__.py │ ├── auto_augment.py │ ├── config.py │ ├── constants.py │ ├── dataset.py │ ├── dataset_factory.py │ ├── distributed_sampler.py │ ├── loader.py │ ├── mixup.py │ ├── parsers │ │ ├── __init__.py │ │ ├── class_map.py │ │ ├── constants.py │ │ ├── parser.py │ │ ├── parser_factory.py │ │ ├── parser_image_folder.py │ │ ├── parser_image_in_tar.py │ │ ├── parser_image_tar.py │ │ └── parser_tfds.py │ ├── random_erasing.py │ ├── real_labels.py │ ├── tf_preprocessing.py │ ├── transforms.py │ └── transforms_factory.py ├── loss │ ├── __init__.py │ ├── asymmetric_loss.py │ ├── cross_entropy.py │ └── jsd.py ├── models │ ├── __init__.py │ ├── byoanet.py │ ├── byobnet.py │ ├── cait.py │ ├── coat.py │ ├── convit.py │ ├── cspnet.py │ ├── densenet.py │ ├── dla.py │ ├── dpn.py │ ├── efficientnet.py │ ├── efficientnet_blocks.py │ ├── efficientnet_builder.py │ ├── factory.py │ ├── features.py │ ├── ghostnet.py │ ├── gluon_resnet.py │ ├── gluon_xception.py │ ├── hardcorenas.py │ ├── helpers.py │ ├── hrnet.py │ ├── hub.py │ ├── inception_resnet_v2.py │ ├── inception_v3.py │ ├── inception_v4.py │ ├── layers │ │ ├── __init__.py │ │ ├── activations.py │ │ ├── activations_jit.py │ │ ├── activations_me.py │ │ ├── adaptive_avgmax_pool.py │ │ ├── blur_pool.py │ │ ├── bottleneck_attn.py │ │ ├── cbam.py │ │ ├── classifier.py │ │ ├── cond_conv2d.py │ │ ├── config.py │ │ ├── conv2d_same.py │ │ ├── conv_bn_act.py │ │ ├── create_act.py │ │ ├── create_attn.py │ │ ├── create_conv2d.py │ │ ├── create_norm_act.py │ │ ├── drop.py │ │ ├── eca.py │ │ ├── evo_norm.py │ │ ├── gather_excite.py │ │ ├── global_context.py │ │ ├── halo_attn.py │ │ ├── helpers.py │ │ ├── inplace_abn.py │ │ ├── involution.py │ │ ├── lambda_layer.py │ │ ├── linear.py │ │ ├── median_pool.py │ │ ├── mixed_conv2d.py │ │ ├── mlp.py │ │ ├── non_local_attn.py │ │ ├── norm.py │ │ ├── norm_act.py │ │ ├── padding.py │ │ ├── patch_embed.py │ │ ├── pool2d_same.py │ │ ├── selective_kernel.py │ │ ├── separable_conv.py │ │ ├── space_to_depth.py │ │ ├── split_attn.py │ │ ├── split_batchnorm.py │ │ ├── squeeze_excite.py │ │ ├── std_conv.py │ │ ├── swin_attn.py │ │ ├── test_time_pool.py │ │ └── weight_init.py │ ├── levit.py │ 
├── mlp_mixer.py │ ├── mobilenetv3.py │ ├── nasnet.py │ ├── nest.py │ ├── nfnet.py │ ├── pit.py │ ├── pnasnet.py │ ├── pruned │ │ ├── ecaresnet101d_pruned.txt │ │ ├── ecaresnet50d_pruned.txt │ │ ├── efficientnet_b1_pruned.txt │ │ ├── efficientnet_b2_pruned.txt │ │ └── efficientnet_b3_pruned.txt │ ├── registry.py │ ├── regnet.py │ ├── res2net.py │ ├── resnest.py │ ├── resnet.py │ ├── resnetv2.py │ ├── rexnet.py │ ├── selecsls.py │ ├── senet.py │ ├── sknet.py │ ├── swin_transformer.py │ ├── tnt.py │ ├── tresnet.py │ ├── twins.py │ ├── vgg.py │ ├── visformer.py │ ├── vision_transformer.py │ ├── vision_transformer_hybrid.py │ ├── vovnet.py │ ├── xception.py │ ├── xception_aligned.py │ └── xcit.py ├── optim │ ├── __init__.py │ ├── adabelief.py │ ├── adafactor.py │ ├── adahessian.py │ ├── adamp.py │ ├── adamw.py │ ├── lamb.py │ ├── lars.py │ ├── lookahead.py │ ├── madgrad.py │ ├── nadam.py │ ├── nvnovograd.py │ ├── optim_factory.py │ ├── radam.py │ ├── rmsprop_tf.py │ └── sgdp.py ├── scheduler │ ├── __init__.py │ ├── cosine_lr.py │ ├── multistep_lr.py │ ├── plateau_lr.py │ ├── scheduler.py │ ├── scheduler_factory.py │ ├── step_lr.py │ └── tanh_lr.py ├── utils │ ├── __init__.py │ ├── agc.py │ ├── checkpoint_saver.py │ ├── clip_grad.py │ ├── cuda.py │ ├── distributed.py │ ├── jit.py │ ├── log.py │ ├── metrics.py │ ├── misc.py │ ├── model.py │ ├── model_ema.py │ ├── random.py │ └── summary.py └── version.py ├── train.py ├── train_wave.py └── utils.py /mmcv-1.3.0/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7 2 | 3 | WORKDIR /mmcv 4 | 5 | COPY . /mmcv 6 | 7 | RUN pip install -e . 8 | -------------------------------------------------------------------------------- /mmcv-1.3.0/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements/runtime.txt 2 | include mmcv/model_zoo/open_mmlab.json mmcv/model_zoo/deprecated.json mmcv/model_zoo/mmcls.json 3 | include mmcv/ops/csrc/*.cuh mmcv/ops/csrc/*.hpp 4 | include mmcv/ops/csrc/pytorch/*.cu mmcv/ops/csrc/pytorch/*.cpp 5 | include mmcv/ops/csrc/parrots/*.cu mmcv/ops/csrc/parrots/*.cpp 6 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 20 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/_static/flow_img2toimg1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/_static/flow_img2toimg1.png -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/_static/flow_raw_images.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/_static/flow_raw_images.png -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/_static/flow_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/_static/flow_visualization.png -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/_static/flow_warp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/_static/flow_warp.png -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/_static/flow_warp_diff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/_static/flow_warp_diff.png -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/_static/parallel_progress.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/_static/parallel_progress.gif -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/_static/progress.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/_static/progress.gif -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/api.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================= 3 | 4 | 5 | fileio 6 | ------- 7 | .. automodule:: mmcv.fileio 8 | :members: 9 | 10 | image 11 | ------ 12 | .. automodule:: mmcv.image 13 | :members: 14 | 15 | video 16 | ------ 17 | .. automodule:: mmcv.video 18 | :members: 19 | 20 | arraymisc 21 | --------- 22 | .. automodule:: mmcv.arraymisc 23 | :members: 24 | 25 | visualization 26 | -------------- 27 | .. automodule:: mmcv.visualization 28 | :members: 29 | 30 | utils 31 | ----- 32 | .. automodule:: mmcv.utils 33 | :members: 34 | 35 | cnn 36 | ---- 37 | .. automodule:: mmcv.cnn 38 | :members: 39 | 40 | runner 41 | ------ 42 | .. automodule:: mmcv.runner 43 | :members: 44 | 45 | ops 46 | ------ 47 | .. 
automodule:: mmcv.ops 48 | :members: 49 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/index.rst: -------------------------------------------------------------------------------- 1 | 2 | .. mdinclude:: readme.md 3 | 4 | Contents 5 | ======== 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | io.md 11 | image.md 12 | video.md 13 | visualization.md 14 | utils.md 15 | runner.md 16 | registry.md 17 | cnn.md 18 | ops.md 19 | build.md 20 | trouble_shooting.md 21 | api.rst 22 | 23 | 24 | 25 | Indices and tables 26 | ================== 27 | 28 | * :ref:`genindex` 29 | * :ref:`search` 30 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/mmcv-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/docs/mmcv-logo.png -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/onnx.md: -------------------------------------------------------------------------------- 1 | # Introduction of `onnx` module in MMCV (Experimental) 2 | 3 | ## register_extra_symbolics 4 | 5 | Some extra symbolic functions need to be registered before exporting a PyTorch model to ONNX. 6 | 7 | ### Example 8 | 9 | ```python 10 | import mmcv 11 | from mmcv.onnx import register_extra_symbolics 12 | 13 | opset_version = 11 14 | register_extra_symbolics(opset_version) 15 | ``` 16 | 17 | ## ONNX simplify 18 | 19 | ### Intention 20 | 21 | `mmcv.onnx.simplify` is based on [onnx-simplifier](https://github.com/daquexian/onnx-simplifier), a useful tool that makes exported ONNX models slimmer by performing a series of optimizations. However, it breaks down for PyTorch models that contain custom ops from `mmcv`, so these custom ops have to be registered for ONNX Runtime first. 22 | 23 | ### Prerequisite 24 | 25 | `mmcv.onnx.simplify` has three dependencies: `onnx`, `onnxoptimizer`, `onnxruntime`. After installing `mmcv`, you have to install them manually using pip.
26 | 27 | ```bash 28 | pip install onnx onnxoptimizer onnxruntime 29 | ``` 30 | 31 | ### Usage 32 | 33 | ```python 34 | import onnx 35 | import numpy as np 36 | 37 | import mmcv 38 | from mmcv.onnx.simplify import simplify 39 | 40 | dummy_input = np.random.randn(1, 3, 224, 224).astype(np.float32) 41 | input = {'input': dummy_input} 42 | input_file = 'sample.onnx' 43 | output_file = 'slim.onnx' 44 | model = simplify(input_file, [input], output_file) 45 | ``` 46 | 47 | ### FAQs 48 | 49 | - None 50 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/ops.md: -------------------------------------------------------------------------------- 1 | ## CUDA ops 2 | 3 | We implement common CUDA ops used in detection, segmentation, etc. 4 | 5 | - BBoxOverlaps 6 | - CARAFE 7 | - CrissCrossAttention 8 | - ContextBlock 9 | - CornerPool 10 | - Deformable Convolution v1/v2 11 | - Deformable RoIPool 12 | - GeneralizedAttention 13 | - MaskedConv 14 | - NMS 15 | - PSAMask 16 | - RoIPool 17 | - RoIAlign 18 | - SimpleRoIAlign 19 | - SigmoidFocalLoss 20 | - SoftmaxFocalLoss 21 | - SoftNMS 22 | - Synchronized BatchNorm 23 | - Weight standardization 24 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/readme.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/runner.md: -------------------------------------------------------------------------------- 1 | ## Runner 2 | 3 | The runner module aims to help users start training with less code, while staying 4 | flexible and configurable. 5 | 6 | Documentation and examples are still ongoing. 7 | -------------------------------------------------------------------------------- /mmcv-1.3.0/docs/visualization.md: -------------------------------------------------------------------------------- 1 | ## Visualization 2 | 3 | `mmcv` can show images and annotations (currently supported types include bounding boxes). 4 | 5 | ```python 6 | # show an image file 7 | mmcv.imshow('a.jpg') 8 | 9 | # show a loaded image 10 | img = np.random.rand(100, 100, 3) 11 | mmcv.imshow(img) 12 | 13 | # show image with bounding boxes 14 | img = np.random.rand(100, 100, 3) 15 | bboxes = np.array([[0, 0, 50, 50], [20, 20, 60, 60]]) 16 | mmcv.imshow_bboxes(img, bboxes) 17 | ``` 18 | 19 | `mmcv` can also visualize special images such as optical flows.
20 | 21 | ```python 22 | flow = mmcv.flowread('test.flo') 23 | mmcv.flowshow(flow) 24 | ``` 25 | -------------------------------------------------------------------------------- /mmcv-1.3.0/examples/config_cifar10.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | model = 'resnet18' 3 | # dataset settings 4 | data_root = '/mnt/SSD/dataset/cifar10' 5 | mean = [0.4914, 0.4822, 0.4465] 6 | std = [0.2023, 0.1994, 0.2010] 7 | batch_size = 64 8 | 9 | # optimizer and learning rate 10 | optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=5e-4) 11 | optimizer_config = dict(grad_clip=None) 12 | lr_config = dict(policy='step', step=2) 13 | 14 | # runtime settings 15 | work_dir = './demo' 16 | gpus = range(2) 17 | dist_params = dict(backend='nccl') 18 | data_workers = 2 # data workers per gpu 19 | checkpoint_config = dict(interval=1) # save checkpoint at every epoch 20 | workflow = [('train', 1), ('val', 1)] 21 | total_epochs = 6 22 | resume_from = None 23 | load_from = None 24 | 25 | # logging settings 26 | log_level = 'INFO' 27 | log_config = dict( 28 | interval=50, # log at every 50 iterations 29 | hooks=[ 30 | dict(type='TextLoggerHook'), 31 | # dict(type='TensorboardLoggerHook'), 32 | ]) 33 | -------------------------------------------------------------------------------- /mmcv-1.3.0/examples/dist_train_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | $PYTHON -m torch.distributed.launch --nproc_per_node=$2 train_cifar10.py $1 --launcher pytorch ${@:3} 6 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | # flake8: noqa 3 | from .arraymisc import * 4 | from .fileio import * 5 | from .image import * 6 | from .utils import * 7 | from .version import * 8 | from .video import * 9 | from .visualization import * 10 | 11 | # The following modules are not imported to this level, so mmcv may be used 12 | # without PyTorch. 13 | # - runner 14 | # - parallel 15 | # - op 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/arraymisc/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .quantization import dequantize, quantize 3 | 4 | __all__ = ['quantize', 'dequantize'] 5 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/cnn/bricks/hsigmoid.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .registry import ACTIVATION_LAYERS 4 | 5 | 6 | @ACTIVATION_LAYERS.register_module() 7 | class HSigmoid(nn.Module): 8 | """Hard Sigmoid Module. Apply the hard sigmoid function: 9 | Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value) 10 | Default: Hsigmoid(x) = min(max((x + 1) / 2, 0), 1) 11 | 12 | Args: 13 | bias (float): Bias of the input feature map. Default: 1.0. 14 | divisor (float): Divisor of the input feature map. Default: 2.0. 15 | min_value (float): Lower bound value. Default: 0.0. 16 | max_value (float): Upper bound value. Default: 1.0. 17 | 18 | Returns: 19 | Tensor: The output tensor. 
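        Example (a minimal usage sketch, assuming ``build_activation_layer`` is
        importable from ``mmcv.cnn`` and builds the bricks registered in
        ``ACTIVATION_LAYERS``)::

            >>> import torch
            >>> from mmcv.cnn import build_activation_layer
            >>> act = build_activation_layer(dict(type='HSigmoid'))  # registered name of this class
            >>> out = act(torch.randn(2, 3))  # elementwise, clamped to [0.0, 1.0]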
20 | """ 21 | 22 | def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0): 23 | super(HSigmoid, self).__init__() 24 | self.bias = bias 25 | self.divisor = divisor 26 | assert self.divisor != 0 27 | self.min_value = min_value 28 | self.max_value = max_value 29 | 30 | def forward(self, x): 31 | x = (x + self.bias) / self.divisor 32 | 33 | return x.clamp_(self.min_value, self.max_value) 34 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/cnn/bricks/hswish.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .registry import ACTIVATION_LAYERS 4 | 5 | 6 | @ACTIVATION_LAYERS.register_module() 7 | class HSwish(nn.Module): 8 | """Hard Swish Module. 9 | 10 | This module applies the hard swish function: 11 | 12 | .. math:: 13 | Hswish(x) = x * ReLU6(x + 3) / 6 14 | 15 | Args: 16 | inplace (bool): can optionally do the operation in-place. 17 | Default: False. 18 | 19 | Returns: 20 | Tensor: The output tensor. 21 | """ 22 | 23 | def __init__(self, inplace=False): 24 | super(HSwish, self).__init__() 25 | self.act = nn.ReLU6(inplace) 26 | 27 | def forward(self, x): 28 | return x * self.act(x + 3) / 6 29 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/cnn/bricks/padding.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .registry import PADDING_LAYERS 4 | 5 | PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d) 6 | PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d) 7 | PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d) 8 | 9 | 10 | def build_padding_layer(cfg, *args, **kwargs): 11 | """Build padding layer. 12 | 13 | Args: 14 | cfg (None or dict): The padding layer config, which should contain: 15 | - type (str): Layer type. 16 | - layer args: Args needed to instantiate a padding layer. 17 | 18 | Returns: 19 | nn.Module: Created padding layer. 
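        Example (a minimal usage sketch; the ``'zero'`` type registered above maps to
        ``nn.ZeroPad2d``, and extra positional args are forwarded to its constructor)::

            >>> pad = build_padding_layer(dict(type='zero'), 2)  # same as nn.ZeroPad2d(2)
            >>> # 'reflect' and 'replicate' work the same way via their registered modules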
20 | """ 21 | if not isinstance(cfg, dict): 22 | raise TypeError('cfg must be a dict') 23 | if 'type' not in cfg: 24 | raise KeyError('the cfg dict must contain the key "type"') 25 | 26 | cfg_ = cfg.copy() 27 | padding_type = cfg_.pop('type') 28 | if padding_type not in PADDING_LAYERS: 29 | raise KeyError(f'Unrecognized padding type {padding_type}.') 30 | else: 31 | padding_layer = PADDING_LAYERS.get(padding_type) 32 | 33 | layer = padding_layer(*args, **kwargs, **cfg_) 34 | 35 | return layer 36 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/cnn/bricks/registry.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | CONV_LAYERS = Registry('conv layer') 4 | NORM_LAYERS = Registry('norm layer') 5 | ACTIVATION_LAYERS = Registry('activation layer') 6 | PADDING_LAYERS = Registry('padding layer') 7 | UPSAMPLE_LAYERS = Registry('upsample layer') 8 | PLUGIN_LAYERS = Registry('plugin layer') 9 | 10 | POSITIONAL_ENCODING = Registry('Position encoding') 11 | ATTENTION = Registry('Attention') 12 | TRANSFORMER_LAYER = Registry('TransformerLayer') 13 | TRANSFORMER_LAYER_SEQUENCE = Registry('TransformerLayerSequence') 14 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/cnn/bricks/scale.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class Scale(nn.Module): 6 | """A learnable scale parameter. 7 | 8 | This layer scales the input by a learnable factor. It multiplies a 9 | learnable scale parameter of shape (1,) with input of any shape. 10 | 11 | Args: 12 | scale (float): Initial value of scale factor. Default: 1.0 13 | """ 14 | 15 | def __init__(self, scale=1.0): 16 | super(Scale, self).__init__() 17 | self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) 18 | 19 | def forward(self, x): 20 | return x * self.scale 21 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/cnn/bricks/swish.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .registry import ACTIVATION_LAYERS 5 | 6 | 7 | @ACTIVATION_LAYERS.register_module() 8 | class Swish(nn.Module): 9 | """Swish Module. 10 | 11 | This module applies the swish function: 12 | 13 | .. math:: 14 | Swish(x) = x * Sigmoid(x) 15 | 16 | Returns: 17 | Tensor: The output tensor. 18 | """ 19 | 20 | def __init__(self): 21 | super(Swish, self).__init__() 22 | 23 | def forward(self, x): 24 | return x * torch.sigmoid(x) 25 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/cnn/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
2 | from .flops_counter import get_model_complexity_info 3 | from .fuse_conv_bn import fuse_conv_bn 4 | from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit, 5 | KaimingInit, NormalInit, PretrainedInit, UniformInit, 6 | XavierInit, bias_init_with_prob, caffe2_xavier_init, 7 | constant_init, initialize, kaiming_init, normal_init, 8 | uniform_init, xavier_init) 9 | 10 | __all__ = [ 11 | 'get_model_complexity_info', 'bias_init_with_prob', 'caffe2_xavier_init', 12 | 'constant_init', 'kaiming_init', 'normal_init', 'uniform_init', 13 | 'xavier_init', 'fuse_conv_bn', 'initialize', 'INITIALIZERS', 14 | 'ConstantInit', 'XavierInit', 'NormalInit', 'UniformInit', 'KaimingInit', 15 | 'PretrainedInit', 'Caffe2XavierInit' 16 | ] 17 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/fileio/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .file_client import BaseStorageBackend, FileClient 3 | from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler 4 | from .io import dump, load, register_handler 5 | from .parse import dict_from_file, list_from_file 6 | 7 | __all__ = [ 8 | 'BaseStorageBackend', 'FileClient', 'load', 'dump', 'register_handler', 9 | 'BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler', 10 | 'list_from_file', 'dict_from_file' 11 | ] 12 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/fileio/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .base import BaseFileHandler 3 | from .json_handler import JsonHandler 4 | from .pickle_handler import PickleHandler 5 | from .yaml_handler import YamlHandler 6 | 7 | __all__ = ['BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler'] 8 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/fileio/handlers/base.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from abc import ABCMeta, abstractmethod 3 | 4 | 5 | class BaseFileHandler(metaclass=ABCMeta): 6 | 7 | @abstractmethod 8 | def load_from_fileobj(self, file, **kwargs): 9 | pass 10 | 11 | @abstractmethod 12 | def dump_to_fileobj(self, obj, file, **kwargs): 13 | pass 14 | 15 | @abstractmethod 16 | def dump_to_str(self, obj, **kwargs): 17 | pass 18 | 19 | def load_from_path(self, filepath, mode='r', **kwargs): 20 | with open(filepath, mode) as f: 21 | return self.load_from_fileobj(f, **kwargs) 22 | 23 | def dump_to_path(self, obj, filepath, mode='w', **kwargs): 24 | with open(filepath, mode) as f: 25 | self.dump_to_fileobj(obj, f, **kwargs) 26 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/fileio/handlers/json_handler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | import json 3 | 4 | import numpy as np 5 | 6 | from .base import BaseFileHandler 7 | 8 | 9 | def set_default(obj): 10 | """Set default json values for non-serializable values. 11 | 12 | It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list. 13 | It also converts ``np.generic`` (including ``np.int32``, ``np.float32``, 14 | etc.) 
into plain numbers of plain python built-in types. 15 | """ 16 | if isinstance(obj, (set, range)): 17 | return list(obj) 18 | elif isinstance(obj, np.ndarray): 19 | return obj.tolist() 20 | elif isinstance(obj, np.generic): 21 | return obj.item() 22 | raise TypeError(f'{type(obj)} is unsupported for json dump') 23 | 24 | 25 | class JsonHandler(BaseFileHandler): 26 | 27 | def load_from_fileobj(self, file): 28 | return json.load(file) 29 | 30 | def dump_to_fileobj(self, obj, file, **kwargs): 31 | kwargs.setdefault('default', set_default) 32 | json.dump(obj, file, **kwargs) 33 | 34 | def dump_to_str(self, obj, **kwargs): 35 | kwargs.setdefault('default', set_default) 36 | return json.dumps(obj, **kwargs) 37 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/fileio/handlers/pickle_handler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | import pickle 3 | 4 | from .base import BaseFileHandler 5 | 6 | 7 | class PickleHandler(BaseFileHandler): 8 | 9 | def load_from_fileobj(self, file, **kwargs): 10 | return pickle.load(file, **kwargs) 11 | 12 | def load_from_path(self, filepath, **kwargs): 13 | return super(PickleHandler, self).load_from_path( 14 | filepath, mode='rb', **kwargs) 15 | 16 | def dump_to_str(self, obj, **kwargs): 17 | kwargs.setdefault('protocol', 2) 18 | return pickle.dumps(obj, **kwargs) 19 | 20 | def dump_to_fileobj(self, obj, file, **kwargs): 21 | kwargs.setdefault('protocol', 2) 22 | pickle.dump(obj, file, **kwargs) 23 | 24 | def dump_to_path(self, obj, filepath, **kwargs): 25 | super(PickleHandler, self).dump_to_path( 26 | obj, filepath, mode='wb', **kwargs) 27 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/fileio/handlers/yaml_handler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | import yaml 3 | 4 | try: 5 | from yaml import CLoader as Loader, CDumper as Dumper 6 | except ImportError: 7 | from yaml import Loader, Dumper 8 | 9 | from .base import BaseFileHandler # isort:skip 10 | 11 | 12 | class YamlHandler(BaseFileHandler): 13 | 14 | def load_from_fileobj(self, file, **kwargs): 15 | kwargs.setdefault('Loader', Loader) 16 | return yaml.load(file, **kwargs) 17 | 18 | def dump_to_fileobj(self, obj, file, **kwargs): 19 | kwargs.setdefault('Dumper', Dumper) 20 | yaml.dump(obj, file, **kwargs) 21 | 22 | def dump_to_str(self, obj, **kwargs): 23 | kwargs.setdefault('Dumper', Dumper) 24 | return yaml.dump(obj, **kwargs) 25 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/image/misc.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import mmcv 4 | 5 | try: 6 | import torch 7 | except ImportError: 8 | torch = None 9 | 10 | 11 | def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): 12 | """Convert tensor to 3-channel images. 13 | 14 | Args: 15 | tensor (torch.Tensor): Tensor that contains multiple images, shape ( 16 | N, C, H, W). 17 | mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0). 18 | std (tuple[float], optional): Standard deviation of images. 19 | Defaults to (1, 1, 1). 20 | to_rgb (bool, optional): Whether the tensor was converted to RGB 21 | format in the first place. If so, convert it back to BGR. 22 | Defaults to True. 
23 | 24 | Returns: 25 | list[np.ndarray]: A list that contains multiple images. 26 | """ 27 | 28 | if torch is None: 29 | raise RuntimeError('pytorch is not installed') 30 | assert torch.is_tensor(tensor) and tensor.ndim == 4 31 | assert len(mean) == 3 32 | assert len(std) == 3 33 | 34 | num_imgs = tensor.size(0) 35 | mean = np.array(mean, dtype=np.float32) 36 | std = np.array(std, dtype=np.float32) 37 | imgs = [] 38 | for img_id in range(num_imgs): 39 | img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) 40 | img = mmcv.imdenormalize( 41 | img, mean, std, to_bgr=to_rgb).astype(np.uint8) 42 | imgs.append(np.ascontiguousarray(img)) 43 | return imgs 44 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/model_zoo/deprecated.json: -------------------------------------------------------------------------------- 1 | { 2 | "resnet50_caffe": "detectron/resnet50_caffe", 3 | "resnet50_caffe_bgr": "detectron2/resnet50_caffe_bgr", 4 | "resnet101_caffe": "detectron/resnet101_caffe", 5 | "resnet101_caffe_bgr": "detectron2/resnet101_caffe_bgr" 6 | } 7 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/onnx/__init__.py: -------------------------------------------------------------------------------- 1 | from .info import is_custom_op_loaded 2 | from .symbolic import register_extra_symbolics 3 | 4 | __all__ = ['register_extra_symbolics', 'is_custom_op_loaded'] 5 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/onnx/info.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def is_custom_op_loaded(): 5 | flag = False 6 | try: 7 | from ..tensorrt import is_tensorrt_plugin_loaded 8 | flag = is_tensorrt_plugin_loaded() 9 | except (ImportError, ModuleNotFoundError): 10 | pass 11 | if not flag: 12 | try: 13 | from ..ops import get_onnxruntime_op_path 14 | ort_lib_path = get_onnxruntime_op_path() 15 | flag = os.path.exists(ort_lib_path) 16 | except (ImportError, ModuleNotFoundError): 17 | pass 18 | return flag 19 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/onnx/onnx_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/mmcv/onnx/onnx_utils/__init__.py -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/onnx/simplify/__init__.py: -------------------------------------------------------------------------------- 1 | from .core import simplify 2 | 3 | __all__ = ['simplify'] 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/onnx/simplify/common.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import warnings 3 | 4 | import onnx 5 | 6 | 7 | def add_suffix2name(ori_model, suffix='__', verify=False): 8 | """Simply add a suffix to the names of nodes that have numeric names.""" 9 | # check whether the model has special ops that contain subgraphs.
10 | special_ops = ('If', 'Loop') 11 | for node in ori_model.graph.node: 12 | if node.op_type in special_ops: 13 | warnings.warn(f'This model has special op: {node.op_type}.') 14 | return ori_model 15 | 16 | model = copy.deepcopy(ori_model) 17 | 18 | def need_update(name): 19 | return name.isnumeric() 20 | 21 | def update_name(nodes): 22 | for node in nodes: 23 | if need_update(node.name): 24 | node.name += suffix 25 | 26 | update_name(model.graph.initializer) 27 | update_name(model.graph.input) 28 | update_name(model.graph.output) 29 | 30 | for i, node in enumerate(ori_model.graph.node): 31 | # process input of node 32 | for j, name in enumerate(node.input): 33 | if need_update(name): 34 | model.graph.node[i].input[j] = name + suffix 35 | 36 | # process output of node 37 | for j, name in enumerate(node.output): 38 | if need_update(name): 39 | model.graph.node[i].output[j] = name + suffix 40 | if verify: 41 | onnx.checker.check_model(model) 42 | 43 | return model 44 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/onnxruntime/cpu/onnxruntime_register.cpp: -------------------------------------------------------------------------------- 1 | #include "onnxruntime_register.h" 2 | 3 | #include "nms.h" 4 | #include "ort_mmcv_utils.h" 5 | #include "roi_align.h" 6 | #include "soft_nms.h" 7 | 8 | const char *c_MMCVOpDomain = "mmcv"; 9 | SoftNmsOp c_SoftNmsOp; 10 | NmsOp c_NmsOp; 11 | MMCVRoiAlignCustomOp c_MMCVRoiAlignCustomOp; 12 | 13 | OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, 14 | const OrtApiBase *api) { 15 | OrtCustomOpDomain *domain = nullptr; 16 | const OrtApi *ortApi = api->GetApi(ORT_API_VERSION); 17 | 18 | if (auto status = ortApi->CreateCustomOpDomain(c_MMCVOpDomain, &domain)) { 19 | return status; 20 | } 21 | 22 | if (auto status = ortApi->CustomOpDomain_Add(domain, &c_SoftNmsOp)) { 23 | return status; 24 | } 25 | 26 | if (auto status = ortApi->CustomOpDomain_Add(domain, &c_NmsOp)) { 27 | return status; 28 | } 29 | 30 | if (auto status = 31 | ortApi->CustomOpDomain_Add(domain, &c_MMCVRoiAlignCustomOp)) { 32 | return status; 33 | } 34 | 35 | return ortApi->AddCustomOpDomain(options, domain); 36 | } 37 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/onnxruntime/nms.h: -------------------------------------------------------------------------------- 1 | #ifndef ONNXRUNTIME_NMS_H 2 | #define ONNXRUNTIME_NMS_H 3 | 4 | #include 5 | 6 | struct NmsKernel { 7 | NmsKernel(OrtApi api, const OrtKernelInfo *info); 8 | 9 | void Compute(OrtKernelContext *context); 10 | 11 | protected: 12 | OrtApi api_; 13 | Ort::CustomOpApi ort_; 14 | const OrtKernelInfo *info_; 15 | Ort::AllocatorWithDefaultOptions allocator_; 16 | 17 | float iou_threshold_; 18 | int64_t offset_; 19 | }; 20 | 21 | struct NmsOp : Ort::CustomOpBase { 22 | void *CreateKernel(OrtApi api, const OrtKernelInfo *info) const { 23 | return new NmsKernel(api, info); 24 | }; 25 | 26 | const char *GetName() const { return "NonMaxSuppression"; }; 27 | 28 | size_t GetInputTypeCount() const { return 2; }; 29 | ONNXTensorElementDataType GetInputType(size_t /*index*/) const { 30 | return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; 31 | }; 32 | 33 | size_t GetOutputTypeCount() const { return 1; }; 34 | ONNXTensorElementDataType GetOutputType(size_t index) const { 35 | return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64; 36 | } 37 | 38 | // force cpu 39 | const char *GetExecutionProviderType() const { 40 | return 
"CPUExecutionProvider"; 41 | } 42 | }; 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/onnxruntime/onnxruntime_register.h: -------------------------------------------------------------------------------- 1 | #ifndef ONNXRUNTIME_REGISTER_H 2 | #define ONNXRUNTIME_REGISTER_H 3 | #include 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, 10 | const OrtApiBase *api); 11 | 12 | #ifdef __cplusplus 13 | } 14 | #endif 15 | #endif // ONNXRUNTIME_REGISTER_H 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/onnxruntime/ort_mmcv_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef ORT_MMCV_UTILS_H 2 | #define ORT_MMCV_UTILS_H 3 | #include 4 | 5 | #include 6 | 7 | struct OrtTensorDimensions : std::vector { 8 | OrtTensorDimensions(Ort::CustomOpApi ort, const OrtValue* value) { 9 | OrtTensorTypeAndShapeInfo* info = ort.GetTensorTypeAndShape(value); 10 | std::vector::operator=(ort.GetTensorShape(info)); 11 | ort.ReleaseTensorTypeAndShapeInfo(info); 12 | } 13 | }; 14 | #endif // ORT_MMCV_UTILS_H 15 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/onnxruntime/soft_nms.h: -------------------------------------------------------------------------------- 1 | #ifndef ONNXRUNTIME_SOFT_NMS_H 2 | #define ONNXRUNTIME_SOFT_NMS_H 3 | #include 4 | 5 | struct SoftNmsKernel { 6 | SoftNmsKernel(OrtApi api, const OrtKernelInfo *info); 7 | 8 | void Compute(OrtKernelContext *context); 9 | 10 | protected: 11 | OrtApi api_; 12 | Ort::CustomOpApi ort_; 13 | const OrtKernelInfo *info_; 14 | Ort::AllocatorWithDefaultOptions allocator_; 15 | 16 | float iou_threshold_; 17 | float sigma_; 18 | float min_score_; 19 | int64_t method_; 20 | int64_t offset_; 21 | }; 22 | 23 | struct SoftNmsOp : Ort::CustomOpBase { 24 | void *CreateKernel(OrtApi api, const OrtKernelInfo *info) { 25 | return new SoftNmsKernel(api, info); 26 | }; 27 | 28 | const char *GetName() const { return "SoftNonMaxSuppression"; }; 29 | 30 | size_t GetInputTypeCount() const { return 2; }; 31 | ONNXTensorElementDataType GetInputType(size_t /*index*/) const { 32 | return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; 33 | }; 34 | 35 | size_t GetOutputTypeCount() const { return 2; }; 36 | ONNXTensorElementDataType GetOutputType(size_t index) const { 37 | if (index == 1) { 38 | return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64; 39 | } 40 | return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; 41 | }; 42 | 43 | // force cpu 44 | const char *GetExecutionProviderType() const { 45 | return "CPUExecutionProvider"; 46 | }; 47 | }; 48 | #endif // ONNXRUNTIME_SOFT_NMS_H 49 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/bbox_overlaps.cpp: -------------------------------------------------------------------------------- 1 | #include "pytorch_cpp_helper.hpp" 2 | 3 | #ifdef MMCV_WITH_CUDA 4 | void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2, 5 | Tensor ious, const int mode, 6 | const bool aligned, const int offset); 7 | 8 | void bbox_overlaps_cuda(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, 9 | const int mode, const bool aligned, const int offset) { 10 | BBoxOverlapsCUDAKernelLauncher(bboxes1, bboxes2, ious, mode, aligned, offset); 11 | } 12 | #endif 13 | 14 | void 
bbox_overlaps(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, 15 | const int mode, const bool aligned, const int offset) { 16 | if (bboxes1.device().is_cuda()) { 17 | #ifdef MMCV_WITH_CUDA 18 | CHECK_CUDA_INPUT(bboxes1); 19 | CHECK_CUDA_INPUT(bboxes2); 20 | CHECK_CUDA_INPUT(ious); 21 | 22 | bbox_overlaps_cuda(bboxes1, bboxes2, ious, mode, aligned, offset); 23 | #else 24 | AT_ERROR("bbox_overlaps is not compiled with GPU support"); 25 | #endif 26 | } else { 27 | AT_ERROR("bbox_overlaps is not implemented on CPU"); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/bbox_overlaps_cuda.cu: -------------------------------------------------------------------------------- 1 | #include "bbox_overlaps_cuda_kernel.cuh" 2 | #include "pytorch_cuda_helper.hpp" 3 | 4 | void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2, 5 | Tensor ious, const int mode, 6 | const bool aligned, const int offset) { 7 | int output_size = ious.numel(); 8 | int num_bbox1 = bboxes1.size(0); 9 | int num_bbox2 = bboxes2.size(0); 10 | 11 | at::cuda::CUDAGuard device_guard(bboxes1.device()); 12 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 13 | AT_DISPATCH_FLOATING_TYPES_AND_HALF( 14 | bboxes1.scalar_type(), "bbox_overlaps_cuda_kernel", ([&] { 15 | bbox_overlaps_cuda_kernel 16 | <<>>( 17 | bboxes1.data_ptr(), bboxes2.data_ptr(), 18 | ious.data_ptr(), num_bbox1, num_bbox2, mode, aligned, 19 | offset); 20 | })); 21 | AT_CUDA_CHECK(cudaGetLastError()); 22 | } 23 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/bbox_overlaps_parrots.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "bbox_overlaps_pytorch.h" 6 | 7 | using namespace parrots; 8 | 9 | /* 10 | * void bbox_overlaps_cuda(const Tensor bboxes1, const Tensor bboxes2, Tensor 11 | * ious, const int mode, const bool aligned, const int offset); 12 | */ 13 | void bbox_overlaps_parrots(CudaContext& ctx, const SSElement& attr, 14 | const OperatorBase::in_list_t& ins, 15 | OperatorBase::out_list_t& outs) { 16 | int mode, offset; 17 | bool aligned; 18 | SSAttrs(attr) 19 | .get("mode", mode) 20 | .get("aligned", aligned) 21 | .get("offset", offset) 22 | .done(); 23 | 24 | const auto& bboxes1 = buildATensor(ctx, ins[0]); 25 | const auto& bboxes2 = buildATensor(ctx, ins[1]); 26 | auto ious = buildATensor(ctx, outs[0]); 27 | bbox_overlaps_cuda(bboxes1, bboxes2, ious, mode, aligned, offset); 28 | } 29 | 30 | PARROTS_EXTENSION_REGISTER(bbox_overlaps) 31 | .attr("mode") 32 | .attr("aligned") 33 | .attr("offset") 34 | .input(2) 35 | .output(1) 36 | .apply(bbox_overlaps_parrots) 37 | .done(); 38 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/bbox_overlaps_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef BBOX_OVERLAPS_PYTORCH_H 2 | #define BBOX_OVERLAPS_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void bbox_overlaps_cuda(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, 7 | const int mode, const bool aligned, const int offset); 8 | 9 | #endif // BBOX_OVERLAPS_PYTORCH_H 10 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/box_iou_rotated.cpp: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h 4 | #include "pytorch_cpp_helper.hpp" 5 | 6 | void box_iou_rotated_cpu(const Tensor boxes1, const Tensor boxes2, Tensor ious, 7 | const int mode_flag, const bool aligned); 8 | 9 | #ifdef MMCV_WITH_CUDA 10 | void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, 11 | const int mode_flag, const bool aligned); 12 | #endif 13 | 14 | // Interface for Python 15 | // inline is needed to prevent multiple function definitions when this header is 16 | // included by different cpps 17 | void box_iou_rotated(const Tensor boxes1, const Tensor boxes2, Tensor ious, 18 | const int mode_flag, const bool aligned) { 19 | assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); 20 | if (boxes1.device().is_cuda()) { 21 | #ifdef MMCV_WITH_CUDA 22 | box_iou_rotated_cuda(boxes1, boxes2, ious, mode_flag, aligned); 23 | #else 24 | AT_ERROR("Not compiled with GPU support"); 25 | #endif 26 | } else { 27 | box_iou_rotated_cpu(boxes1, boxes2, ious, mode_flag, aligned); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/box_iou_rotated_cpu.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp 4 | #include "box_iou_rotated_utils.hpp" 5 | #include "pytorch_cpp_helper.hpp" 6 | 7 | template 8 | void box_iou_rotated_cpu_kernel(const Tensor boxes1, const Tensor boxes2, 9 | Tensor ious, const int mode_flag, 10 | const bool aligned) { 11 | int output_size = ious.numel(); 12 | auto num_boxes1 = boxes1.size(0); 13 | auto num_boxes2 = boxes2.size(0); 14 | 15 | if (aligned) { 16 | for (int i = 0; i < output_size; i++) { 17 | ious[i] = single_box_iou_rotated(boxes1[i].data_ptr(), 18 | boxes2[i].data_ptr(), mode_flag); 19 | } 20 | } else { 21 | for (int i = 0; i < num_boxes1; i++) { 22 | for (int j = 0; j < num_boxes2; j++) { 23 | ious[i * num_boxes2 + j] = single_box_iou_rotated( 24 | boxes1[i].data_ptr(), boxes2[j].data_ptr(), mode_flag); 25 | } 26 | } 27 | } 28 | } 29 | 30 | void box_iou_rotated_cpu(const Tensor boxes1, const Tensor boxes2, Tensor ious, 31 | const int mode_flag, const bool aligned) { 32 | box_iou_rotated_cpu_kernel(boxes1, boxes2, ious, mode_flag, aligned); 33 | } 34 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/box_iou_rotated_cuda.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu 4 | #include "box_iou_rotated_cuda.cuh" 5 | #include "pytorch_cuda_helper.hpp" 6 | 7 | void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, 8 | const int mode_flag, const bool aligned) { 9 | using scalar_t = float; 10 | AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor"); 11 | AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor"); 12 | 13 | int output_size = ious.numel(); 14 | int num_boxes1 = boxes1.size(0); 15 | int num_boxes2 = boxes2.size(0); 16 | 17 | at::cuda::CUDAGuard device_guard(boxes1.device()); 18 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 19 | box_iou_rotated_cuda_kernel 20 | <<>>( 21 | num_boxes1, num_boxes2, boxes1.data_ptr(), 22 | boxes2.data_ptr(), (scalar_t*)ious.data_ptr(), 23 | mode_flag, aligned); 24 | AT_CUDA_CHECK(cudaGetLastError()); 25 | } 26 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/box_iou_rotated_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef BOX_IOU_ROTATED_PYTORCH_H 2 | #define BOX_IOU_ROTATED_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void box_iou_rotated_cpu(const Tensor boxes1, const Tensor boxes2, Tensor ious, 7 | const int mode_flag, const bool aligned); 8 | 9 | #ifdef MMCV_WITH_CUDA 10 | void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, 11 | const int mode_flag, const bool aligned); 12 | #endif 13 | 14 | #endif // BOX_IOU_ROTATED_PYTORCH_H 15 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/carafe_naive_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef CARAFE_NAIVE_PYTORCH_H 2 | #define CARAFE_NAIVE_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void carafe_naive_forward_cuda(Tensor features, Tensor masks, Tensor output, 7 | int kernel_size, int group_size, 8 | int scale_factor); 9 | 10 | void carafe_naive_backward_cuda(Tensor top_grad, Tensor features, Tensor masks, 11 | Tensor bottom_grad, Tensor mask_grad, 12 | int kernel_size, int group_size, 13 | int scale_factor); 14 | #endif // CARAFE_NAIVE_PYTORCH_H 15 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/carafe_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef CARAFE_PYTORCH_H 2 | #define CARAFE_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void carafe_forward_cuda(Tensor features, Tensor masks, Tensor rfeatures, 7 | Tensor routput, Tensor rmasks, Tensor output, 8 | int kernel_size, int group_size, int scale_factor); 9 | 10 | void carafe_backward_cuda(Tensor top_grad, Tensor rfeatures, Tensor masks, 11 | Tensor rtop_grad, Tensor rbottom_grad_hs, 12 | Tensor rbottom_grad, Tensor rmask_grad, 13 | Tensor bottom_grad, Tensor mask_grad, int kernel_size, 14 | int group_size, int scale_factor); 15 | #endif // CARAFE_PYTORCH_H 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/cc_attention_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef CC_ATTENTION_PYTORCH_H 2 | #define CC_ATTENTION_PYTORCH_H 3 | #include 4 | using 
namespace at; 5 | 6 | void ca_forward_cuda(const Tensor t, const Tensor f, Tensor weight); 7 | 8 | void ca_backward_cuda(const Tensor dw, const Tensor t, const Tensor f, 9 | Tensor dt, Tensor df); 10 | 11 | void ca_map_forward_cuda(const Tensor weight, const Tensor g, Tensor out); 12 | 13 | void ca_map_backward_cuda(const Tensor dout, const Tensor weight, 14 | const Tensor g, Tensor dw, Tensor dg); 15 | #endif // CC_ATTENTION_PYTORCH_H 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/corner_pool_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef CORNER_POOL_PYTORCH_H 2 | #define CORNER_POOL_PYTORCH_H 3 | #include 4 | 5 | at::Tensor bottom_pool_forward(at::Tensor input); 6 | at::Tensor bottom_pool_backward(at::Tensor input, at::Tensor grad_output); 7 | at::Tensor left_pool_forward(at::Tensor input); 8 | at::Tensor left_pool_backward(at::Tensor input, at::Tensor grad_output); 9 | at::Tensor right_pool_forward(at::Tensor input); 10 | at::Tensor right_pool_backward(at::Tensor input, at::Tensor grad_output); 11 | at::Tensor top_pool_forward(at::Tensor input); 12 | at::Tensor top_pool_backward(at::Tensor input, at::Tensor grad_output); 13 | 14 | #endif // CORNER_POOL_PYTORCH_H 15 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/deform_conv_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef DEFORM_CONV_PYTORCH_H 2 | #define DEFORM_CONV_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void deform_conv_forward_cuda(Tensor input, Tensor weight, Tensor offset, 7 | Tensor output, Tensor columns, Tensor ones, 8 | int kW, int kH, int dW, int dH, int padW, 9 | int padH, int dilationW, int dilationH, int group, 10 | int deformable_group, int im2col_step); 11 | 12 | void deform_conv_backward_input_cuda(Tensor input, Tensor offset, 13 | Tensor gradOutput, Tensor gradInput, 14 | Tensor gradOffset, Tensor weight, 15 | Tensor columns, int kW, int kH, int dW, 16 | int dH, int padW, int padH, int dilationW, 17 | int dilationH, int group, 18 | int deformable_group, int im2col_step); 19 | 20 | void deform_conv_backward_parameters_cuda( 21 | Tensor input, Tensor offset, Tensor gradOutput, Tensor gradWeight, 22 | Tensor columns, Tensor ones, int kW, int kH, int dW, int dH, int padW, 23 | int padH, int dilationW, int dilationH, int group, int deformable_group, 24 | float scale, int im2col_step); 25 | 26 | #endif // DEFORM_CONV_PYTORCH_H 27 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/deform_roi_pool_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef DEFORM_ROI_POOL_PYTORCH_H 2 | #define DEFORM_ROI_POOL_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void deform_roi_pool_forward_cuda(Tensor input, Tensor rois, Tensor offset, 7 | Tensor output, int pooled_height, 8 | int pooled_width, float spatial_scale, 9 | int sampling_ratio, float gamma); 10 | 11 | void deform_roi_pool_backward_cuda(Tensor grad_output, Tensor input, 12 | Tensor rois, Tensor offset, 13 | Tensor grad_input, Tensor grad_offset, 14 | int pooled_height, int pooled_width, 15 | float spatial_scale, int sampling_ratio, 16 | float gamma); 17 | #endif // DEFORM_ROI_POOL_PYTORCH_H 18 | -------------------------------------------------------------------------------- 
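# --- Hedged usage sketch (not part of the original sources) ---
# The headers above only declare the raw CUDA launchers for deformable convolution
# and deformable RoI pooling. A minimal sketch of how they are typically reached
# from Python through the public wrapper, assuming mmcv was built with CUDA support
# and that `DeformConv2d` is exported from `mmcv.ops` in this release:
import torch
from mmcv.ops import DeformConv2d

x = torch.randn(1, 3, 16, 16, device='cuda')
# the offset map carries 2 * kernel_h * kernel_w channels per deformable group
offset = torch.zeros(1, 2 * 3 * 3, 16, 16, device='cuda')
conv = DeformConv2d(3, 8, kernel_size=3, padding=1).cuda()
out = conv(x, offset)  # dispatches to deform_conv_forward_cuda declared above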
/mmcv-1.3.0/mmcv/ops/csrc/parrots/focal_loss_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef FOCAL_LOSS_PYTORCH_H 2 | #define FOCAL_LOSS_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void sigmoid_focal_loss_forward_cuda(Tensor input, Tensor target, Tensor weight, 7 | Tensor output, float gamma, float alpha); 8 | 9 | void sigmoid_focal_loss_backward_cuda(Tensor input, Tensor target, 10 | Tensor weight, Tensor grad_input, 11 | float gamma, float alpha); 12 | 13 | void softmax_focal_loss_forward_cuda(Tensor input, Tensor target, Tensor weight, 14 | Tensor output, float gamma, float alpha); 15 | 16 | void softmax_focal_loss_backward_cuda(Tensor input, Tensor target, 17 | Tensor weight, Tensor buff, 18 | Tensor grad_input, float gamma, 19 | float alpha); 20 | #endif // FOCAL_LOSS_PYTORCH_H 21 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/masked_conv2d_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef MASKED_CONV2D_PYTORCH_H 2 | #define MASKED_CONV2D_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void masked_im2col_forward_cuda(const Tensor im, const Tensor mask_h_idx, 7 | const Tensor mask_w_idx, Tensor col, 8 | const int kernel_h, const int kernel_w, 9 | const int pad_h, const int pad_w); 10 | 11 | void masked_col2im_forward_cuda(const Tensor col, const Tensor mask_h_idx, 12 | const Tensor mask_w_idx, Tensor im, int height, 13 | int width, int channels); 14 | #endif // MASKED_CONV2D_PYTORCH_H 15 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/modulated_deform_conv_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef MODULATED_DEFORM_CONV_PYTORCH_H 2 | #define MODULATED_DEFORM_CONV_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void modulated_deform_conv_forward_cuda( 7 | Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset, 8 | Tensor mask, Tensor output, Tensor columns, int kernel_h, int kernel_w, 9 | const int stride_h, const int stride_w, const int pad_h, const int pad_w, 10 | const int dilation_h, const int dilation_w, const int group, 11 | const int deformable_group, const bool with_bias); 12 | 13 | void modulated_deform_conv_backward_cuda( 14 | Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset, 15 | Tensor mask, Tensor columns, Tensor grad_input, Tensor grad_weight, 16 | Tensor grad_bias, Tensor grad_offset, Tensor grad_mask, Tensor grad_output, 17 | int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, 18 | int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, 19 | const bool with_bias); 20 | #endif // MODULATED_DEFORM_CONV_PYTORCH_H 21 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/nms_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef NMS_PYTORCH_H 2 | #define NMS_PYTORCH_H 3 | #include 4 | 5 | at::Tensor nms(at::Tensor boxes, at::Tensor scores, float iou_threshold, 6 | int offset); 7 | 8 | at::Tensor softnms(at::Tensor boxes, at::Tensor scores, at::Tensor dets, 9 | float iou_threshold, float sigma, float min_score, 10 | int method, int offset); 11 | 12 | std::vector > nms_match(at::Tensor dets, float iou_threshold); 13 | 14 | at::Tensor nms_rotated(const at::Tensor dets, 
const at::Tensor scores, 15 | const at::Tensor order, const at::Tensor dets_sorted, 16 | const float iou_threshold, const int multi_label); 17 | #endif // NMS_PYTORCH_H 18 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/nms_rotated.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated.h 4 | #include "pytorch_cpp_helper.hpp" 5 | 6 | Tensor nms_rotated_cpu(const Tensor dets, const Tensor scores, 7 | const float iou_threshold); 8 | 9 | #ifdef MMCV_WITH_CUDA 10 | Tensor nms_rotated_cuda(const Tensor dets, const Tensor scores, 11 | const Tensor order, const Tensor dets_sorted, 12 | const float iou_threshold, const int multi_label); 13 | #endif 14 | 15 | // Interface for Python 16 | // inline is needed to prevent multiple function definitions when this header is 17 | // included by different cpps 18 | Tensor nms_rotated(const Tensor dets, const Tensor scores, const Tensor order, 19 | const Tensor dets_sorted, const float iou_threshold, 20 | const int multi_label) { 21 | assert(dets.device().is_cuda() == scores.device().is_cuda()); 22 | if (dets.device().is_cuda()) { 23 | #ifdef MMCV_WITH_CUDA 24 | return nms_rotated_cuda(dets, scores, order, dets_sorted, iou_threshold, 25 | multi_label); 26 | #else 27 | AT_ERROR("Not compiled with GPU support"); 28 | #endif 29 | } 30 | 31 | return nms_rotated_cpu(dets, scores, iou_threshold); 32 | } 33 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/roi_pool_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef ROI_POOL_PYTORCH_H 2 | #define ROI_POOL_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | #ifdef MMCV_WITH_CUDA 7 | void roi_pool_forward_cuda(Tensor input, Tensor rois, Tensor output, 8 | Tensor argmax, int pooled_height, int pooled_width, 9 | float spatial_scale); 10 | 11 | void roi_pool_backward_cuda(Tensor grad_output, Tensor rois, Tensor argmax, 12 | Tensor grad_input, int pooled_height, 13 | int pooled_width, float spatial_scale); 14 | #endif 15 | #endif // ROI_POOL_PYTORCH_H 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/sync_bn_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef SYNC_BN_PYTORCH_H 2 | #define SYNC_BN_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void sync_bn_forward_mean_cuda(const Tensor input, Tensor mean); 7 | 8 | void sync_bn_forward_var_cuda(const Tensor input, const Tensor mean, 9 | Tensor var); 10 | 11 | void sync_bn_forward_output_cuda(const Tensor input, const Tensor mean, 12 | const Tensor var, Tensor running_mean, 13 | Tensor running_var, const Tensor weight, 14 | const Tensor bias, Tensor norm, Tensor std, 15 | Tensor output, float eps, float momentum, 16 | int group_size); 17 | 18 | void sync_bn_backward_param_cuda(const Tensor grad_output, const Tensor norm, 19 | Tensor grad_weight, Tensor grad_bias); 20 | 21 | void sync_bn_backward_data_cuda(const Tensor grad_output, const Tensor weight, 22 | const Tensor grad_weight, 23 | const Tensor grad_bias, const Tensor norm, 24 | const Tensor std, Tensor grad_input); 25 | #endif // SYNC_BN_PYTORCH_H 26 | 
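# --- Hedged reference implementation (illustration only, not from the sources) ---
# What the three sync-BN forward kernels declared above compute per channel,
# expressed in plain PyTorch. The real CUDA path additionally all-reduces the
# mean/var across the process group and updates the running statistics, which is
# omitted here.
import torch

def sync_bn_forward_reference(x, weight, bias, eps=1e-5):
    # x: (N, C, *spatial); statistics are taken over every dim except the channels
    dims = [0] + list(range(2, x.dim()))
    shape = [1, -1] + [1] * (x.dim() - 2)
    mean = x.mean(dim=dims)                                  # sync_bn_forward_mean
    var = ((x - mean.reshape(shape)) ** 2).mean(dim=dims)    # sync_bn_forward_var
    std = (var + eps).sqrt()
    norm = (x - mean.reshape(shape)) / std.reshape(shape)    # sync_bn_forward_output
    return norm * weight.reshape(shape) + bias.reshape(shape)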
-------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/tin_shift_parrots.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "tin_shift_pytorch.h" 6 | using namespace parrots; 7 | 8 | void tin_shift_forward_cuda_parrots(CudaContext &ctx, const SSElement &attr, 9 | const OperatorBase::in_list_t &ins, 10 | OperatorBase::out_list_t &outs) { 11 | const auto &input = buildATensor(ctx, ins[0]); 12 | const auto &shift = buildATensor(ctx, ins[1]); 13 | auto output = buildATensor(ctx, outs[0]); 14 | tin_shift_forward_cuda(input, shift, output); 15 | } 16 | 17 | void tin_shift_backward_cuda_parrots(CudaContext &ctx, const SSElement &attr, 18 | const OperatorBase::in_list_t &ins, 19 | OperatorBase::out_list_t &outs) { 20 | const auto &grad_output = buildATensor(ctx, ins[0]); 21 | const auto &shift = buildATensor(ctx, ins[1]); 22 | auto grad_input = buildATensor(ctx, outs[0]); 23 | tin_shift_backward_cuda(grad_output, shift, grad_input); 24 | } 25 | 26 | PARROTS_EXTENSION_REGISTER(tin_shift_forward) 27 | .input(2) 28 | .output(1) 29 | .apply(tin_shift_forward_cuda_parrots) 30 | .done(); 31 | 32 | PARROTS_EXTENSION_REGISTER(tin_shift_backward) 33 | .input(2) 34 | .output(1) 35 | .apply(tin_shift_backward_cuda_parrots) 36 | .done(); 37 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/parrots/tin_shift_pytorch.h: -------------------------------------------------------------------------------- 1 | #ifndef TIN_SHIFT_PYTORCH_H 2 | #define TIN_SHIFT_PYTORCH_H 3 | #include 4 | using namespace at; 5 | 6 | void tin_shift_forward_cuda(Tensor input, Tensor shift, Tensor output); 7 | 8 | void tin_shift_backward_cuda(Tensor grad_output, Tensor shift, 9 | Tensor grad_input); 10 | #endif // TIN_SHIFT_PYTORCH_H 11 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/bbox_overlaps.cpp: -------------------------------------------------------------------------------- 1 | #include "pytorch_cpp_helper.hpp" 2 | 3 | #ifdef MMCV_WITH_CUDA 4 | void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2, 5 | Tensor ious, const int mode, 6 | const bool aligned, const int offset); 7 | 8 | void bbox_overlaps_cuda(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, 9 | const int mode, const bool aligned, const int offset) { 10 | BBoxOverlapsCUDAKernelLauncher(bboxes1, bboxes2, ious, mode, aligned, offset); 11 | } 12 | #endif 13 | 14 | void bbox_overlaps(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, 15 | const int mode, const bool aligned, const int offset) { 16 | if (bboxes1.device().is_cuda()) { 17 | #ifdef MMCV_WITH_CUDA 18 | CHECK_CUDA_INPUT(bboxes1); 19 | CHECK_CUDA_INPUT(bboxes2); 20 | CHECK_CUDA_INPUT(ious); 21 | 22 | bbox_overlaps_cuda(bboxes1, bboxes2, ious, mode, aligned, offset); 23 | #else 24 | AT_ERROR("bbox_overlaps is not compiled with GPU support"); 25 | #endif 26 | } else { 27 | AT_ERROR("bbox_overlaps is not implemented on CPU"); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/bbox_overlaps_cuda.cu: -------------------------------------------------------------------------------- 1 | #include "bbox_overlaps_cuda_kernel.cuh" 2 | #include "pytorch_cuda_helper.hpp" 3 | 4 | void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, 
const Tensor bboxes2, 5 | Tensor ious, const int mode, 6 | const bool aligned, const int offset) { 7 | int output_size = ious.numel(); 8 | int num_bbox1 = bboxes1.size(0); 9 | int num_bbox2 = bboxes2.size(0); 10 | 11 | at::cuda::CUDAGuard device_guard(bboxes1.device()); 12 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 13 | AT_DISPATCH_FLOATING_TYPES_AND_HALF( 14 | bboxes1.scalar_type(), "bbox_overlaps_cuda_kernel", ([&] { 15 | bbox_overlaps_cuda_kernel 16 | <<>>( 17 | bboxes1.data_ptr(), bboxes2.data_ptr(), 18 | ious.data_ptr(), num_bbox1, num_bbox2, mode, aligned, 19 | offset); 20 | })); 21 | AT_CUDA_CHECK(cudaGetLastError()); 22 | } 23 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/box_iou_rotated.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h 4 | #include "pytorch_cpp_helper.hpp" 5 | 6 | void box_iou_rotated_cpu(const Tensor boxes1, const Tensor boxes2, Tensor ious, 7 | const int mode_flag, const bool aligned); 8 | 9 | #ifdef MMCV_WITH_CUDA 10 | void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, 11 | const int mode_flag, const bool aligned); 12 | #endif 13 | 14 | // Interface for Python 15 | // inline is needed to prevent multiple function definitions when this header is 16 | // included by different cpps 17 | void box_iou_rotated(const Tensor boxes1, const Tensor boxes2, Tensor ious, 18 | const int mode_flag, const bool aligned) { 19 | assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); 20 | if (boxes1.device().is_cuda()) { 21 | #ifdef MMCV_WITH_CUDA 22 | box_iou_rotated_cuda(boxes1, boxes2, ious, mode_flag, aligned); 23 | #else 24 | AT_ERROR("Not compiled with GPU support"); 25 | #endif 26 | } else { 27 | box_iou_rotated_cpu(boxes1, boxes2, ious, mode_flag, aligned); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/box_iou_rotated_cpu.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp 4 | #include "box_iou_rotated_utils.hpp" 5 | #include "pytorch_cpp_helper.hpp" 6 | 7 | template 8 | void box_iou_rotated_cpu_kernel(const Tensor boxes1, const Tensor boxes2, 9 | Tensor ious, const int mode_flag, 10 | const bool aligned) { 11 | int output_size = ious.numel(); 12 | auto num_boxes1 = boxes1.size(0); 13 | auto num_boxes2 = boxes2.size(0); 14 | 15 | if (aligned) { 16 | for (int i = 0; i < output_size; i++) { 17 | ious[i] = single_box_iou_rotated(boxes1[i].data_ptr(), 18 | boxes2[i].data_ptr(), mode_flag); 19 | } 20 | } else { 21 | for (int i = 0; i < num_boxes1; i++) { 22 | for (int j = 0; j < num_boxes2; j++) { 23 | ious[i * num_boxes2 + j] = single_box_iou_rotated( 24 | boxes1[i].data_ptr(), boxes2[j].data_ptr(), mode_flag); 25 | } 26 | } 27 | } 28 | } 29 | 30 | void box_iou_rotated_cpu(const Tensor boxes1, const Tensor boxes2, Tensor ious, 31 | const int mode_flag, const bool aligned) { 32 | box_iou_rotated_cpu_kernel(boxes1, boxes2, ious, mode_flag, aligned); 33 | } 34 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/box_iou_rotated_cuda.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu 4 | #include "box_iou_rotated_cuda.cuh" 5 | #include "pytorch_cuda_helper.hpp" 6 | 7 | void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, 8 | const int mode_flag, const bool aligned) { 9 | using scalar_t = float; 10 | AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor"); 11 | AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor"); 12 | 13 | int output_size = ious.numel(); 14 | int num_boxes1 = boxes1.size(0); 15 | int num_boxes2 = boxes2.size(0); 16 | 17 | at::cuda::CUDAGuard device_guard(boxes1.device()); 18 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 19 | box_iou_rotated_cuda_kernel 20 | <<>>( 21 | num_boxes1, num_boxes2, boxes1.data_ptr(), 22 | boxes2.data_ptr(), (scalar_t*)ious.data_ptr(), 23 | mode_flag, aligned); 24 | AT_CUDA_CHECK(cudaGetLastError()); 25 | } 26 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/fused_bias_leakyrelu.cpp: -------------------------------------------------------------------------------- 1 | // Modified from 2 | // from 3 | // https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_bias_act.cpp 4 | #include "pytorch_cpp_helper.hpp" 5 | 6 | #ifdef MMCV_WITH_CUDA 7 | torch::Tensor fused_bias_leakyrelu_op(const torch::Tensor& input, 8 | const torch::Tensor& bias, 9 | const torch::Tensor& refer, int act, 10 | int grad, float alpha, float scale); 11 | 12 | #endif 13 | 14 | torch::Tensor fused_bias_leakyrelu(const torch::Tensor& input, 15 | const torch::Tensor& bias, 16 | const torch::Tensor& refer, int act, 17 | int grad, float alpha, float scale) { 18 | #ifdef MMCV_WITH_CUDA 19 | CHECK_CUDA(input); 20 | CHECK_CUDA(bias); 21 | 22 | return fused_bias_leakyrelu_op(input, bias, refer, act, grad, alpha, scale); 23 | #else 24 | AT_ERROR("Fused bias leakyrelu is not compiled with GPU support"); 25 | #endif 26 | } 27 | 
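# --- Hedged reference implementation (illustration only, not from the sources) ---
# The operation wrapped by fused_bias_leakyrelu above: add a per-channel bias, apply
# LeakyReLU, then rescale. The CUDA kernel fuses these steps; the slope and scale
# defaults below are the commonly used StyleGAN2 values, stated here as assumptions.
import torch
import torch.nn.functional as F

def fused_bias_leakyrelu_reference(x, bias, negative_slope=0.2, scale=2 ** 0.5):
    shape = [1, -1] + [1] * (x.dim() - 2)
    return F.leaky_relu(x + bias.reshape(shape), negative_slope) * scale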
-------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/info.cpp: -------------------------------------------------------------------------------- 1 | // modified from 2 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/vision.cpp 3 | #include "pytorch_cpp_helper.hpp" 4 | 5 | #ifdef MMCV_WITH_CUDA 6 | #include 7 | int get_cudart_version() { return CUDART_VERSION; } 8 | #endif 9 | 10 | std::string get_compiling_cuda_version() { 11 | #ifdef MMCV_WITH_CUDA 12 | std::ostringstream oss; 13 | // copied from 14 | // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 15 | auto printCudaStyleVersion = [&](int v) { 16 | oss << (v / 1000) << "." << (v / 10 % 100); 17 | if (v % 10 != 0) { 18 | oss << "." << (v % 10); 19 | } 20 | }; 21 | printCudaStyleVersion(get_cudart_version()); 22 | return oss.str(); 23 | #else 24 | return std::string("not available"); 25 | #endif 26 | } 27 | 28 | // similar to 29 | // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp 30 | std::string get_compiler_version() { 31 | std::ostringstream ss; 32 | #if defined(__GNUC__) 33 | #ifndef __clang__ 34 | { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } 35 | #endif 36 | #endif 37 | 38 | #if defined(__clang_major__) 39 | { 40 | ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." 41 | << __clang_patchlevel__; 42 | } 43 | #endif 44 | 45 | #if defined(_MSC_VER) 46 | { ss << "MSVC " << _MSC_FULL_VER; } 47 | #endif 48 | return ss.str(); 49 | } 50 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/nms_rotated.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | // modified from 3 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated.h 4 | #include "pytorch_cpp_helper.hpp" 5 | 6 | Tensor nms_rotated_cpu(const Tensor dets, const Tensor scores, 7 | const float iou_threshold); 8 | 9 | #ifdef MMCV_WITH_CUDA 10 | Tensor nms_rotated_cuda(const Tensor dets, const Tensor scores, 11 | const Tensor order, const Tensor dets_sorted, 12 | const float iou_threshold, const int multi_label); 13 | #endif 14 | 15 | // Interface for Python 16 | // inline is needed to prevent multiple function definitions when this header is 17 | // included by different cpps 18 | Tensor nms_rotated(const Tensor dets, const Tensor scores, const Tensor order, 19 | const Tensor dets_sorted, const float iou_threshold, 20 | const int multi_label) { 21 | assert(dets.device().is_cuda() == scores.device().is_cuda()); 22 | if (dets.device().is_cuda()) { 23 | #ifdef MMCV_WITH_CUDA 24 | return nms_rotated_cuda(dets, scores, order, dets_sorted, iou_threshold, 25 | multi_label); 26 | #else 27 | AT_ERROR("Not compiled with GPU support"); 28 | #endif 29 | } 30 | 31 | return nms_rotated_cpu(dets, scores, iou_threshold); 32 | } 33 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch/upfirdn2d.cpp: -------------------------------------------------------------------------------- 1 | // from 2 | // https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.cpp 3 | #include "pytorch_cpp_helper.hpp" 4 | 5 | #ifdef MMCV_WITH_CUDA 6 | torch::Tensor upfirdn2d_op(const torch::Tensor& input, 7 | const torch::Tensor& kernel, int up_x, int up_y, 8 | int down_x, int down_y, int pad_x0, int pad_x1, 9 | int pad_y0, int pad_y1); 10 | 11 | #endif 12 | 13 | torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel, 14 | int up_x, int up_y, int down_x, int down_y, int pad_x0, 15 | int pad_x1, int pad_y0, int pad_y1) { 16 | #ifdef MMCV_WITH_CUDA 17 | CHECK_CUDA(input); 18 | CHECK_CUDA(kernel); 19 | 20 | return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, 21 | pad_y0, pad_y1); 22 | #else 23 | AT_ERROR("UpFirDn2d is not compiled with GPU support"); 24 | #endif 25 | } 26 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch_cpp_helper.hpp: -------------------------------------------------------------------------------- 1 | #ifndef PYTORCH_CPP_HELPER 2 | #define PYTORCH_CPP_HELPER 3 | #include 4 | 5 | #include 6 | 7 | using namespace at; 8 | 9 | #define CHECK_CUDA(x) \ 10 | TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") 11 | #define CHECK_CPU(x) \ 12 | TORCH_CHECK(!x.device().is_cuda(), #x " must be a CPU tensor") 13 | #define CHECK_CONTIGUOUS(x) \ 14 | TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 15 | #define CHECK_CUDA_INPUT(x) \ 16 | CHECK_CUDA(x); \ 17 | CHECK_CONTIGUOUS(x) 18 | #define CHECK_CPU_INPUT(x) \ 19 | CHECK_CPU(x); \ 20 | CHECK_CONTIGUOUS(x) 21 | 22 | #endif // PYTORCH_CPP_HELPER 23 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/pytorch_cuda_helper.hpp: -------------------------------------------------------------------------------- 1 | #ifndef PYTORCH_CUDA_HELPER 2 | #define PYTORCH_CUDA_HELPER 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include "common_cuda_helper.hpp" 12 | 13 | using at::Half; 14 | using 
at::Tensor; 15 | using phalf = at::Half; 16 | 17 | #define __PHALF(x) (x) 18 | 19 | #endif // PYTORCH_CUDA_HELPER 20 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/tensorrt/plugins/trt_plugin.cpp: -------------------------------------------------------------------------------- 1 | #include "trt_plugin.hpp" 2 | 3 | #include "trt_deform_conv.hpp" 4 | #include "trt_nms.hpp" 5 | #include "trt_roi_align.hpp" 6 | #include "trt_scatternd.hpp" 7 | 8 | REGISTER_TENSORRT_PLUGIN(DeformableConvPluginDynamicCreator); 9 | REGISTER_TENSORRT_PLUGIN(NonMaxSuppressionDynamicCreator); 10 | REGISTER_TENSORRT_PLUGIN(RoIAlignPluginDynamicCreator); 11 | REGISTER_TENSORRT_PLUGIN(ONNXScatterNDDynamicCreator); 12 | 13 | extern "C" { 14 | bool initLibMMCVInferPlugins() { return true; } 15 | } // extern "C" 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/tensorrt/plugins/trt_roi_align_kernel.cu: -------------------------------------------------------------------------------- 1 | #include "common_cuda_helper.hpp" 2 | #include "roi_align_cuda_kernel.cuh" 3 | 4 | template 5 | void TRTRoIAlignForwardCUDAKernelLauncher( 6 | const scalar_t* input, const scalar_t* rois, scalar_t* output, 7 | scalar_t* argmax_y, scalar_t* argmax_x, int output_size, int channels, 8 | int height, int width, int aligned_height, int aligned_width, 9 | scalar_t spatial_scale, int sampling_ratio, int pool_mode, bool aligned, 10 | cudaStream_t stream) { 11 | roi_align_forward_cuda_kernel 12 | <<>>( 13 | output_size, input, rois, output, argmax_y, argmax_x, aligned_height, 14 | aligned_width, static_cast(spatial_scale), sampling_ratio, 15 | pool_mode, aligned, channels, height, width); 16 | } 17 | 18 | void TRTRoIAlignForwardCUDAKernelLauncher_float( 19 | const float* input, const float* rois, float* output, float* argmax_y, 20 | float* argmax_x, int output_size, int channels, int height, int width, 21 | int aligned_height, int aligned_width, float spatial_scale, 22 | int sampling_ratio, int pool_mode, bool aligned, cudaStream_t stream) { 23 | TRTRoIAlignForwardCUDAKernelLauncher( 24 | input, rois, output, argmax_y, argmax_x, output_size, channels, height, 25 | width, aligned_height, aligned_width, spatial_scale, sampling_ratio, 26 | pool_mode, aligned, stream); 27 | } 28 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/tensorrt/trt_cuda_helper.cuh: -------------------------------------------------------------------------------- 1 | #ifndef TRT_CUDA_HELPER_HPP 2 | #define TRT_CUDA_HELPER_HPP 3 | 4 | #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) 5 | 6 | #define cudaCheckError() \ 7 | { \ 8 | cudaError_t e = cudaGetLastError(); \ 9 | if (e != cudaSuccess) { \ 10 | printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, \ 11 | cudaGetErrorString(e)); \ 12 | exit(0); \ 13 | } \ 14 | } 15 | 16 | /** 17 | * Returns a view of the original tensor with its dimensions permuted. 
18 | * 19 | * @param[out] dst pointer to the destination tensor 20 | * @param[in] src pointer to the source tensor 21 | * @param[in] src_size shape of the src tensor 22 | * @param[in] permute The desired ordering of dimensions 23 | * @param[in] src_dim dim of src tensor 24 | * @param[in] stream cuda stream handle 25 | */ 26 | template 27 | void memcpyPermute(scalar_t *dst, const scalar_t *src, int *src_size, 28 | int *permute, int src_dim, cudaStream_t stream = 0); 29 | 30 | #endif // TRT_CUDA_HELPER_HPP 31 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/tensorrt/trt_plugin.hpp: -------------------------------------------------------------------------------- 1 | #ifndef TRT_PLUGIN_HPP 2 | #define TRT_PLUGIN_HPP 3 | 4 | extern "C" { 5 | bool initLibMMCVInferPlugins(); 6 | } // extern "C" 7 | #endif // TRT_PLUGIN_HPP 8 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/csrc/tensorrt/trt_plugin_helper.hpp: -------------------------------------------------------------------------------- 1 | #ifndef TRT_PLUGIN_HELPER_HPP 2 | #define TRT_PLUGIN_HELPER_HPP 3 | #include 4 | 5 | #include "NvInferPlugin.h" 6 | 7 | namespace mmcv { 8 | 9 | const int MAXTENSORDIMS = 10; 10 | 11 | struct TensorDesc { 12 | int shape[MAXTENSORDIMS]; 13 | int stride[MAXTENSORDIMS]; 14 | int dim; 15 | }; 16 | 17 | inline unsigned int getElementSize(nvinfer1::DataType t) { 18 | switch (t) { 19 | case nvinfer1::DataType::kINT32: 20 | return 4; 21 | case nvinfer1::DataType::kFLOAT: 22 | return 4; 23 | case nvinfer1::DataType::kHALF: 24 | return 2; 25 | // case nvinfer1::DataType::kBOOL: 26 | case nvinfer1::DataType::kINT8: 27 | return 1; 28 | default: 29 | throw std::runtime_error("Invalid DataType."); 30 | } 31 | throw std::runtime_error("Invalid DataType."); 32 | return 0; 33 | } 34 | 35 | inline size_t getAlignedSize(size_t origin_size, size_t aligned_number = 16) { 36 | return size_t((origin_size + aligned_number - 1) / aligned_number) * 37 | aligned_number; 38 | } 39 | 40 | } // namespace mmcv 41 | #endif // TRT_PLUGIN_HELPER_HPP 42 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/ops/info.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | 4 | import torch 5 | 6 | if torch.__version__ == 'parrots': 7 | import parrots 8 | 9 | def get_compiler_version(): 10 | return 'GCC ' + parrots.version.compiler 11 | 12 | def get_compiling_cuda_version(): 13 | return parrots.version.cuda 14 | else: 15 | from ..utils import ext_loader 16 | ext_module = ext_loader.load_ext( 17 | '_ext', ['get_compiler_version', 'get_compiling_cuda_version']) 18 | 19 | def get_compiler_version(): 20 | return ext_module.get_compiler_version() 21 | 22 | def get_compiling_cuda_version(): 23 | return ext_module.get_compiling_cuda_version() 24 | 25 | 26 | def get_onnxruntime_op_path(): 27 | wildcard = os.path.join( 28 | os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 29 | '_ext_ort.*.so') 30 | 31 | paths = glob.glob(wildcard) 32 | if len(paths) > 0: 33 | return paths[0] 34 | else: 35 | return '' 36 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/parallel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
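# --- Hedged usage sketch (not part of the original sources) ---
# How the get_onnxruntime_op_path() helper shown above is typically used: the path
# of the compiled custom-op library is registered on an onnxruntime SessionOptions
# object before creating a session. 'model.onnx' is a placeholder file name, and the
# import location of the helper is assumed to be mmcv.ops.
import onnxruntime as ort
from mmcv.ops import get_onnxruntime_op_path

ort_op_path = get_onnxruntime_op_path()
session_options = ort.SessionOptions()
if ort_op_path:
    session_options.register_custom_ops_library(ort_op_path)
session = ort.InferenceSession('model.onnx', session_options)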
2 | from .collate import collate 3 | from .data_container import DataContainer 4 | from .data_parallel import MMDataParallel 5 | from .distributed import MMDistributedDataParallel 6 | from .registry import MODULE_WRAPPERS 7 | from .scatter_gather import scatter, scatter_kwargs 8 | from .utils import is_module_wrapper 9 | 10 | __all__ = [ 11 | 'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel', 12 | 'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS' 13 | ] 14 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/parallel/registry.py: -------------------------------------------------------------------------------- 1 | from torch.nn.parallel import DataParallel, DistributedDataParallel 2 | 3 | from mmcv.utils import Registry 4 | 5 | MODULE_WRAPPERS = Registry('module wrapper') 6 | MODULE_WRAPPERS.register_module(module=DataParallel) 7 | MODULE_WRAPPERS.register_module(module=DistributedDataParallel) 8 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/parallel/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .registry import MODULE_WRAPPERS 3 | 4 | 5 | def is_module_wrapper(module): 6 | """Check if a module is a module wrapper. 7 | 8 | The following 3 modules in MMCV (and their subclasses) are regarded as 9 | module wrappers: DataParallel, DistributedDataParallel, 10 | MMDistributedDataParallel (the deprecated version). You may add you own 11 | module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS. 12 | 13 | Args: 14 | module (nn.Module): The module to be checked. 15 | 16 | Returns: 17 | bool: True if the input module is a module wrapper. 18 | """ 19 | module_wrappers = tuple(MODULE_WRAPPERS.module_dict.values()) 20 | return isinstance(module, module_wrappers) 21 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/builder.py: -------------------------------------------------------------------------------- 1 | from ..utils import Registry, build_from_cfg 2 | 3 | RUNNERS = Registry('runner') 4 | 5 | 6 | def build_runner(cfg, default_args=None): 7 | return build_from_cfg(cfg, RUNNERS, default_args=default_args) 8 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
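# --- Hedged usage sketch (not part of the original sources) ---
# The registration pattern described in the is_module_wrapper() docstring above:
# a user-defined wrapper (hypothetical name MyModuleWrapper) is registered to
# MODULE_WRAPPERS so that is_module_wrapper() starts recognizing its instances.
import torch.nn as nn
from mmcv.parallel import MODULE_WRAPPERS, is_module_wrapper

@MODULE_WRAPPERS.register_module()
class MyModuleWrapper(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)

assert is_module_wrapper(MyModuleWrapper(nn.Linear(4, 2)))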
2 | from .checkpoint import CheckpointHook 3 | from .closure import ClosureHook 4 | from .ema import EMAHook 5 | from .hook import HOOKS, Hook 6 | from .iter_timer import IterTimerHook 7 | from .logger import (LoggerHook, MlflowLoggerHook, PaviLoggerHook, 8 | TensorboardLoggerHook, TextLoggerHook, WandbLoggerHook) 9 | from .lr_updater import LrUpdaterHook 10 | from .memory import EmptyCacheHook 11 | from .momentum_updater import MomentumUpdaterHook 12 | from .optimizer import Fp16OptimizerHook, OptimizerHook 13 | from .sampler_seed import DistSamplerSeedHook 14 | from .sync_buffer import SyncBuffersHook 15 | 16 | __all__ = [ 17 | 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', 18 | 'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook', 19 | 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook', 20 | 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', 21 | 'WandbLoggerHook', 'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook' 22 | ] 23 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/hooks/closure.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .hook import HOOKS, Hook 3 | 4 | 5 | @HOOKS.register_module() 6 | class ClosureHook(Hook): 7 | 8 | def __init__(self, fn_name, fn): 9 | assert hasattr(self, fn_name) 10 | assert callable(fn) 11 | setattr(self, fn_name, fn) 12 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/hooks/iter_timer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | import time 3 | 4 | from .hook import HOOKS, Hook 5 | 6 | 7 | @HOOKS.register_module() 8 | class IterTimerHook(Hook): 9 | 10 | def before_epoch(self, runner): 11 | self.t = time.time() 12 | 13 | def before_iter(self, runner): 14 | runner.log_buffer.update({'data_time': time.time() - self.t}) 15 | 16 | def after_iter(self, runner): 17 | runner.log_buffer.update({'time': time.time() - self.t}) 18 | self.t = time.time() 19 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/hooks/logger/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .base import LoggerHook 3 | from .mlflow import MlflowLoggerHook 4 | from .pavi import PaviLoggerHook 5 | from .tensorboard import TensorboardLoggerHook 6 | from .text import TextLoggerHook 7 | from .wandb import WandbLoggerHook 8 | 9 | __all__ = [ 10 | 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook', 11 | 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook' 12 | ] 13 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/hooks/memory.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
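# --- Hedged usage sketch (not part of the original sources) ---
# ClosureHook above binds an arbitrary callable to one of the standard hook points,
# so a one-off behaviour does not need a dedicated Hook subclass. 'after_train_epoch'
# is assumed to be one of the method names defined on the Hook base class.
from mmcv.runner.hooks import ClosureHook

def print_epoch(runner):
    print(f'finished epoch {runner.epoch}')

hook = ClosureHook('after_train_epoch', print_epoch)
# runner.register_hook(hook)  # attached to an existing runner instance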
2 | import torch 3 | 4 | from .hook import HOOKS, Hook 5 | 6 | 7 | @HOOKS.register_module() 8 | class EmptyCacheHook(Hook): 9 | 10 | def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): 11 | self._before_epoch = before_epoch 12 | self._after_epoch = after_epoch 13 | self._after_iter = after_iter 14 | 15 | def after_iter(self, runner): 16 | if self._after_iter: 17 | torch.cuda.empty_cache() 18 | 19 | def before_epoch(self, runner): 20 | if self._before_epoch: 21 | torch.cuda.empty_cache() 22 | 23 | def after_epoch(self, runner): 24 | if self._after_epoch: 25 | torch.cuda.empty_cache() 26 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/hooks/sampler_seed.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .hook import HOOKS, Hook 3 | 4 | 5 | @HOOKS.register_module() 6 | class DistSamplerSeedHook(Hook): 7 | """Data-loading sampler for distributed training. 8 | 9 | When distributed training, it is only useful in conjunction with 10 | :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same 11 | purpose with :obj:`IterLoader`. 12 | """ 13 | 14 | def before_epoch(self, runner): 15 | if hasattr(runner.data_loader.sampler, 'set_epoch'): 16 | # in case the data loader uses `SequentialSampler` in Pytorch 17 | runner.data_loader.sampler.set_epoch(runner.epoch) 18 | elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'): 19 | # batch sampler in pytorch warps the sampler as its attributes. 20 | runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch) 21 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/hooks/sync_buffer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from ..dist_utils import allreduce_params 3 | from .hook import HOOKS, Hook 4 | 5 | 6 | @HOOKS.register_module() 7 | class SyncBuffersHook(Hook): 8 | """Synchronize model buffers such as running_mean and running_var in BN at 9 | the end of each epoch. 10 | 11 | Args: 12 | distributed (bool): Whether distributed training is used. It is 13 | effective only for distributed training. Defaults to True. 14 | """ 15 | 16 | def __init__(self, distributed=True): 17 | self.distributed = distributed 18 | 19 | def after_epoch(self, runner): 20 | """All-reduce model buffers at the end of each epoch.""" 21 | if self.distributed: 22 | allreduce_params(runner.model.buffers()) 23 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/log_buffer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
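# --- Hedged usage sketch (not part of the original sources) ---
# The hooks above are instantiated with plain keyword arguments and attached to a
# runner; e.g. empty the CUDA cache after every epoch and all-reduce BN buffers in
# distributed training. The `runner` variable is assumed to exist already.
from mmcv.runner.hooks import EmptyCacheHook, SyncBuffersHook

empty_cache_hook = EmptyCacheHook(before_epoch=False, after_epoch=True, after_iter=False)
sync_buffers_hook = SyncBuffersHook(distributed=True)
# runner.register_hook(empty_cache_hook)
# runner.register_hook(sync_buffers_hook)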
2 | from collections import OrderedDict 3 | 4 | import numpy as np 5 | 6 | 7 | class LogBuffer: 8 | 9 | def __init__(self): 10 | self.val_history = OrderedDict() 11 | self.n_history = OrderedDict() 12 | self.output = OrderedDict() 13 | self.ready = False 14 | 15 | def clear(self): 16 | self.val_history.clear() 17 | self.n_history.clear() 18 | self.clear_output() 19 | 20 | def clear_output(self): 21 | self.output.clear() 22 | self.ready = False 23 | 24 | def update(self, vars, count=1): 25 | assert isinstance(vars, dict) 26 | for key, var in vars.items(): 27 | if key not in self.val_history: 28 | self.val_history[key] = [] 29 | self.n_history[key] = [] 30 | self.val_history[key].append(var) 31 | self.n_history[key].append(count) 32 | 33 | def average(self, n=0): 34 | """Average latest n values or all values.""" 35 | assert n >= 0 36 | for key in self.val_history: 37 | values = np.array(self.val_history[key][-n:]) 38 | nums = np.array(self.n_history[key][-n:]) 39 | avg = np.sum(values * nums) / np.sum(nums) 40 | self.output[key] = avg 41 | self.ready = True 42 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/optimizer/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import (OPTIMIZER_BUILDERS, OPTIMIZERS, build_optimizer, 2 | build_optimizer_constructor) 3 | from .default_constructor import DefaultOptimizerConstructor 4 | 5 | __all__ = [ 6 | 'OPTIMIZER_BUILDERS', 'OPTIMIZERS', 'DefaultOptimizerConstructor', 7 | 'build_optimizer', 'build_optimizer_constructor' 8 | ] 9 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/runner/optimizer/builder.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import inspect 3 | 4 | import torch 5 | 6 | from ...utils import Registry, build_from_cfg 7 | 8 | OPTIMIZERS = Registry('optimizer') 9 | OPTIMIZER_BUILDERS = Registry('optimizer builder') 10 | 11 | 12 | def register_torch_optimizers(): 13 | torch_optimizers = [] 14 | for module_name in dir(torch.optim): 15 | if module_name.startswith('__'): 16 | continue 17 | _optim = getattr(torch.optim, module_name) 18 | if inspect.isclass(_optim) and issubclass(_optim, 19 | torch.optim.Optimizer): 20 | OPTIMIZERS.register_module()(_optim) 21 | torch_optimizers.append(module_name) 22 | return torch_optimizers 23 | 24 | 25 | TORCH_OPTIMIZERS = register_torch_optimizers() 26 | 27 | 28 | def build_optimizer_constructor(cfg): 29 | return build_from_cfg(cfg, OPTIMIZER_BUILDERS) 30 | 31 | 32 | def build_optimizer(model, cfg): 33 | optimizer_cfg = copy.deepcopy(cfg) 34 | constructor_type = optimizer_cfg.pop('constructor', 35 | 'DefaultOptimizerConstructor') 36 | paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) 37 | optim_constructor = build_optimizer_constructor( 38 | dict( 39 | type=constructor_type, 40 | optimizer_cfg=optimizer_cfg, 41 | paramwise_cfg=paramwise_cfg)) 42 | optimizer = optim_constructor(model) 43 | return optimizer 44 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/tensorrt/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | from .init_plugins import is_tensorrt_plugin_loaded, load_tensorrt_plugin 3 | from .tensorrt_utils import (TRTWraper, load_trt_engine, onnx2trt, 4 | save_trt_engine) 5 | 6 | # load tensorrt plugin lib 7 | load_tensorrt_plugin() 8 | 9 | __all__ = 
[ 10 | 'onnx2trt', 'save_trt_engine', 'load_trt_engine', 'TRTWraper', 11 | 'is_tensorrt_plugin_loaded' 12 | ] 13 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/tensorrt/init_plugins.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import glob 3 | import os 4 | 5 | 6 | def get_tensorrt_op_path(): 7 | """Get TensorRT plugins library path.""" 8 | wildcard = os.path.join( 9 | os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 10 | '_ext_trt.*.so') 11 | 12 | paths = glob.glob(wildcard) 13 | lib_path = paths[0] if len(paths) > 0 else '' 14 | return lib_path 15 | 16 | 17 | plugin_is_loaded = False 18 | 19 | 20 | def is_tensorrt_plugin_loaded(): 21 | """Check if TensorRT plugins library is loaded or not. 22 | 23 | Returns: 24 | bool: plugin_is_loaded flag 25 | """ 26 | global plugin_is_loaded 27 | return plugin_is_loaded 28 | 29 | 30 | def load_tensorrt_plugin(): 31 | """load TensorRT plugins library.""" 32 | global plugin_is_loaded 33 | lib_path = get_tensorrt_op_path() 34 | if (not plugin_is_loaded) and os.path.exists(lib_path): 35 | ctypes.CDLL(lib_path) 36 | plugin_is_loaded = True 37 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/utils/ext_loader.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | import pkgutil 4 | from collections import namedtuple 5 | 6 | import torch 7 | 8 | if torch.__version__ != 'parrots': 9 | 10 | def load_ext(name, funcs): 11 | ext = importlib.import_module('mmcv.' + name) 12 | for fun in funcs: 13 | assert hasattr(ext, fun), f'{fun} miss in module {name}' 14 | return ext 15 | else: 16 | from parrots import extension 17 | 18 | has_return_value_ops = [ 19 | 'nms', 'softnms', 'nms_match', 'nms_rotated', 'top_pool_forward', 20 | 'top_pool_backward', 'bottom_pool_forward', 'bottom_pool_backward', 21 | 'left_pool_forward', 'left_pool_backward', 'right_pool_forward', 22 | 'right_pool_backward' 23 | ] 24 | 25 | def load_ext(name, funcs): 26 | ExtModule = namedtuple('ExtModule', funcs) 27 | ext_list = [] 28 | lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) 29 | for fun in funcs: 30 | if fun in has_return_value_ops: 31 | ext_list.append(extension.load(fun, name, lib_dir=lib_root).op) 32 | else: 33 | ext_list.append( 34 | extension.load(fun, name, lib_dir=lib_root).op_) 35 | return ExtModule(*ext_list) 36 | 37 | 38 | def check_ops_exist(): 39 | ext_loader = pkgutil.find_loader('mmcv._ext') 40 | return ext_loader is not None 41 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/utils/parrots_jit.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .parrots_wrapper import TORCH_VERSION 4 | 5 | parrots_jit_option = os.getenv('PARROTS_JIT_OPTION') 6 | 7 | if TORCH_VERSION == 'parrots' and parrots_jit_option == 'ON': 8 | from parrots.jit import pat as jit 9 | else: 10 | 11 | def jit(func=None, 12 | check_input=None, 13 | full_shape=True, 14 | derivate=False, 15 | coderize=False, 16 | optimize=False): 17 | 18 | def wrapper(func): 19 | 20 | def wrapper_inner(*args, **kargs): 21 | return func(*args, **kargs) 22 | 23 | return wrapper_inner 24 | 25 | if func is None: 26 | return wrapper 27 | else: 28 | return func 29 | 30 | 31 | if TORCH_VERSION == 'parrots': 32 | from parrots.utils.tester import skip_no_elena 33 | 
else: 34 | 35 | def skip_no_elena(func): 36 | 37 | def wrapper(*args, **kargs): 38 | return func(*args, **kargs) 39 | 40 | return wrapper 41 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | 3 | __version__ = '1.3.0' 4 | 5 | 6 | def parse_version_info(version_str): 7 | """Parse a version string into a tuple. 8 | 9 | Args: 10 | version_str (str): The version string. 11 | 12 | Returns: 13 | tuple[int | str]: The version info, e.g., "1.3.0" is parsed into 14 | (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 15 | """ 16 | version_info = [] 17 | for x in version_str.split('.'): 18 | if x.isdigit(): 19 | version_info.append(int(x)) 20 | elif x.find('rc') != -1: 21 | patch_version = x.split('rc') 22 | version_info.append(int(patch_version[0])) 23 | version_info.append(f'rc{patch_version[1]}') 24 | return tuple(version_info) 25 | 26 | 27 | version_info = parse_version_info(__version__) 28 | 29 | __all__ = ['__version__', 'version_info', 'parse_version_info'] 30 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/video/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .io import Cache, VideoReader, frames2video 3 | from .optflow import (dequantize_flow, flow_warp, flowread, flowwrite, 4 | quantize_flow) 5 | from .processing import concat_video, convert_video, cut_video, resize_video 6 | 7 | __all__ = [ 8 | 'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video', 9 | 'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow', 10 | 'dequantize_flow', 'flow_warp' 11 | ] 12 | -------------------------------------------------------------------------------- /mmcv-1.3.0/mmcv/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
2 | from .color import Color, color_val 3 | from .image import imshow, imshow_bboxes, imshow_det_bboxes 4 | from .optflow import flow2rgb, flowshow, make_color_wheel 5 | 6 | __all__ = [ 7 | 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes', 8 | 'flowshow', 'flow2rgb', 'make_color_wheel' 9 | ] 10 | -------------------------------------------------------------------------------- /mmcv-1.3.0/requirements.txt: -------------------------------------------------------------------------------- 1 | addict 2 | numpy 3 | pyyaml 4 | regex;sys_platform=='win32' 5 | yapf 6 | -------------------------------------------------------------------------------- /mmcv-1.3.0/requirements/docs.txt: -------------------------------------------------------------------------------- 1 | m2r 2 | torch 3 | -------------------------------------------------------------------------------- /mmcv-1.3.0/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | addict 2 | numpy 3 | Pillow 4 | pyyaml 5 | regex;sys_platform=='win32' 6 | yapf 7 | -------------------------------------------------------------------------------- /mmcv-1.3.0/requirements/test.txt: -------------------------------------------------------------------------------- 1 | coverage 2 | lmdb 3 | onnx==1.7.0 4 | onnxoptimizer 5 | onnxruntime==1.4.0 6 | pytest 7 | PyTurboJPEG 8 | tiffile 9 | -------------------------------------------------------------------------------- /mmcv-1.3.0/setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 3 | 4 | [aliases] 5 | test=pytest 6 | 7 | [yapf] 8 | based_on_style = pep8 9 | blank_line_before_nested_class_or_def = true 10 | split_before_expression_after_opening_paren = true 11 | 12 | [isort] 13 | line_length = 79 14 | multi_line_output = 0 15 | known_standard_library = pkg_resources,setuptools,logging,os,warnings,abc 16 | known_first_party = mmcv 17 | known_third_party = addict,cv2,m2r,numpy,onnx,onnxoptimizer,onnxruntime,packaging,pytest,recommonmark,resnet_cifar,tensorrt,torch,torchvision,yaml,yapf 18 | no_lines_before = STDLIB,LOCALFOLDER 19 | default_section = THIRDPARTY 20 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/batched_nms_data.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/batched_nms_data.pkl -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/color.jpg -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/color_exif.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/color_exif.jpg -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/a.b.py: -------------------------------------------------------------------------------- 1 | item1 = [1, 2] 2 | item2 = {'a': 0} 3 | item3 = True 4 | item4 = 'test' 5 | 
-------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/a.py: -------------------------------------------------------------------------------- 1 | item1 = [1, 2] 2 | item2 = {'a': 0} 3 | item3 = True 4 | item4 = 'test' 5 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/b.json: -------------------------------------------------------------------------------- 1 | { 2 | "item1": [1, 2], 3 | "item2": { 4 | "a": 0 5 | }, 6 | "item3": true, 7 | "item4": "test" 8 | } -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/base.py: -------------------------------------------------------------------------------- 1 | item1 = [1, 2] 2 | item2 = {'a': 0} 3 | item3 = True 4 | item4 = 'test' 5 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/c.yaml: -------------------------------------------------------------------------------- 1 | item1: [1, 2] 2 | item2: {'a': 0} 3 | item3: True 4 | item4: 'test' 5 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/code.py: -------------------------------------------------------------------------------- 1 | from mmcv import Config # isort:skip 2 | cfg = Config.fromfile('./tests/data/config/a.py') 3 | item5 = cfg.item1[0] + cfg.item2.a 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/d.py: -------------------------------------------------------------------------------- 1 | _base_ = './base.py' 2 | item1 = [2, 3] 3 | item2 = {'a': 1} 4 | item3 = False 5 | item4 = 'test_base' 6 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/delete.py: -------------------------------------------------------------------------------- 1 | _base_ = './base.py' 2 | item2 = {'b': 0, '_delete_': True} 3 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/e.py: -------------------------------------------------------------------------------- 1 | _base_ = './base.py' 2 | item3 = {'a': 1} 3 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/f.py: -------------------------------------------------------------------------------- 1 | _base_ = './d.py' 2 | item4 = 'test_recursive_bases' 3 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/g.py: -------------------------------------------------------------------------------- 1 | filename = 'reserved.py' 2 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/h.py: -------------------------------------------------------------------------------- 1 | item1 = '{{fileBasename}}' 2 | item2 = '{{ fileDirname}}' 3 | item3 = 'abc_{{ fileBasenameNoExtension }}' 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/i_base.py: -------------------------------------------------------------------------------- 1 | item1 = [1, 2] 2 | item2 = {'a': 0} 3 | item3 = True 4 | item4 = 'test' 5 | item_cfg = {'b': 1} 6 | item5 = {'cfg': item_cfg} 7 | item6 = {'cfg': item_cfg} 8 | 
-------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/i_child.py: -------------------------------------------------------------------------------- 1 | _base_ = './i_base.py' 2 | item_cfg = {'b': 2} 3 | item6 = {'cfg': item_cfg} 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/l.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./l1.py', './l2.yaml', './l3.json', './l4.py'] 2 | item3 = False 3 | item4 = 'test' 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/l1.py: -------------------------------------------------------------------------------- 1 | item1 = [1, 2] 2 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/l2.yaml: -------------------------------------------------------------------------------- 1 | item2: {'a': 0} 2 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/l3.json: -------------------------------------------------------------------------------- 1 | { 2 | "item3": true 3 | } 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/l4.py: -------------------------------------------------------------------------------- 1 | item5 = dict(a=0, b=1) 2 | item6 = [dict(a=0), dict(b=1)] 3 | item7 = dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])) 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/m.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./l1.py', './l2.yaml', './l3.json', 'a.py'] 2 | item3 = False 3 | item4 = 'test' 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/n.py: -------------------------------------------------------------------------------- 1 | test_item1 = [1, 2] 2 | bool_item2 = True 3 | str_item3 = 'test' 4 | dict_item4 = dict( 5 | a={ 6 | 'c/d': 'path/d', 7 | 'f': 's3//f', 8 | 6: '2333', 9 | '2333': 'number' 10 | }, 11 | b={'8': 543}, 12 | c={9: 678}, 13 | d={'a': 0}, 14 | f=dict(a='69')) 15 | dict_item5 = {'x/x': {'a.0': 233}} 16 | dict_list_item6 = {'x/x': [{'a.0': 1., 'b.0': 2.}, {'c/3': 3.}]} 17 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/o.json: -------------------------------------------------------------------------------- 1 | { 2 | "item1": "{{ fileDirname }}" 3 | } 4 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/p.yaml: -------------------------------------------------------------------------------- 1 | item1: '{{ fileDirname }}' 2 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/q.py: -------------------------------------------------------------------------------- 1 | custom_imports = dict(imports=['r'], allow_failed_imports=False) 2 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/r.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ["TEST_VALUE"] = 'test' 4 | 
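The config fixtures above (`base.py`, `d.py`, `delete.py`, `e.py`, `f.py`, `h.py`, ...) exercise `mmcv.Config`'s `_base_` inheritance, `_delete_` overrides, and predefined-variable substitution. A minimal sketch of how they resolve, assuming mmcv is installed and the working directory is the mmcv-1.3.0 repo root so the relative paths below exist:

```
from mmcv import Config

# d.py declares `_base_ = './base.py'` and overrides every item from base.py.
cfg_d = Config.fromfile('tests/data/config/d.py')
assert cfg_d.item1 == [2, 3] and cfg_d.item4 == 'test_base'

# delete.py's item2 carries '_delete_': True, so the inherited keys of item2
# are dropped before the override {'b': 0} is applied.
cfg_del = Config.fromfile('tests/data/config/delete.py')
assert cfg_del.item2 == {'b': 0}

# f.py chains bases recursively: f.py -> d.py -> base.py.
cfg_f = Config.fromfile('tests/data/config/f.py')
assert cfg_f.item1 == [2, 3] and cfg_f.item4 == 'test_recursive_bases'
```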
-------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/config/s.py: -------------------------------------------------------------------------------- 1 | item = [{'a': 0}, {'b': 0, 'c': 0}] 2 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/demo.lmdb/data.mdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/demo.lmdb/data.mdb -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/demo.lmdb/lock.mdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/demo.lmdb/lock.mdb -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/filelist.txt: -------------------------------------------------------------------------------- 1 | 1.jpg 2 | 2.jpg 3 | 3.jpg 4 | 4.jpg 5 | 5.jpg -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_ccattention/ccattention_input.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_ccattention/ccattention_input.bin -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_ccattention/ccattention_output.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_ccattention/ccattention_output.bin -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_psa_mask/psa_input.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_psa_mask/psa_input.bin -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_psa_mask/psa_output_collect.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_psa_mask/psa_output_collect.bin -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_psa_mask/psa_output_distribute.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_psa_mask/psa_output_distribute.bin -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_scan/1.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_scan/1.json -------------------------------------------------------------------------------- 
/mmcv-1.3.0/tests/data/for_scan/1.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_scan/1.txt -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_scan/2.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_scan/2.json -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_scan/2.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_scan/2.txt -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_scan/a.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_scan/a.bin -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_scan/sub/1.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_scan/sub/1.json -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/for_scan/sub/1.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/for_scan/sub/1.txt -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/gray_alpha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/gray_alpha.png -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/grayscale.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/grayscale.jpg -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/grayscale_dim3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/grayscale_dim3.jpg -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/mapping.txt: -------------------------------------------------------------------------------- 1 | 1 cat 2 | 2 dog cow 3 | 3 panda -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/model_zoo/deprecated.json: -------------------------------------------------------------------------------- 1 | { 2 | "train_old": "train", 3 | "test_old": "test" 4 | } 
-------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/model_zoo/mmcv_home/open_mmlab.json: -------------------------------------------------------------------------------- 1 | { 2 | "test": "test.pth", 3 | "val": "val.pth", 4 | "train_empty": "train.pth" 5 | } -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/model_zoo/mmcv_home/test.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/model_zoo/mmcv_home/test.pth -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/model_zoo/mmcv_home/val.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/model_zoo/mmcv_home/val.pth -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/model_zoo/open_mmlab.json: -------------------------------------------------------------------------------- 1 | { 2 | "train": "https://localhost/train.pth", 3 | "test": "https://localhost/test.pth" 4 | } -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/optflow.flo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/optflow.flo -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/optflow_concat0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/optflow_concat0.jpg -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/optflow_concat1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/optflow_concat1.jpg -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/palette.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/palette.gif -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/0.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/1.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/2.npy: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/2.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/3.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/4.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad0_0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad0_0.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad0_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad0_1.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad0_2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad0_2.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad0_3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad0_3.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad0_4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad0_4.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad_0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad_0.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad_1.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad_2.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad_2.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad_3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad_3.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/pad_4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/pad_4.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/scale_0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/scale_0.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/scale_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/scale_1.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/scale_2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/scale_2.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/scale_3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/scale_3.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/patches/scale_4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/patches/scale_4.npy -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/test.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/test.mp4 -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/data/uint16-5channel.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmcv-1.3.0/tests/data/uint16-5channel.tif -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_cnn/test_conv2d_adaptive_padding.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from mmcv.cnn.bricks import 
Conv2dAdaptivePadding 4 | 5 | 6 | def test_conv2d_samepadding(): 7 | # test Conv2dAdaptivePadding with stride=1 8 | inputs = torch.rand((1, 3, 28, 28)) 9 | conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1) 10 | output = conv(inputs) 11 | assert output.shape == inputs.shape 12 | 13 | inputs = torch.rand((1, 3, 13, 13)) 14 | conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1) 15 | output = conv(inputs) 16 | assert output.shape == inputs.shape 17 | 18 | # test Conv2dAdaptivePadding with stride=2 19 | inputs = torch.rand((1, 3, 28, 28)) 20 | conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=2) 21 | output = conv(inputs) 22 | assert output.shape == torch.Size([1, 3, 14, 14]) 23 | 24 | inputs = torch.rand((1, 3, 13, 13)) 25 | conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=2) 26 | output = conv(inputs) 27 | assert output.shape == torch.Size([1, 3, 7, 7]) 28 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_cnn/test_fuse_conv_bn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from mmcv.cnn import ConvModule, fuse_conv_bn 5 | 6 | 7 | def test_fuse_conv_bn(): 8 | inputs = torch.rand((1, 3, 5, 5)) 9 | modules = nn.ModuleList() 10 | modules.append(nn.BatchNorm2d(3)) 11 | modules.append(ConvModule(3, 5, 3, norm_cfg=dict(type='BN'))) 12 | modules.append(ConvModule(5, 5, 3, norm_cfg=dict(type='BN'))) 13 | modules = nn.Sequential(*modules) 14 | fused_modules = fuse_conv_bn(modules) 15 | assert torch.equal(modules(inputs), fused_modules(inputs)) 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_cnn/test_hsigmoid.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | 4 | from mmcv.cnn.bricks import HSigmoid 5 | 6 | 7 | def test_hsigmoid(): 8 | # test assertion divisor can not be zero 9 | with pytest.raises(AssertionError): 10 | HSigmoid(divisor=0) 11 | 12 | # test with default parameters 13 | act = HSigmoid() 14 | input_shape = torch.Size([1, 3, 64, 64]) 15 | input = torch.randn(input_shape) 16 | output = act(input) 17 | expected_output = torch.min( 18 | torch.max((input + 1) / 2, torch.zeros(input_shape)), 19 | torch.ones(input_shape)) 20 | # test output shape 21 | assert output.shape == expected_output.shape 22 | # test output value 23 | assert torch.equal(output, expected_output) 24 | 25 | # test with designated parameters 26 | act = HSigmoid(3, 6, 0, 1) 27 | input_shape = torch.Size([1, 3, 64, 64]) 28 | input = torch.randn(input_shape) 29 | output = act(input) 30 | expected_output = torch.min( 31 | torch.max((input + 3) / 6, torch.zeros(input_shape)), 32 | torch.ones(input_shape)) 33 | # test output shape 34 | assert output.shape == expected_output.shape 35 | # test output value 36 | assert torch.equal(output, expected_output) 37 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_cnn/test_hswish.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn.functional import relu6 3 | 4 | from mmcv.cnn.bricks import HSwish 5 | 6 | 7 | def test_hswish(): 8 | # test inplace 9 | act = HSwish(inplace=True) 10 | assert act.act.inplace 11 | act = HSwish() 12 | assert not act.act.inplace 13 | 14 | input = torch.randn(1, 3, 64, 64) 15 | expected_output = input * relu6(input + 3) / 6 16 | output = 
act(input) 17 | # test output shape 18 | assert output.shape == expected_output.shape 19 | # test output value 20 | assert torch.equal(output, expected_output) 21 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_cnn/test_scale.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from mmcv.cnn.bricks import Scale 4 | 5 | 6 | def test_scale(): 7 | # test default scale 8 | scale = Scale() 9 | assert scale.scale.data == 1. 10 | assert scale.scale.dtype == torch.float 11 | x = torch.rand(1, 3, 64, 64) 12 | output = scale(x) 13 | assert output.shape == (1, 3, 64, 64) 14 | 15 | # test given scale 16 | scale = Scale(10.) 17 | assert scale.scale.data == 10. 18 | assert scale.scale.dtype == torch.float 19 | x = torch.rand(1, 3, 64, 64) 20 | output = scale(x) 21 | assert output.shape == (1, 3, 64, 64) 22 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_cnn/test_swish.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn.functional import sigmoid 3 | 4 | from mmcv.cnn.bricks import Swish 5 | 6 | 7 | def test_swish(): 8 | act = Swish() 9 | input = torch.randn(1, 3, 64, 64) 10 | expected_output = input * sigmoid(input) 11 | output = act(input) 12 | # test output shape 13 | assert output.shape == expected_output.shape 14 | # test output value 15 | assert torch.equal(output, expected_output) 16 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_ops/test_carafe.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import gradcheck 3 | 4 | 5 | class TestCarafe(object): 6 | 7 | def test_carafe_naive_gradcheck(self): 8 | if not torch.cuda.is_available(): 9 | return 10 | from mmcv.ops import CARAFENaive 11 | feat = torch.randn( 12 | 2, 64, 3, 3, requires_grad=True, device='cuda').double() 13 | mask = torch.randn( 14 | 2, 100, 6, 6, requires_grad=True, 15 | device='cuda').sigmoid().double() 16 | gradcheck(CARAFENaive(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4) 17 | 18 | def test_carafe_gradcheck(self): 19 | if not torch.cuda.is_available(): 20 | return 21 | from mmcv.ops import CARAFE 22 | feat = torch.randn( 23 | 2, 64, 3, 3, requires_grad=True, device='cuda').double() 24 | mask = torch.randn( 25 | 2, 100, 6, 6, requires_grad=True, 26 | device='cuda').sigmoid().double() 27 | gradcheck(CARAFE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4) 28 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_ops/test_fused_bias_leakyrelu.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | from torch.autograd import gradcheck, gradgradcheck 4 | 5 | 6 | class TestFusedBiasLeakyReLU(object): 7 | 8 | @classmethod 9 | def setup_class(cls): 10 | if not torch.cuda.is_available(): 11 | return 12 | cls.input_tensor = torch.randn((2, 2, 2, 2), requires_grad=True).cuda() 13 | cls.bias = torch.zeros(2, requires_grad=True).cuda() 14 | 15 | @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda') 16 | def test_gradient(self): 17 | 18 | from mmcv.ops import FusedBiasLeakyReLU 19 | gradcheck( 20 | FusedBiasLeakyReLU(2).cuda(), 21 | self.input_tensor, 22 | eps=1e-4, 23 | atol=1e-3) 24 | 25 | @pytest.mark.skipif(not torch.cuda.is_available(), 
reason='requires cuda') 26 | def test_gradgradient(self): 27 | 28 | from mmcv.ops import FusedBiasLeakyReLU 29 | gradgradcheck( 30 | FusedBiasLeakyReLU(2).cuda(), 31 | self.input_tensor, 32 | eps=1e-4, 33 | atol=1e-3) 34 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_ops/test_info.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TestInfo(object): 5 | 6 | def test_info(self): 7 | if not torch.cuda.is_available(): 8 | return 9 | from mmcv.ops import get_compiler_version, get_compiling_cuda_version 10 | cv = get_compiler_version() 11 | ccv = get_compiling_cuda_version() 12 | assert cv is not None 13 | assert ccv is not None 14 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_ops/test_masked_conv2d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TestMaskedConv2d(object): 5 | 6 | def test_masked_conv2d(self): 7 | if not torch.cuda.is_available(): 8 | return 9 | from mmcv.ops import MaskedConv2d 10 | input = torch.randn(1, 3, 16, 16, requires_grad=True, device='cuda') 11 | mask = torch.randn(1, 16, 16, requires_grad=True, device='cuda') 12 | conv = MaskedConv2d(3, 3, 3).cuda() 13 | output = conv(input, mask) 14 | assert output is not None 15 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_ops/test_upfirdn2d.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | from torch.autograd import gradcheck, gradgradcheck 4 | 5 | 6 | class TestUpFirDn2d(object): 7 | """Unit test for UpFirDn2d. 8 | 9 | Here, we just test the basic case of the upsample version. More general tests 10 | will be included in other unit tests for the UpFirDnUpsample and 11 | UpFirDnDownSample modules.
12 | """ 13 | 14 | @classmethod 15 | def setup_class(cls): 16 | kernel_1d = torch.tensor([1., 3., 3., 1.]) 17 | cls.kernel = kernel_1d[:, None] * kernel_1d[None, :] 18 | cls.kernel = cls.kernel / cls.kernel.sum() 19 | cls.factor = 2 20 | pad = cls.kernel.shape[0] - cls.factor 21 | cls.pad = ((pad + 1) // 2 + cls.factor - 1, pad // 2) 22 | 23 | cls.input_tensor = torch.randn((2, 3, 4, 4), requires_grad=True) 24 | 25 | @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda') 26 | def test_upfirdn2d(self): 27 | from mmcv.ops import upfirdn2d 28 | 29 | gradcheck( 30 | upfirdn2d, 31 | (self.input_tensor.cuda(), self.kernel.type_as( 32 | self.input_tensor).cuda(), self.factor, 1, self.pad), 33 | eps=1e-4, 34 | atol=1e-3) 35 | 36 | gradgradcheck( 37 | upfirdn2d, 38 | (self.input_tensor.cuda(), self.kernel.type_as( 39 | self.input_tensor).cuda(), self.factor, 1, self.pad), 40 | eps=1e-4, 41 | atol=1e-3) 42 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_runner/test_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | 4 | import numpy as np 5 | import torch 6 | 7 | from mmcv.runner import set_random_seed 8 | 9 | 10 | def test_set_random_seed(): 11 | set_random_seed(0) 12 | a_random = random.randint(0, 10) 13 | a_np_random = np.random.rand(2, 2) 14 | a_torch_random = torch.rand(2, 2) 15 | assert torch.backends.cudnn.deterministic is False 16 | assert torch.backends.cudnn.benchmark is False 17 | assert os.environ['PYTHONHASHSEED'] == str(0) 18 | 19 | set_random_seed(0, True) 20 | b_random = random.randint(0, 10) 21 | b_np_random = np.random.rand(2, 2) 22 | b_torch_random = torch.rand(2, 2) 23 | assert torch.backends.cudnn.deterministic is True 24 | assert torch.backends.cudnn.benchmark is False 25 | 26 | assert a_random == b_random 27 | assert np.equal(a_np_random, b_np_random).all() 28 | assert torch.equal(a_torch_random, b_torch_random) 29 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_utils/test_env.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import pytest 4 | 5 | import mmcv 6 | 7 | 8 | def test_collect_env(): 9 | try: 10 | import torch # noqa: F401 11 | except ModuleNotFoundError: 12 | pytest.skip('skipping tests that require PyTorch') 13 | 14 | from mmcv.utils import collect_env 15 | env_info = collect_env() 16 | expected_keys = [ 17 | 'sys.platform', 'Python', 'CUDA available', 'PyTorch', 18 | 'PyTorch compiling details', 'OpenCV', 'MMCV', 'MMCV Compiler', 19 | 'MMCV CUDA Compiler' 20 | ] 21 | for key in expected_keys: 22 | assert key in env_info 23 | 24 | if env_info['CUDA available']: 25 | for key in ['CUDA_HOME', 'NVCC']: 26 | assert key in env_info 27 | 28 | if sys.platform != 'win32': 29 | assert 'GCC' in env_info 30 | 31 | assert env_info['sys.platform'] == sys.platform 32 | assert env_info['Python'] == sys.version.replace('\n', '') 33 | assert env_info['MMCV'] == mmcv.__version__ 34 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_utils/test_timer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
2 | import time 3 | 4 | import pytest 5 | 6 | import mmcv 7 | 8 | 9 | def test_timer_init(): 10 | timer = mmcv.Timer(start=False) 11 | assert not timer.is_running 12 | timer.start() 13 | assert timer.is_running 14 | timer = mmcv.Timer() 15 | assert timer.is_running 16 | 17 | 18 | def test_timer_run(): 19 | timer = mmcv.Timer() 20 | time.sleep(1) 21 | assert abs(timer.since_start() - 1) < 1e-2 22 | time.sleep(1) 23 | assert abs(timer.since_last_check() - 1) < 1e-2 24 | assert abs(timer.since_start() - 2) < 1e-2 25 | timer = mmcv.Timer(False) 26 | with pytest.raises(mmcv.TimerError): 27 | timer.since_start() 28 | with pytest.raises(mmcv.TimerError): 29 | timer.since_last_check() 30 | 31 | 32 | def test_timer_context(capsys): 33 | with mmcv.Timer(): 34 | time.sleep(1) 35 | out, _ = capsys.readouterr() 36 | assert abs(float(out) - 1) < 1e-2 37 | with mmcv.Timer(print_tmpl='time: {:.1f}s'): 38 | time.sleep(1) 39 | out, _ = capsys.readouterr() 40 | assert out == 'time: 1.0s\n' 41 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_utils/test_version_utils.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import patch 2 | 3 | from mmcv import digit_version, get_git_hash, parse_version_info 4 | 5 | 6 | def test_digit_version(): 7 | assert digit_version('0.2.16') == (0, 2, 16) 8 | assert digit_version('1.2.3') == (1, 2, 3) 9 | assert digit_version('1.2.3rc0') == (1, 2, 2, 0) 10 | assert digit_version('1.2.3rc1') == (1, 2, 2, 1) 11 | assert digit_version('1.0rc0') == (1, -1, 0) 12 | 13 | 14 | def test_parse_version_info(): 15 | assert parse_version_info('0.2.16') == (0, 2, 16) 16 | assert parse_version_info('1.2.3') == (1, 2, 3) 17 | assert parse_version_info('1.2.3rc0') == (1, 2, 3, 'rc0') 18 | assert parse_version_info('1.2.3rc1') == (1, 2, 3, 'rc1') 19 | assert parse_version_info('1.0rc0') == (1, 0, 'rc0') 20 | 21 | 22 | def _mock_cmd_success(cmd): 23 | return '3b46d33e90c397869ad5103075838fdfc9812aa0'.encode('ascii') 24 | 25 | 26 | def _mock_cmd_fail(cmd): 27 | raise OSError 28 | 29 | 30 | def test_get_git_hash(): 31 | with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_success): 32 | assert get_git_hash() == '3b46d33e90c397869ad5103075838fdfc9812aa0' 33 | assert get_git_hash(digits=6) == '3b46d3' 34 | assert get_git_hash(digits=100) == get_git_hash() 35 | with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_fail): 36 | assert get_git_hash() == 'unknown' 37 | assert get_git_hash(fallback='n/a') == 'n/a' 38 | -------------------------------------------------------------------------------- /mmcv-1.3.0/tests/test_visualization.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
2 | import numpy as np 3 | import pytest 4 | 5 | import mmcv 6 | 7 | 8 | def test_color(): 9 | assert mmcv.color_val(mmcv.Color.blue) == (255, 0, 0) 10 | assert mmcv.color_val('green') == (0, 255, 0) 11 | assert mmcv.color_val((1, 2, 3)) == (1, 2, 3) 12 | assert mmcv.color_val(100) == (100, 100, 100) 13 | assert mmcv.color_val(np.zeros(3, dtype=np.int)) == (0, 0, 0) 14 | with pytest.raises(TypeError): 15 | mmcv.color_val([255, 255, 255]) 16 | with pytest.raises(TypeError): 17 | mmcv.color_val(1.0) 18 | with pytest.raises(AssertionError): 19 | mmcv.color_val((0, 0, 500)) 20 | -------------------------------------------------------------------------------- /mmseg-v0.11/README.md: -------------------------------------------------------------------------------- 1 | Based on [MMSegmentation-V0.11.0](https://github.com/open-mmlab/mmsegmentation/releases/tag/v0.11.0). Documentation of MMSeg: https://mmsegmentation.readthedocs.io/ 2 | 3 | ## Installation 4 | 5 | ``` 6 | pip install addict; 7 | pip install yapf; 8 | pip install cython; 9 | pip install opencv-python; 10 | pip install pytest-runner; 11 | pip install terminaltables==3.1.0; 12 | pip install mmpycocotools; 13 | cd mmcv-1.3.0; 14 | MMCV_WITH_OPS=1 pip install -e . --disable-pip-version-check; 15 | cd ../mmseg-v0.11; pip install -e . --disable-pip-version-check; 16 | ``` 17 | 18 | Or you can refer to [get_started.md](docs/get_started.md#installation) for installation and dataset preparation. 19 | 20 | ## Get Started 21 | 22 | ### Training 23 | 24 | To train Semantic-FPN-based Hire-MLP-Small on ADE20K on a single node with 8 GPUs: 25 | 26 | ``` 27 | cd mmseg-v0.11/; python -m torch.distributed.launch --nproc_per_node=8 tools/train.py configs/sem_fpn/hire_mlp_small_512x512_ade20k.py --gpus 8 --launcher pytorch --work-dir /your_path_to// 28 | ``` 29 | 30 | ### Evaluation 31 | 32 | ``` 33 | python tools/test.py configs/sem_fpn/hire_mlp_small_512x512_ade20k.py /your_path_to/checkpoint.pth --eval mIoU 34 | ``` 35 | 36 | Alternatively, see [train.md](docs/train.md) and [inference.md](docs/inference.md) for the basic usage of MMSegmentation.
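A minimal multi-GPU evaluation sketch, mirroring the distributed training command above and assuming the standard MMSegmentation `tools/test.py` interface (config and checkpoint as positional arguments); the checkpoint path is a placeholder:

```
cd mmseg-v0.11/; python -m torch.distributed.launch --nproc_per_node=8 tools/test.py configs/sem_fpn/hire_mlp_small_512x512_ade20k.py /your_path_to/checkpoint.pth --launcher pytorch --eval mIoU
```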
37 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/datasets/cityscapes_769x769.py: -------------------------------------------------------------------------------- 1 | _base_ = './cityscapes.py' 2 | img_norm_cfg = dict( 3 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 4 | crop_size = (769, 769) 5 | train_pipeline = [ 6 | dict(type='LoadImageFromFile'), 7 | dict(type='LoadAnnotations'), 8 | dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), 9 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 10 | dict(type='RandomFlip', prob=0.5), 11 | dict(type='PhotoMetricDistortion'), 12 | dict(type='Normalize', **img_norm_cfg), 13 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 14 | dict(type='DefaultFormatBundle'), 15 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 16 | ] 17 | test_pipeline = [ 18 | dict(type='LoadImageFromFile'), 19 | dict( 20 | type='MultiScaleFlipAug', 21 | img_scale=(2049, 1025), 22 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 23 | flip=False, 24 | transforms=[ 25 | dict(type='Resize', keep_ratio=True), 26 | dict(type='RandomFlip'), 27 | dict(type='Normalize', **img_norm_cfg), 28 | dict(type='ImageToTensor', keys=['img']), 29 | dict(type='Collect', keys=['img']), 30 | ]) 31 | ] 32 | data = dict( 33 | train=dict(pipeline=train_pipeline), 34 | val=dict(pipeline=test_pipeline), 35 | test=dict(pipeline=test_pipeline)) 36 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/datasets/pascal_voc12_aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './pascal_voc12.py' 2 | # dataset settings 3 | data = dict( 4 | train=dict( 5 | ann_dir=['SegmentationClass', 'SegmentationClassAug'], 6 | split=[ 7 | 'ImageSets/Segmentation/train.txt', 8 | 'ImageSets/Segmentation/aug.txt' 9 | ])) 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # yapf:disable 2 | log_config = dict( 3 | interval=50, 4 | hooks=[ 5 | dict(type='TextLoggerHook', by_epoch=False), 6 | # dict(type='TensorboardLoggerHook') 7 | ]) 8 | # yapf:enable 9 | dist_params = dict(backend='nccl') 10 | log_level = 'INFO' 11 | load_from = None 12 | resume_from = None 13 | workflow = [('train', 1)] 14 | cudnn_benchmark = True 15 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/apcnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='APCHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | pool_scales=(1, 2, 3, 6), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=dict(type='SyncBN', requires_grad=True), 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | 
auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/ccnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='CCHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | recurrence=2, 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/cgnet.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='CGNet', 7 | norm_cfg=norm_cfg, 8 | in_channels=3, 9 | num_channels=(32, 64, 128), 10 | num_blocks=(3, 21), 11 | dilations=(2, 4), 12 | reductions=(8, 16)), 13 | decode_head=dict( 14 | type='FCNHead', 15 | in_channels=256, 16 | in_index=2, 17 | channels=256, 18 | num_convs=0, 19 | concat_input=False, 20 | dropout_ratio=0, 21 | num_classes=19, 22 | norm_cfg=norm_cfg, 23 | loss_decode=dict( 24 | type='CrossEntropyLoss', 25 | use_sigmoid=False, 26 | loss_weight=1.0, 27 | class_weight=[ 28 | 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352, 29 | 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905, 30 | 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587, 31 | 10.396974, 10.055647 32 | ])), 33 | # model training and testing settings 34 | train_cfg=dict(sampler=None), 35 | test_cfg=dict(mode='whole')) 36 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/danet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 
1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='DAHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | pam_channels=64, 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/deeplabv3_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='ASPPHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | dilations=(1, 12, 24, 36), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/dmnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='DMHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | filter_sizes=(1, 3, 5, 7), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=dict(type='SyncBN', requires_grad=True), 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', 
use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/dnl_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='DNLHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | dropout_ratio=0.1, 23 | reduction=2, 24 | use_scale=True, 25 | mode='embedded_gaussian', 26 | num_classes=19, 27 | norm_cfg=norm_cfg, 28 | align_corners=False, 29 | loss_decode=dict( 30 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 31 | auxiliary_head=dict( 32 | type='FCNHead', 33 | in_channels=1024, 34 | in_index=2, 35 | channels=256, 36 | num_convs=1, 37 | concat_input=False, 38 | dropout_ratio=0.1, 39 | num_classes=19, 40 | norm_cfg=norm_cfg, 41 | align_corners=False, 42 | loss_decode=dict( 43 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 44 | # model training and testing settings 45 | train_cfg=dict(), 46 | test_cfg=dict(mode='whole')) 47 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/fcn_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='FCNHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | num_convs=2, 23 | concat_input=True, 24 | dropout_ratio=0.1, 25 | num_classes=19, 26 | norm_cfg=norm_cfg, 27 | align_corners=False, 28 | loss_decode=dict( 29 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 30 | auxiliary_head=dict( 31 | type='FCNHead', 32 | in_channels=1024, 33 | in_index=2, 34 | channels=256, 35 | num_convs=1, 36 | concat_input=False, 37 | dropout_ratio=0.1, 38 | num_classes=19, 39 | norm_cfg=norm_cfg, 40 | align_corners=False, 41 | loss_decode=dict( 42 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 43 | # model training and testing settings 44 | train_cfg=dict(), 45 | test_cfg=dict(mode='whole')) 46 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/fpn_r50.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 1, 1), 12 | strides=(1, 2, 2, 2), 13 | norm_cfg=norm_cfg, 14 | 
norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | neck=dict( 18 | type='FPN', 19 | in_channels=[256, 512, 1024, 2048], 20 | out_channels=256, 21 | num_outs=4), 22 | decode_head=dict( 23 | type='FPNHead', 24 | in_channels=[256, 256, 256, 256], 25 | in_index=[0, 1, 2, 3], 26 | feature_strides=[4, 8, 16, 32], 27 | channels=128, 28 | dropout_ratio=0.1, 29 | num_classes=19, 30 | norm_cfg=norm_cfg, 31 | align_corners=False, 32 | loss_decode=dict( 33 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 34 | # model training and testing settings 35 | train_cfg=dict(), 36 | test_cfg=dict(mode='whole')) 37 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/lraspp_m-v3-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='MobileNetV3', 7 | arch='large', 8 | out_indices=(1, 3, 16), 9 | norm_cfg=norm_cfg), 10 | decode_head=dict( 11 | type='LRASPPHead', 12 | in_channels=(16, 24, 960), 13 | in_index=(0, 1, 2), 14 | channels=128, 15 | input_transform='multiple_select', 16 | dropout_ratio=0.1, 17 | num_classes=19, 18 | norm_cfg=norm_cfg, 19 | act_cfg=dict(type='ReLU'), 20 | align_corners=False, 21 | loss_decode=dict( 22 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 23 | # model training and testing settings 24 | train_cfg=dict(), 25 | test_cfg=dict(mode='whole')) 26 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/pspnet_r50-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 2, 4), 12 | strides=(1, 2, 1, 1), 13 | norm_cfg=norm_cfg, 14 | norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='PSPHead', 19 | in_channels=2048, 20 | in_index=3, 21 | channels=512, 22 | pool_scales=(1, 2, 3, 6), 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/models/upernet_r50.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | pretrained='open-mmlab://resnet50_v1c', 6 | backbone=dict( 7 | type='ResNetV1c', 8 | depth=50, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | dilations=(1, 1, 1, 1), 12 | strides=(1, 2, 2, 2), 13 | norm_cfg=norm_cfg, 14 | 
norm_eval=False, 15 | style='pytorch', 16 | contract_dilation=True), 17 | decode_head=dict( 18 | type='UPerHead', 19 | in_channels=[256, 512, 1024, 2048], 20 | in_index=[0, 1, 2, 3], 21 | pool_scales=(1, 2, 3, 6), 22 | channels=512, 23 | dropout_ratio=0.1, 24 | num_classes=19, 25 | norm_cfg=norm_cfg, 26 | align_corners=False, 27 | loss_decode=dict( 28 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 29 | auxiliary_head=dict( 30 | type='FCNHead', 31 | in_channels=1024, 32 | in_index=2, 33 | channels=256, 34 | num_convs=1, 35 | concat_input=False, 36 | dropout_ratio=0.1, 37 | num_classes=19, 38 | norm_cfg=norm_cfg, 39 | align_corners=False, 40 | loss_decode=dict( 41 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), 42 | # model training and testing settings 43 | train_cfg=dict(), 44 | test_cfg=dict(mode='whole')) 45 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=160000) 8 | checkpoint_config = dict(by_epoch=False, interval=16000) 9 | evaluation = dict(interval=16000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/schedules/schedule_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=20000) 8 | checkpoint_config = dict(by_epoch=False, interval=2000) 9 | evaluation = dict(interval=2000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/schedules/schedule_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=40000) 8 | checkpoint_config = dict(by_epoch=False, interval=4000) 9 | evaluation = dict(interval=4000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/configs/_base_/schedules/schedule_80k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=80000) 8 | checkpoint_config = dict(by_epoch=False, interval=8000) 9 | evaluation = dict(interval=8000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/__init__.py: 
-------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from .version import __version__, version_info 4 | 5 | MMCV_MIN = '1.1.4' 6 | MMCV_MAX = '1.3.0' 7 | 8 | 9 | def digit_version(version_str): 10 | digit_version = [] 11 | for x in version_str.split('.'): 12 | if x.isdigit(): 13 | digit_version.append(int(x)) 14 | elif x.find('rc') != -1: 15 | patch_version = x.split('rc') 16 | digit_version.append(int(patch_version[0]) - 1) 17 | digit_version.append(int(patch_version[1])) 18 | return digit_version 19 | 20 | 21 | mmcv_min_version = digit_version(MMCV_MIN) 22 | mmcv_max_version = digit_version(MMCV_MAX) 23 | mmcv_version = digit_version(mmcv.__version__) 24 | 25 | 26 | assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \ 27 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 28 | f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.' 29 | 30 | __all__ = ['__version__', 'version_info'] 31 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import inference_segmentor, init_segmentor, show_result_pyplot 2 | from .test import multi_gpu_test, single_gpu_test 3 | from .train import get_root_logger, set_random_seed, train_segmentor 4 | 5 | __all__ = [ 6 | 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', 7 | 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', 8 | 'show_result_pyplot' 9 | ] 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluation import * # noqa: F401, F403 2 | from .seg import * # noqa: F401, F403 3 | from .utils import * # noqa: F401, F403 4 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import get_classes, get_palette 2 | from .eval_hooks import DistEvalHook, EvalHook 3 | from .metrics import eval_metrics, mean_dice, mean_iou 4 | 5 | __all__ = [ 6 | 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'eval_metrics', 7 | 'get_classes', 'get_palette' 8 | ] 9 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/seg/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_pixel_sampler 2 | from .sampler import BasePixelSampler, OHEMPixelSampler 3 | 4 | __all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] 5 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/seg/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | PIXEL_SAMPLERS = Registry('pixel sampler') 4 | 5 | 6 | def build_pixel_sampler(cfg, **default_args): 7 | """Build pixel sampler for segmentation map.""" 8 | return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) 9 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/seg/sampler/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_pixel_sampler 
import BasePixelSampler 2 | from .ohem_pixel_sampler import OHEMPixelSampler 3 | 4 | __all__ = ['BasePixelSampler', 'OHEMPixelSampler'] 5 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/seg/sampler/base_pixel_sampler.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BasePixelSampler(metaclass=ABCMeta): 5 | """Base class of pixel sampler.""" 6 | 7 | def __init__(self, **kwargs): 8 | pass 9 | 10 | @abstractmethod 11 | def sample(self, seg_logit, seg_label): 12 | """Placeholder for sample function.""" 13 | pass 14 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .misc import add_prefix 2 | 3 | __all__ = ['add_prefix'] 4 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/core/utils/misc.py: -------------------------------------------------------------------------------- 1 | def add_prefix(inputs, prefix): 2 | """Add prefix for dict. 3 | 4 | Args: 5 | inputs (dict): The input dict with str keys. 6 | prefix (str): The prefix to add. 7 | 8 | Returns: 9 | 10 | dict: The dict with keys updated with ``prefix``. 11 | """ 12 | 13 | outputs = dict() 14 | for name, value in inputs.items(): 15 | outputs[f'{prefix}.{name}'] = value 16 | 17 | return outputs 18 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .ade import ADE20KDataset 2 | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset 3 | from .chase_db1 import ChaseDB1Dataset 4 | from .cityscapes import CityscapesDataset 5 | from .custom import CustomDataset 6 | from .dataset_wrappers import ConcatDataset, RepeatDataset 7 | from .drive import DRIVEDataset 8 | from .hrf import HRFDataset 9 | from .pascal_context import PascalContextDataset 10 | from .stare import STAREDataset 11 | from .voc import PascalVOCDataset 12 | 13 | __all__ = [ 14 | 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 15 | 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', 16 | 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', 17 | 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', 'STAREDataset' 18 | ] 19 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/datasets/chase_db1.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class ChaseDB1Dataset(CustomDataset): 9 | """Chase_db1 dataset. 10 | 11 | In segmentation map annotation for Chase_db1, 0 stands for background, 12 | which is included in 2 categories. ``reduce_zero_label`` is fixed to False. 13 | The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '_1stHO.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(ChaseDB1Dataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='_1stHO.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/datasets/drive.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class DRIVEDataset(CustomDataset): 9 | """DRIVE dataset. 10 | 11 | In segmentation map annotation for DRIVE, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '_manual1.png'. 15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(DRIVEDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='_manual1.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/datasets/hrf.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class HRFDataset(CustomDataset): 9 | """HRF dataset. 10 | 11 | In segmentation map annotation for HRF, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(HRFDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from .compose import Compose 2 | from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, 3 | Transpose, to_tensor) 4 | from .loading import LoadAnnotations, LoadImageFromFile 5 | from .test_time_aug import MultiScaleFlipAug 6 | from .transforms import (CLAHE, AdjustGamma, Normalize, Pad, 7 | PhotoMetricDistortion, RandomCrop, RandomFlip, 8 | RandomRotate, Rerange, Resize, RGB2Gray, SegRescale) 9 | 10 | __all__ = [ 11 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 12 | 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', 13 | 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 14 | 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', 15 | 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray' 16 | ] 17 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/datasets/stare.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class STAREDataset(CustomDataset): 9 | """STARE dataset. 10 | 11 | In segmentation map annotation for STARE, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '.ah.png'. 15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(STAREDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='.ah.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/datasets/voc.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class PascalVOCDataset(CustomDataset): 9 | """Pascal VOC dataset. 10 | 11 | Args: 12 | split (str): Split txt file for Pascal VOC. 
13 | """ 14 | 15 | CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 16 | 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 17 | 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 18 | 'train', 'tvmonitor') 19 | 20 | PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], 21 | [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], 22 | [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], 23 | [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], 24 | [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] 25 | 26 | def __init__(self, split, **kwargs): 27 | super(PascalVOCDataset, self).__init__( 28 | img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) 29 | assert osp.exists(self.img_dir) and self.split is not None 30 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, 3 | build_head, build_loss, build_segmentor) 4 | from .decode_heads import * # noqa: F401,F403 5 | from .losses import * # noqa: F401,F403 6 | from .necks import * # noqa: F401,F403 7 | from .segmentors import * # noqa: F401,F403 8 | 9 | __all__ = [ 10 | 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', 11 | 'build_head', 'build_loss', 'build_segmentor' 12 | ] 13 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .cgnet import CGNet 2 | from .fast_scnn import FastSCNN 3 | from .hrnet import HRNet 4 | from .mobilenet_v2 import MobileNetV2 5 | from .mobilenet_v3 import MobileNetV3 6 | from .resnest import ResNeSt 7 | from .resnet import ResNet, ResNetV1c, ResNetV1d 8 | from .resnext import ResNeXt 9 | from .unet import UNet 10 | from .hire_mlp import seg_hire_mlp_tiny, seg_hire_mlp_small, seg_hire_mlp_base, seg_hire_mlp_large 11 | 12 | __all__ = [ 13 | 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 'FastSCNN', 14 | 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', 15 | 'seg_hire_mlp_tiny', 'seg_hire_mlp_small', 'seg_hire_mlp_base', 'seg_hire_mlp_large' 16 | ] 17 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .ann_head import ANNHead 2 | from .apc_head import APCHead 3 | from .aspp_head import ASPPHead 4 | from .cc_head import CCHead 5 | from .da_head import DAHead 6 | from .dm_head import DMHead 7 | from .dnl_head import DNLHead 8 | from .ema_head import EMAHead 9 | from .enc_head import EncHead 10 | from .fcn_head import FCNHead 11 | from .fpn_head import FPNHead 12 | from .gc_head import GCHead 13 | from .lraspp_head import LRASPPHead 14 | from .nl_head import NLHead 15 | from .ocr_head import OCRHead 16 | from .point_head import PointHead 17 | from .psa_head import PSAHead 18 | from .psp_head import PSPHead 19 | from .sep_aspp_head import DepthwiseSeparableASPPHead 20 | from .sep_fcn_head import DepthwiseSeparableFCNHead 21 | from .uper_head import UPerHead 22 | 23 | __all__ = [ 24 | 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead', 25 | 'UPerHead', 
'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead', 26 | 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead', 27 | 'PointHead', 'APCHead', 'DMHead', 'LRASPPHead' 28 | ] 29 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/decode_heads/cc_head.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from ..builder import HEADS 4 | from .fcn_head import FCNHead 5 | 6 | try: 7 | from mmcv.ops import CrissCrossAttention 8 | except ModuleNotFoundError: 9 | CrissCrossAttention = None 10 | 11 | 12 | @HEADS.register_module() 13 | class CCHead(FCNHead): 14 | """CCNet: Criss-Cross Attention for Semantic Segmentation. 15 | 16 | This head is the implementation of `CCNet 17 | `_. 18 | 19 | Args: 20 | recurrence (int): Number of recurrence of Criss Cross Attention 21 | module. Default: 2. 22 | """ 23 | 24 | def __init__(self, recurrence=2, **kwargs): 25 | if CrissCrossAttention is None: 26 | raise RuntimeError('Please install mmcv-full for ' 27 | 'CrissCrossAttention ops') 28 | super(CCHead, self).__init__(num_convs=2, **kwargs) 29 | self.recurrence = recurrence 30 | self.cca = CrissCrossAttention(self.channels) 31 | 32 | def forward(self, inputs): 33 | """Forward function.""" 34 | x = self._transform_inputs(inputs) 35 | output = self.convs[0](x) 36 | for _ in range(self.recurrence): 37 | output = self.cca(output) 38 | output = self.convs[1](output) 39 | if self.concat_input: 40 | output = self.conv_cat(torch.cat([x, output], dim=1)) 41 | output = self.cls_seg(output) 42 | return output 43 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from .accuracy import Accuracy, accuracy 2 | from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, 3 | cross_entropy, mask_cross_entropy) 4 | from .lovasz_loss import LovaszLoss 5 | from .utils import reduce_loss, weight_reduce_loss, weighted_loss 6 | 7 | __all__ = [ 8 | 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', 9 | 'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', 10 | 'weight_reduce_loss', 'weighted_loss', 'LovaszLoss' 11 | ] 12 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .fpn import FPN 2 | 3 | __all__ = ['FPN'] 4 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/segmentors/__init__.py: -------------------------------------------------------------------------------- 1 | from .cascade_encoder_decoder import CascadeEncoderDecoder 2 | from .encoder_decoder import EncoderDecoder 3 | 4 | __all__ = ['EncoderDecoder', 'CascadeEncoderDecoder'] 5 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .inverted_residual import InvertedResidual, InvertedResidualV3 2 | from .make_divisible import make_divisible 3 | from .res_layer import ResLayer 4 | from .self_attention_block import SelfAttentionBlock 5 | from .up_conv_block import UpConvBlock 6 | 7 | __all__ = [ 8 | 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual', 9 
| 'UpConvBlock', 'InvertedResidualV3' 10 | ] 11 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/models/utils/make_divisible.py: -------------------------------------------------------------------------------- 1 | def make_divisible(value, divisor, min_value=None, min_ratio=0.9): 2 | """Make divisible function. 3 | 4 | This function rounds the channel number to the nearest value that can be 5 | divisible by the divisor. It is taken from the original tf repo. It ensures 6 | that all layers have a channel number that is divisible by divisor. It can 7 | be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa 8 | 9 | Args: 10 | value (int): The original channel number. 11 | divisor (int): The divisor to fully divide the channel number. 12 | min_value (int): The minimum value of the output channel. 13 | Default: None, means that the minimum value equal to the divisor. 14 | min_ratio (float): The minimum ratio of the rounded channel number to 15 | the original channel number. Default: 0.9. 16 | 17 | Returns: 18 | int: The modified output channel number. 19 | """ 20 | 21 | if min_value is None: 22 | min_value = divisor 23 | new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) 24 | # Make sure that round down does not go down by more than (1-min_ratio). 25 | if new_value < min_ratio * value: 26 | new_value += divisor 27 | return new_value 28 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .encoding import Encoding 2 | from .wrappers import Upsample, resize 3 | 4 | __all__ = ['Upsample', 'resize', 'Encoding'] 5 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .logger import get_root_logger 3 | 4 | __all__ = ['get_root_logger', 'collect_env'] 5 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import collect_env as collect_base_env 2 | from mmcv.utils import get_git_hash 3 | 4 | import mmseg 5 | 6 | 7 | def collect_env(): 8 | """Collect the information of the running environments.""" 9 | env_info = collect_base_env() 10 | env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' 11 | 12 | return env_info 13 | 14 | 15 | if __name__ == '__main__': 16 | for name, val in collect_env().items(): 17 | print('{}: {}'.format(name, val)) 18 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from mmcv.utils import get_logger 4 | 5 | 6 | def get_root_logger(log_file=None, log_level=logging.INFO): 7 | """Get the root logger. 8 | 9 | The logger will be initialized if it has not been initialized. By default a 10 | StreamHandler will be added. If `log_file` is specified, a FileHandler will 11 | also be added. The name of the root logger is the top-level package name, 12 | e.g., "mmseg". 13 | 14 | Args: 15 | log_file (str | None): The log filename. 
If specified, a FileHandler 16 | will be added to the root logger. 17 | log_level (int): The root logger level. Note that only the process of 18 | rank 0 is affected, while other processes will set the level to 19 | "Error" and be silent most of the time. 20 | 21 | Returns: 22 | logging.Logger: The root logger. 23 | """ 24 | 25 | logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level) 26 | 27 | return logger 28 | -------------------------------------------------------------------------------- /mmseg-v0.11/mmseg/version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | 3 | __version__ = '0.11.0' 4 | 5 | 6 | def parse_version_info(version_str): 7 | version_info = [] 8 | for x in version_str.split('.'): 9 | if x.isdigit(): 10 | version_info.append(int(x)) 11 | elif x.find('rc') != -1: 12 | patch_version = x.split('rc') 13 | version_info.append(int(patch_version[0])) 14 | version_info.append(f'rc{patch_version[1]}') 15 | return tuple(version_info) 16 | 17 | 18 | version_info = parse_version_info(__version__) 19 | -------------------------------------------------------------------------------- /mmseg-v0.11/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /mmseg-v0.11/requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/optional.txt 2 | -r requirements/runtime.txt 3 | -r requirements/tests.txt 4 | -------------------------------------------------------------------------------- /mmseg-v0.11/requirements/docs.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx 3 | sphinx_markdown_tables 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /mmseg-v0.11/requirements/optional.txt: -------------------------------------------------------------------------------- 1 | cityscapesscripts 2 | -------------------------------------------------------------------------------- /mmseg-v0.11/requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcv 2 | torch 3 | torchvision 4 | -------------------------------------------------------------------------------- /mmseg-v0.11/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | numpy 3 | terminaltables 4 | -------------------------------------------------------------------------------- /mmseg-v0.11/requirements/tests.txt: -------------------------------------------------------------------------------- 1 | codecov 2 | flake8 3 | interrogate 4 | isort==4.3.21 5 | pytest 6 | xdoctest>=0.10.0 7 | yapf 8 | -------------------------------------------------------------------------------- /mmseg-v0.11/resources/mmseg-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/resources/mmseg-logo.png 
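For reference, a short illustrative sketch (not a file in this repository) of what the two version-parsing helpers shown above return; the expected values follow directly from their parsing loops, assuming mmseg and a compatible mmcv are importable:

from mmseg import digit_version                 # defined in mmseg/__init__.py above
from mmseg.version import parse_version_info    # defined in mmseg/version.py above

# digit_version maps an 'rc' pre-release below the corresponding final release,
# so e.g. mmcv 1.3.0rc1 still satisfies the MMCV_MIN <= version <= MMCV_MAX assert.
assert digit_version('1.1.4') == [1, 1, 4]
assert digit_version('1.3.0rc1') == [1, 3, -1, 1]   # sorts before [1, 3, 0]

# parse_version_info keeps the 'rc' tag as a trailing string element instead.
assert parse_version_info('0.11.0') == (0, 11, 0)
assert parse_version_info('1.3.0rc1') == (1, 3, 0, 'rc1')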
-------------------------------------------------------------------------------- /mmseg-v0.11/resources/seg_demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/resources/seg_demo.gif -------------------------------------------------------------------------------- /mmseg-v0.11/setup.cfg: -------------------------------------------------------------------------------- 1 | [yapf] 2 | based_on_style = pep8 3 | blank_line_before_nested_class_or_def = true 4 | split_before_expression_after_opening_paren = true 5 | 6 | [isort] 7 | line_length = 79 8 | multi_line_output = 0 9 | known_standard_library = setuptools 10 | known_first_party = mmseg 11 | known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,oss2,pytest,scipy,terminaltables,torch 12 | no_lines_before = STDLIB,LOCALFOLDER 13 | default_section = THIRDPARTY 14 | -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/color.jpg -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/gray.jpg -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/gts/00000_gt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/gts/00000_gt.png -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/gts/00001_gt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/gts/00001_gt.png -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/gts/00002_gt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/gts/00002_gt.png -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/gts/00003_gt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/gts/00003_gt.png -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/gts/00004_gt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/gts/00004_gt.png 
-------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/imgs/00000_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/imgs/00000_img.jpg -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/imgs/00001_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/imgs/00001_img.jpg -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/imgs/00002_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/imgs/00002_img.jpg -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/imgs/00003_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/imgs/00003_img.jpg -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/imgs/00004_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/pseudo_dataset/imgs/00004_img.jpg -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/splits/train.txt: -------------------------------------------------------------------------------- 1 | 00000 2 | 00001 3 | 00002 4 | 00003 5 | -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/pseudo_dataset/splits/val.txt: -------------------------------------------------------------------------------- 1 | 00004 2 | -------------------------------------------------------------------------------- /mmseg-v0.11/tests/data/seg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggjy/Hire-Wave-MLP.pytorch/7ef7ddd784f97b6369d6fbaf913c4c1536949cf6/mmseg-v0.11/tests/data/seg.png -------------------------------------------------------------------------------- /mmseg-v0.11/tests/test_inference.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | import mmcv 4 | 5 | from mmseg.apis import inference_segmentor, init_segmentor 6 | 7 | 8 | def test_test_time_augmentation_on_cpu(): 9 | config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py' 10 | config = mmcv.Config.fromfile(config_file) 11 | 12 | # Remove pretrain model download for testing 13 | config.model.pretrained = None 14 | # Replace SyncBN with BN to inference on CPU 15 | norm_cfg = dict(type='BN', requires_grad=True) 16 | config.model.backbone.norm_cfg = norm_cfg 17 | config.model.decode_head.norm_cfg = norm_cfg 18 | config.model.auxiliary_head.norm_cfg = 
norm_cfg 19 | 20 | # Enable test time augmentation 21 | config.data.test.pipeline[1].flip = True 22 | 23 | checkpoint_file = None 24 | model = init_segmentor(config, checkpoint_file, device='cpu') 25 | 26 | img = mmcv.imread( 27 | osp.join(osp.dirname(__file__), 'data/color.jpg'), 'color') 28 | result = inference_segmentor(model, img) 29 | assert result[0].shape == (288, 512) 30 | -------------------------------------------------------------------------------- /mmseg-v0.11/tests/test_models/test_necks.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from mmseg.models import FPN 4 | 5 | 6 | def test_fpn(): 7 | in_channels = [256, 512, 1024, 2048] 8 | inputs = [ 9 | torch.randn(1, c, 56 // 2**i, 56 // 2**i) 10 | for i, c in enumerate(in_channels) 11 | ] 12 | 13 | fpn = FPN(in_channels, 256, len(in_channels)) 14 | outputs = fpn(inputs) 15 | assert outputs[0].shape == torch.Size([1, 256, 56, 56]) 16 | assert outputs[1].shape == torch.Size([1, 256, 28, 28]) 17 | assert outputs[2].shape == torch.Size([1, 256, 14, 14]) 18 | assert outputs[3].shape == torch.Size([1, 256, 7, 7]) 19 | -------------------------------------------------------------------------------- /mmseg-v0.11/tests/test_sampler.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | 4 | from mmseg.core import OHEMPixelSampler 5 | from mmseg.models.decode_heads import FCNHead 6 | 7 | 8 | def _context_for_ohem(): 9 | return FCNHead(in_channels=32, channels=16, num_classes=19) 10 | 11 | 12 | def test_ohem_sampler(): 13 | 14 | with pytest.raises(AssertionError): 15 | # seg_logit and seg_label must be of the same size 16 | sampler = OHEMPixelSampler(context=_context_for_ohem()) 17 | seg_logit = torch.randn(1, 19, 45, 45) 18 | seg_label = torch.randint(0, 19, size=(1, 1, 89, 89)) 19 | sampler.sample(seg_logit, seg_label) 20 | 21 | # test with thresh 22 | sampler = OHEMPixelSampler( 23 | context=_context_for_ohem(), thresh=0.7, min_kept=200) 24 | seg_logit = torch.randn(1, 19, 45, 45) 25 | seg_label = torch.randint(0, 19, size=(1, 1, 45, 45)) 26 | seg_weight = sampler.sample(seg_logit, seg_label) 27 | assert seg_weight.shape[0] == seg_logit.shape[0] 28 | assert seg_weight.shape[1:] == seg_logit.shape[2:] 29 | assert seg_weight.sum() > 200 30 | 31 | # test w.o thresh 32 | sampler = OHEMPixelSampler(context=_context_for_ohem(), min_kept=200) 33 | seg_logit = torch.randn(1, 19, 45, 45) 34 | seg_label = torch.randint(0, 19, size=(1, 1, 45, 45)) 35 | seg_weight = sampler.sample(seg_logit, seg_label) 36 | assert seg_weight.shape[0] == seg_logit.shape[0] 37 | assert seg_weight.shape[1:] == seg_logit.shape[2:] 38 | assert seg_weight.sum() == 200 39 | -------------------------------------------------------------------------------- /mmseg-v0.11/tests/test_utils/test_make_divisible.py: -------------------------------------------------------------------------------- 1 | from mmseg.models.utils import make_divisible 2 | 3 | 4 | def test_make_divisible(): 5 | # test with min_value = None 6 | assert make_divisible(10, 4) == 12 7 | assert make_divisible(9, 4) == 12 8 | assert make_divisible(1, 4) == 4 9 | 10 | # test with min_value = 8 11 | assert make_divisible(10, 4, 8) == 12 12 | assert make_divisible(9, 4, 8) == 12 13 | assert make_divisible(1, 4, 8) == 8 14 | -------------------------------------------------------------------------------- /mmseg-v0.11/tools/dist_test.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | CHECKPOINT=$2 5 | GPUS=$3 6 | PORT=${PORT:-29500} 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | PORT=${PORT:-29500} 6 | 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 10 | -------------------------------------------------------------------------------- /mmseg-v0.11/tools/print_config.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from mmcv import Config, DictAction 4 | 5 | 6 | def parse_args(): 7 | parser = argparse.ArgumentParser(description='Print the whole config') 8 | parser.add_argument('config', help='config file path') 9 | parser.add_argument( 10 | '--options', nargs='+', action=DictAction, help='arguments in dict') 11 | args = parser.parse_args() 12 | 13 | return args 14 | 15 | 16 | def main(): 17 | args = parse_args() 18 | 19 | cfg = Config.fromfile(args.config) 20 | if args.options is not None: 21 | cfg.merge_from_dict(args.options) 22 | print(f'Config:\n{cfg.pretty_text}') 23 | # dump config 24 | cfg.dump('example.py') 25 | 26 | 27 | if __name__ == '__main__': 28 | main() 29 | -------------------------------------------------------------------------------- /mmseg-v0.11/tools/publish_model.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import subprocess 3 | 4 | import torch 5 | 6 | 7 | def parse_args(): 8 | parser = argparse.ArgumentParser( 9 | description='Process a checkpoint to be published') 10 | parser.add_argument('in_file', help='input checkpoint filename') 11 | parser.add_argument('out_file', help='output checkpoint filename') 12 | args = parser.parse_args() 13 | return args 14 | 15 | 16 | def process_checkpoint(in_file, out_file): 17 | checkpoint = torch.load(in_file, map_location='cpu') 18 | # remove optimizer for smaller file size 19 | if 'optimizer' in checkpoint: 20 | del checkpoint['optimizer'] 21 | # if it is necessary to remove some sensitive data in checkpoint['meta'], 22 | # add the code here. 
23 | torch.save(checkpoint, out_file) 24 | sha = subprocess.check_output(['sha256sum', out_file]).decode() 25 | final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) 26 | subprocess.Popen(['mv', out_file, final_file]) 27 | 28 | 29 | def main(): 30 | args = parse_args() 31 | process_checkpoint(args.in_file, args.out_file) 32 | 33 | 34 | if __name__ == '__main__': 35 | main() 36 | -------------------------------------------------------------------------------- /mmseg-v0.11/tools/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-4} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-4} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /mmseg-v0.11/tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | GPUS=${GPUS:-4} 9 | GPUS_PER_NODE=${GPUS_PER_NODE:-4} 10 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 11 | SRUN_ARGS=${SRUN_ARGS:-""} 12 | PY_ARGS=${@:4} 13 | 14 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} 24 | -------------------------------------------------------------------------------- /timm/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ 3 | is_scriptable, is_exportable, set_scriptable, set_exportable, has_model_default_key, is_model_default_key, \ 4 | get_model_default_value, is_model_pretrained 5 | -------------------------------------------------------------------------------- /timm/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ 2 | rand_augment_transform, auto_augment_transform 3 | from .config import resolve_data_config 4 | from .constants import * 5 | from .dataset import ImageDataset, IterableImageDataset, AugMixDataset 6 | from .dataset_factory import create_dataset 7 | from .loader import create_loader 8 | from .mixup import Mixup, FastCollateMixup 9 | from .parsers import create_parser 10 | from .real_labels import RealLabelsImagenet 11 | from .transforms import * 12 | from .transforms_factory import create_transform -------------------------------------------------------------------------------- /timm/data/constants.py: -------------------------------------------------------------------------------- 1 | DEFAULT_CROP_PCT = 0.875 2 | 
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) 3 | IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) 4 | IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) 5 | IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) 6 | IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) 7 | IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) 8 | -------------------------------------------------------------------------------- /timm/data/dataset_factory.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .dataset import IterableImageDataset, ImageDataset 4 | 5 | 6 | def _search_split(root, split): 7 | # look for sub-folder with name of split in root and use that if it exists 8 | split_name = split.split('[')[0] 9 | try_root = os.path.join(root, split_name) 10 | if os.path.exists(try_root): 11 | return try_root 12 | if split_name == 'validation': 13 | try_root = os.path.join(root, 'val') 14 | if os.path.exists(try_root): 15 | return try_root 16 | return root 17 | 18 | 19 | def create_dataset(name, root, split='validation', search_split=True, is_training=False, batch_size=None, **kwargs): 20 | name = name.lower() 21 | if name.startswith('tfds'): 22 | ds = IterableImageDataset( 23 | root, parser=name, split=split, is_training=is_training, batch_size=batch_size, **kwargs) 24 | else: 25 | # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future 26 | kwargs.pop('repeats', 0) # FIXME currently only Iterable dataset support the repeat multiplier 27 | if search_split and os.path.isdir(root): 28 | root = _search_split(root, split) 29 | ds = ImageDataset(root, parser=name, **kwargs) 30 | return ds 31 | -------------------------------------------------------------------------------- /timm/data/parsers/__init__.py: -------------------------------------------------------------------------------- 1 | from .parser_factory import create_parser 2 | -------------------------------------------------------------------------------- /timm/data/parsers/class_map.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def load_class_map(filename, root=''): 5 | class_map_path = filename 6 | if not os.path.exists(class_map_path): 7 | class_map_path = os.path.join(root, filename) 8 | assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % filename 9 | class_map_ext = os.path.splitext(filename)[-1].lower() 10 | if class_map_ext == '.txt': 11 | with open(class_map_path) as f: 12 | class_to_idx = {v.strip(): k for k, v in enumerate(f)} 13 | else: 14 | assert False, 'Unsupported class map extension' 15 | return class_to_idx 16 | 17 | -------------------------------------------------------------------------------- /timm/data/parsers/constants.py: -------------------------------------------------------------------------------- 1 | IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') 2 | -------------------------------------------------------------------------------- /timm/data/parsers/parser.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | 3 | 4 | class Parser: 5 | def __init__(self): 6 | pass 7 | 8 | @abstractmethod 9 | def _filename(self, index, basename=False, absolute=False): 10 | pass 11 | 12 | def filename(self, index, basename=False, absolute=False): 13 | return self._filename(index, basename=basename, absolute=absolute) 14 | 15 | def filenames(self, basename=False, absolute=False): 16 | return [self._filename(index, 
basename=basename, absolute=absolute) for index in range(len(self))] 17 | 18 | -------------------------------------------------------------------------------- /timm/data/parsers/parser_factory.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .parser_image_folder import ParserImageFolder 4 | from .parser_image_tar import ParserImageTar 5 | from .parser_image_in_tar import ParserImageInTar 6 | 7 | 8 | def create_parser(name, root, split='train', **kwargs): 9 | name = name.lower() 10 | name = name.split('/', 2) 11 | prefix = '' 12 | if len(name) > 1: 13 | prefix = name[0] 14 | name = name[-1] 15 | 16 | # FIXME improve the selection right now just tfds prefix or fallback path, will need options to 17 | # explicitly select other options shortly 18 | if prefix == 'tfds': 19 | from .parser_tfds import ParserTfds # defer tensorflow import 20 | parser = ParserTfds(root, name, split=split, shuffle=kwargs.pop('shuffle', False), **kwargs) 21 | else: 22 | assert os.path.exists(root) 23 | # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder 24 | # FIXME support split here, in parser? 25 | if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar': 26 | parser = ParserImageInTar(root, **kwargs) 27 | else: 28 | parser = ParserImageFolder(root, **kwargs) 29 | return parser 30 | -------------------------------------------------------------------------------- /timm/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy 2 | from .jsd import JsdCrossEntropy 3 | from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel -------------------------------------------------------------------------------- /timm/loss/cross_entropy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class LabelSmoothingCrossEntropy(nn.Module): 7 | """ 8 | NLL loss with label smoothing. 9 | """ 10 | def __init__(self, smoothing=0.1): 11 | """ 12 | Constructor for the LabelSmoothing module. 13 | :param smoothing: label smoothing factor 14 | """ 15 | super(LabelSmoothingCrossEntropy, self).__init__() 16 | assert smoothing < 1.0 17 | self.smoothing = smoothing 18 | self.confidence = 1. 
--------------------------------------------------------------------------------
/timm/models/layers/helpers.py:
--------------------------------------------------------------------------------
""" Layer/Module Helpers

Hacked together by / Copyright 2020 Ross Wightman
"""
from itertools import repeat
import collections.abc


# From PyTorch internals
def _ntuple(n):
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple


def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    min_value = min_value or divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < round_limit * v:
        new_v += divisor
    return new_v

--------------------------------------------------------------------------------
/timm/models/layers/linear.py:
--------------------------------------------------------------------------------
""" Linear layer (alternate definition)
"""
import torch
import torch.nn.functional as F
from torch import nn as nn


class Linear(nn.Linear):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting
    weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case.
    """
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if torch.jit.is_scripting():
            bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
            return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias)
        else:
            return F.linear(input, self.weight, self.bias)
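A quick sketch (not part of the source) of what the helpers above return:

from timm.models.layers.helpers import to_2tuple, make_divisible

print(to_2tuple(3))                    # (3, 3)  -- scalars are repeated
print(to_2tuple((3, 5)))               # (3, 5)  -- iterables pass through unchanged
print(make_divisible(37))              # 40      -- rounded to the nearest multiple of 8
print(make_divisible(20, divisor=16))  # 32      -- rounding down to 16 would lose more than 10% of 20, so it bumps up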
13 | """ 14 | def forward(self, input: torch.Tensor) -> torch.Tensor: 15 | if torch.jit.is_scripting(): 16 | bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None 17 | return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) 18 | else: 19 | return F.linear(input, self.weight, self.bias) 20 | -------------------------------------------------------------------------------- /timm/models/layers/norm.py: -------------------------------------------------------------------------------- 1 | """ Normalization layers and wrappers 2 | """ 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | class GroupNorm(nn.GroupNorm): 9 | def __init__(self, num_channels, num_groups, eps=1e-5, affine=True): 10 | # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN 11 | super().__init__(num_groups, num_channels, eps=eps, affine=affine) 12 | 13 | def forward(self, x): 14 | return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) 15 | 16 | 17 | class LayerNorm2d(nn.LayerNorm): 18 | """ LayerNorm for channels of '2D' spatial BCHW tensors """ 19 | def __init__(self, num_channels): 20 | super().__init__(num_channels) 21 | 22 | def forward(self, x: torch.Tensor) -> torch.Tensor: 23 | return F.layer_norm( 24 | x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) 25 | -------------------------------------------------------------------------------- /timm/optim/__init__.py: -------------------------------------------------------------------------------- 1 | from .adabelief import AdaBelief 2 | from .adafactor import Adafactor 3 | from .adahessian import Adahessian 4 | from .adamp import AdamP 5 | from .adamw import AdamW 6 | from .lamb import Lamb 7 | from .lars import Lars 8 | from .lookahead import Lookahead 9 | from .madgrad import MADGRAD 10 | from .nadam import Nadam 11 | from .nvnovograd import NvNovoGrad 12 | from .radam import RAdam 13 | from .rmsprop_tf import RMSpropTF 14 | from .sgdp import SGDP 15 | from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs 16 | -------------------------------------------------------------------------------- /timm/scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | from .cosine_lr import CosineLRScheduler 2 | from .plateau_lr import PlateauLRScheduler 3 | from .step_lr import StepLRScheduler 4 | from .tanh_lr import TanhLRScheduler 5 | from .scheduler_factory import create_scheduler 6 | -------------------------------------------------------------------------------- /timm/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .agc import adaptive_clip_grad 2 | from .checkpoint_saver import CheckpointSaver 3 | from .clip_grad import dispatch_clip_grad 4 | from .cuda import ApexScaler, NativeScaler 5 | from .distributed import distribute_bn, reduce_tensor 6 | from .jit import set_jit_legacy 7 | from .log import setup_default_logging, FormatterNoInfo 8 | from .metrics import AverageMeter, accuracy 9 | from .misc import natural_key, add_bool_arg 10 | from .model import unwrap_model, get_state_dict 11 | from .model_ema import ModelEma, ModelEmaV2 12 | from .random import random_seed 13 | from .summary import update_summary, get_outdir 14 | -------------------------------------------------------------------------------- /timm/utils/clip_grad.py: 
--------------------------------------------------------------------------------
/timm/utils/distributed.py:
--------------------------------------------------------------------------------
""" Distributed training/validation utils

Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import distributed as dist

from .model import unwrap_model


def reduce_tensor(tensor, n):
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= n
    return rt


def distribute_bn(model, world_size, reduce=False):
    # ensure every node has the same running bn stats
    for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True):
        if ('running_mean' in bn_name) or ('running_var' in bn_name):
            if reduce:
                # average bn stats across whole group
                torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM)
                bn_buf /= float(world_size)
            else:
                # broadcast bn stats from rank 0 to whole group
                torch.distributed.broadcast(bn_buf, 0)

--------------------------------------------------------------------------------
/timm/utils/jit.py:
--------------------------------------------------------------------------------
""" JIT scripting/tracing utils

Hacked together by / Copyright 2020 Ross Wightman
"""
import torch


def set_jit_legacy():
    """ Set JIT executor to legacy w/ support for op fusion
    This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes
    in the JIT executor. These APIs are not supported, so they could change.
    """
    assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!"
    torch._C._jit_set_profiling_executor(False)
    torch._C._jit_set_profiling_mode(False)
    torch._C._jit_override_can_fuse_on_gpu(True)
    #torch._C._jit_set_texpr_fuser_enabled(True)
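A sketch of how the distributed helpers above are typically used inside a DDP loop; it assumes torch.distributed has already been initialized and that `model` is the (possibly DDP-wrapped) network, so it will not run standalone:

import torch
from torch import distributed as dist
from timm.utils import reduce_tensor, distribute_bn

world_size = dist.get_world_size()
loss = torch.tensor(0.5)                      # per-rank metric (placeholder value)
avg_loss = reduce_tensor(loss, world_size)    # all-reduce sum across ranks, then divide by world size

# After each epoch: average (reduce=True) or broadcast from rank 0 (reduce=False) the BN running stats.
distribute_bn(model, world_size, reduce=True)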
--------------------------------------------------------------------------------
/timm/utils/log.py:
--------------------------------------------------------------------------------
""" Logging helpers

Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import logging.handlers


class FormatterNoInfo(logging.Formatter):
    def __init__(self, fmt='%(levelname)s: %(message)s'):
        logging.Formatter.__init__(self, fmt)

    def format(self, record):
        if record.levelno == logging.INFO:
            return str(record.getMessage())
        return logging.Formatter.format(self, record)


def setup_default_logging(default_level=logging.INFO, log_path=''):
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(FormatterNoInfo())
    logging.root.addHandler(console_handler)
    logging.root.setLevel(default_level)
    if log_path:
        file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3)
        file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s")
        file_handler.setFormatter(file_formatter)
        logging.root.addHandler(file_handler)

--------------------------------------------------------------------------------
/timm/utils/metrics.py:
--------------------------------------------------------------------------------
""" Eval metrics and related

Hacked together by / Copyright 2020 Ross Wightman
"""


class AverageMeter:
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    maxk = min(max(topk), output.size()[1])
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.reshape(1, -1).expand_as(pred))
    return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
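A short sketch (random stand-in tensors, not library code) of how AverageMeter and accuracy combine in a validation loop:

import torch
from timm.utils import AverageMeter, accuracy

top1 = AverageMeter()
output = torch.randn(8, 1000)             # placeholder logits
target = torch.randint(0, 1000, (8,))     # placeholder labels
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1.item(), n=output.size(0))
print(f'top-1 so far: {top1.avg:.2f}%')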
--------------------------------------------------------------------------------
/timm/utils/misc.py:
--------------------------------------------------------------------------------
""" Misc utils

Hacked together by / Copyright 2020 Ross Wightman
"""
import re


def natural_key(string_):
    """See http://www.codinghorror.com/blog/archives/001018.html"""
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]


def add_bool_arg(parser, name, default=False, help=''):
    dest_name = name.replace('-', '_')
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
    group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
    parser.set_defaults(**{dest_name: default})

--------------------------------------------------------------------------------
/timm/utils/random.py:
--------------------------------------------------------------------------------
import random
import numpy as np
import torch


def random_seed(seed=42, rank=0):
    torch.manual_seed(seed + rank)
    np.random.seed(seed + rank)
    random.seed(seed + rank)

--------------------------------------------------------------------------------
/timm/utils/summary.py:
--------------------------------------------------------------------------------
""" Summary utilities

Hacked together by / Copyright 2020 Ross Wightman
"""
import csv
import os
from collections import OrderedDict
try:
    import wandb
except ImportError:
    pass


def get_outdir(path, *paths, inc=False):
    outdir = os.path.join(path, *paths)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif inc:
        count = 1
        outdir_inc = outdir + '-' + str(count)
        while os.path.exists(outdir_inc):
            count = count + 1
            outdir_inc = outdir + '-' + str(count)
            assert count < 100
        outdir = outdir_inc
        os.makedirs(outdir)
    return outdir


def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False):
    rowd = OrderedDict(epoch=epoch)
    rowd.update([('train_' + k, v) for k, v in train_metrics.items()])
    rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()])
    if log_wandb:
        wandb.log(rowd)
    with open(filename, mode='a') as cf:
        dw = csv.DictWriter(cf, fieldnames=rowd.keys())
        if write_header:  # first iteration (epoch == 1 can't be used)
            dw.writeheader()
        dw.writerow(rowd)

--------------------------------------------------------------------------------
/timm/version.py:
--------------------------------------------------------------------------------
__version__ = '0.4.13'

--------------------------------------------------------------------------------
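add_bool_arg from misc.py above registers a paired --foo / --no-foo flag; a quick sketch of the resulting CLI behaviour (the argument name is chosen purely for illustration):

import argparse
from timm.utils import add_bool_arg

parser = argparse.ArgumentParser()
add_bool_arg(parser, 'model-ema', default=False, help='track an EMA copy of the model weights')
print(parser.parse_args(['--model-ema']).model_ema)     # True
print(parser.parse_args(['--no-model-ema']).model_ema)  # False
print(parser.parse_args([]).model_ema)                  # False (the default)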