The response has been limited to 50k tokens of the smallest files in the repo. You can remove this limitation by removing the max tokens filter.
├── .gitignore
├── GETTING_STARTED.md
├── LICENSE
├── MODEL_ZOO.md
├── README.md
├── config.profile
├── configs
    ├── ade20k
    │   ├── H_48_D_4.json
    │   ├── H_48_D_4_TEST.json
    │   ├── H_SEGFIX.json
    │   ├── M_V2_D_8.json
    │   ├── R_101_D_8.json
    │   ├── R_101_D_8_TEST.json
    │   ├── R_50_D_8.json
    │   └── W_38_D_8.json
    ├── celeba
    │   ├── H_48_D_4.json
    │   └── H_48_D_4_TEST.json
    ├── cityscapes
    │   ├── H_48_D_4.json
    │   ├── H_48_D_4_TEST_DEPTH.json
    │   ├── H_64_D_4.json
    │   ├── H_SEGFIX.json
    │   ├── M_V2_D_8.json
    │   ├── R_101_D_16.json
    │   ├── R_101_D_8.json
    │   ├── R_101_D_8_EDGE_VOID.json
    │   ├── R_101_D_8_NON_EDGE_VOID.json
    │   ├── R_101_D_8_TEST.json
    │   ├── R_18_D_8.json
    │   ├── R_50_D_8.json
    │   ├── W_38_D_8.json
    │   ├── X_65_D_16.json
    │   └── X_65_D_8.json
    ├── coco_stuff
    │   ├── H_48_D_4.json
    │   ├── H_48_D_4_TEST.json
    │   ├── R_101_D_8.json
    │   └── R_101_D_8_TEST.json
    ├── lip
    │   ├── H_48_D_4.json
    │   ├── H_48_D_4_TEST.json
    │   ├── R_101_D_16.json
    │   ├── R_101_D_8.json
    │   └── W_38_D_8.json
    ├── mapillary
    │   └── H_48_D_4_1024x1024.json
    ├── pascal_context
    │   ├── H_48_D_4.json
    │   ├── H_48_D_4_TEST.json
    │   ├── R_101_D_8.json
    │   ├── R_101_D_8_TEST.json
    │   └── W_38_D_8.json
    └── segfix
    │   └── H_SEGFIX.json
├── imgs
    ├── OCR.PNG
    └── SegFix.PNG
├── lib
    ├── __init__.py
    ├── datasets
    │   ├── __init__.py
    │   ├── data_loader.py
    │   ├── loader
    │   │   ├── __init__.py
    │   │   ├── ade20k_loader.py
    │   │   ├── default_loader.py
    │   │   ├── lip_loader.py
    │   │   ├── multi_dataset_loader.py
    │   │   └── offset_loader.py
    │   ├── preprocess
    │   │   ├── README.md
    │   │   ├── __init__.py
    │   │   ├── ade20k
    │   │   │   ├── __init__.py
    │   │   │   ├── ade20k_generator.py
    │   │   │   ├── ade20k_generator.sh
    │   │   │   └── dt_offset_generator.py
    │   │   ├── cityscapes
    │   │   │   ├── __init__.py
    │   │   │   ├── cityscapes_generator.py
    │   │   │   ├── cityscapes_instance_generator.py
    │   │   │   ├── dt_offset_generator.py
    │   │   │   ├── edge_generator.py
    │   │   │   ├── instance_dt_offset_generator.py
    │   │   │   └── instance_edge_generator.py
    │   │   ├── coco_stuff
    │   │   │   └── coco_stuff_generator.py
    │   │   ├── face
    │   │   │   ├── celebmask_color.py
    │   │   │   ├── celebmask_label_generator.py
    │   │   │   ├── celebmask_partition.py
    │   │   │   └── prepare_celeb.sh
    │   │   ├── lip
    │   │   │   ├── __init__.py
    │   │   │   └── lip.py
    │   │   ├── mapillary
    │   │   │   ├── mapillary_generator.py
    │   │   │   └── mapillary_generator.sh
    │   │   ├── pascal_context
    │   │   │   ├── pascal_context_generator.py
    │   │   │   └── pascal_context_generator.sh
    │   │   └── pascal_voc
    │   │   │   └── pascal_voc_generator.py
    │   └── tools
    │   │   ├── __init__.py
    │   │   ├── collate.py
    │   │   ├── cv2_aug_transforms.py
    │   │   ├── pil_aug_transforms.py
    │   │   └── transforms.py
    ├── extensions
    │   ├── __init__.py
    │   ├── cc_attention
    │   │   ├── __init__.py
    │   │   ├── _ext
    │   │   │   └── __init__.py
    │   │   ├── build.py
    │   │   ├── build.sh
    │   │   ├── functions.py
    │   │   └── src
    │   │   │   ├── ca.cu
    │   │   │   ├── ca.h
    │   │   │   ├── common.h
    │   │   │   ├── lib_cffi.cpp
    │   │   │   └── lib_cffi.h
    │   ├── crf
    │   │   └── dense_crf.py
    │   ├── dcn
    │   │   ├── README.md
    │   │   ├── __init__.py
    │   │   ├── _ext
    │   │   │   ├── __init__.py
    │   │   │   ├── deform_conv
    │   │   │   │   └── __init__.py
    │   │   │   └── modulated_dcn
    │   │   │   │   └── __init__.py
    │   │   ├── build.py
    │   │   ├── build_modulated.py
    │   │   ├── functions
    │   │   │   ├── __init__.py
    │   │   │   ├── deform_conv.py
    │   │   │   └── modulated_dcn_func.py
    │   │   ├── make.sh
    │   │   ├── make_p100.sh
    │   │   ├── modules
    │   │   │   ├── __init__.py
    │   │   │   ├── deform_conv.py
    │   │   │   └── modulated_dcn.py
    │   │   ├── src
    │   │   │   ├── cuda
    │   │   │   │   ├── deform_psroi_pooling_cuda.cu
    │   │   │   │   ├── deform_psroi_pooling_cuda.h
    │   │   │   │   ├── modulated_deform_im2col_cuda.cu
    │   │   │   │   └── modulated_deform_im2col_cuda.h
    │   │   │   ├── deform_conv.c
    │   │   │   ├── deform_conv.h
    │   │   │   ├── deform_conv_cuda.c
    │   │   │   ├── deform_conv_cuda.h
    │   │   │   ├── deform_conv_cuda_kernel.cu
    │   │   │   ├── deform_conv_cuda_kernel.h
    │   │   │   ├── modulated_dcn.c
    │   │   │   ├── modulated_dcn.h
    │   │   │   ├── modulated_dcn_cuda.c
    │   │   │   └── modulated_dcn_cuda.h
    │   │   ├── test.py
    │   │   └── test_modulated.py
    │   ├── frn
    │   │   ├── __init__.py
    │   │   └── frn.py
    │   ├── inplace_abn
    │   │   ├── __init__.py
    │   │   ├── bn.py
    │   │   ├── functions.py
    │   │   └── src
    │   │   │   ├── common.h
    │   │   │   ├── inplace_abn.cpp
    │   │   │   ├── inplace_abn.h
    │   │   │   ├── inplace_abn_cpu.cpp
    │   │   │   └── inplace_abn_cuda.cu
    │   ├── inplace_abn_1
    │   │   ├── __init__.py
    │   │   ├── bn.py
    │   │   ├── functions.py
    │   │   ├── misc.py
    │   │   └── src
    │   │   │   ├── checks.h
    │   │   │   ├── inplace_abn.cpp
    │   │   │   ├── inplace_abn.h
    │   │   │   ├── inplace_abn_cpu.cpp
    │   │   │   ├── inplace_abn_cuda.cu
    │   │   │   ├── inplace_abn_cuda_half.cu
    │   │   │   └── utils
    │   │   │       ├── checks.h
    │   │   │       ├── common.h
    │   │   │       └── cuda.cuh
    │   ├── pacnet
    │   │   ├── __init__.py
    │   │   ├── pac.py
    │   │   ├── paccrf.py
    │   │   └── test_pac.py
    │   ├── parallel
    │   │   ├── __init__.py
    │   │   ├── _functions.py
    │   │   ├── data_container.py
    │   │   ├── data_parallel.py
    │   │   ├── distributed.py
    │   │   └── scatter_gather.py
    │   ├── switchablenorms
    │   │   ├── __init__.py
    │   │   └── switchable_norm.py
    │   └── syncbn
    │   │   ├── __init__.py
    │   │   ├── allreduce.py
    │   │   ├── comm.py
    │   │   ├── module.py
    │   │   └── src
    │   │       ├── common.h
    │   │       ├── device_tensor.h
    │   │       ├── operator.cpp
    │   │       ├── operator.h
    │   │       ├── syncbn_cpu.cpp
    │   │       └── syncbn_kernel.cu
    ├── loss
    │   ├── __init__.py
    │   ├── loss_helper.py
    │   └── loss_manager.py
    ├── metrics
    │   ├── F1_running_score.py
    │   ├── __init__.py
    │   ├── ade20k_evaluator.py
    │   ├── cityscapes
    │   │   ├── __init__.py
    │   │   ├── evaluation
    │   │   │   ├── __init__.py
    │   │   │   ├── addToConfusionMatrix.c
    │   │   │   ├── addToConfusionMatrix.pyx
    │   │   │   ├── addToConfusionMatrix_impl.c
    │   │   │   ├── csHelpers.py
    │   │   │   ├── evalInstanceLevelSemanticLabeling.py
    │   │   │   ├── evalPixelLevelSemanticLabeling.py
    │   │   │   ├── instance.py
    │   │   │   └── instances2dict.py
    │   │   ├── helpers
    │   │   │   ├── __init__.py
    │   │   │   ├── annotation.py
    │   │   │   ├── csHelpers.py
    │   │   │   ├── labels.py
    │   │   │   └── labels_cityPersons.py
    │   │   ├── make.sh
    │   │   └── setup.py
    │   ├── cityscapes_evaluator.py
    │   ├── cocostuff_evaluator.py
    │   ├── pascal_context_evaluator.py
    │   ├── running_score.py
    │   └── running_score_mp.py
    ├── models
    │   ├── __init__.py
    │   ├── backbones
    │   │   ├── __init__.py
    │   │   ├── backbone_selector.py
    │   │   ├── hrnet
    │   │   │   ├── __init__.py
    │   │   │   ├── hrnet_backbone.py
    │   │   │   └── hrnet_config.py
    │   │   └── resnet
    │   │   │   ├── __init__.py
    │   │   │   ├── dcn_resnet_models.py
    │   │   │   ├── resnest_models.py
    │   │   │   ├── resnet_backbone.py
    │   │   │   ├── resnet_models.py
    │   │   │   ├── resnext_models.py
    │   │   │   ├── wide_resnet_models.py
    │   │   │   └── wsl_resnext_models.py
    │   ├── model_manager.py
    │   ├── modules
    │   │   ├── __init__.py
    │   │   ├── asp_oc_block.py
    │   │   ├── base_oc_block.py
    │   │   ├── decoder_block.py
    │   │   ├── edge_block.py
    │   │   ├── isa_block.py
    │   │   ├── offset_block.py
    │   │   └── spatial_ocr_block.py
    │   ├── nets
    │   │   ├── __init__.py
    │   │   ├── ce2pnet.py
    │   │   ├── fcnet.py
    │   │   ├── hrnet.py
    │   │   ├── ideal_ocrnet.py
    │   │   ├── isanet.py
    │   │   ├── ocnet.py
    │   │   ├── ocrnet.py
    │   │   └── segfix.py
    │   └── tools
    │   │   ├── __init__.py
    │   │   └── module_helper.py
    ├── utils
    │   ├── __init__.py
    │   ├── distributed.py
    │   ├── helpers
    │   │   ├── __init__.py
    │   │   ├── dc_helper.py
    │   │   ├── file_helper.py
    │   │   ├── image_helper.py
    │   │   ├── json_helper.py
    │   │   ├── mask_helper.py
    │   │   ├── offset_helper.py
    │   │   └── video_helper.py
    │   └── tools
    │   │   ├── __init__.py
    │   │   ├── average_meter.py
    │   │   ├── configer.py
    │   │   ├── logger.py
    │   │   └── timer.py
    └── vis
    │   ├── __init__.py
    │   ├── attention_visualizer.py
    │   ├── color150.mat
    │   ├── color60.mat
    │   ├── log_visualizer.py
    │   ├── palette.py
    │   ├── seg_parser.py
    │   ├── seg_visualizer.py
    │   └── tensor_visualizer.py
├── main.py
├── requirements.txt
├── scripts
    ├── ade20k
    │   ├── hrnet
    │   │   ├── run_h_48_d_4_asp_ocr_ohem.sh
    │   │   ├── run_h_48_d_4_isa_ohem.sh
    │   │   ├── run_h_48_d_4_ocr_ohem.sh
    │   │   └── run_h_48_d_4_train.sh
    │   ├── isa
    │   │   └── run_wideb5_isanet_ade20k.sh
    │   ├── ocnet
    │   │   ├── run_res101d8_aspocnet_ade20k_seg.sh
    │   │   ├── run_res101d8_aspp_baseocnet_ade20k_seg.sh
    │   │   ├── run_res101d8_baseocnet_ade20k_seg.sh
    │   │   ├── run_res50d8_aspocnet_ade20k_seg.sh
    │   │   ├── run_res50d8_aspp_baseocnet_ade20k_seg.sh
    │   │   └── run_res50d8_baseocnet_ade20k_seg.sh
    │   ├── ocrnet
    │   │   ├── run_res101d8_aspocr_ade20k.sh
    │   │   ├── run_res101d8_fastaspocnet_ade20k_seg_test.sh
    │   │   ├── run_res101d8_fastbaseocnet_ade20k_seg.sh
    │   │   ├── run_res101d8_fastbaseocnet_ade20k_seg_ohem.sh
    │   │   ├── run_res101d8_fastbaseocnet_ade20k_trainval.sh
    │   │   ├── run_res101d8_fastbaseocnet_ade20k_trainval_ohem.sh
    │   │   ├── run_res101d8_ideal_ocr_ade20k.sh
    │   │   ├── run_res101d8_ideal_ocr_b_ade20k.sh
    │   │   ├── run_res101d8_ideal_ocr_c_ade20k.sh
    │   │   ├── run_res101d8_ocr_ade20k.sh
    │   │   ├── run_res50d8_fastaspocnet_ade20k_seg.sh
    │   │   ├── run_res50d8_ideal_ocr_ade20k.sh
    │   │   └── run_res50d8_ocr_ade20k.sh
    │   └── run_res101d8_fcn_ade20k_seg.sh
    ├── celeba
    │   ├── aml_run_h_48_d_4_ocr_train.sh
    │   ├── aml_run_h_48_d_4_ocr_train_lr1e2.sh
    │   ├── aml_run_h_48_d_4_ocr_train_lr1e3.sh
    │   ├── aml_run_h_48_d_4_ocr_train_lr5e3.sh
    │   ├── run_h_48_d_4_ocr_train.sh
    │   └── run_h_48_d_4_train.sh
    ├── cityscapes
    │   ├── fcn
    │   │   ├── run_r_101_d_8_fcn_train.sh
    │   │   └── run_r_101_d_8_fcn_wo_dsn_train.sh
    │   ├── hrnet
    │   │   ├── run_h_48_d_4.sh
    │   │   ├── run_h_48_d_4_ocr.sh
    │   │   ├── run_h_48_d_4_ocr_b.sh
    │   │   ├── run_h_48_d_4_ocr_b_mapillary_trainval_coarse_ohem.sh
    │   │   ├── run_h_48_d_4_ocr_b_mapillary_trainval_coarse_trainval_ohem.sh
    │   │   ├── run_h_48_d_4_ocr_b_mapillary_trainval_ohem.sh
    │   │   ├── run_h_48_d_4_ocr_ohem.sh
    │   │   └── run_h_48_d_4_ocr_trainval.sh
    │   ├── isa
    │   │   └── run_r_101_d_8_isa_train.sh
    │   ├── ocnet
    │   │   ├── run_r_101_d_8_aspoc_train.sh
    │   │   ├── run_r_101_d_8_baseoc_train.sh
    │   │   └── run_r_101_d_8_pyramidoc_train.sh
    │   ├── ocrnet
    │   │   ├── run_ideal_distribute_ocrnet.sh
    │   │   ├── run_ideal_gather_ocrnet.sh
    │   │   ├── run_ideal_spatial_ocrnet.sh
    │   │   ├── run_ideal_spatial_ocrnet_b.sh
    │   │   ├── run_ideal_spatial_ocrnet_c.sh
    │   │   ├── run_r_101_d_8_asp_ocrnet_train.sh
    │   │   ├── run_r_101_d_8_ocrnet_train.sh
    │   │   ├── run_r_101_d_8_ocrnet_trainval.sh
    │   │   ├── run_spatial_ocrnet_trainval_coarse.sh
    │   │   ├── run_spatial_ocrnet_trainval_coarse_trainval.sh
    │   │   ├── run_spatial_ocrnet_trainval_mapillary.sh
    │   │   └── run_spatial_ocrnet_trainval_mapillary_coarse.sh
    │   ├── resnest
    │   │   └── run_r_101_d_8_fcn.sh
    │   ├── segfix.py
    │   ├── segfix
    │   │   ├── run_h_48_d_4_segfix.sh
    │   │   ├── run_h_48_d_4_segfix_inst.sh
    │   │   ├── run_h_48_d_4_segfix_trainval.sh
    │   │   ├── run_hx_20_d_2_segfix.sh
    │   │   ├── run_hx_20_d_2_segfix_inst.sh
    │   │   └── run_hx_20_d_2_segfix_trainval.sh
    │   ├── segfix_ade20k.py
    │   └── segfix_instance.py
    ├── coco_stuff
    │   ├── run_h_48_d_4_isa_train.sh
    │   ├── run_h_48_d_4_ocr_ohem_train.sh
    │   ├── run_h_48_d_4_ocr_train.sh
    │   ├── run_h_48_d_4_train.sh
    │   ├── run_r_101_d_8_gt_ocr_train.sh
    │   ├── run_r_101_d_8_ocr_train.sh
    │   └── run_r_101_d_8_train.sh
    ├── lip
    │   ├── run_h_48_d_4_isa_train.sh
    │   ├── run_h_48_d_4_ocr_b_train.sh
    │   ├── run_h_48_d_4_ocr_train.sh
    │   ├── run_h_48_d_4_train.sh
    │   ├── run_r_101_d_16_ce2p_gt_ocrnet_train.sh
    │   └── run_r_101_d_16_ce2p_ocr_train.sh
    ├── mapillary
    │   └── run_h_48_d_4_ocr_b.sh
    ├── pascal_context
    │   ├── run_h_48_d_4_isa_train.sh
    │   ├── run_h_48_d_4_ocr_b_train.sh
    │   ├── run_h_48_d_4_ocr_train.sh
    │   ├── run_h_48_d_4_train.sh
    │   ├── run_r_101_d_8_aspocr_train.sh
    │   ├── run_r_101_d_8_baseoc_train.sh
    │   ├── run_r_101_d_8_gt_ocr_train.sh
    │   ├── run_r_101_d_8_ocr_train.sh
    │   └── run_r_101_d_8_train.sh
    └── segfix
    │   └── run_hx_20_d_2_cityscapes_ade20k.sh
└── segmentor
    ├── __init__.py
    ├── tester.py
    ├── tester_offset.py
    ├── tools
        ├── __init__.py
        ├── blob_helper.py
        ├── cost_helper.py
        ├── data_helper.py
        ├── evaluator
        │   ├── __init__.py
        │   ├── base.py
        │   ├── standard.py
        │   └── tasks.py
        ├── module_runner.py
        └── optim_scheduler.py
    └── trainer.py


/.gitignore:
--------------------------------------------------------------------------------
  1 | # Byte-compiled / optimized / DLL files
  2 | __pycache__/
  3 | *.py[cod]
  4 | *$py.class
  5 | 
  6 | # C extensions
  7 | *.so
  8 | 
  9 | # Distribution / packaging
 10 | .Python
 11 | build/
 12 | develop-eggs/
 13 | dist/
 14 | downloads/
 15 | eggs/
 16 | .eggs/
 17 | lib/
 18 | !/lib/
 19 | lib64/
 20 | parts/
 21 | sdist/
 22 | var/
 23 | wheels/
 24 | *.egg-info/
 25 | .installed.cfg
 26 | *.egg
 27 | MANIFEST
 28 | 
 29 | # PyInstaller
 30 | #  Usually these files are written by a python script from a template
 31 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
 32 | *.manifest
 33 | *.spec
 34 | 
 35 | # Installer logs
 36 | pip-log.txt
 37 | pip-delete-this-directory.txt
 38 | 
 39 | # Unit test / coverage reports
 40 | htmlcov/
 41 | .tox/
 42 | .coverage
 43 | .coverage.*
 44 | .cache
 45 | nosetests.xml
 46 | coverage.xml
 47 | *.cover
 48 | .hypothesis/
 49 | .pytest_cache/
 50 | 
 51 | # Translations
 52 | *.mo
 53 | *.pot
 54 | 
 55 | # Django stuff:
 56 | *.log
 57 | !/release_files/**/*
 58 | local_settings.py
 59 | db.sqlite3
 60 | 
 61 | # Flask stuff:
 62 | instance/
 63 | .webassets-cache
 64 | 
 65 | # Scrapy stuff:
 66 | .scrapy
 67 | 
 68 | # Sphinx documentation
 69 | docs/_build/
 70 | 
 71 | # PyBuilder
 72 | target/
 73 | 
 74 | # Jupyter Notebook
 75 | .ipynb_checkpoints
 76 | 
 77 | # pyenv
 78 | .python-version
 79 | 
 80 | # celery beat schedule file
 81 | celerybeat-schedule
 82 | 
 83 | # SageMath parsed files
 84 | *.sage.py
 85 | 
 86 | # Environments
 87 | .env
 88 | .venv
 89 | env/
 90 | venv/
 91 | ENV/
 92 | env.bak/
 93 | venv.bak/
 94 | 
 95 | # Spyder project settings
 96 | .spyderproject
 97 | .spyproject
 98 | 
 99 | # Rope project settings
100 | .ropeproject
101 | 
102 | # mkdocs documentation
103 | /site
104 | 
105 | # mypy
106 | .mypy_cache/
107 | 
108 | .vscode
109 | pretrained_model
110 | nohup.out
111 | 
112 | release_files/
113 | lib/metrics/cityscapes/evaluation/addToConfusionMatrix.c
114 | log/
115 | checkpoints/


--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2020 openseg-group (Yuhui Yuan,JingyiXie,Jianyuan Guo,Lang Huang)
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 


--------------------------------------------------------------------------------
/config.profile:
--------------------------------------------------------------------------------
 1 | # NOTE: This file accepts bash syntax
 2 | 
 3 | # Your Python executable
 4 | PYTHON=/data/anaconda/envs/torch0.4/bin/python
 5 | 
 6 | # Path to your data dir
 7 | # We expect the following directory structure:
 8 | # 
 9 | # $DATA_ROOT/
10 | #     cityscapes/
11 | #         train/
12 | #             image/
13 | #             label/
14 | #         val/
15 | #             image/
16 | #             label/
17 | #         test/
18 | #             berlin/
19 | #             ...
20 | #     pascal_context/
21 | #     ...
22 | DATA_ROOT=/data/home/yuhui/teamdrive/dataset
23 | 
24 | 


--------------------------------------------------------------------------------
/imgs/OCR.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/imgs/OCR.PNG


--------------------------------------------------------------------------------
/imgs/SegFix.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/imgs/SegFix.PNG


--------------------------------------------------------------------------------
/lib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/datasets/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/loader/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/datasets/loader/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/preprocess/README.md:
--------------------------------------------------------------------------------
 1 | ### Data Format for Semantic Segmentation
 2 | 
 3 | The raw data is processed by the generator shell scripts, producing two subdirectories ('train' and 'val'):
 4 | 
 5 | ```
 6 | train or val dir {
 7 |     image: contains the images for train or val.
 8 |     label: contains the label png files(mode='P') for train or val.
 9 |     mask: contains the mask png files(mode='P') for train or val.
10 | }
11 | ```
12 | 
13 | 
14 | ### Data Format for Instance Segmentation
15 | 
16 | The raw data is processed by the generator shell scripts, producing two subdirectories ('train' and 'val'):
17 | 
18 | ```
19 | train or val dir {
20 |     image: contains the images for train or val.
21 |     json: contains the json files for train or val.
22 | }
23 | ```
24 | 
25 | The json format for Instance Segmentation below.
26 | 
27 | ```
28 | {
29 |     "width": 640,
30 |     "height": 480,
31 |     "objects": [
32 |         {
33 |             "bbox": [x_left_up, y_left_up, x_right_bottom, y_right_bottom],
34 |             "label": class_num,
35 |             "segm": [[polygon1], [...], ...] or rle
36 |         },
37 |         {
38 |             ...
39 |         }
40 |     ]
41 | }
42 | ```
43 | 


--------------------------------------------------------------------------------
/lib/datasets/preprocess/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/datasets/preprocess/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/preprocess/ade20k/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/datasets/preprocess/ade20k/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/preprocess/ade20k/ade20k_generator.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Donny You(youansheng@gmail.com)
 4 | # Generate train & val data.
 5 | 
 6 | 
 7 | ORI_ROOT_DIR='/home/donny/DataSet/ADE20K'
 8 | SAVE_DIR='/home/donny/DataSet/ADE20K'
 9 | 
10 | 
11 | python ade20k_generator.py --ori_root_dir $ORI_ROOT_DIR \
12 |                            --save_dir $SAVE_DIR


--------------------------------------------------------------------------------
/lib/datasets/preprocess/cityscapes/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/datasets/preprocess/cityscapes/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/preprocess/coco_stuff/coco_stuff_generator.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import json
 3 | import shutil
 4 | import argparse
 5 | from pathlib import Path
 6 | import multiprocessing as mp
 7 | import multiprocessing.pool as mpp
 8 | from collections import defaultdict
 9 | 
10 | import numpy as np
11 | import scipy.io as io
12 | from PIL import Image
13 | import pycocotools.mask as mask_util
14 | 
15 | 
class COCOProcessor:
    """Loads COCO-Stuff ground-truth label maps from the raw .mat files."""

    def build(self, name):
        """Return the label map for image `name` as a uint8 ndarray.

        Reads `<args.ori_root_dir>/annotations/<name>.mat` and extracts its
        'S' matrix. Relies on the module-level `args` namespace.
        """
        mat_path = args.ori_root_dir / 'annotations' / (name + '.mat')
        labelmap = io.loadmat(str(mat_path))['S']
        return labelmap.astype(np.uint8)
20 | 
21 | 
def process(inputs):
    """Copy one image into the split layout and write its label map.

    `inputs` is a `(split, name)` pair. Paths come from the module-level
    `args` namespace and label maps from the module-level `coco` processor.
    When `args.validate_dir` is set, the generated label is diffed against a
    previously generated one and must agree on the set of class ids present.
    """
    split, name = inputs
    print('Processing', name, split)

    # Copy the raw JPEG into <save_dir>/<split>/images/.
    src_img = args.ori_root_dir / 'images' / (name + '.jpg')
    dst_img: Path = args.save_dir / split / 'images' / (name + '.jpg')
    dst_img.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(str(src_img), str(dst_img))

    dst_label: Path = args.save_dir / split / 'label' / (name + '.png')
    labelmap = coco.build(name)

    # Optional sanity check against an existing label directory.
    if args.validate_dir is not None:
        ref_path = args.validate_dir / split / 'label' / (name + '.png')
        ref_map = np.array(Image.open(str(ref_path))).astype(
            np.uint8)
        mismatch_pct = (ref_map != labelmap).sum() / labelmap.size * 100
        if mismatch_pct > 1:
            print('{:.6f}%'.format(mismatch_pct))
        # np.unique comparison yields a bool when the arrays have equal
        # length, an ndarray otherwise — handle both.
        same_ids = (np.unique(ref_map) == np.unique(labelmap))
        assert same_ids if isinstance(same_ids, bool) else same_ids.all()

    dst_label.parent.mkdir(parents=True, exist_ok=True)
    Image.fromarray(labelmap).save(str(dst_label))
45 | 
46 | 
def input_args():
    """Yield `(split, name)` pairs for every image listed in the split files.

    The official 'test.txt' image list is mapped to our 'val' split and
    'train.txt' to 'train'. Reads from the module-level `args` namespace.
    """
    split_to_list = [('val', 'test.txt'), ('train', 'train.txt')]
    for split, list_file in split_to_list:
        with (args.ori_root_dir / 'imageLists' / list_file).open() as f:
            for line in f:
                yield (split, line.strip())
55 | 
56 | 
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--ori_root_dir', type=Path)
    parser.add_argument('--save_dir', type=Path)
    # Empty string (or absent flag) stays falsy so validation is skipped.
    parser.add_argument('--validate_dir', type=lambda x: x and Path(x))
    args = parser.parse_args()

    coco = COCOProcessor()
    # Fix: the original built the pool via the internal `multiprocessing.pool`
    # module and never closed it. Use the public `mp.Pool` constructor as a
    # context manager so workers are terminated/joined even if `process`
    # raises. `processes=None` keeps the one-worker-per-CPU default.
    with mp.Pool(processes=None) as pool:
        pool.map(process, input_args())
66 | 


--------------------------------------------------------------------------------
/lib/datasets/preprocess/face/celebmask_color.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python
 2 | # -*- encoding: utf-8 -*-
 3 | # Reference: https://github.com/switchablenorms/CelebAMask-HQ/blob/master/face_parsing/Data_preprocessing/g_mask.py
 4 | #            
 5 | 
 6 | import os
 7 | from PIL import Image
 8 | import glob
 9 | import numpy as np
10 | 
def make_folder(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses `exist_ok=True` instead of the original check-then-create pattern,
    which raced when several processes created the same directory at once.
    """
    os.makedirs(path, exist_ok=True)
14 | 
if __name__ == "__main__":
    # RGB palette; index i colors label id i of the grayscale mask.
    color_list = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204],
                  [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0],
                  [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]
    root_path = '/home/yuhui/teamdrive/dataset/face_parse/CelebAMask-HQ/'

    folder_base = root_path + 'CelebAMask-HQ-mask'
    folder_save = root_path + 'CelebAMask-HQ-mask-color'

    img_num = 10

    make_folder(folder_save)

    for k in range(img_num):
        filename = os.path.join(folder_base, str(k) + '.png')
        # Fix: the original performed the save OUTSIDE this existence check,
        # so a missing mask re-saved the previous iteration's colorized image
        # under the new name (or raised NameError on the first iteration).
        # Skip missing masks entirely instead.
        if not os.path.exists(filename):
            continue
        im_base = np.zeros((512, 512, 3))
        im = np.array(Image.open(filename))
        for idx, color in enumerate(color_list):
            # Paint every pixel carrying label id `idx` with its RGB color.
            im_base[im == idx] = color
        filename_save = os.path.join(folder_save, str(k) + '.png')
        result = Image.fromarray((im_base).astype(np.uint8))
        print(filename_save)
        result.save(filename_save)


--------------------------------------------------------------------------------
/lib/datasets/preprocess/face/celebmask_label_generator.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python
 2 | # -*- encoding: utf-8 -*-
 3 | # Reference: https://github.com/switchablenorms/CelebAMask-HQ/blob/master/face_parsing/Data_preprocessing/g_mask.py
 4 | #            
 5 | 
 6 | # other resource: 
 7 | #                   https://github.com/switchablenorms/CelebAMask-HQ
 8 | #                   https://github.com/zllrunning/face-parsing.PyTorch
 9 | #                   https://github.com/JACKYLUO1991/FaceParsing
10 | 
11 | 
12 | 
13 | import os
14 | import cv2
15 | import glob
16 | import numpy as np
17 | 
18 | label_list = ['skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 
19 |               'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth']
20 | 
def make_folder(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses `exist_ok=True` instead of the original check-then-create pattern,
    which raced when several processes created the same directory at once.
    """
    os.makedirs(path, exist_ok=True)
24 | 
if __name__ == "__main__":
    root_path = '/home/yuhui/teamdrive/dataset/face_parse/CelebAMask-HQ/'
    folder_base = root_path + 'CelebAMask-HQ-mask-anno'
    folder_save = root_path + 'CelebAMask-HQ-mask'
    img_num = 30000
    make_folder(folder_save)

    # NOTE(review): the loop starts at 14700 — this looks like a leftover
    # offset for resuming an interrupted run; confirm (and reset to 0)
    # before regenerating the full dataset from scratch.
    for k in range(14700, img_num):
        # Part annotations are sharded 2000 images per sub-folder.
        folder_num = k // 2000
        im_base = np.zeros((512, 512))
        for idx, label in enumerate(label_list):
            part_file = str(k).rjust(5, '0') + '_' + label + '.png'
            filename = os.path.join(folder_base, str(folder_num), part_file)
            if (os.path.exists(filename)):
                print(label, idx + 1)
                part = cv2.imread(filename)[:, :, 0]
                # Non-zero pixels of this part mask get label id idx+1
                # (0 = background); later parts overwrite earlier ones.
                im_base[part != 0] = (idx + 1)

        filename_save = os.path.join(folder_save, str(k) + '.png')
        print(filename_save)
        cv2.imwrite(filename_save, im_base)
46 | 
47 | 
48 | '''
49 | # based on https://raw.githubusercontent.com/zllrunning/face-parsing.PyTorch/master/prepropess_data.py
50 | import os.path as osp
51 | import os
52 | import cv2
53 | from PIL import Image
54 | import numpy as np
55 | root_path = '/home/yuhui/teamdrive/dataset/face_parse/CelebAMask-HQ/'
56 | face_data = root_path + 'CelebA-HQ-img'
57 | face_sep_mask = root_path + 'CelebAMask-HQ-mask-anno'
58 | mask_path = root_path + 'CelebAMaskHQ-mask'
59 | counter = 0
60 | total = 0
61 | for i in range(15):
62 | 
63 |     atts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r',
64 |             'nose', 'mouth', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'hair', 'hat']
65 | 
66 |     for j in range(i * 2000, (i + 1) * 2000):
67 | 
68 |         mask = np.zeros((512, 512))
69 | 
70 |         for l, att in enumerate(atts, 1):
71 |             total += 1
72 |             file_name = ''.join([str(j).rjust(5, '0'), '_', att, '.png'])
73 |             path = osp.join(face_sep_mask, str(i), file_name)
74 | 
75 |             if os.path.exists(path):
76 |                 counter += 1
77 |                 sep_mask = np.array(Image.open(path).convert('P'))
78 |                 # print(np.unique(sep_mask))
79 |                 mask[sep_mask == 225] = l
80 |         cv2.imwrite('{}/{}.png'.format(mask_path, j), mask)
81 |         print(j)
82 | print(counter, total)
83 | '''
84 | 
85 | 


--------------------------------------------------------------------------------
/lib/datasets/preprocess/face/celebmask_partition.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python
 2 | # -*- encoding: utf-8 -*-
 3 | # Reference: https://github.com/switchablenorms/CelebAMask-HQ/blob/master/face_parsing/Data_preprocessing/g_mask.py
 4 | #            
 5 | 
 6 | import os
 7 | import pdb
 8 | import shutil
 9 | import pandas as pd
10 | from shutil import copyfile
11 | 
def make_folder(path):
    """Create *path* (and any missing parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    check-then-create pattern, which was race-prone (TOCTOU) and wrapped
    *path* in a pointless single-argument ``os.path.join``.
    """
    os.makedirs(path, exist_ok=True)
15 | 
if __name__ == "__main__":
    # Partition CelebAMask-HQ into train/val/test folders using the
    # CelebA-HQ -> CelebA index mapping file.  Only the *test* copies are
    # active below; the train/val copyfile calls are commented out, so this
    # run produces test files and merely counts train/val images.
    root_path = '/home/yuhui/teamdrive/dataset/face_parse/CelebAMask-HQ/'
    #### source data path
    s_label = root_path + 'CelebAMask-HQ-mask'
    s_img = root_path + 'CelebA-HQ-img'
    #### destination training data path
    d_train_label = root_path + 'train/label'
    d_train_img = root_path + 'train/image'
    #### destination testing data path
    d_test_label = root_path + 'test/label'
    d_test_img = root_path + 'test/image'
    #### val data path
    d_val_label = root_path + 'val/label'
    d_val_img = root_path + 'val/image'

    #### make folders
    make_folder(d_train_label)
    make_folder(d_train_img)
    make_folder(d_test_label)
    make_folder(d_test_img)
    make_folder(d_val_label)
    make_folder(d_val_img)

    #### calculate data counts in destination folder
    train_count = 0
    test_count = 0
    val_count = 0

    # Column 1 of the mapping file is the original CelebA index for each
    # CelebA-HQ image.  NOTE(review): delim_whitespace is deprecated in
    # recent pandas; sep=r'\s+' is the modern equivalent -- confirm the
    # pinned pandas version before changing.
    image_list = pd.read_csv(root_path + 'CelebA-HQ-to-CelebA-mapping.txt', delim_whitespace=True, header=None)
    # f_train = open('train_list.txt', 'w')
    # f_val = open('val_list.txt', 'w')
    # f_test = open('test_list.txt', 'w')

    for idx, x in enumerate(image_list.loc[:, 1]):
        print (idx, x)
        # if idx < 14700:
        #     continue
        # pdb.set_trace()
        # Split by original CelebA index: [162771, 182638) -> val,
        # >= 182638 -> test, everything else -> train.
        if x >= 162771 and x < 182638:
            # copyfile(os.path.join(s_label, str(idx)+'.png'), os.path.join(d_val_label, str(val_count)+'.png'))
            # copyfile(os.path.join(s_img, str(idx)+'.jpg'), os.path.join(d_val_img, str(val_count)+'.jpg'))        
            val_count += 1
        elif x >= 182638:
            # Test images/labels are renumbered 0..N-1 in the destination.
            copyfile(os.path.join(s_label, str(idx)+'.png'), os.path.join(d_test_label, str(test_count)+'.png'))
            copyfile(os.path.join(s_img, str(idx)+'.jpg'), os.path.join(d_test_img, str(test_count)+'.jpg'))
            test_count += 1
        else:
            # copyfile(os.path.join(s_label, str(idx)+'.png'), os.path.join(d_train_label, str(train_count)+'.png'))
            # copyfile(os.path.join(s_img, str(idx)+'.jpg'), os.path.join(d_train_img, str(train_count)+'.jpg'))
            train_count += 1

    # Total should equal the number of CelebA-HQ images (30000).
    print (train_count + test_count + val_count)
    #### close the file
    # f_train.close()
    # f_val.close()
    # f_test.close()
72 | 


--------------------------------------------------------------------------------
/lib/datasets/preprocess/face/prepare_celeb.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Prepare the CelebAMask-HQ dataset: optionally merge the per-attribute
# masks into label maps, then partition images/labels into splits.

# Interpreter from a specific conda env; adjust for the local machine.
PYTHON="/data/anaconda/envs/pytorch1.6.0/bin/python"

# Step 1 (label generation) is commented out; run once if labels are missing.
# $PYTHON celebmask_label_generator.py
$PYTHON celebmask_partition.py
7 | 


--------------------------------------------------------------------------------
/lib/datasets/preprocess/lip/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/datasets/preprocess/lip/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/preprocess/mapillary/mapillary_generator.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | # check the enviroment info
 4 | nvidia-smi
 5 | # PYTHON="/root/miniconda3/bin/python"
 6 | PYTHON="/data/anaconda/envs/py35/bin/python"
 7 | 
 8 | ORI_ROOT_DIR='/msravcshare/dataset/mapillary-vista-v1.1'
 9 | SAVE_DIR='/msravcshare/dataset/cityscapes/mapillary'
10 | 
11 | mkdir -p ${SAVE_DIR}
12 | 
13 | # directly copy images
14 | # mkdir -p ${SAVE_DIR}/train
15 | # cp -r ${ORI_ROOT_DIR}/training/images ${SAVE_DIR}/train/image
16 | 
17 | # mkdir -p ${SAVE_DIR}/val
18 | # cp -r ${ORI_ROOT_DIR}/validation/images ${SAVE_DIR}/val/image
19 | 
20 | 
21 | ${PYTHON} mapillary_generator.py --ori_root_dir $ORI_ROOT_DIR \
22 |                           --save_dir $SAVE_DIR


--------------------------------------------------------------------------------
/lib/datasets/preprocess/pascal_context/pascal_context_generator.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Lang Huang(layenhuang@outlook.com)
 4 | # Pascal Context aug data generator.
 5 | 
 6 | 
 7 | from __future__ import absolute_import
 8 | from __future__ import division
 9 | from __future__ import print_function
10 | 
11 | import json
12 | import os
13 | import argparse
14 | import shutil
15 | import scipy.io as sio
16 | import cv2
17 | import numpy as np
18 | import torch
19 | 
20 | 
21 | LABEL_DIR = 'label'
22 | IMAGE_DIR = 'image'
23 | 
24 | 
class PContextGenerator(object):
    """Convert PyTorch-Encoding Pascal-Context annotations into folders.

    Reads ``<ori_root_dir>/PytorchEncoding/{train,val}.pth`` -- dicts that
    map an image id to a 2-D label mask -- plus the original images in
    ``<ori_root_dir>/JPEGImages``, and writes:

        <save_dir>/{train,val}/image/<name>.jpg
        <save_dir>/{train,val}/label/<name>.png

    Fix over the original: the train and val export loops were duplicated
    verbatim; both now go through the private ``_export_split`` helper.
    The public interface (``__init__`` / ``generate_label``) is unchanged.
    """

    def __init__(self, args, image_dir=IMAGE_DIR, label_dir=LABEL_DIR):
        self.args = args
        self.train_label_dir = os.path.join(self.args.save_dir, 'train', label_dir)
        self.val_label_dir = os.path.join(self.args.save_dir, 'val', label_dir)
        self.train_image_dir = os.path.join(self.args.save_dir, 'train', image_dir)
        self.val_image_dir = os.path.join(self.args.save_dir, 'val', image_dir)
        for directory in (self.train_label_dir, self.val_label_dir,
                          self.train_image_dir, self.val_image_dir):
            if not os.path.exists(directory):
                os.makedirs(directory)

        # Annotation dicts: {image id -> 2-D label mask}.
        self.train_mask = torch.load(os.path.join(self.args.ori_root_dir, "PytorchEncoding/train.pth"))
        self.val_mask = torch.load(os.path.join(self.args.ori_root_dir, "PytorchEncoding/val.pth"))

    def _export_split(self, masks, image_dir, label_dir):
        """Copy the jpg and write the png label mask for one split."""
        img_folder = os.path.join(self.args.ori_root_dir, 'JPEGImages')
        for basename, mask in masks.items():
            basename = str(basename)
            print(basename)
            # Ids are stored like "2008000002"; files on disk are named
            # "2008_000002.jpg" (VOC convention: year + "_" + number).
            basename = basename[:4] + "_" + basename[4:]
            filename = basename + ".jpg"
            shutil.copy(os.path.join(img_folder, filename),
                        os.path.join(image_dir, filename))
            cv2.imwrite(os.path.join(label_dir, basename + ".png"),
                        np.asarray(mask))

    def generate_label(self):
        """Export both the train and the val split."""
        self._export_split(self.train_mask, self.train_image_dir, self.train_label_dir)
        self._export_split(self.val_mask, self.val_image_dir, self.val_label_dir)
73 | 
74 | 
if __name__ == "__main__":

    # Command-line entry point: --ori_root_dir points at the Pascal-Context
    # root, --save_dir at the output root.
    parser = argparse.ArgumentParser()
    parser.add_argument('--save_dir', default=None, type=str,
                        dest='save_dir', help='The directory to save the data.')
    # Fix: the help text previously said "cityscapes"; this script
    # processes Pascal-Context data.
    parser.add_argument('--ori_root_dir', default=None, type=str,
                        dest='ori_root_dir', help='The directory of the Pascal-Context data.')

    args = parser.parse_args()

    pcontext_generator = PContextGenerator(args)
    pcontext_generator.generate_label()


--------------------------------------------------------------------------------
/lib/datasets/preprocess/pascal_context/pascal_context_generator.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Lang Huang(layenhuang@outlook.com)
 4 | # Pascal context aug data generator.
 5 | 
 6 | PYTHON="/root/miniconda3/envs/pytorch1.0/bin/python"
 7 | ORI_ROOT_DIR='/msravcshare/dataset/pascal_context/' #'/msravcshare/dataset/pcontext/'
 8 | SAVE_DIR='/msravcshare/dataset/pascal_context/' #'/msravcshare/dataset/pcontext/'
 9 | SCRIPT_DIR='/msravcshare/yuyua/code/segmentation/openseg.pytorch/lib/datasets/preprocess/pascal_context'
10 | 
11 | cd ${ORI_ROOT_DIR}
12 | 
13 | # if [ ! -f train.pth ]; then
14 | #     echo "Download training annotations"
15 | #     wget https://hangzh.s3.amazonaws.com/encoding/data/pcontext/train.pth
16 | # fi
17 | 
18 | # if [ ! -f val.pth ]; then
19 | #     echo "Download val annotations"
20 | #     wget https://hangzh.s3.amazonaws.com/encoding/data/pcontext/val.pth
21 | # fi
22 | 
23 | cd ${SCRIPT_DIR}
24 | echo "Start generation..."
25 | 
26 | python pascal_context_generator.py --ori_root_dir ${ORI_ROOT_DIR} \
27 |                            --save_dir ${SAVE_DIR}
28 | 
29 | 


--------------------------------------------------------------------------------
/lib/datasets/preprocess/pascal_voc/pascal_voc_generator.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Lang Huang(layenhuang@outlook.com)
 4 | # Pascal Context aug data generator.
 5 | 
 6 | 
 7 | from __future__ import absolute_import
 8 | from __future__ import division
 9 | from __future__ import print_function
10 | 
11 | import json
12 | import os
13 | import argparse
14 | import shutil
15 | import scipy.io as sio
16 | import cv2
17 | import numpy as np
18 | import torch
19 | 
20 | 
21 | LABEL_DIR = 'label'
22 | IMAGE_DIR = 'image'
23 | 
24 | 
class PascalVOCGenerator(object):
    """Convert PyTorch-Encoding annotations into image/label folders.

    Reads ``<ori_root_dir>/PytorchEncoding/{train,val}.pth`` (dicts mapping
    an image id to a 2-D label mask) plus the original jpg images in
    ``<ori_root_dir>/JPEGImages``, and writes
    ``<save_dir>/{train,val}/image/*.jpg`` and
    ``<save_dir>/{train,val}/label/*.png``.

    NOTE(review): this class is a verbatim copy of ``PContextGenerator`` in
    pascal_context_generator.py -- presumably intended for the VOC .pth
    files; confirm the annotation source before relying on it.
    """

    def __init__(self, args, image_dir=IMAGE_DIR, label_dir=LABEL_DIR):
        self.args = args
        self.train_label_dir = os.path.join(self.args.save_dir, 'train', label_dir)
        self.val_label_dir = os.path.join(self.args.save_dir, 'val', label_dir)
        if not os.path.exists(self.train_label_dir):
            os.makedirs(self.train_label_dir)

        if not os.path.exists(self.val_label_dir):
            os.makedirs(self.val_label_dir)

        self.train_image_dir = os.path.join(self.args.save_dir, 'train', image_dir)
        self.val_image_dir = os.path.join(self.args.save_dir, 'val', image_dir)
        if not os.path.exists(self.train_image_dir):
            os.makedirs(self.train_image_dir)

        if not os.path.exists(self.val_image_dir):
            os.makedirs(self.val_image_dir)
        
        # Annotation dicts: {image id -> 2-D label mask}.
        self.train_mask = torch.load(os.path.join(self.args.ori_root_dir, "PytorchEncoding/train.pth"))
        self.val_mask = torch.load(os.path.join(self.args.ori_root_dir, "PytorchEncoding/val.pth"))
    

    def generate_label(self):
        """Copy each split's jpgs and write its png label masks."""
        train_img_folder = os.path.join(self.args.ori_root_dir, 'JPEGImages')
        val_img_folder = os.path.join(self.args.ori_root_dir, 'JPEGImages')

        for basename, mask in self.train_mask.items():
            basename = str(basename)
            print(basename)
            # "2008000002" -> "2008_000002" (VOC-style file names).
            basename = basename[:4] + "_" + basename[4:]
            filename = basename + ".jpg"
            imgpath = os.path.join(train_img_folder, filename)
            shutil.copy(imgpath,
                        os.path.join(self.train_image_dir, filename))
            mask = np.asarray(mask)
            cv2.imwrite(os.path.join(self.train_label_dir, basename + ".png"), mask)
        
        for basename, mask in self.val_mask.items():
            basename = str(basename)
            print(basename)
            basename = basename[:4] + "_" + basename[4:]
            filename = basename + ".jpg"
            imgpath = os.path.join(val_img_folder, filename)
            shutil.copy(imgpath,
                        os.path.join(self.val_image_dir, filename))
            mask = np.asarray(mask)
            cv2.imwrite(os.path.join(self.val_label_dir, basename + ".png"), mask)
73 | 
74 | 
if __name__ == "__main__":

    # Command-line entry point: --ori_root_dir points at the Pascal VOC
    # root, --save_dir at the output root.
    parser = argparse.ArgumentParser()
    parser.add_argument('--save_dir', default=None, type=str,
                        dest='save_dir', help='The directory to save the data.')
    # Fix: the help text previously said "cityscapes".
    parser.add_argument('--ori_root_dir', default=None, type=str,
                        dest='ori_root_dir', help='The directory of the Pascal VOC data.')

    args = parser.parse_args()

    # Bug fix: the original instantiated PContextGenerator, which is not
    # defined in this module (copy-paste from pascal_context_generator.py)
    # and raised NameError at runtime.  Use the class defined above.
    voc_generator = PascalVOCGenerator(args)
    voc_generator.generate_label()


--------------------------------------------------------------------------------
/lib/datasets/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/datasets/tools/__init__.py


--------------------------------------------------------------------------------
/lib/datasets/tools/transforms.py:
--------------------------------------------------------------------------------
  1 | #!/usr/bin/env python
  2 | # -*- coding:utf-8 -*-
  3 | # Author: Donny You (youansheng@gmail.com)
  4 | 
  5 | 
  6 | from __future__ import absolute_import
  7 | from __future__ import division
  8 | from __future__ import print_function
  9 | 
 10 | import numpy as np
 11 | import torch
 12 | from PIL import Image
 13 | 
 14 | 
 15 | class Normalize(object):
 16 |     """Normalize a ``torch.tensor``
 17 | 
 18 |     Args:
 19 |         inputs (torch.tensor): tensor to be normalized.
 20 |         mean: (list): the mean of RGB
 21 |         std: (list): the std of RGB
 22 | 
 23 |     Returns:
 24 |         Tensor: Normalized tensor.
 25 |     """
 26 |     def __init__(self, div_value, mean, std):
 27 |         self.div_value = div_value
 28 |         self.mean = mean
 29 |         self.std =std
 30 | 
 31 |     def __call__(self, inputs):
 32 |         inputs = inputs.div(self.div_value)
 33 |         for t, m, s in zip(inputs, self.mean, self.std):
 34 |             t.sub_(m).div_(s)
 35 | 
 36 |         return inputs
 37 | 
 38 | 
 39 | class DeNormalize(object):
 40 |     """DeNormalize a ``torch.tensor``
 41 | 
 42 |     Args:
 43 |         inputs (torch.tensor): tensor to be normalized.
 44 |         mean: (list): the mean of RGB
 45 |         std: (list): the std of RGB
 46 | 
 47 |     Returns:
 48 |         Tensor: Normalized tensor.
 49 |     """
 50 |     def __init__(self, div_value, mean, std):
 51 |         self.div_value = div_value
 52 |         self.mean = mean
 53 |         self.std =std
 54 | 
 55 |     def __call__(self, inputs):
 56 |         result = inputs.clone()
 57 |         for i in range(result.size(0)):
 58 |             result[i, :, :] = result[i, :, :] * self.std[i] + self.mean[i]
 59 | 
 60 |         return result.mul_(self.div_value)
 61 | 
 62 | 
 63 | class ToTensor(object):
 64 |     """Convert a ``numpy.ndarray or Image`` to tensor.
 65 | 
 66 |     See ``ToTensor`` for more details.
 67 | 
 68 |     Args:
 69 |         inputs (numpy.ndarray or Image): Image to be converted to tensor.
 70 | 
 71 |     Returns:
 72 |         Tensor: Converted image.
 73 |     """
 74 |     def __call__(self, inputs):
 75 |         if isinstance(inputs, Image.Image):
 76 |             channels = len(inputs.mode)
 77 |             inputs = np.array(inputs)
 78 |             inputs = inputs.reshape(inputs.shape[0], inputs.shape[1], channels)
 79 |             inputs = torch.from_numpy(inputs.transpose(2, 0, 1))
 80 |         else:
 81 |             inputs = torch.from_numpy(inputs.transpose(2, 0, 1))
 82 | 
 83 |         return inputs.float()
 84 | 
 85 | 
 86 | class ToLabel(object):
 87 |     def __call__(self, inputs):
 88 |         return torch.from_numpy(np.array(inputs)).long()
 89 | 
 90 | 
 91 | class ReLabel(object):
 92 |     """
 93 |       255 indicate the background, relabel 255 to some value.
 94 |     """
 95 |     def __init__(self, olabel, nlabel):
 96 |         self.olabel = olabel
 97 |         self.nlabel = nlabel
 98 | 
 99 |     def __call__(self, inputs):
100 |         assert isinstance(inputs, torch.LongTensor), 'tensor needs to be LongTensor'
101 | 
102 |         inputs[inputs == self.olabel] = self.nlabel
103 |         return inputs
104 | 
105 | 
class Compose(object):
    """Chain several transforms: the output of each feeds the next."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, inputs):
        result = inputs
        for transform in self.transforms:
            result = transform(result)
        return result
116 | 
117 | 
118 | 
119 | 
120 | 


--------------------------------------------------------------------------------
/lib/extensions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/extensions/__init__.py


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/__init__.py:
--------------------------------------------------------------------------------
1 | from .functions import PAM_Module, CrissCrossAttention, CrossAttention, ca_weight, ca_map


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/_ext/__init__.py:
--------------------------------------------------------------------------------
 1 | 
 2 | from torch.utils.ffi import _wrap_function
 3 | from .__ext import lib as _lib, ffi as _ffi
 4 | 
__all__ = []
def _import_symbols(locals):
    # Re-export every symbol of the compiled cffi extension into this
    # module's namespace, wrapping callables so they accept torch tensors.
    # NOTE: mutating the dict returned by locals() only works reliably at
    # module scope -- this is the historical torch.utils.ffi idiom.
    for symbol in dir(_lib):
        fn = getattr(_lib, symbol)
        if callable(fn):
            locals[symbol] = _wrap_function(fn, _ffi)
        else:
            # Non-callables (constants) are exposed unchanged.
            locals[symbol] = fn
        __all__.append(symbol)

_import_symbols(locals())
16 | 


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/build.py:
--------------------------------------------------------------------------------
import os

from torch.utils.ffi import create_extension

# Build the Criss-Cross attention cffi extension ("_ext").
# NOTE(review): torch.utils.ffi was removed in PyTorch >= 1.0; this build
# script only works with old (0.4.x) PyTorch.

sources = ['src/lib_cffi.cpp']
headers = ['src/lib_cffi.h']
# src/ca.o must already exist -- produced by build.sh (nvcc) beforehand.
extra_objects = ['src/ca.o']
with_cuda = True

# Resolve the prebuilt object file relative to this script's directory.
this_file = os.path.dirname(os.path.realpath(__file__))
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]

ffi = create_extension(
    '_ext',
    headers=headers,
    sources=sources,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects,
    extra_compile_args=["-std=c++11"]
)

if __name__ == '__main__':
    ffi.build()
25 | 


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/build.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # Configuration
 4 | CUDA_GENCODE="\
 5 | -gencode=arch=compute_60,code=sm_60 \
 6 | -gencode=arch=compute_61,code=sm_61 \
 7 | -gencode=arch=compute_52,code=sm_52 \
 8 | -gencode=arch=compute_50,code=sm_50"
 9 | 
10 | 
11 | cd src
12 | /usr/local/cuda-8.0/bin/nvcc -I/usr/local/cuda/include --expt-extended-lambda -O3 -c -o ca.o ca.cu -x cu -Xcompiler -fPIC -std=c++11 ${CUDA_GENCODE}
13 | cd ..
14 | 


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/src/ca.h:
--------------------------------------------------------------------------------
#ifndef __CA__
#define __CA__

/*
 * Exported functions: CUDA kernel launchers for Criss-Cross attention.
 * Each takes the tensor sizes (N, C, H, W), raw float pointers, and the
 * CUDA stream to launch on, and returns an int status code.
 *   _ca_forward_cuda:      attention weights from t (query) and f (key).
 *   _ca_backward_cuda:     gradients dt/df from dw and the inputs.
 *   _ca_map_forward_cuda:  aggregate g (value) with the weights into out.
 *   _ca_map_backward_cuda: gradients dw/dg from dout, weight and g.
 */
extern "C" int _ca_forward_cuda(int N, int C, int H, int W, const float *t, const float *f, float *weight, cudaStream_t stream);
extern "C" int _ca_backward_cuda(int N, int C, int H, int W, const float *dw, const float *t, const float *f, float *dt, float *df, cudaStream_t stream);
extern "C" int _ca_map_forward_cuda(int N, int C, int H, int W, const float *weight, const float *g, float *out, cudaStream_t stream);
extern "C" int _ca_map_backward_cuda(int N, int C, int H, int W, const float *dout, const float *weight, const float *g, float *dw, float *dg, cudaStream_t stream);

#endif
13 | 


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/src/common.h:
--------------------------------------------------------------------------------
#ifndef __COMMON__
#define __COMMON__
#include <cuda_runtime_api.h>

/*
 * General settings
 */
const int WARP_SIZE = 32;
const int MAX_BLOCK_SIZE = 512;

/*
 * Utility functions
 */
// Warp-level XOR shuffle that compiles on both pre- and post-CUDA-9
// toolkits (CUDA 9 introduced the *_sync variants with an explicit
// participation mask).
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize,
                                           unsigned int mask = 0xffffffff) {
#if CUDART_VERSION >= 9000
  return __shfl_xor_sync(mask, value, laneMask, width);
#else
  return __shfl_xor(value, laneMask, width);
#endif
}

// Position of the most significant set bit of val.
// NOTE(review): undefined for val == 0 (__clz(0) yields 32 -> returns -1).
__device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); }

// Smallest power-of-two block size in {32..512} that covers nElem
// threads; falls back to MAX_BLOCK_SIZE for larger element counts.
static int getNumThreads(int nElem) {
  int threadSizes[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE};
  for (int i = 0; i != 5; ++i) {
    if (nElem <= threadSizes[i]) {
      return threadSizes[i];
    }
  }
  return MAX_BLOCK_SIZE;
}


#endif


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/src/lib_cffi.cpp:
--------------------------------------------------------------------------------
// All functions assume that input and output tensors are already initialized
// and have the correct dimensions.
// cffi bridge between the Python extension and the CUDA launchers declared
// in ca.h; relies on the legacy THC (Torch) C API.
#include <THC/THC.h>

// Forward definition of implementation functions
extern "C" {
int _ca_forward_cuda(int N, int C, int H, int W, const float *t, const float *f, float *weight, cudaStream_t);
int _ca_backward_cuda(int N, int C, int H, int W, const float *dw, const float *t, const float *f, float *dt, float *df, cudaStream_t);

int _ca_map_forward_cuda(int N, int C, int H, int W, const float *weight, const float *g, float *out, cudaStream_t);
int _ca_map_backward_cuda(int N, int C, int H, int W, const float *dout, const float *weight, const float *g, float *dw, float *dg, cudaStream_t);
}

// Global THC state provided by the hosting PyTorch process.
extern THCState *state;

// Read the NCHW sizes of a 4-D CUDA tensor into the out-parameters.
void get_sizes(const THCudaTensor *t, int *N, int *C, int *H, int *W){
  // Get sizes
  *N = THCudaTensor_size(state, t, 0);
  *C = THCudaTensor_size(state, t, 1);
  *H = THCudaTensor_size(state, t, 2);
  *W = THCudaTensor_size(state, t, 3);
}

// Forward pass: compute attention weights from t (query) and f (key).
// Sizes are taken from t; returns the launcher's status code.
extern "C" int ca_forward_cuda(const THCudaTensor *t, const THCudaTensor *f, THCudaTensor *weight) {
  cudaStream_t stream = THCState_getCurrentStream(state);

  int N, C, H, W;
  get_sizes(t, &N, &C, &H, &W);

  // Get pointers
  const float *t_data = THCudaTensor_data(state, t);
  const float *f_data = THCudaTensor_data(state, f);
  float *weight_data = THCudaTensor_data(state, weight);


  return _ca_forward_cuda(N, C, H, W, t_data, f_data, weight_data, stream);
}

// Backward pass: fill dt and df from the weight gradient dw and inputs.
extern "C" int ca_backward_cuda(const THCudaTensor *dw, const THCudaTensor *t, const THCudaTensor *f, THCudaTensor *dt, THCudaTensor *df) {
  cudaStream_t stream = THCState_getCurrentStream(state);

  int N, C, H, W;
  get_sizes(t, &N, &C, &H, &W);

  // Get pointers
  const float *dw_data = THCudaTensor_data(state, dw);
  const float *t_data = THCudaTensor_data(state, t);
  const float *f_data = THCudaTensor_data(state, f);
  float *dt_data = THCudaTensor_data(state, dt);
  float *df_data = THCudaTensor_data(state, df);


  return _ca_backward_cuda(N, C, H, W, dw_data, t_data, f_data, dt_data, df_data, stream);
}


// Aggregation forward: combine g (value) with the attention weights.
// Sizes are taken from g.
extern "C" int ca_map_forward_cuda(const THCudaTensor *weight, const THCudaTensor *g, THCudaTensor *out) {
  cudaStream_t stream = THCState_getCurrentStream(state);

  int N, C, H, W;
  get_sizes(g, &N, &C, &H, &W);

  const float *weight_data = THCudaTensor_data(state, weight);
  const float *g_data = THCudaTensor_data(state, g);
  float *out_data = THCudaTensor_data(state, out);

  return _ca_map_forward_cuda(N, C, H, W, weight_data, g_data, out_data, stream);
}


// Aggregation backward: fill dw and dg from dout, weight and g.
// Sizes are taken from dout.
extern "C" int ca_map_backward_cuda(const THCudaTensor *dout, const THCudaTensor *weight, const THCudaTensor *g,
                     THCudaTensor *dw,  THCudaTensor *dg) {
  cudaStream_t stream = THCState_getCurrentStream(state);

  int N, C, H, W;
  get_sizes(dout, &N, &C, &H, &W);

  const float *dout_data = THCudaTensor_data(state, dout);
  const float *weight_data = THCudaTensor_data(state, weight);
  const float *g_data = THCudaTensor_data(state, g);
  float *dw_data = THCudaTensor_data(state, dw);
  float *dg_data = THCudaTensor_data(state, dg);

  return _ca_map_backward_cuda(N, C, H, W, dout_data, weight_data, g_data, dw_data, dg_data, stream);
}

87 | 


--------------------------------------------------------------------------------
/lib/extensions/cc_attention/src/lib_cffi.h:
--------------------------------------------------------------------------------
// Python-facing entry points of the Criss-Cross attention extension
// (implemented in lib_cffi.cpp); all return an int status code.
int ca_forward_cuda(const THCudaTensor *t, const THCudaTensor *f, THCudaTensor *weight);

int ca_backward_cuda(const THCudaTensor *dw, const THCudaTensor *t, const THCudaTensor *f, THCudaTensor *dt, THCudaTensor *df);

int ca_map_forward_cuda(const THCudaTensor *weight, const THCudaTensor *g, THCudaTensor *out);
int ca_map_backward_cuda(const THCudaTensor *dout, const THCudaTensor *weight, const THCudaTensor *g,
                     THCudaTensor *dw,  THCudaTensor *dg);
8 | 


--------------------------------------------------------------------------------
/lib/extensions/crf/dense_crf.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | import pydensecrf.densecrf as dcrf
 3 | 
 4 | def dense_crf(img, output_probs):
 5 |     h = output_probs.shape[0]
 6 |     w = output_probs.shape[1]
 7 | 
 8 |     output_probs = np.expand_dims(output_probs, 0)
 9 |     output_probs = np.append(1 - output_probs, output_probs, axis=0)
10 | 
11 |     d = dcrf.DenseCRF2D(w, h, 2)
12 |     U = -np.log(output_probs)
13 |     U = U.reshape((2, -1))
14 |     U = np.ascontiguousarray(U)
15 |     img = np.ascontiguousarray(img)
16 | 
17 |     d.setUnaryEnergy(U)
18 | 
19 |     d.addPairwiseGaussian(sxy=20, compat=3)
20 |     d.addPairwiseBilateral(sxy=30, srgb=20, rgbim=img, compat=10)
21 | 
22 |     Q = d.inference(5)
23 |     Q = np.argmax(np.array(Q), axis=0).reshape((h, w))
24 | 
25 |     return Q
26 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/__init__.py:
--------------------------------------------------------------------------------
1 | from .modules.deform_conv import DeformConv
2 | from .modules.modulated_dcn import DeformRoIPooling, ModulatedDeformRoIPoolingPack, ModulatedDeformConv, ModulatedDeformConvPack
3 | 
4 | __all__ = ['DeformConv', 'DeformRoIPooling', 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv', 'ModulatedDeformConvPack']


--------------------------------------------------------------------------------
/lib/extensions/dcn/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/extensions/dcn/_ext/__init__.py


--------------------------------------------------------------------------------
/lib/extensions/dcn/_ext/deform_conv/__init__.py:
--------------------------------------------------------------------------------
 1 | 
 2 | from torch.utils.ffi import _wrap_function
 3 | from ._deform_conv import lib as _lib, ffi as _ffi
 4 | 
__all__ = []
def _import_symbols(locals):
    # Re-export every symbol of the compiled deform_conv cffi extension
    # into this module's namespace, wrapping callables for torch tensors.
    # NOTE: mutating locals() only works reliably at module scope; this is
    # the historical torch.utils.ffi idiom.
    for symbol in dir(_lib):
        fn = getattr(_lib, symbol)
        if callable(fn):
            locals[symbol] = _wrap_function(fn, _ffi)
        else:
            # Non-callables (constants) are exposed unchanged.
            locals[symbol] = fn
        __all__.append(symbol)

_import_symbols(locals())
16 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/_ext/modulated_dcn/__init__.py:
--------------------------------------------------------------------------------
 1 | 
 2 | from torch.utils.ffi import _wrap_function
 3 | from ._modulated_dcn import lib as _lib, ffi as _ffi
 4 | 
__all__ = []
def _import_symbols(locals):
    # Re-export every symbol of the compiled modulated_dcn cffi extension
    # into this module's namespace, wrapping callables for torch tensors.
    # NOTE: mutating locals() only works reliably at module scope; this is
    # the historical torch.utils.ffi idiom.
    for symbol in dir(_lib):
        fn = getattr(_lib, symbol)
        if callable(fn):
            locals[symbol] = _wrap_function(fn, _ffi)
        else:
            # Non-callables (constants) are exposed unchanged.
            locals[symbol] = fn
        __all__.append(symbol)

_import_symbols(locals())
16 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/build.py:
--------------------------------------------------------------------------------
import os
import torch
from torch.utils.ffi import create_extension

# Build the deformable-convolution cffi extension (_ext.deform_conv).
# NOTE(review): torch.utils.ffi was removed in PyTorch >= 1.0; this script
# only works with old (0.4.x) PyTorch.

this_file = os.path.dirname(__file__)

sources = ['src/deform_conv.c']
headers = ['src/deform_conv.h']
defines = []
with_cuda = False

if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['src/deform_conv_cuda.c']
    headers += ['src/deform_conv_cuda.h']
    defines += [('WITH_CUDA', None)]
    with_cuda = True

# Re-resolve this file's directory as an absolute path for the prebuilt
# kernel object (built first via make.sh / nvcc).
this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
extra_objects = ['src/deform_conv_cuda_kernel.cu.so']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]

ffi = create_extension(
    '_ext.deform_conv',
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects,
    extra_compile_args=['-std=c++11']
)

# Building without CUDA is unsupported; fail loudly before compiling.
assert torch.cuda.is_available(), 'Please install CUDA for GPU support.'
ffi.build()


--------------------------------------------------------------------------------
/lib/extensions/dcn/build_modulated.py:
--------------------------------------------------------------------------------
import os
import torch
from torch.utils.ffi import create_extension

# Build the modulated deformable-conv / deform-RoI-pooling cffi extension
# (_ext.modulated_dcn).
# NOTE(review): torch.utils.ffi was removed in PyTorch >= 1.0; this script
# only works with old (0.4.x) PyTorch.

sources = ['src/modulated_dcn.c']
headers = ['src/modulated_dcn.h']
defines = []
with_cuda = False

extra_objects = []
if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['src/modulated_dcn_cuda.c']
    headers += ['src/modulated_dcn_cuda.h']
    defines += [('WITH_CUDA', None)]
    # Prebuilt kernel objects -- produced by make.sh (nvcc) beforehand.
    extra_objects += ['src/cuda/modulated_deform_im2col_cuda.cu.so']
    extra_objects += ['src/cuda/deform_psroi_pooling_cuda.cu.so']
    with_cuda = True
else:
    # CPU-only build is unsupported.
    raise ValueError('CUDA is not available')

extra_compile_args = ['-fopenmp', '-std=c99']

# Resolve all paths relative to this script's directory.
this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
sources = [os.path.join(this_file, fname) for fname in sources]
headers = [os.path.join(this_file, fname) for fname in headers]
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]

ffi = create_extension(
    '_ext.modulated_dcn',
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects,
    extra_compile_args=extra_compile_args
)

if __name__ == '__main__':
    ffi.build()
44 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/functions/__init__.py:
--------------------------------------------------------------------------------
1 | from .deform_conv import DeformConvFunction, deform_conv_function
2 | from .modulated_dcn_func import DeformRoIPoolingFunction, ModulatedDeformConvFunction


--------------------------------------------------------------------------------
/lib/extensions/dcn/make.sh:
--------------------------------------------------------------------------------
 1 | PYTHON="/root/miniconda3/bin/python"
 2 | 
 3 | cd src
 4 | /usr/local/cuda-8.0/bin/nvcc -c -o deform_conv_cuda_kernel.cu.so deform_conv_cuda_kernel.cu -x cu -Xcompiler -fPIC -std=c++11
 5 | 
 6 | cd cuda
 7 | 
 8 | # compile modulated deform conv
 9 | /usr/local/cuda-8.0/bin/nvcc -c -o modulated_deform_im2col_cuda.cu.so modulated_deform_im2col_cuda.cu -x cu -Xcompiler -fPIC
10 | 
11 | # compile deform-psroi-pooling
12 | /usr/local/cuda-8.0/bin/nvcc -c -o deform_psroi_pooling_cuda.cu.so deform_psroi_pooling_cuda.cu -x cu -Xcompiler -fPIC
13 | 
14 | cd ../..
15 | CC=g++ ${PYTHON} build.py
16 | ${PYTHON} build_modulated.py
17 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/make_p100.sh:
--------------------------------------------------------------------------------
 1 | PYTHON="/data/anaconda/envs/py35/bin/python"
 2 | 
 3 | cd src
 4 | /usr/bin/nvcc -c -o deform_conv_cuda_kernel.cu.so deform_conv_cuda_kernel.cu -x cu -Xcompiler -fPIC -std=c++11
 5 | 
 6 | cd cuda
 7 | 
 8 | # compile modulated deform conv
 9 | /usr/bin/nvcc -c -o modulated_deform_im2col_cuda.cu.so modulated_deform_im2col_cuda.cu -x cu -Xcompiler -fPIC
10 | 
11 | # compile deform-psroi-pooling
12 | /usr/bin/nvcc -c -o deform_psroi_pooling_cuda.cu.so deform_psroi_pooling_cuda.cu -x cu -Xcompiler -fPIC
13 | 
14 | cd ../..
15 | CC=g++ ${PYTHON} build.py
16 | ${PYTHON} build_modulated.py
17 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/modules/__init__.py:
--------------------------------------------------------------------------------
1 | from .deform_conv import DeformConv
2 | from .modulated_dcn import DeformRoIPooling, ModulatedDeformConv, ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack


--------------------------------------------------------------------------------
/lib/extensions/dcn/modules/deform_conv.py:
--------------------------------------------------------------------------------
 1 | import math
 2 | 
 3 | import torch
 4 | import torch.nn as nn
 5 | from torch.nn.modules.module import Module
 6 | from torch.nn.modules.utils import _pair
 7 | from lib.extensions.dcn.functions import deform_conv_function
 8 | 
 9 | 
class DeformConv(Module):
    """Deformable convolution layer (no bias term).

    Thin module wrapper around ``deform_conv_function``: the kernel's
    sampling locations are displaced by the per-position ``offset``
    tensor supplied at forward time.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 num_deformable_groups=1):
        super(DeformConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalise every spatial argument to an (h, w) pair.
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.num_deformable_groups = num_deformable_groups

        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))

        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)]."""
        fan_in = self.in_channels
        for dim in self.kernel_size:
            fan_in *= dim
        bound = 1. / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)

    def forward(self, input, offset):
        return deform_conv_function(
            input, offset, self.weight,
            self.stride, self.padding, self.dilation,
            self.num_deformable_groups)
44 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/cuda/deform_psroi_pooling_cuda.h:
--------------------------------------------------------------------------------
/*!
 * Copyright (c) 2017 Microsoft
 * Licensed under The MIT License [see LICENSE for details]
 * \file deformable_psroi_pooling.cu
 * \brief
 * \author Yi Li, Guodong Zhang, Jifeng Dai
*/
/***************** Adapted by Charles Shang *********************/

#ifndef DCN_V2_PSROI_POOLING_CUDA
#define DCN_V2_PSROI_POOLING_CUDA

#ifdef __cplusplus
extern "C"
{
#endif

    /* Forward pass of deformable position-sensitive RoI pooling.
     * Writes the pooled result to `out`; `top_count` is an auxiliary
     * buffer that is recorded here and consumed again by the backward
     * pass below. */
    void DeformablePSROIPoolForward(cudaStream_t stream,
                                    const float *data,
                                    const float *bbox,
                                    const float *trans,
                                    float *out,
                                    float *top_count,
                                    const int batch,
                                    const int channels,
                                    const int height,
                                    const int width,
                                    const int num_bbox,
                                    const int channels_trans,
                                    const int no_trans,
                                    const float spatial_scale,
                                    const int output_dim,
                                    const int group_size,
                                    const int pooled_size,
                                    const int part_size,
                                    const int sample_per_part,
                                    const float trans_std);

    /* Backward pass: produces `in_grad` and `trans_grad` from the output
     * gradient.  NOTE(review): the "Acc" suffix suggests gradients are
     * accumulated into the buffers rather than overwritten -- confirm
     * against the .cu implementation. */
    void DeformablePSROIPoolBackwardAcc(cudaStream_t stream,
                                        const float *out_grad,
                                        const float *data,
                                        const float *bbox,
                                        const float *trans,
                                        const float *top_count,
                                        float *in_grad,
                                        float *trans_grad,
                                        const int batch,
                                        const int channels,
                                        const int height,
                                        const int width,
                                        const int num_bbox,
                                        const int channels_trans,
                                        const int no_trans,
                                        const float spatial_scale,
                                        const int output_dim,
                                        const int group_size,
                                        const int pooled_size,
                                        const int part_size,
                                        const int sample_per_part,
                                        const float trans_std);

#ifdef __cplusplus
}
#endif

#endif


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/deform_conv.c:
--------------------------------------------------------------------------------
 1 | #include <TH/TH.h>
 2 | 
/* CPU placeholder for the deformable-convolution forward pass.  The real
 * implementation is CUDA-only (src/deform_conv_cuda.c); this stub keeps
 * the FFI symbol table complete and reports success (1) without touching
 * any tensor.  The commented-out lines are a leftover skeleton. */
int deform_conv_forward(THFloatTensor *input, THFloatTensor *offset,
                        THFloatTensor *output)
{
  // if (!THFloatTensor_isSameSizeAs(input1, input2))
    // return 0;
  // THFloatTensor_resizeAs(output, input);
  // THFloatTensor_cadd(output, input1, 1.0, input2);
  return 1;
}
12 | 
/* CPU placeholder for the deformable-convolution backward pass; like the
 * forward stub, it reports success (1) without computing any gradients.
 * Only the CUDA path (src/deform_conv_cuda.c) is functional. */
int deform_conv_backward(THFloatTensor *grad_output, THFloatTensor *grad_input,
                         THFloatTensor *grad_offset)
{
  // THFloatTensor_resizeAs(grad_input, grad_output);
  // THFloatTensor_fill(grad_input, 1);
  return 1;
}
20 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/deform_conv.h:
--------------------------------------------------------------------------------
/* Declarations of the CPU stubs in src/deform_conv.c.  This header is
 * fed to torch.utils.ffi (cffi) via build.py, which does not run a full
 * C preprocessor -- hence no includes or include guards here. */
int deform_conv_forward(THFloatTensor *input, THFloatTensor *offset,
                        THFloatTensor *output);
int deform_conv_backward(THFloatTensor *grad_output, THFloatTensor *grad_input,
                         THFloatTensor *grad_offset);
5 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/deform_conv_cuda.h:
--------------------------------------------------------------------------------
/* CUDA entry points for deformable convolution (legacy THC tensors).
 * All functions return an int status code.  NOTE(review): `columns` and
 * `ones` look like caller-provided scratch buffers and `im2col_step` the
 * number of images processed per im2col batch -- confirm against the .c
 * implementation. */

/* Forward pass (bias support is compiled out, see commented parameter). */
int deform_conv_forward_cuda(THCudaTensor *input,
                             THCudaTensor *weight, /*THCudaTensor * bias, */
                             THCudaTensor *offset, THCudaTensor *output,
                             THCudaTensor *columns, THCudaTensor *ones, int kW,
                             int kH, int dW, int dH, int padW, int padH,
                             int dilationW, int dilationH,
                             int deformable_group, int im2col_step);

/* Gradients w.r.t. the input and the sampling offsets. */
int deform_conv_backward_input_cuda(
    THCudaTensor *input, THCudaTensor *offset, THCudaTensor *gradOutput,
    THCudaTensor *gradInput, THCudaTensor *gradOffset, THCudaTensor *weight,
    THCudaTensor *columns, int kW, int kH, int dW, int dH, int padW, int padH,
    int dilationW, int dilationH, int deformable_group, int im2col_step);

/* Gradient w.r.t. the weights, scaled by `scale`. */
int deform_conv_backward_parameters_cuda(
    THCudaTensor *input, THCudaTensor *offset, THCudaTensor *gradOutput,
    THCudaTensor *gradWeight, /*THCudaTensor *gradBias, */
    THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH,
    int padW, int padH, int dilationW, int dilationH, int deformable_group,
    float scale, int im2col_step);
21 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/deform_conv_cuda_kernel.h:
--------------------------------------------------------------------------------
/* Declarations of the CUDA kernels behind deformable convolution
 * (definitions live in deform_conv_cuda_kernel.cu). */

/* im2col with learned offsets: gather input patches sampled at the
 * offset-shifted kernel locations into the column buffer `data_col`. */
template <typename DType>
void deformable_im2col(cudaStream_t stream, const DType *data_im,
                       const DType *data_offset, const int channels,
                       const int height, const int width, const int ksize_h,
                       const int ksize_w, const int pad_h, const int pad_w,
                       const int stride_h, const int stride_w,
                       const int dilation_h, const int dilation_w,
                       const int parallel_imgs,
                       const int deformable_group, DType *data_col);

/* Inverse scatter: accumulate column-space gradients back into the
 * image-space gradient `grad_im`. */
template <typename DType>
void deformable_col2im(cudaStream_t stream, const DType *data_col,
                       const DType *data_offset, const int channels,
                       const int height, const int width, const int ksize_h,
                       const int ksize_w, const int pad_h, const int pad_w,
                       const int stride_h, const int stride_w,
                       const int dilation_h, const int dilation_w,
                       const int parallel_imgs,
                       const int deformable_group, DType *grad_im);

/* Gradient w.r.t. the sampling offsets (`grad_offset`). */
template <typename DType>
void deformable_col2im_coord(cudaStream_t stream, const DType *data_col,
                             const DType *data_im, const DType *data_offset,
                             const int channels, const int height,
                             const int width, const int ksize_h,
                             const int ksize_w, const int pad_h,
                             const int pad_w, const int stride_h,
                             const int stride_w, const int dilation_h,
                             const int dilation_w, const int parallel_imgs,
                             const int deformable_group, DType *grad_offset);
31 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/modulated_dcn.c:
--------------------------------------------------------------------------------
 1 | #include <TH/TH.h>
 2 | #include <stdio.h>
 3 | #include <math.h>
 4 | 
 5 | void modulated_deform_conv_forward(THFloatTensor *input, THFloatTensor *weight,
 6 |                         THFloatTensor *bias, THFloatTensor *ones,
 7 |                         THFloatTensor *offset, THFloatTensor *mask,
 8 |                         THFloatTensor *output, THFloatTensor *columns,
 9 |                         const int pad_h, const int pad_w,
10 |                         const int stride_h, const int stride_w,
11 |                         const int dilation_h, const int dilation_w,
12 |                         const int deformable_group)
13 | {
14 |     printf("only implemented in GPU");
15 | }
16 |     void modulated_deform_conv_backward(THFloatTensor *input, THFloatTensor *weight,
17 |                          THFloatTensor *bias, THFloatTensor *ones,
18 |                          THFloatTensor *offset, THFloatTensor *mask,
19 |                          THFloatTensor *output, THFloatTensor *columns,
20 |                          THFloatTensor *grad_input, THFloatTensor *grad_weight,
21 |                          THFloatTensor *grad_bias, THFloatTensor *grad_offset,
22 |                          THFloatTensor *grad_mask, THFloatTensor *grad_output,
23 |                          int kernel_h, int kernel_w,
24 |                          int stride_h, int stride_w,
25 |                          int pad_h, int pad_w,
26 |                          int dilation_h, int dilation_w,
27 |                          int deformable_group)
28 | {
29 |     printf("only implemented in GPU");
30 | }


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/modulated_dcn.h:
--------------------------------------------------------------------------------
/* Declarations of the CPU stubs in src/modulated_dcn.c, which only print
 * a message: the modulated (v2) deformable-convolution operator is
 * GPU-only (see modulated_dcn_cuda.h).  Header is consumed by
 * torch.utils.ffi (cffi) via build_modulated.py, hence no includes or
 * include guards. */
void modulated_deform_conv_forward(THFloatTensor *input, THFloatTensor *weight,
                        THFloatTensor *bias, THFloatTensor *ones,
                        THFloatTensor *offset, THFloatTensor *mask,
                        THFloatTensor *output, THFloatTensor *columns,
                        const int pad_h, const int pad_w,
                        const int stride_h, const int stride_w,
                        const int dilation_h, const int dilation_w,
                        const int deformable_group);
void modulated_deform_conv_backward(THFloatTensor *input, THFloatTensor *weight,
                        THFloatTensor *bias, THFloatTensor *ones,
                        THFloatTensor *offset, THFloatTensor *mask,
                        THFloatTensor *output, THFloatTensor *columns,
                        THFloatTensor *grad_input, THFloatTensor *grad_weight,
                        THFloatTensor *grad_bias, THFloatTensor *grad_offset,
                        THFloatTensor *grad_mask, THFloatTensor *grad_output,
                        int kernel_h, int kernel_w,
                        int stride_h, int stride_w,
                        int pad_h, int pad_w,
                        int dilation_h, int dilation_w,
                        int deformable_group);


--------------------------------------------------------------------------------
/lib/extensions/dcn/src/modulated_dcn_cuda.h:
--------------------------------------------------------------------------------
// #ifndef DCN_V2_CUDA
// #define DCN_V2_CUDA

// #ifdef __cplusplus
// extern "C"
// {
// #endif

/* Forward pass of modulated (v2) deformable convolution.  `mask` holds
 * the per-location modulation values.  NOTE(review): `columns` and
 * `ones` look like caller-provided scratch buffers -- confirm against
 * modulated_dcn_cuda.c. */
void modulated_deform_conv_cuda_forward(THCudaTensor *input, THCudaTensor *weight,
                         THCudaTensor *bias, THCudaTensor *ones,
                         THCudaTensor *offset, THCudaTensor *mask,
                         THCudaTensor *output, THCudaTensor *columns,
                         int kernel_h, int kernel_w,
                         const int stride_h, const int stride_w,
                         const int pad_h, const int pad_w,
                         const int dilation_h, const int dilation_w,
                         const int deformable_group);
/* Backward pass: fills the grad_* tensors from `grad_output`. */
void modulated_deform_conv_cuda_backward(THCudaTensor *input, THCudaTensor *weight,
                          THCudaTensor *bias, THCudaTensor *ones,
                          THCudaTensor *offset, THCudaTensor *mask,
                          THCudaTensor *columns,
                          THCudaTensor *grad_input, THCudaTensor *grad_weight,
                          THCudaTensor *grad_bias, THCudaTensor *grad_offset,
                          THCudaTensor *grad_mask, THCudaTensor *grad_output,
                          int kernel_h, int kernel_w,
                          int stride_h, int stride_w,
                          int pad_h, int pad_w,
                          int dilation_h, int dilation_w,
                          int deformable_group);

/* Deformable position-sensitive RoI pooling, forward: wraps the kernels
 * declared in src/cuda/deform_psroi_pooling_cuda.h.  `top_count` is
 * recorded here and consumed again by the backward pass. */
void deform_psroi_pooling_cuda_forward(THCudaTensor * input, THCudaTensor * bbox,
                                       THCudaTensor * trans, 
                                       THCudaTensor * out, THCudaTensor * top_count,
                                       const int no_trans,
                                       const float spatial_scale,
                                       const int output_dim,
                                       const int group_size,
                                       const int pooled_size,
                                       const int part_size,
                                       const int sample_per_part,
                                       const float trans_std);

/* Deformable PSRoI pooling, backward: produces `input_grad` and
 * `trans_grad` from `out_grad` using the saved `top_count`. */
void deform_psroi_pooling_cuda_backward(THCudaTensor * out_grad,
                                        THCudaTensor * input, THCudaTensor * bbox,
                                        THCudaTensor * trans, THCudaTensor * top_count,
                                        THCudaTensor * input_grad, THCudaTensor * trans_grad,
                                        const int no_trans,
                                        const float spatial_scale,
                                        const int output_dim,
                                        const int group_size,
                                        const int pooled_size,
                                        const int part_size,
                                        const int sample_per_part,
                                        const float trans_std);
55 | 


--------------------------------------------------------------------------------
/lib/extensions/dcn/test.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

from modules import DeformConv

# GPU smoke test for DeformConv: a plain Conv2d predicts the sampling
# offsets, the deformable convolution consumes them, and one backward
# pass exercises the CUDA gradients.  Requires a CUDA device and uses
# the pre-1.0 Variable API.

num_deformable_groups = 2

N, inC, inH, inW = 2, 6, 512, 512
outC, outH, outW = 4, 512, 512
kH, kW = 3, 3

# Offset predictor: 2 (x, y) offsets per kernel position per group.
conv = nn.Conv2d(
    inC,
    num_deformable_groups * 2 * kH * kW,
    kernel_size=(kH, kW),
    stride=(1, 1),
    padding=(1, 1),
    bias=False).cuda()

conv_offset2d = DeformConv(
    inC,
    outC, (kH, kW),
    stride=1,
    padding=1,
    num_deformable_groups=num_deformable_groups).cuda()

inputs = Variable(torch.randn(N, inC, inH, inW).cuda(), requires_grad=True)
offset = conv(inputs)
#offset = Variable(torch.randn(N, num_deformable_groups * 2 * kH * kW, inH, inW).cuda(), requires_grad=True)
output = conv_offset2d(inputs, offset)
# Backward with the output itself as the incoming gradient: exercises
# the backward kernels without needing a scalar loss.
output.backward(output.data)
print(output.size())
35 | 


--------------------------------------------------------------------------------
/lib/extensions/frn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/extensions/frn/__init__.py


--------------------------------------------------------------------------------
/lib/extensions/frn/frn.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.nn as nn
 3 | 
 4 | class FilterResponseNormalization(nn.Module):
 5 |     def __init__(self, beta, gamma, tau, eps=1e-6):
 6 |         """
 7 |         Input Variables:
 8 |         ----------------
 9 |             beta, gamma, tau: Variables of shape [1, C, 1, 1].
10 |             eps: A scalar constant or learnable variable.
11 |         """
12 | 
13 |         super(FilterResponseNormalization, self).__init__()
14 |         self.beta = beta
15 |         self.gamma = gamma
16 |         self.tau = tau
17 |         self.eps = torch.Tensor([eps])
18 | 
19 |     def forward(self, x):
20 |         """
21 |         Input Variables:
22 |         ----------------
23 |             x: Input tensor of shape [NxCxHxW]
24 |         """
25 | 
26 |         n, c, h, w = x.shape
27 |         assert (self.gamma.shape[1], self.beta.shape[1], self.tau.shape[1]) == (c, c, c)
28 | 
29 |         # Compute the mean norm of activations per channel
30 |         nu2 = torch.mean(x.pow(2), (2,3), keepdims=True)
31 |         # Perform FRN
32 |         x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
33 |         # Return after applying the Offset-ReLU non-linearity
34 |         return torch.max(self.gamma*x + self.beta, self.tau)


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/extensions/inplace_abn/__init__.py


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn/src/common.h:
--------------------------------------------------------------------------------
  1 | #pragma once
  2 | 
  3 | #include <cuda_runtime_api.h>
  4 | 
  5 | /*
  6 |  * General settings
  7 |  */
// Number of threads per CUDA warp (32 on all current NVIDIA GPUs).
const int WARP_SIZE = 32;
// Upper bound on the block size chosen by getNumThreads() below.
const int MAX_BLOCK_SIZE = 512;

// Pair of values with component-wise '+=': lets a single reduction pass
// accumulate two quantities at once (see warpSum(Pair<T>) below).
template<typename T>
struct Pair {
  T v1, v2;
  __device__ Pair() {}
  __device__ Pair(T _v1, T _v2) : v1(_v1), v2(_v2) {}
  __device__ Pair(T v) : v1(v), v2(v) {}
  // int overload keeps e.g. Pair<float> p(0) unambiguous.
  __device__ Pair(int v) : v1(v), v2(v) {}
  __device__ Pair &operator+=(const Pair<T> &a) {
    v1 += a.v1;
    v2 += a.v2;
    return *this;
  }
};
 24 | 
 25 | /*
 26 |  * Utility functions
 27 |  */
// XOR warp shuffle that compiles on both old and new CUDA: CUDA >= 9
// requires the *_sync variant with an explicit participation mask.
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize,
                                           unsigned int mask = 0xffffffff) {
#if CUDART_VERSION >= 9000
  return __shfl_xor_sync(mask, value, laneMask, width);
#else
  return __shfl_xor(value, laneMask, width);
#endif
}

// Index of the most significant set bit of val (expects val > 0).
__device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); }

// Smallest block size from the table that covers nElem elements,
// capped at MAX_BLOCK_SIZE.
static int getNumThreads(int nElem) {
  int threadSizes[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE};
  for (int i = 0; i != 5; ++i) {
    if (nElem <= threadSizes[i]) {
      return threadSizes[i];
    }
  }
  return MAX_BLOCK_SIZE;
}
 49 | 
// Sum `val` across all lanes of a warp.  On sm_30+ this is a butterfly
// reduction over XOR shuffles (log2(WARP_SIZE) steps); the pre-sm_30
// fallback stages each lane's value in shared memory and has every lane
// walk the 32 slots of its own warp.
template<typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
  }
#else
  __shared__ T values[MAX_BLOCK_SIZE];
  values[threadIdx.x] = val;
  __threadfence_block();
  const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
  for (int i = 1; i < WARP_SIZE; i++) {
    val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
  }
#endif
  return val;
}

// Component-wise warp sum of a Pair (both members reduced independently).
template<typename T>
static __device__ __forceinline__ Pair<T> warpSum(Pair<T> value) {
  value.v1 = warpSum(value.v1);
  value.v2 = warpSum(value.v2);
  return value;
}
 74 | 
// Block-wide reduction of op(batch, plane, x) over all batches (N) and
// positions (S) for one fixed plane index.  Strategy:
//   1. each thread accumulates a blockDim-strided partial sum,
//   2. the lanes of every warp combine via warpSum,
//   3. warp leaders stage their sums in shared memory and warp 0
//      reduces them, publishing the total through shared[0].
// Every thread of the block returns the same final value.
// (C is unused here; presumably the plane count, kept for symmetry with
// the Op signature -- confirm at the call sites.)
template <typename T, typename Op>
__device__ T reduce(Op op, int plane, int N, int C, int S) {
  T sum = (T)0;
  for (int batch = 0; batch < N; ++batch) {
    for (int x = threadIdx.x; x < S; x += blockDim.x) {
      sum += op(batch, plane, x);
    }
  }

  // sum over NumThreads within a warp
  sum = warpSum(sum);

  // 'transpose', and reduce within warp again
  __shared__ T shared[32];
  __syncthreads();
  if (threadIdx.x % WARP_SIZE == 0) {
    // One slot per warp, written by each warp's lane 0.
    shared[threadIdx.x / WARP_SIZE] = sum;
  }
  if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
    // zero out the other entries in shared
    shared[threadIdx.x] = (T)0;
  }
  __syncthreads();
  if (threadIdx.x / WARP_SIZE == 0) {
    sum = warpSum(shared[threadIdx.x]);
    if (threadIdx.x == 0) {
      shared[0] = sum;
    }
  }
  __syncthreads();

  // Everyone picks it up, should be broadcast into the whole gradInput
  return shared[0];
}


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn/src/inplace_abn.cpp:
--------------------------------------------------------------------------------
 1 | #include <torch/torch.h>
 2 | 
 3 | #include <vector>
 4 | 
 5 | #include "inplace_abn.h"
 6 | 
 7 | std::vector<at::Tensor> mean_var(at::Tensor x) {
 8 |   if (x.is_cuda()) {
 9 |     return mean_var_cuda(x);
10 |   } else {
11 |     return mean_var_cpu(x);
12 |   }
13 | }
14 | 
15 | at::Tensor forward(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias,
16 |                    bool affine, float eps) {
17 |   if (x.is_cuda()) {
18 |     return forward_cuda(x, mean, var, weight, bias, affine, eps);
19 |   } else {
20 |     return forward_cpu(x, mean, var, weight, bias, affine, eps);
21 |   }
22 | }
23 | 
24 | std::vector<at::Tensor> edz_eydz(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias,
25 |                                  bool affine, float eps) {
26 |   if (z.is_cuda()) {
27 |     return edz_eydz_cuda(z, dz, weight, bias, affine, eps);
28 |   } else {
29 |     return edz_eydz_cpu(z, dz, weight, bias, affine, eps);
30 |   }
31 | }
32 | 
33 | std::vector<at::Tensor> backward(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias,
34 |                                  at::Tensor edz, at::Tensor eydz, bool affine, float eps) {
35 |   if (z.is_cuda()) {
36 |     return backward_cuda(z, dz, var, weight, bias, edz, eydz, affine, eps);
37 |   } else {
38 |     return backward_cpu(z, dz, var, weight, bias, edz, eydz, affine, eps);
39 |   }
40 | }
41 | 
// In-place leaky ReLU; at::leaky_relu_ already dispatches per device,
// so no CPU/CUDA branch is needed here.
void leaky_relu_forward(at::Tensor z, float slope) {
  at::leaky_relu_(z, slope);
}
45 | 
46 | void leaky_relu_backward(at::Tensor z, at::Tensor dz, float slope) {
47 |   if (z.is_cuda()) {
48 |     return leaky_relu_backward_cuda(z, dz, slope);
49 |   } else {
50 |     return leaky_relu_backward_cpu(z, dz, slope);
51 |   }
52 | }
53 | 
// In-place ELU; at::elu_ already dispatches per device.
void elu_forward(at::Tensor z) {
  at::elu_(z);
}
57 | 
58 | void elu_backward(at::Tensor z, at::Tensor dz) {
59 |   if (z.is_cuda()) {
60 |     return elu_backward_cuda(z, dz);
61 |   } else {
62 |     return elu_backward_cpu(z, dz);
63 |   }
64 | }
65 | 
// Python bindings: expose the device-dispatching wrappers above as the
// compiled extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("mean_var", &mean_var, "Mean and variance computation");
  m.def("forward", &forward, "In-place forward computation");
  m.def("edz_eydz", &edz_eydz, "First part of backward computation");
  m.def("backward", &backward, "Second part of backward computation");
  m.def("leaky_relu_forward", &leaky_relu_forward, "Leaky relu forward computation");
  m.def("leaky_relu_backward", &leaky_relu_backward, "Leaky relu backward computation and inversion");
  m.def("elu_forward", &elu_forward, "Elu forward computation");
  m.def("elu_backward", &elu_backward, "Elu backward computation and inversion");
}


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn/src/inplace_abn.h:
--------------------------------------------------------------------------------
 1 | #pragma once
 2 | 
 3 | #include <ATen/ATen.h>
 4 | 
 5 | #include <vector>
 6 | 
// Per-device implementations behind the dispatchers in inplace_abn.cpp;
// each operation comes in a _cpu and a _cuda flavour with an identical
// signature.

// Mean and variance computation.
std::vector<at::Tensor> mean_var_cpu(at::Tensor x);
std::vector<at::Tensor> mean_var_cuda(at::Tensor x);

// In-place forward computation.
at::Tensor forward_cpu(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias,
                       bool affine, float eps);
at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias,
                        bool affine, float eps);

// First part of the backward computation.
std::vector<at::Tensor> edz_eydz_cpu(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias,
                                     bool affine, float eps);
std::vector<at::Tensor> edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias,
                                      bool affine, float eps);

// Second part of the backward computation.
std::vector<at::Tensor> backward_cpu(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias,
                                     at::Tensor edz, at::Tensor eydz, bool affine, float eps);
std::vector<at::Tensor> backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias,
                                      at::Tensor edz, at::Tensor eydz, bool affine, float eps);

// Activation backward passes (computation and inversion, per the
// bindings in inplace_abn.cpp).
void leaky_relu_backward_cpu(at::Tensor z, at::Tensor dz, float slope);
void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope);

void elu_backward_cpu(at::Tensor z, at::Tensor dz);
void elu_backward_cuda(at::Tensor z, at::Tensor dz);


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn_1/__init__.py:
--------------------------------------------------------------------------------
1 | from .bn import ABN, InPlaceABN, InPlaceABNSync
2 | from .functions import ACT_RELU, ACT_LEAKY_RELU, ACT_ELU, ACT_NONE
3 | 


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn_1/misc.py:
--------------------------------------------------------------------------------
 1 | import torch.nn as nn
 2 | import torch
 3 | import torch.distributed as dist
 4 | 
 5 | class GlobalAvgPool2d(nn.Module):
 6 |     def __init__(self):
 7 |         """Global average pooling over the input's spatial dimensions"""
 8 |         super(GlobalAvgPool2d, self).__init__()
 9 | 
10 |     def forward(self, inputs):
11 |         in_size = inputs.size()
12 |         return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
13 | 
class SingleGPU(nn.Module):
    """Wrapper that moves every input onto the current CUDA device before
    delegating to the wrapped module."""

    def __init__(self, module):
        super(SingleGPU, self).__init__()
        # The wrapped module; assumed to already live on the GPU -- TODO confirm.
        self.module=module

    def forward(self, input):
        # non_blocking=True lets the host-to-device copy overlap with compute
        # when the source tensor is in pinned memory.
        return self.module(input.cuda(non_blocking=True))
21 | 
22 | 


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn_1/src/checks.h:
--------------------------------------------------------------------------------
 1 | #pragma once
 2 | 
 3 | #include <ATen/ATen.h>
 4 | 
 5 | // Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT
 6 | #ifndef AT_CHECK
 7 | #define AT_CHECK AT_ASSERT
 8 | #endif
 9 | 
10 | #define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor")
11 | #define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor")
12 | #define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous")
13 | 
14 | #define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
15 | #define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x)


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn_1/src/inplace_abn.cpp:
--------------------------------------------------------------------------------
 1 | #include <torch/extension.h>
 2 | 
 3 | #include <vector>
 4 | 
 5 | #include "inplace_abn.h"
 6 | 
 7 | std::vector<at::Tensor> mean_var(at::Tensor x) {
 8 |   if (x.is_cuda()) {
 9 |     if (x.type().scalarType() == at::ScalarType::Half) {
10 |       return mean_var_cuda_h(x);
11 |     } else {
12 |       return mean_var_cuda(x);
13 |     }
14 |   } else {
15 |     return mean_var_cpu(x);
16 |   }
17 | }
18 | 
19 | at::Tensor forward(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias,
20 |                    bool affine, float eps) {
21 |   if (x.is_cuda()) {
22 |     if (x.type().scalarType() == at::ScalarType::Half) {
23 |       return forward_cuda_h(x, mean, var, weight, bias, affine, eps);
24 |     } else {
25 |       return forward_cuda(x, mean, var, weight, bias, affine, eps);
26 |     }
27 |   } else {
28 |     return forward_cpu(x, mean, var, weight, bias, affine, eps);
29 |   }
30 | }
31 | 
32 | std::vector<at::Tensor> edz_eydz(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias,
33 |                                  bool affine, float eps) {
34 |   if (z.is_cuda()) {
35 |     if (z.type().scalarType() == at::ScalarType::Half) {
36 |       return edz_eydz_cuda_h(z, dz, weight, bias, affine, eps);
37 |     } else {
38 |       return edz_eydz_cuda(z, dz, weight, bias, affine, eps);
39 | 	}
40 |   } else {
41 |     return edz_eydz_cpu(z, dz, weight, bias, affine, eps);
42 |   }
43 | }
44 | 
45 | at::Tensor backward(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias,
46 |                                  at::Tensor edz, at::Tensor eydz, bool affine, float eps) {
47 |   if (z.is_cuda()) {
48 |     if (z.type().scalarType() == at::ScalarType::Half) {
49 |       return backward_cuda_h(z, dz, var, weight, bias, edz, eydz, affine, eps);
50 | 	} else {
51 |       return backward_cuda(z, dz, var, weight, bias, edz, eydz, affine, eps);
52 |     }
53 |   } else {
54 |     return backward_cpu(z, dz, var, weight, bias, edz, eydz, affine, eps);
55 |   }
56 | }
57 | 
// Applies leaky ReLU to z in place, delegating directly to ATen.
void leaky_relu_forward(at::Tensor z, float slope) {
  at::leaky_relu_(z, slope);
}
61 | 
62 | void leaky_relu_backward(at::Tensor z, at::Tensor dz, float slope) {
63 |   if (z.is_cuda()) {
64 |     if (z.type().scalarType() == at::ScalarType::Half) {
65 |       return leaky_relu_backward_cuda_h(z, dz, slope);
66 | 	} else {
67 |       return leaky_relu_backward_cuda(z, dz, slope);
68 |     }
69 |   } else {
70 |     return leaky_relu_backward_cpu(z, dz, slope);
71 |   }
72 | }
73 | 
// Applies ELU to z in place, delegating directly to ATen.
void elu_forward(at::Tensor z) {
  at::elu_(z);
}
77 | 
78 | void elu_backward(at::Tensor z, at::Tensor dz) {
79 |   if (z.is_cuda()) {
80 |     return elu_backward_cuda(z, dz);
81 |   } else {
82 |     return elu_backward_cpu(z, dz);
83 |   }
84 | }
85 | 
// Python bindings: expose the dispatchers above as the extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("mean_var", &mean_var, "Mean and variance computation");
  m.def("forward", &forward, "In-place forward computation");
  m.def("edz_eydz", &edz_eydz, "First part of backward computation");
  m.def("backward", &backward, "Second part of backward computation");
  m.def("leaky_relu_forward", &leaky_relu_forward, "Leaky relu forward computation");
  m.def("leaky_relu_backward", &leaky_relu_backward, "Leaky relu backward computation and inversion");
  m.def("elu_forward", &elu_forward, "Elu forward computation");
  m.def("elu_backward", &elu_backward, "Elu backward computation and inversion");
}
96 | 


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn_1/src/utils/checks.h:
--------------------------------------------------------------------------------
 1 | #pragma once
 2 | 
 3 | #include <ATen/ATen.h>
 4 | 
 5 | // Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT
 6 | #ifndef AT_CHECK
 7 | #define AT_CHECK AT_ASSERT
 8 | #endif
 9 | 
10 | #define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor")
11 | #define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor")
12 | #define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous")
13 | 
14 | #define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
15 | #define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x)


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn_1/src/utils/common.h:
--------------------------------------------------------------------------------
#pragma once

#include <ATen/ATen.h>

/*
 * Functions to share code between CPU and GPU
 */

#ifdef __CUDACC__
// CUDA versions

#define HOST_DEVICE __host__ __device__
#define INLINE_HOST_DEVICE __host__ __device__ inline
#define FLOOR(x) floor(x)

#if __CUDA_ARCH__ >= 600
// Recent compute capabilities have block-level atomicAdd for all data types, so we use that
#define ACCUM(x,y) atomicAdd_block(&(x),(y))
#else
// Older architectures don't have block-level atomicAdd, nor atomicAdd for doubles, so we defer to atomicAdd for float
// and use the known atomicCAS-based implementation for double
template<typename data_t>
__device__ inline data_t atomic_add(data_t *address, data_t val) {
  return atomicAdd(address, val);
}

// Double specialization: emulate atomicAdd with a compare-and-swap loop on
// the 64-bit integer representation of the value.
template<>
__device__ inline double atomic_add(double *address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    // Retry until no other thread modified *address between read and CAS.
    old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}

#define ACCUM(x,y) atomic_add(&(x),(y))
#endif // #if __CUDA_ARCH__ >= 600

#else
// CPU versions

#define HOST_DEVICE
#define INLINE_HOST_DEVICE inline
#define FLOOR(x) std::floor(x)
// Plain (non-atomic) accumulation: CPU callers are assumed single-threaded
// per accumulator -- TODO confirm.
#define ACCUM(x,y) (x) += (y)

#endif // #ifdef __CUDACC__


--------------------------------------------------------------------------------
/lib/extensions/inplace_abn_1/src/utils/cuda.cuh:
--------------------------------------------------------------------------------
#pragma once

/*
 * General settings and functions
 */
const int WARP_SIZE = 32;
const int MAX_BLOCK_SIZE = 1024;

// Smallest candidate block size (32..MAX_BLOCK_SIZE) that covers nElem;
// falls back to MAX_BLOCK_SIZE for larger inputs.
static int getNumThreads(int nElem) {
  int threadSizes[6] = {32, 64, 128, 256, 512, MAX_BLOCK_SIZE};
  for (int i = 0; i < 6; ++i) {
    if (nElem <= threadSizes[i]) {
      return threadSizes[i];
    }
  }
  return MAX_BLOCK_SIZE;
}
18 | 
/*
 * Reduction utilities
 */
// XOR warp shuffle compatible with both pre- and post-CUDA-9 toolkits
// (CUDA 9 introduced the *_sync variants that take a lane participation mask).
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize,
                                           unsigned int mask = 0xffffffff) {
#if CUDART_VERSION >= 9000
  return __shfl_xor_sync(mask, value, laneMask, width);
#else
  return __shfl_xor(value, laneMask, width);
#endif
}

// Index of the most significant set bit of val.
__device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); }
33 | 
// Pair of values that can be reduced (summed/shuffled) as a single unit.
template<typename T>
struct Pair {
  T v1, v2;
  __device__ Pair() {}
  __device__ Pair(T _v1, T _v2) : v1(_v1), v2(_v2) {}
  __device__ Pair(T v) : v1(v), v2(v) {}
  __device__ Pair(int v) : v1(v), v2(v) {}
  // Component-wise accumulation, used by the warp reduction below.
  __device__ Pair &operator+=(const Pair<T> &a) {
    v1 += a.v1;
    v2 += a.v2;
    return *this;
  }
};
47 | 
// Sum val across all lanes of the calling warp. On sm_30+ this is a
// log2(WARP_SIZE)-step XOR-shuffle butterfly; older architectures fall back
// to a shared-memory exchange.
template<typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
  }
#else
  __shared__ T values[MAX_BLOCK_SIZE];
  values[threadIdx.x] = val;
  __threadfence_block();
  // Each lane reads the other 31 slots of its own warp's segment, rotated by
  // its lane id to spread the bank accesses.
  const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
  for (int i = 1; i < WARP_SIZE; i++) {
    val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
  }
#endif
  return val;
}

// Component-wise warp sum for a Pair.
template<typename T>
static __device__ __forceinline__ Pair<T> warpSum(Pair<T> value) {
  value.v1 = warpSum(value.v1);
  value.v2 = warpSum(value.v2);
  return value;
}


--------------------------------------------------------------------------------
/lib/extensions/pacnet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/extensions/pacnet/__init__.py


--------------------------------------------------------------------------------
/lib/extensions/parallel/__init__.py:
--------------------------------------------------------------------------------
from .data_container import DataContainer
from .distributed import MMDistributedDataParallel
from .scatter_gather import scatter, scatter_kwargs

# Only names this package actually exports. The previous list also contained
# 'collate' and 'MMDataParallel', which are never imported here, so
# `from lib.extensions.parallel import *` raised AttributeError.
__all__ = [
    'DataContainer', 'MMDistributedDataParallel',
    'scatter', 'scatter_kwargs'
]
9 | 


--------------------------------------------------------------------------------
/lib/extensions/parallel/_functions.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | from torch.nn.parallel._functions import _get_stream
 3 | 
 4 | 
 5 | def scatter(input, devices, streams=None):
 6 |     """Scatters tensor across multiple GPUs.
 7 |     """
 8 |     if streams is None:
 9 |         streams = [None] * len(devices)
10 | 
11 |     if isinstance(input, list):
12 |         chunk_size = (len(input) - 1) // len(devices) + 1
13 |         outputs = [
14 |             scatter(input[i], [devices[i // chunk_size]],
15 |                     [streams[i // chunk_size]]) for i in range(len(input))
16 |         ]
17 |         return outputs
18 |     elif isinstance(input, torch.Tensor):
19 |         output = input.contiguous()
20 |         # TODO: copy to a pinned buffer first (if copying from CPU)
21 |         stream = streams[0] if output.numel() > 0 else None
22 |         with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
23 |             output = output.cuda(devices[0], non_blocking=True)
24 |         return output
25 |     else:
26 |         raise Exception('Unknown type {}.'.format(type(input)))
27 | 
28 | 
def synchronize_stream(output, devices, streams):
    """Make each device's main stream wait for its background copy stream."""
    if isinstance(output, torch.Tensor):
        if output.numel() != 0:
            with torch.cuda.device(devices[0]):
                current = torch.cuda.current_stream()
                current.wait_stream(streams[0])
                # Keep the tensor's memory alive until the main stream is done.
                output.record_stream(current)
    elif isinstance(output, list):
        chunk_size = len(output) // len(devices)
        for dev_idx, device in enumerate(devices):
            for offset in range(chunk_size):
                synchronize_stream(output[dev_idx * chunk_size + offset],
                                   [device], [streams[dev_idx]])
    else:
        raise Exception('Unknown type {}.'.format(type(output)))
44 | 
45 | 
def get_input_device(input):
    """Return the CUDA device index of the first GPU tensor found, else -1."""
    if isinstance(input, torch.Tensor):
        return input.get_device() if input.is_cuda else -1
    if isinstance(input, list):
        for element in input:
            device = get_input_device(element)
            if device != -1:
                return device
        return -1
    raise Exception('Unknown type {}.'.format(type(input)))
57 | 
58 | 
class Scatter(object):
    """Namespace for the DataContainer-aware scatter entry point."""

    @staticmethod
    def forward(target_gpus, input):
        # CPU-resident inputs are copied to the GPUs on background streams so
        # the transfers can overlap; GPU inputs need no extra streams.
        streams = None
        if get_input_device(input) == -1:
            streams = [_get_stream(device) for device in target_gpus]

        outputs = scatter(input, target_gpus, streams)
        if streams is not None:
            # Block each device's main stream until its copy has landed.
            synchronize_stream(outputs, target_gpus, streams)

        return tuple(outputs)
75 | 


--------------------------------------------------------------------------------
/lib/extensions/parallel/data_container.py:
--------------------------------------------------------------------------------
 1 | import functools
 2 | 
 3 | import torch
 4 | 
 5 | 
 6 | def assert_tensor_type(func):
 7 | 
 8 |     @functools.wraps(func)
 9 |     def wrapper(*args, **kwargs):
10 |         if not isinstance(args[0].data, torch.Tensor):
11 |             raise AttributeError('{} has no attribute {} for type {}'.format(
12 |                 args[0].__class__.__name__, func.__name__, args[0].datatype))
13 |         return func(*args, **kwargs)
14 | 
15 |     return wrapper
16 | 
17 | 
class DataContainer(object):
    """Container that lets collate/scatter treat its payload specially.

    Stacking tensors in collate and slicing them in scatter requires every
    tensor to be the same size and restricts the element types (numpy array
    or Tensor). Wrapping data in a ``DataContainer`` (together with
    ``MMDataParallel``) lifts those restrictions; depending on the flags the
    payload is either

    - copied to GPU, padded to a common size and stacked,
    - copied to GPU without stacking, or
    - left as-is and passed straight to the model.
    """

    def __init__(self, data, stack=False, padding_value=0, cpu_only=False):
        self._data = data
        self._cpu_only = cpu_only
        self._stack = stack
        self._padding_value = padding_value

    def __repr__(self):
        payload = repr(self.data)
        return '{}({})'.format(self.__class__.__name__, payload)

    @property
    def data(self):
        return self._data

    @property
    def datatype(self):
        # Tensors report their full torch type string; anything else its class.
        if isinstance(self.data, torch.Tensor):
            return self.data.type()
        return type(self.data)

    @property
    def cpu_only(self):
        return self._cpu_only

    @property
    def stack(self):
        return self._stack

    @property
    def padding_value(self):
        return self._padding_value

    # The helpers below only make sense for tensor payloads.
    @assert_tensor_type
    def size(self, *args, **kwargs):
        return self.data.size(*args, **kwargs)

    @assert_tensor_type
    def dim(self):
        return self.data.dim()

    @assert_tensor_type
    def numel(self):
        return self.data.numel()


--------------------------------------------------------------------------------
/lib/extensions/parallel/distributed.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.distributed as dist
 3 | import torch.nn as nn
 4 | from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
 5 |                           _take_tensors)
 6 | 
 7 | from .scatter_gather import scatter_kwargs
 8 | 
 9 | 
class MMDistributedDataParallel(nn.Module):
    """Minimal distributed wrapper: broadcasts parameters/buffers from rank 0
    at construction and scatters inputs to the current device on forward.

    Unlike ``torch.nn.parallel.DistributedDataParallel`` it does no gradient
    synchronization; it only handles initial weight sync and input scatter.
    """

    def __init__(self, module, dim=0, broadcast_buffers=True,
                 bucket_cap_mb=25):
        super(MMDistributedDataParallel, self).__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers

        # Coalesce broadcasts into buckets of at most this many bytes.
        self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024
        self._sync_params()

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        """Broadcast `tensors` from rank 0 in flattened size-bounded buckets."""
        # Renamed loop variable: the original shadowed the `tensors` argument.
        for bucket in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(bucket)
            dist.broadcast(flat_tensors, 0)
            for tensor, synced in zip(
                    bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
                tensor.copy_(synced)

    def _sync_params(self):
        """Copy rank 0's parameters (and optionally buffers) to all ranks."""
        module_states = list(self.module.state_dict().values())
        if len(module_states) > 0:
            self._dist_broadcast_coalesced(module_states,
                                           self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # `Module._all_buffers()` was removed from PyTorch; the public
            # `buffers()` iterator is the supported equivalent.
            buffers = [b.data for b in self.module.buffers()]
            if len(buffers) > 0:
                self._dist_broadcast_coalesced(buffers,
                                               self.broadcast_bucket_size)

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        # Scatter onto this process's current device only.
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])
48 | 


--------------------------------------------------------------------------------
/lib/extensions/parallel/scatter_gather.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | from torch.nn.parallel._functions import Scatter as OrigScatter
 3 | 
 4 | from lib.extensions.parallel.data_container import DataContainer
 5 | from lib.extensions.parallel._functions import Scatter
 6 | 
 7 | 
 8 | def scatter(inputs, target_gpus, dim=0):
 9 |     """Scatter inputs to target gpus.
10 | 
11 |     The only difference from original :func:`scatter` is to add support for
12 |     :type:`~mmcv.parallel.DataContainer`.
13 |     """
14 | 
15 |     def scatter_map(obj):
16 |         if isinstance(obj, torch.Tensor):
17 |             return OrigScatter.apply(target_gpus, None, dim, obj)
18 |         if isinstance(obj, DataContainer):
19 |             if obj.cpu_only:
20 |                 return obj.data
21 |             else:
22 |                 return Scatter.forward(target_gpus, obj.data)
23 |         if isinstance(obj, tuple) and len(obj) > 0:
24 |             return list(zip(*map(scatter_map, obj)))
25 |         if isinstance(obj, list) and len(obj) > 0:
26 |             out = list(map(list, zip(*map(scatter_map, obj))))
27 |             return out
28 |         if isinstance(obj, dict) and len(obj) > 0:
29 |             out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
30 |             return out
31 | 
32 |         return [obj for targets in target_gpus]
33 | 
34 |     # After scatter_map is called, a scatter_map cell will exist. This cell
35 |     # has a reference to the actual function scatter_map, which has references
36 |     # to a closure that has a reference to the scatter_map cell (because the
37 |     # fn is recursive). To avoid this reference cycle, we set the function to
38 |     # None, clearing the cell
39 |     try:
40 |         return scatter_map(inputs)
41 |     finally:
42 |         scatter_map = None
43 | 
44 | 
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter with support for kwargs dictionary"""
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter side so every GPU receives an (args, kwargs) pair.
    if len(scattered_inputs) < len(scattered_kwargs):
        scattered_inputs += [() for _ in range(len(scattered_kwargs) - len(scattered_inputs))]
    elif len(scattered_kwargs) < len(scattered_inputs):
        scattered_kwargs += [{} for _ in range(len(scattered_inputs) - len(scattered_kwargs))]
    return tuple(scattered_inputs), tuple(scattered_kwargs)
56 | 


--------------------------------------------------------------------------------
/lib/extensions/switchablenorms/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/extensions/switchablenorms/__init__.py


--------------------------------------------------------------------------------
/lib/extensions/syncbn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/extensions/syncbn/__init__.py


--------------------------------------------------------------------------------
/lib/extensions/syncbn/allreduce.py:
--------------------------------------------------------------------------------
 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 | ## Created by: Hang Zhang
 3 | ## ECE Department, Rutgers University
 4 | ## Email: zhang.hang@rutgers.edu
 5 | ## Copyright (c) 2017
 6 | ##
 7 | ## This source code is licensed under the MIT-style license found in the
 8 | ## LICENSE file in the root directory of this source tree
 9 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
10 | 
11 | """Encoding Data Parallel"""
12 | import torch
13 | from torch.autograd import Variable, Function
14 | import torch.cuda.comm as comm
15 | 
16 | 
# First three characters of the torch version string, e.g. "1.1".
# NOTE(review): this truncation misreads two-digit minor versions such as
# "1.10" -- confirm wherever torch_ver is consumed.
torch_ver = torch.__version__[:3]
18 | 
19 | 
def allreduce(*inputs):
    """Cross GPU all reduce autograd operation for calculate mean and
    variance in SyncBN.

    Thin wrapper over ``AllReduce.apply``; expects the same flat argument
    list (group size first, then the per-GPU tensors).
    """
    return AllReduce.apply(*inputs)
25 | 
26 | 
class AllReduce(Function):
    # Autograd op that reduce-adds groups of `num_inputs` tensors across GPUs
    # and broadcasts each summed group back to every participating device.
    @staticmethod
    def forward(ctx, num_inputs, *inputs):
        ctx.num_inputs = num_inputs
        # One entry per GPU: device of the first tensor of each group.
        ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]
        # Regroup the flat argument list into per-GPU chunks of num_inputs.
        inputs = [inputs[i:i + num_inputs]
                 for i in range(0, len(inputs), num_inputs)]
        # sort before reduce sum
        inputs = sorted(inputs, key=lambda i: i[0].get_device())
        results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
        outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
        return tuple([t for tensors in outputs for t in tensors])

    @staticmethod
    def backward(ctx, *inputs):
        # Gradient of sum-then-broadcast is again sum-then-broadcast of the
        # incoming gradients; None corresponds to the num_inputs argument.
        inputs = [i.data for i in inputs]
        inputs = [inputs[i:i + ctx.num_inputs]
                 for i in range(0, len(inputs), ctx.num_inputs)]
        results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
        outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
        return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])
48 | 


--------------------------------------------------------------------------------
/lib/extensions/syncbn/src/common.h:
--------------------------------------------------------------------------------
#include <cuda.h>
#include <cuda_runtime.h>

// Threads per warp on all current NVIDIA architectures.
static const unsigned WARP_SIZE = 32;

// The maximum number of threads in a block
static const unsigned MAX_BLOCK_SIZE = 512U;

// Single point of conversion between the storage type and the accumulation
// type used inside the kernels.
template<typename In, typename Out>
struct ScalarConvert {
  static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; }
};
13 | 
14 | // Number of threads in a block given an input size up to MAX_BLOCK_SIZE
15 | static int getNumThreads(int nElem) {
16 |   int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };
17 |   for (int i = 0; i != 5; ++i) {
18 |     if (nElem <= threadSizes[i]) {
19 |       return threadSizes[i];
20 |     }
21 |   }
22 |   return MAX_BLOCK_SIZE;
23 | }
24 | 
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
  return 31 - __clz(val);
}

// XOR warp shuffle compatible with both pre- and post-CUDA-9 toolkits
// (CUDA 9 introduced the *_sync variants that take a lane participation mask).
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
{
#if CUDA_VERSION >= 9000
    return __shfl_xor_sync(mask, value, laneMask, width);
#else
    return __shfl_xor(value, laneMask, width);
#endif
}
39 | 
// Sum across all threads within a warp
// On sm_30+ this is a log2(WARP_SIZE)-step XOR-shuffle butterfly; older
// architectures fall back to a shared-memory exchange.
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
  }
#else
  __shared__ T values[MAX_BLOCK_SIZE];
  values[threadIdx.x] = val;
  __threadfence_block();
  // Each lane reads the other 31 slots of its warp's segment, rotated by its
  // lane id to spread the shared-memory accesses.
  const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
  for (int i = 1; i < WARP_SIZE; i++) {
    val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
  }
#endif
  return val;
}
58 | 
// Pair of accumulators held in the higher-precision Acctype while the
// underlying data is stored as DType.
template <typename DType, typename Acctype>
struct Float2 {
  Acctype v1, v2;
  __device__ Float2() {}
  __device__ Float2(DType v1, DType v2) : v1(ScalarConvert<DType, Acctype>::to(v1)), v2(ScalarConvert<DType, Acctype>::to(v2)) {}
  __device__ Float2(DType v) : v1(ScalarConvert<DType, Acctype>::to(v)), v2(ScalarConvert<DType, Acctype>::to(v)) {}
  __device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {}
  // Component-wise accumulation, used by the warp reduction below.
  __device__ Float2& operator+=(const Float2& a) {
    v1 += a.v1;
    v2 += a.v2;
    return *this;
  }
};

// Component-wise warp sum for a Float2.
template <typename DType, typename Acctype>
static __device__ __forceinline__ Float2<DType, Acctype> warpSum(Float2<DType, Acctype> value) {
  value.v1 = warpSum(value.v1);
  value.v2 = warpSum(value.v2);
  return value;
}
79 | 
80 | 


--------------------------------------------------------------------------------
/lib/extensions/syncbn/src/device_tensor.h:
--------------------------------------------------------------------------------
  1 | #include <ATen/ATen.h>
  2 | 
// Minimal N-dimensional tensor view usable from both host and device code:
// a raw data pointer plus per-dimension extents. Strides are not tracked, so
// the underlying storage is assumed contiguous -- callers must guarantee it.
template<typename DType, int Dim>
struct DeviceTensor {
 public:
  inline __device__ __host__ DeviceTensor(DType *p, const int *size)
    : dptr_(p) {
    for (int i = 0; i < Dim; ++i) {
      // A null `size` zeroes the extents; callers may fill size_ afterwards.
      size_[i] = size ? size[i] : 0;
    }
  }

  inline __device__ __host__ unsigned getSize(const int i) const {
    assert(i < Dim);
    return size_[i];
  }

  // Total number of elements (product of all extents).
  inline __device__ __host__ int numElements() const {
    int n = 1;
    for (int i = 0; i < Dim; ++i) {
      n *= size_[i];
    }
    return n;
  }

  // View of slice `x` along the leading dimension (drops one rank).
  inline __device__ __host__ DeviceTensor<DType, Dim-1> select(const size_t x) const {
    assert(Dim > 1);
    int offset = x;
    for (int i = 1; i < Dim; ++i) {
      offset *= size_[i];
    }
    DeviceTensor<DType, Dim-1> tensor(dptr_ + offset, nullptr);
    for (int i = 0; i < Dim - 1; ++i) {
      tensor.size_[i] = this->size_[i+1];
    }
    return tensor;
  }

  // Identical to select(); provided for subscript syntax.
  inline __device__ __host__ DeviceTensor<DType, Dim-1> operator[](const size_t x) const {
    assert(Dim > 1);
    int offset = x;
    for (int i = 1; i < Dim; ++i) {
      offset *= size_[i];
    }
    DeviceTensor<DType, Dim-1> tensor(dptr_ + offset, nullptr);
    for (int i = 0; i < Dim - 1; ++i) {
      tensor.size_[i] = this->size_[i+1];
    }
    return tensor;
  }

  // Product of extents from dimension 2 onward (e.g. H*W for an NCHW tensor).
  inline __device__ __host__ size_t InnerSize() const {
    assert(Dim >= 3);
    size_t sz = 1;
    for (size_t i = 2; i < Dim; ++i) {
      sz *= size_[i];
    }
    return sz;
  }

  // Extent of dimension 1 (the channel dimension for NCHW layouts).
  inline __device__ __host__ size_t ChannelCount() const {
    assert(Dim >= 3);
    return size_[1];
  }

  inline __device__ __host__ DType* data_ptr() const {
    return dptr_;
  }

  DType *dptr_;
  int size_[Dim];
};
 73 | 
// 1-D specialization: operator[] yields a scalar reference instead of a
// lower-rank view.
template<typename DType>
struct DeviceTensor<DType, 1> {
  inline __device__ __host__ DeviceTensor(DType *p, const int *size)
    : dptr_(p) {
    size_[0] = size ? size[0] : 0;
  }

  inline __device__ __host__ unsigned getSize(const int i) const {
    assert(i == 0);
    return size_[0];
  }

  inline __device__ __host__ int numElements() const {
    return size_[0];
  }

  inline __device__ __host__ DType &operator[](const size_t x) const {
      return *(dptr_ + x);
  }

  inline __device__ __host__ DType* data_ptr() const {
    return dptr_;
  }

  DType *dptr_;
  int size_[1];
};
101 | 
// Wrap an ATen tensor in a DeviceTensor view (no copy). Strides are ignored,
// so `blob` must be contiguous -- callers are responsible for ensuring this.
// NOTE(review): Tensor::data<T>() is deprecated in newer ATen in favour of
// data_ptr<T>(); confirm against the torch version this extension targets.
template<typename DType, int Dim>
static DeviceTensor<DType, Dim> devicetensor(const at::Tensor &blob) {
  DType *data = blob.data<DType>();
  DeviceTensor<DType, Dim> tensor(data, nullptr);
  for (int i = 0; i < Dim; ++i) {
    tensor.size_[i] = blob.size(i);
  }
  return tensor;
}
111 | 


--------------------------------------------------------------------------------
/lib/extensions/syncbn/src/operator.cpp:
--------------------------------------------------------------------------------
1 | #include "operator.h"
2 | 
// Python bindings for the synchronized-BatchNorm CUDA ops declared in
// operator.h. These four functions are the module's entire public surface.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("batchnorm_forward", &BatchNorm_Forward_CUDA, "BatchNorm forward (CUDA)");
  m.def("batchnorm_backward", &BatchNorm_Backward_CUDA, "BatchNorm backward (CUDA)");
  m.def("sumsquare_forward", &Sum_Square_Forward_CUDA, "SumSqu forward (CUDA)");
  m.def("sumsquare_backward", &Sum_Square_Backward_CUDA, "SumSqu backward (CUDA)");
}
9 | 


--------------------------------------------------------------------------------
/lib/extensions/syncbn/src/operator.h:
--------------------------------------------------------------------------------
#include <torch/torch.h>
#include <vector>

// Declarations for the CUDA synchronized-BatchNorm ops, bound to Python in
// operator.cpp. Tensors are taken by value (at::Tensor is a handle type).

// Normalize `input_` with per-channel `mean_`/`std_`, then scale by `gamma_`
// and shift by `beta_`; returns the normalized tensor.
at::Tensor BatchNorm_Forward_CUDA(
  const at::Tensor input_, 
  const at::Tensor mean_,
  const at::Tensor std_,
  const at::Tensor gamma_,
  const at::Tensor beta_);

// Backward pass; `train` selects training-mode gradients.
// Returns gradients — presumably {input, mean, std, gamma, beta} order,
// mirroring the CPU stub in syncbn_cpu.cpp (verify against the .cu kernel).
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
  const at::Tensor gradoutput_,
  const at::Tensor input_,
  const at::Tensor mean_, 
  const at::Tensor std_,
  const at::Tensor gamma_,
  const at::Tensor beta_, 
  bool train);

// Per-channel sum and sum-of-squares of `input_`; returns {sum, square}.
std::vector<at::Tensor> Sum_Square_Forward_CUDA(
  const at::Tensor input_);

// Gradient of the sum / sum-of-squares op w.r.t. `input_`.
at::Tensor Sum_Square_Backward_CUDA(
  const at::Tensor input_,
  const at::Tensor gradSum_,
  const at::Tensor gradSquare_);
28 | 


--------------------------------------------------------------------------------
/lib/extensions/syncbn/src/syncbn_cpu.cpp:
--------------------------------------------------------------------------------
 1 | #include <ATen/ATen.h>
 2 | #include <vector>
 3 | 
 4 | at::Tensor broadcast_to(at::Tensor v, at::Tensor x) {
 5 |   if (x.ndimension() == 2) {
 6 |     return v;
 7 |   } else {
 8 |     std::vector<int64_t> broadcast_size = {1, -1};
 9 |     for (int64_t i = 2; i < x.ndimension(); ++i)
10 |       broadcast_size.push_back(1);
11 |      return v.view(broadcast_size);
12 |   }
13 | }
14 | at::Tensor BatchNorm_Forward_CPU(
15 |     const at::Tensor input,
16 |     const at::Tensor mean,
17 |     const at::Tensor std,
18 |     const at::Tensor gamma,
19 |     const at::Tensor beta) {
20 |   auto output = (input - broadcast_to(mean, input)) / broadcast_to(std, input);
21 |   output = output * broadcast_to(gamma, input) + broadcast_to(beta, input);
22 |   return output;
23 | }
24 | 
25 | // Not implementing CPU backward for now
26 | std::vector<at::Tensor> BatchNorm_Backward_CPU(
27 |     const at::Tensor gradoutput,
28 |     const at::Tensor input,
29 |     const at::Tensor mean,
30 |     const at::Tensor std,
31 |     const at::Tensor gamma,
32 |     const at::Tensor beta,
33 |     bool train) {
34 |   /* outputs*/
35 |   at::Tensor gradinput = at::zeros_like(input);
36 |   at::Tensor gradgamma = at::zeros_like(gamma);
37 |   at::Tensor gradbeta = at::zeros_like(beta);
38 |   at::Tensor gradMean = at::zeros_like(mean);
39 |   at::Tensor gradStd = at::zeros_like(std);
40 |   return {gradinput, gradMean, gradStd, gradgamma, gradbeta};
41 | }
42 | 
43 | std::vector<at::Tensor> Sum_Square_Forward_CPU(
44 |     const at::Tensor input) {
45 |   /* outputs */
46 |   at::Tensor sum = input.type().tensor({input.size(1)}).zero_();
47 |   at::Tensor square = input.type().tensor({input.size(1)}).zero_();
48 |   return {sum, square};
49 | }
50 | 
51 | at::Tensor Sum_Square_Backward_CPU(
52 |     const at::Tensor input,
53 |     const at::Tensor gradSum,
54 |     const at::Tensor gradSquare) {
55 |   /* outputs */
56 |   at::Tensor gradInput = at::zeros_like(input);
57 |   return gradInput;
58 | }


--------------------------------------------------------------------------------
/lib/loss/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/loss/__init__.py


--------------------------------------------------------------------------------
/lib/loss/loss_manager.py:
--------------------------------------------------------------------------------
 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 | ## Created by: DonnyYou, RainbowSecret, JingyiXie, JianyuanGuo
 3 | ## Microsoft Research
 4 | ## yuyua@microsoft.com
 5 | ## Copyright (c) 2019
 6 | ##
 7 | ## This source code is licensed under the MIT-style license found in the
 8 | ## LICENSE file in the root directory of this source tree 
 9 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
10 | 
11 | 
12 | from __future__ import absolute_import
13 | from __future__ import division
14 | from __future__ import print_function
15 | 
16 | from lib.loss.loss_helper import FSAuxOhemCELoss, FSOhemCELoss
17 | from lib.loss.loss_helper import FSCELoss, FSAuxCELoss
18 | from lib.loss.loss_helper import SegFixLoss
19 | 
20 | from lib.utils.tools.logger import Logger as Log
21 | from lib.utils.distributed import is_distributed
22 | 
23 | 
# Registry mapping the 'loss.loss_type' config value to its loss class;
# consumed by LossManager.get_seg_loss below.
SEG_LOSS_DICT = {
    'fs_ce_loss': FSCELoss,
    'fs_ohemce_loss': FSOhemCELoss,
    'fs_auxce_loss': FSAuxCELoss,
    'fs_auxohemce_loss': FSAuxOhemCELoss,
    'segfix_loss': SegFixLoss,
}
31 | 
32 | 
class LossManager(object):
    """Builds the configured segmentation loss and wraps it for parallel use."""

    def __init__(self, configer):
        self.configer = configer

    def _parallel(self, loss):
        """Wrap `loss` for multi-GPU data-parallel training.

        Distributed runs use the loss as-is; otherwise, when loss balancing is
        enabled and more than one GPU is configured, the loss is wrapped in
        DataParallelCriterion.
        """
        if is_distributed():
            Log.info('use distributed loss')
            return loss

        balance = self.configer.get('network', 'loss_balance')
        if balance and len(self.configer.get('gpu')) > 1:
            Log.info('use DataParallelCriterion loss')
            from lib.extensions.parallel.data_parallel import DataParallelCriterion
            return DataParallelCriterion(loss)

        return loss

    def get_seg_loss(self, loss_type=None):
        """Instantiate the loss named by `loss_type` (default: the config's
        'loss.loss_type'); exits the process on an unknown name."""
        key = loss_type if loss_type is not None else self.configer.get('loss', 'loss_type')
        if key not in SEG_LOSS_DICT:
            Log.error('Loss: {} not valid!'.format(key))
            exit(1)
        Log.info('use loss: {}.'.format(key))
        return self._parallel(SEG_LOSS_DICT[key](self.configer))
57 | 
58 | 
59 | 


--------------------------------------------------------------------------------
/lib/metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/metrics/__init__.py


--------------------------------------------------------------------------------
/lib/metrics/ade20k_evaluator.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Donny You(youansheng@gmail.com)
 4 | 
 5 | 
 6 | import argparse
 7 | import os
 8 | import pdb
 9 | 
10 | import numpy as np
11 | 
12 | from lib.utils.helpers.image_helper import ImageHelper
13 | from lib.utils.tools.logger import Logger as Log
14 | from lib.utils.tools.configer import Configer
15 | from lib.metrics.running_score import RunningScore
16 | 
17 | 
class ADE20KEvaluator(object):
    """Offline mIoU / pixel-accuracy evaluation over directories of label maps."""

    def __init__(self, configer):
        self.configer = configer
        self.seg_running_score = RunningScore(configer)

    def relabel(self, labelmap):
        """Shift labels down by one; the uint8 cast makes original 0 wrap to 255."""
        return (labelmap - 1).astype(np.uint8)

    def evaluate(self, pred_dir, gt_dir):
        """Score every prediction image in `pred_dir` against its ground truth
        of the same filename in `gt_dir`, then log mIoU and pixel accuracy."""
        img_cnt = 0
        for filename in os.listdir(pred_dir):
            print(filename)

            pred = ImageHelper.img2np(ImageHelper.read_image(
                os.path.join(pred_dir, filename), tool='pil', mode='P'))
            gt = ImageHelper.img2np(ImageHelper.read_image(
                os.path.join(gt_dir, filename), tool='pil', mode='P'))

            # These datasets store 1-based labels on disk.
            if "pascal_context" in gt_dir or "ade" in gt_dir or "coco_stuff" in gt_dir:
                pred, gt = self.relabel(pred), self.relabel(gt)

            # COCO-Stuff: treat (relabeled) class 0 as the 255 ignore label.
            if "coco_stuff" in gt_dir:
                gt[gt == 0] = 255

            self.seg_running_score.update(pred[np.newaxis, :, :], gt[np.newaxis, :, :])
            img_cnt += 1

        Log.info('Evaluate {} images'.format(img_cnt))
        Log.info('mIOU: {}'.format(self.seg_running_score.get_mean_iou()))
        Log.info('Pixel ACC: {}'.format(self.seg_running_score.get_pixel_acc()))
49 | 
50 | 
# CLI entry point: evaluate a directory of predicted label maps against GT.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--configs', default=None, type=str,
                        dest='configs', help='The configs file of pose.')
    parser.add_argument('--gt_dir', default=None, type=str,
                        dest='gt_dir', help='The groundtruth annotations.')
    parser.add_argument('--pred_dir', default=None, type=str,
                        dest='pred_dir', help='The label dir of predict annotations.')
    args = parser.parse_args()

    ade20k_evaluator = ADE20KEvaluator(Configer(configs=args.configs))
    ade20k_evaluator.evaluate(args.pred_dir, args.gt_dir)


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/metrics/cityscapes/__init__.py


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/evaluation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/metrics/cityscapes/evaluation/__init__.py


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/evaluation/addToConfusionMatrix.pyx:
--------------------------------------------------------------------------------
 1 | # cython methods to speed-up evaluation
 2 | 
 3 | import numpy as np
 4 | cimport cython
 5 | cimport numpy as np
 6 | import ctypes
 7 | 
 8 | np.import_array()
 9 | 
10 | cdef extern from "addToConfusionMatrix_impl.c":
11 | 	void addToConfusionMatrix( const unsigned char* f_prediction_p  ,
12 |                                const unsigned char* f_groundTruth_p ,
13 |                                const unsigned int   f_width_i       ,
14 |                                const unsigned int   f_height_i      ,
15 |                                unsigned long long*  f_confMatrix_p  ,
16 |                                const unsigned int   f_confMatDim_i  )
17 | 
18 | 
# Wrap a C (size x size) uint64 buffer as a 2-D numpy array without copying.
cdef tonumpyarray(unsigned long long* data, unsigned long long size):
	# NOTE(review): `size >= 0` is always true for an unsigned type; the
	# effective guard is the non-null `data` check.
	if not (data and size >= 0): raise ValueError
	return np.PyArray_SimpleNewFromData(2, [size, size], np.NPY_UINT64, <void*>data)
22 | 
@cython.boundscheck(False)
def cEvaluatePair( np.ndarray[np.uint8_t , ndim=2] predictionArr   ,
                   np.ndarray[np.uint8_t , ndim=2] groundTruthArr  ,
                   np.ndarray[np.uint64_t, ndim=2] confMatrix      ,
                   evalLabels                                    ):
	"""Accumulate one (prediction, ground-truth) image pair into confMatrix.

	Returns a fresh copy of the updated confusion matrix. `evalLabels` is
	not used in this function (kept for API compatibility with callers).
	"""
	# C-contiguous views so the C routine can walk raw buffers linearly.
	cdef np.ndarray[np.uint8_t    , ndim=2, mode="c"] predictionArr_c
	cdef np.ndarray[np.uint8_t    , ndim=2, mode="c"] groundTruthArr_c
	cdef np.ndarray[np.ulonglong_t, ndim=2, mode="c"] confMatrix_c

	predictionArr_c  = np.ascontiguousarray(predictionArr , dtype=np.uint8    )
	groundTruthArr_c = np.ascontiguousarray(groundTruthArr, dtype=np.uint8    )
	confMatrix_c     = np.ascontiguousarray(confMatrix    , dtype=np.ulonglong)

	# NOTE(review): shape[1]/shape[0] are passed to the C function in
	# (width, height) parameter order, i.e. swapped; the C code only uses
	# their product, so this is harmless.
	cdef np.uint32_t height_ui     = predictionArr.shape[1]
	cdef np.uint32_t width_ui      = predictionArr.shape[0]
	cdef np.uint32_t confMatDim_ui = confMatrix.shape[0]

	addToConfusionMatrix(&predictionArr_c[0,0], &groundTruthArr_c[0,0], height_ui, width_ui, &confMatrix_c[0,0], confMatDim_ui)

	confMatrix = np.ascontiguousarray(tonumpyarray(&confMatrix_c[0,0], confMatDim_ui))

	return np.copy(confMatrix)


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/evaluation/addToConfusionMatrix_impl.c:
--------------------------------------------------------------------------------
 1 | // cython methods to speed-up evaluation
 2 | 
 3 | void addToConfusionMatrix( const unsigned char* f_prediction_p  ,
 4 |                            const unsigned char* f_groundTruth_p ,
 5 |                            const unsigned int   f_width_i       ,
 6 |                            const unsigned int   f_height_i      ,
 7 |                            unsigned long long*  f_confMatrix_p  ,
 8 |                            const unsigned int   f_confMatDim_i  )
 9 | {
10 |     const unsigned int size_ui = f_height_i * f_width_i;
11 |     for (unsigned int i = 0; i < size_ui; ++i)
12 |     {
13 |         const unsigned char predPx = f_prediction_p [i];
14 |         const unsigned char gtPx   = f_groundTruth_p[i];
15 |         f_confMatrix_p[f_confMatDim_i*gtPx + predPx] += 1u;
16 |     }
17 | }


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/evaluation/instance.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python
 2 | #
 3 | # Instance class
 4 | #
 5 | 
 6 | class Instance(object):
 7 |     instID     = 0
 8 |     labelID    = 0
 9 |     pixelCount = 0
10 |     medDist    = -1
11 |     distConf   = 0.0
12 | 
13 |     def __init__(self, imgNp, instID):
14 |         if (instID == -1):
15 |             return
16 |         self.instID     = int(instID)
17 |         self.labelID    = int(self.getLabelID(instID))
18 |         self.pixelCount = int(self.getInstancePixels(imgNp, instID))
19 | 
20 |     def getLabelID(self, instID):
21 |         if (instID < 1000):
22 |             return instID
23 |         else:
24 |             return int(instID / 1000)
25 | 
26 |     def getInstancePixels(self, imgNp, instLabel):
27 |         return (imgNp == instLabel).sum()
28 | 
29 |     def toJSON(self):
30 |         return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
31 | 
32 |     def toDict(self):
33 |         buildDict = {}
34 |         buildDict["instID"]     = self.instID
35 |         buildDict["labelID"]    = self.labelID
36 |         buildDict["pixelCount"] = self.pixelCount
37 |         buildDict["medDist"]    = self.medDist
38 |         buildDict["distConf"]   = self.distConf
39 |         return buildDict
40 | 
41 |     def fromJSON(self, data):
42 |         self.instID     = int(data["instID"])
43 |         self.labelID    = int(data["labelID"])
44 |         self.pixelCount = int(data["pixelCount"])
45 |         if ("medDist" in data):
46 |             self.medDist    = float(data["medDist"])
47 |             self.distConf   = float(data["distConf"])
48 | 
49 |     def __str__(self):
50 |         return "("+str(self.instID)+")"


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/evaluation/instances2dict.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python
 2 | #
 3 | # Convert instances from png files to a dictionary
 4 | #
 5 | 
 6 | from __future__ import print_function
 7 | import os, sys
 8 | 
 9 | # Cityscapes imports
10 | from instance import *
11 | sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
12 | from csHelpers import *
13 | 
def instances2dict(imageFileList, verbose=False):
    """Map each image path to {label name: [instance dicts]} parsed from it."""
    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    instanceDict = {}
    imgCount = 0
    for imageFileName in imageFileList:
        # Instance-id image as a numpy array.
        imgNp = np.array(Image.open(imageFileName))

        # One (possibly empty) bucket per known label name.
        instances = {label.name: [] for label in labels}

        # Each distinct pixel value is one instance id.
        for instanceId in np.unique(imgNp):
            instanceObj = Instance(imgNp, instanceId)
            instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())

        instanceDict[os.path.abspath(imageFileName)] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
54 | 
def main(argv):
    """Collect png paths from argv and run instances2dict verbosely."""
    if len(argv) > 2:
        fileList = [arg for arg in argv if "png" in arg]
    else:
        fileList = []
    instances2dict(fileList, True)

if __name__ == "__main__":
    main(sys.argv[1:])
65 | 


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/helpers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/metrics/cityscapes/helpers/__init__.py


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/helpers/labels_cityPersons.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python
 2 | #
 3 | # CityPersons (cp) labels
 4 | #
 5 | 
 6 | from collections import namedtuple
 7 | 
 8 | 
 9 | #--------------------------------------------------------------------------------
10 | # Definitions
11 | #--------------------------------------------------------------------------------
12 | 
13 | # a label and all meta information
14 | LabelCp = namedtuple( 'LabelCp' , [
15 | 
16 |     'name'        , # The identifier of this label, e.g. 'pedestrian', 'rider', ... .
17 |                     # We use them to uniquely name a class
18 | 
19 |     'id'          , # An integer ID that is associated with this label.
20 |                     # The IDs are used to represent the label in ground truth
21 | 
22 |     'hasInstances', # Whether this label distinguishes between single instances or not
23 | 
24 |     'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
25 |                     # during evaluations or not
26 | 
27 |     'color'       , # The color of this label
28 |     ] )
29 | 
30 | 
31 | #--------------------------------------------------------------------------------
32 | # A list of all labels
33 | #--------------------------------------------------------------------------------
34 | 
35 | # The 'ignore' label covers representations of humans, e.g. people on posters, reflections etc.
36 | # Each annotation includes both the full bounding box (bbox) as well as a bounding box covering the visible area (bboxVis).
37 | # The latter is obtained automatically from the segmentation masks.  
38 | 
# The full CityPersons label set, in id order. Only 'pedestrian' through
# 'person (other)' carry instances; 'ignore' and 'person group' are excluded
# from evaluation.
labelsCp = [
    #         name                     id   hasInstances   ignoreInEval   color
    LabelCp(  'ignore'               ,  0 , False        , True         , (250,170, 30) ),
    LabelCp(  'pedestrian'           ,  1 , True         , False        , (220, 20, 60) ),
    LabelCp(  'rider'                ,  2 , True         , False        , (  0,  0,142) ),
    LabelCp(  'sitting person'       ,  3 , True         , False        , (107,142, 35) ),
    LabelCp(  'person (other)'       ,  4 , True         , False        , (190,153,153) ),
    LabelCp(  'person group'         ,  5 , False        , True         , (255,  0,  0) ),
]


#--------------------------------------------------------------------------------
# Create dictionaries for a fast lookup
#--------------------------------------------------------------------------------

# name to label object
name2labelCp      = { label.name    : label for label in labelsCp }
# id to label object
id2labelCp        = { label.id      : label for label in labelsCp }
60 | 
61 | 


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/make.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | # check the enviroment info
 4 | 
 5 | PYTHON="/root/miniconda3/bin/python"
 6 | export PYTHONPATH="/msravcshare/yuyua/code/segmentation/openseg.pytorch":$PYTHONPATH
 7 | 
 8 | cd ../../../
 9 | ${PYTHON} lib/metrics/cityscapes/setup.py build_ext --inplace
10 | 


--------------------------------------------------------------------------------
/lib/metrics/cityscapes/setup.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python
 2 | #
 3 | # Enable cython support for eval metrics
 4 | # Run as
 5 | # setup.py build_ext --inplace
 6 | #
 7 | # WARNING: Only tested for Ubuntu 64bit OS.
 8 | 
 9 | try:
10 |     from distutils.core import setup
11 |     from Cython.Build import cythonize
12 | except:
13 |     print("Unable to setup. Please use pip to install: cython")
14 |     print("sudo pip install cython")
15 | import os
16 | import numpy
17 | 
18 | os.environ["CC"]  = "g++"
19 | os.environ["CXX"] = "g++"
20 | 
21 | setup(ext_modules = cythonize("lib/metrics/cityscapes/evaluation/addToConfusionMatrix.pyx"),
22 |       include_dirs=[numpy.get_include()])
23 | 


--------------------------------------------------------------------------------
/lib/metrics/pascal_context_evaluator.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: RainbowSecret(yuyua@microsoft.com)
 4 | 
 5 | 
 6 | import argparse
 7 | import os
 8 | import pdb
 9 | 
10 | import numpy as np
11 | 
12 | from lib.utils.helpers.image_helper import ImageHelper
13 | from lib.utils.tools.logger import Logger as Log
14 | from lib.utils.tools.configer import Configer
15 | from lib.metrics.running_score import RunningScore
16 | 
17 | 
class PascalContextEvaluator(object):
    """Offline mIoU / pixel-accuracy evaluation for Pascal-Context label maps."""

    def __init__(self, configer):
        self.configer = configer
        self.seg_running_score = RunningScore(configer)

    def relabel(self, labelmap):
        """Shift labels down by one; the uint8 cast makes original 0 wrap to 255."""
        return (labelmap - 1).astype(np.uint8)

    def evaluate(self, pred_dir, gt_dir):
        """Score every prediction image in `pred_dir` against its ground truth
        of the same filename in `gt_dir`, then log mIoU and pixel accuracy."""
        img_cnt = 0
        for filename in os.listdir(pred_dir):
            print(filename)

            pred = ImageHelper.img2np(ImageHelper.read_image(
                os.path.join(pred_dir, filename), tool='pil', mode='P'))
            gt = ImageHelper.img2np(ImageHelper.read_image(
                os.path.join(gt_dir, filename), tool='pil', mode='P'))

            # Pascal-Context labels are stored 1-based on disk.
            pred, gt = self.relabel(pred), self.relabel(gt)

            self.seg_running_score.update(pred[np.newaxis, :, :], gt[np.newaxis, :, :])
            img_cnt += 1

        Log.info('Evaluate {} images'.format(img_cnt))
        Log.info('mIOU: {}'.format(self.seg_running_score.get_mean_iou()))
        Log.info('Pixel ACC: {}'.format(self.seg_running_score.get_pixel_acc()))
45 | 
46 | 
# CLI entry point: evaluate a directory of predicted label maps against GT.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--configs', default=None, type=str,
                        dest='configs', help='The configs file of pose.')
    parser.add_argument('--gt_dir', default=None, type=str,
                        dest='gt_dir', help='The groundtruth annotations.')
    parser.add_argument('--pred_dir', default=None, type=str,
                        dest='pred_dir', help='The label dir of predict annotations.')
    args = parser.parse_args()

    pcontext_evaluator = PascalContextEvaluator(Configer(configs=args.configs))
    pcontext_evaluator.evaluate(args.pred_dir, args.gt_dir)
59 | 


--------------------------------------------------------------------------------
/lib/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/models/__init__.py


--------------------------------------------------------------------------------
/lib/models/backbones/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/models/backbones/__init__.py


--------------------------------------------------------------------------------
/lib/models/backbones/backbone_selector.py:
--------------------------------------------------------------------------------
 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 | ## Created by: Donny You, RainbowSecret
 3 | ## Microsoft Research
 4 | ## yuyua@microsoft.com
 5 | ## Copyright (c) 2019
 6 | ##
 7 | ## This source code is licensed under the MIT-style license found in the
 8 | ## LICENSE file in the root directory of this source tree 
 9 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
10 | 
11 | 
12 | from __future__ import absolute_import
13 | from __future__ import division
14 | from __future__ import print_function
15 | 
16 | from lib.models.backbones.resnet.resnet_backbone import ResNetBackbone
17 | from lib.models.backbones.hrnet.hrnet_backbone import HRNetBackbone
18 | from lib.utils.tools.logger import Logger as Log
19 | 
20 | 
class BackboneSelector(object):
    """Instantiates the backbone network named by the 'network.backbone' config."""

    def __init__(self, configer):
        self.configer = configer

    def get_backbone(self, **params):
        """Build and return the configured backbone; exits on unknown names."""
        backbone = self.configer.get('network', 'backbone')

        is_resnet_family = (('resnet' in backbone or 'resnext' in backbone
                             or 'resnest' in backbone) and 'senet' not in backbone)
        if is_resnet_family:
            return ResNetBackbone(self.configer)(**params)

        # Substring match so any name containing 'hrne' (e.g. hrnet variants)
        # takes this branch.
        if 'hrne' in backbone:
            return HRNetBackbone(self.configer)(**params)

        Log.error('Backbone {} is invalid.'.format(backbone))
        exit(1)
41 | 


--------------------------------------------------------------------------------
/lib/models/backbones/hrnet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/models/backbones/hrnet/__init__.py


--------------------------------------------------------------------------------
/lib/models/backbones/resnet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/models/backbones/resnet/__init__.py


--------------------------------------------------------------------------------
/lib/models/backbones/resnet/wsl_resnext_models.py:
--------------------------------------------------------------------------------
 1 | # Copyright (c) Facebook, Inc. and its affiliates.
 2 | # All rights reserved.
 3 | #
 4 | # This source code is licensed under the license found in the
 5 | # LICENSE file in the root directory of this source tree.
 6 | 
 7 | # Optional list of dependencies required by the package
 8 | dependencies = ['torch', 'torchvision']
 9 | 
10 | from torch.hub import load_state_dict_from_url
11 | from torchvision.models.resnet import ResNet, Bottleneck
12 | 
13 | 
# Download URLs for the Instagram-pretrained (WSL) ResNeXt checkpoints,
# keyed by architecture name as passed to _resnext below.
model_urls = {
    'resnext101_32x8d': 'https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth',
    'resnext101_32x16d': 'https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth',
    'resnext101_32x32d': 'https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth',
    'resnext101_32x48d': 'https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth',
}
20 | 
21 | 
def _resnext(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNeXt and optionally load the pretrained checkpoint for `arch`.

    The `pretrained` flag was previously accepted but ignored — weights were
    always downloaded. It is now honored; every in-file caller passes True,
    so their behavior is unchanged.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
27 | 
28 | 
def resnext101_32x8d_wsl(progress=True, **kwargs):
    """ResNeXt-101 32x8d pre-trained on weakly-supervised data and finetuned
    on ImageNet (Figure 5 of `"Exploring the Limits of Weakly Supervised
    Pretraining" <https://arxiv.org/abs/1805.00932>`_).

    Args:
        progress (bool): If True, displays a progress bar of the download to stderr.
    """
    kwargs.update(groups=32, width_per_group=8)
    return _resnext('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
40 | 
41 | 
def resnext101_32x16d_wsl(progress=True, **kwargs):
    """ResNeXt-101 32x16d pre-trained on weakly-supervised data and finetuned
    on ImageNet (Figure 5 of `"Exploring the Limits of Weakly Supervised
    Pretraining" <https://arxiv.org/abs/1805.00932>`_).

    Args:
        progress (bool): If True, displays a progress bar of the download to stderr.
    """
    kwargs.update(groups=32, width_per_group=16)
    return _resnext('resnext101_32x16d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
53 | 
54 | 
def resnext101_32x32d_wsl(progress=True, **kwargs):
    """ResNeXt-101 32x32d pre-trained on weakly-supervised data and finetuned
    on ImageNet (Figure 5 of `"Exploring the Limits of Weakly Supervised
    Pretraining" <https://arxiv.org/abs/1805.00932>`_).

    Args:
        progress (bool): If True, displays a progress bar of the download to stderr.
    """
    kwargs.update(groups=32, width_per_group=32)
    return _resnext('resnext101_32x32d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
66 | 
67 | 
def resnext101_32x48d_wsl(progress=True, **kwargs):
    """ResNeXt-101 32x48d pre-trained on weakly-supervised data and finetuned
    on ImageNet (Figure 5 of `"Exploring the Limits of Weakly Supervised
    Pretraining" <https://arxiv.org/abs/1805.00932>`_).

    Args:
        progress (bool): If True, displays a progress bar of the download to stderr.
    """
    kwargs.update(groups=32, width_per_group=48)
    return _resnext('resnext101_32x48d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
79 | 


--------------------------------------------------------------------------------
/lib/models/model_manager.py:
--------------------------------------------------------------------------------
 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 | ## Microsoft Research
 3 | ## Author: RainbowSecret, LangHuang, JingyiXie, JianyuanGuo
 4 | ## Copyright (c) 2019
 5 | ## yuyua@microsoft.com
 6 | ##
 7 | ## This source code is licensed under the MIT-style license found in the
 8 | ## LICENSE file in the root directory of this source tree 
 9 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
10 | 
11 | 
12 | from __future__ import absolute_import
13 | from __future__ import division
14 | from __future__ import print_function
15 | 
16 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
17 | # Our approaches including FCN baseline, HRNet, OCNet, ISA, OCR
18 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
19 | # FCN baseline 
20 | from lib.models.nets.fcnet import FcnNet
21 | 
22 | # OCR
23 | from lib.models.nets.ocrnet import SpatialOCRNet, ASPOCRNet
24 | from lib.models.nets.ideal_ocrnet import IdealSpatialOCRNet, IdealSpatialOCRNetB, IdealSpatialOCRNetC, IdealGatherOCRNet, IdealDistributeOCRNet
25 | 
26 | # HRNet
27 | from lib.models.nets.hrnet import HRNet_W48
28 | from lib.models.nets.hrnet import HRNet_W48_OCR, HRNet_W48_ASPOCR, HRNet_W48_OCR_B
29 | 
30 | # OCNet
31 | from lib.models.nets.ocnet import BaseOCNet, AspOCNet
32 | 
33 | # ISA Net
34 | from lib.models.nets.isanet import ISANet
35 | 
36 | # CE2P
37 | from lib.models.nets.ce2pnet import CE2P_OCRNet, CE2P_IdealOCRNet, CE2P_ASPOCR
38 | 
39 | # SegFix
40 | from lib.models.nets.segfix import SegFix_HRNet
41 | 
42 | from lib.utils.tools.logger import Logger as Log
43 | 
# Registry mapping the 'network.model_name' config value to the model class
# instantiated by ModelManager.semantic_segmentor(). Keys must match the
# "model_name" field of the JSON configs under configs/.
SEG_MODEL_DICT = {
    # SegFix
    'segfix_hrnet': SegFix_HRNet,
    # OCNet series
    'base_ocnet': BaseOCNet,
    'asp_ocnet': AspOCNet,
    # ISA Net
    'isanet': ISANet,
    # OCR series
    'spatial_ocrnet': SpatialOCRNet,
    'spatial_asp_ocrnet': ASPOCRNet,
    # OCR series with ground-truth (oracle variants for analysis)
    'ideal_spatial_ocrnet': IdealSpatialOCRNet,
    'ideal_spatial_ocrnet_b': IdealSpatialOCRNetB,
    'ideal_spatial_ocrnet_c': IdealSpatialOCRNetC, 
    'ideal_gather_ocrnet': IdealGatherOCRNet,
    'ideal_distribute_ocrnet': IdealDistributeOCRNet,
    # HRNet series
    'hrnet_w48': HRNet_W48,
    'hrnet_w48_ocr': HRNet_W48_OCR,
    'hrnet_w48_ocr_b': HRNet_W48_OCR_B,
    'hrnet_w48_asp_ocr': HRNet_W48_ASPOCR,
    # CE2P series
    'ce2p_asp_ocrnet': CE2P_ASPOCR,
    'ce2p_ocrnet': CE2P_OCRNet,
    'ce2p_ideal_ocrnet': CE2P_IdealOCRNet, 
    # baseline series
    'fcnet': FcnNet,
}
73 | 
74 | 
class ModelManager(object):
    """Factory that builds segmentation models from the configuration.

    The model class is looked up in SEG_MODEL_DICT by the
    'network.model_name' config entry.
    """

    def __init__(self, configer):
        self.configer = configer

    def semantic_segmentor(self):
        """Instantiate and return the configured segmentation network.

        Logs an error and terminates the process when the configured
        name is not registered in SEG_MODEL_DICT.
        """
        model_name = self.configer.get('network', 'model_name')
        try:
            model_cls = SEG_MODEL_DICT[model_name]
        except KeyError:
            Log.error('Model: {} not valid!'.format(model_name))
            exit(1)
        return model_cls(self.configer)
89 | 


--------------------------------------------------------------------------------
/lib/models/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/models/modules/__init__.py


--------------------------------------------------------------------------------
/lib/models/modules/decoder_block.py:
--------------------------------------------------------------------------------
 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 | ## Created by: Jianyuan Guo, Rainbowsecret
 3 | ## Copyright (c) 2018
 4 | ##
 5 | ## This source code is licensed under the MIT-style license found in the
 6 | ## LICENSE file in the root directory of this source tree 
 7 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 8 | 
 9 | import torch
10 | from torch import nn
11 | from torch.nn import functional as F
12 | 
13 | from lib.models.tools.module_helper import ModuleHelper
14 | 
15 | 
class Decoder_Module(nn.Module):
    """Decoder head that fuses high-level features with low-level features.

    xt (inplane1 channels) is projected to 256 channels, upsampled to the
    low-level resolution, concatenated with the 48-channel projection of
    xl (inplane2 channels), and refined by two 1x1 convs (conv3 input is
    304 = 256 + 48 channels).
    """

    def __init__(self, bn_type=None, inplane1=512, inplane2=256, outplane=128):
        super(Decoder_Module, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(inplane1, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(256, bn_type=bn_type),
            )
        self.conv2 = nn.Sequential(
            nn.Conv2d(inplane2, 48, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(48, bn_type=bn_type),
            )
        self.conv3 = nn.Sequential(
            nn.Conv2d(304, outplane, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(outplane, bn_type=bn_type),
            nn.Conv2d(outplane, outplane, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(outplane, bn_type=bn_type),
            )

    def forward(self, xt, xl):
        _, _, h, w = xl.size()
        # Bug fix: apply conv1 to project xt to 256 channels before fusion.
        # The original forward skipped conv1 entirely, which both left the
        # module unused and broke conv3's expected 304-channel input for
        # the default inplane1=512 (cf. CE2P_Decoder_Module.forward).
        xt = F.interpolate(self.conv1(xt), size=(h, w), mode='bilinear', align_corners=True)
        xl = self.conv2(xl)
        x = torch.cat([xt, xl], dim=1)
        x = self.conv3(x)
        return x
43 | 
44 | 
class CE2P_Decoder_Module(nn.Module):
    """CE2P decoder head.

    Projects high-level features (xt) to 256 channels, upsamples them to the
    low-level resolution, fuses with a 48-channel projection of xl, refines
    with two 1x1 convs + dropout, and predicts per-class logits. Returns
    both the segmentation map and the fused feature map.
    """

    def __init__(self, num_classes, dropout=0, bn_type=None, inplane1=512, inplane2=256):
        super(CE2P_Decoder_Module, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(inplane1, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(256, bn_type=bn_type),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(inplane2, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(48, bn_type=bn_type),
        )
        # 304 = 256 (projected high-level) + 48 (projected low-level).
        self.conv3 = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(256, bn_type=bn_type),
            nn.Conv2d(256, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(256, bn_type=bn_type),
            nn.Dropout2d(dropout),
        )
        self.conv4 = nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True)

    def forward(self, xt, xl):
        _, _, h, w = xl.size()
        high = F.interpolate(self.conv1(xt), size=(h, w), mode='bilinear', align_corners=True)
        low = self.conv2(xl)
        fused = self.conv3(torch.cat([high, low], dim=1))
        seg = self.conv4(fused)
        return seg, fused
75 | 
76 | 


--------------------------------------------------------------------------------
/lib/models/modules/edge_block.py:
--------------------------------------------------------------------------------
 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 | ## Reproduce model writed by RainbowSecret
 3 | ## Created by: Jianyuan Guo
 4 | ## Copyright (c) 2019
 5 | ##
 6 | ## This source code is licensed under the MIT-style license found in the
 7 | ## LICENSE file in the root directory of this source tree 
 8 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 9 | 
10 | import torch
11 | from torch import nn
12 | from torch.nn import functional as F
13 | 
14 | from lib.models.tools.module_helper import ModuleHelper
15 | 
16 |    
class Edge_Module(nn.Module):
    """Edge prediction head over three backbone stages.

    Each input scale is reduced to mid_fea channels (conv1/conv2/conv3) and
    classified into edge logits by a 3x3 conv (conv4) that is shared across
    all three scales. Coarser scales are upsampled to the finest resolution,
    and the concatenated per-scale logits are fused by conv5. Returns the
    fused edge map and the concatenated edge features.
    """

    def __init__(self, mid_fea, out_fea, bn_type=None, factor=1):
        super(Edge_Module, self).__init__()

        def _reduce(in_ch):
            # 1x1 channel-reduction branch for one backbone stage.
            return nn.Sequential(
                nn.Conv2d(in_ch, mid_fea, kernel_size=1, padding=0, dilation=1, bias=False),
                ModuleHelper.BNReLU(mid_fea, bn_type=bn_type),
            )

        # Attribute names conv1..conv5 kept for checkpoint compatibility.
        self.conv1 = _reduce(factor * 256)
        self.conv2 = _reduce(factor * 512)
        self.conv3 = _reduce(factor * 1024)
        self.conv4 = nn.Conv2d(mid_fea, out_fea, kernel_size=3, padding=1, dilation=1, bias=True)
        self.conv5 = nn.Conv2d(out_fea * 3, out_fea, kernel_size=1, padding=0, dilation=1, bias=True)

    def forward(self, x1, x2, x3):
        _, _, h, w = x1.size()

        feas, edges = [], []
        for branch, x in ((self.conv1, x1), (self.conv2, x2), (self.conv3, x3)):
            fea = branch(x)
            feas.append(fea)
            edges.append(self.conv4(fea))  # shared edge classifier across scales

        # Bring the two coarser scales up to the finest resolution.
        for i in (1, 2):
            feas[i] = F.interpolate(feas[i], size=(h, w), mode='bilinear', align_corners=True)
            edges[i] = F.interpolate(edges[i], size=(h, w), mode='bilinear', align_corners=True)

        edge_fea = torch.cat(feas, dim=1)
        edge = self.conv5(torch.cat(edges, dim=1))

        return edge, edge_fea


--------------------------------------------------------------------------------
/lib/models/modules/offset_block.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import math
 3 | import pdb
 4 | from torch import nn
 5 | from torch.nn import functional as F
 6 | import numpy as np
 7 | 
 8 | from lib.models.tools.module_helper import ModuleHelper
 9 | 
10 | 
class OffsetBlock(nn.Module):
    '''
    Warps a feature map by a relative offset field: the output at each
    position is the input sampled at (coordinate + offset).
    '''

    def __init__(self):
        super(OffsetBlock, self).__init__()
        # Lazily-built cache, keyed on spatial size (see forward).
        self.coord_map = None
        self.norm_factor = None

    def _gen_coord_map(self, H, W):
        # Dense (row, col) index grids on the current CUDA device.
        ys = torch.arange(H, dtype=torch.float).cuda()
        xs = torch.arange(W, dtype=torch.float).cuda()
        coord_h, coord_w = torch.meshgrid([ys, xs])
        return coord_h, coord_w

    def forward(self, x, offset_map):
        n, c, h, w = x.size()

        # Rebuild the cached coordinate grid when the spatial size changes.
        # NOTE(review): the cache key compares against offset_map's size while
        # the grid is built from x's size — assumes they always match; verify.
        if self.coord_map is None or self.coord_map[0].size() != offset_map.size()[2:]:
            self.coord_map = self._gen_coord_map(h, w)
            self.norm_factor = torch.cuda.FloatTensor([(w - 1) / 2, (h - 1) / 2])

        # Relative offsets -> absolute sampling coordinates, (N, H, W) each.
        abs_h = offset_map[:, 0] + self.coord_map[0]
        abs_w = offset_map[:, 1] + self.coord_map[1]

        # grid_sample wants (x, y) order scaled to [-1, 1]: (N, H, W, 2).
        grid = torch.stack([abs_w, abs_h], dim=-1) / self.norm_factor - 1.

        # Sample the input at the displaced coordinates, clamping at borders.
        return F.grid_sample(x, grid, padding_mode='border')
43 | 
44 | 
class OffsetModule(nn.Module):
    """Thin wrapper exposing OffsetBlock's offset-based resampling."""

    def __init__(self):
        super(OffsetModule, self).__init__()
        self.offset_block = OffsetBlock()

    def forward(self, x, offset):
        # Resample x at (coordinate + offset) positions.
        return self.offset_block(x, offset)
54 | 


--------------------------------------------------------------------------------
/lib/models/nets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/models/nets/__init__.py


--------------------------------------------------------------------------------
/lib/models/nets/isanet.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.nn as nn
 3 | import torch.nn.functional as F
 4 | 
 5 | from lib.models.backbones.backbone_selector import BackboneSelector
 6 | from lib.models.tools.module_helper import ModuleHelper
 7 | 
 8 | 
 9 | class ISANet(nn.Module):
10 |     """
11 |     Interlaced Sparse Self-Attention for Semantic Segmentation
12 |     """
13 |     def __init__(self, configer):
14 |         self.inplanes = 128
15 |         super(ISANet, self).__init__()
16 |         self.configer = configer
17 |         self.num_classes = self.configer.get('data', 'num_classes')
18 |         self.backbone = BackboneSelector(configer).get_backbone()
19 | 
20 |         # extra added layers
21 |         bn_type = self.configer.get('network', 'bn_type')
22 |         factors = self.configer.get('network', 'factors')
23 |         from lib.models.modules.isa_block import ISA_Module
24 |         self.isa_head = nn.Sequential(
25 |             nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1, bias=False),
26 |             ModuleHelper.BNReLU(512, bn_type=bn_type),
27 |             ISA_Module(in_channels=512, key_channels=256, value_channels=512, 
28 |                 out_channels=512, down_factors=factors, dropout=0.05, bn_type=bn_type),
29 |         )
30 |         self.cls_head = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)
31 |         self.dsn_head = nn.Sequential(
32 |             nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1, bias=False),
33 |             ModuleHelper.BNReLU(512, bn_type=bn_type),
34 |             nn.Dropout2d(0.05),
35 |             nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True),
36 |         )
37 | 
38 |     def forward(self, x_):
39 |         x = self.backbone(x_)
40 |         x_dsn = self.dsn_head(x[-2])
41 |         x = self.isa_head(x[-1])
42 |         x = self.cls_head(x)
43 |         x_dsn = F.interpolate(x_dsn, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True)
44 |         x = F.interpolate(x, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True)
45 |         return x_dsn, x


--------------------------------------------------------------------------------
/lib/models/nets/segfix.py:
--------------------------------------------------------------------------------
 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 | ## Created by: RainbowSecret
 3 | ## Microsoft Research
 4 | ## yuyua@microsoft.com
 5 | ## Copyright (c) 2019
 6 | ##
 7 | ## This source code is licensed under the MIT-style license found in the
 8 | ## LICENSE file in the root directory of this source tree
 9 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
10 | 
11 | import pdb
12 | import cv2
13 | import os
14 | import torch
15 | import torch.nn as nn
16 | import torch.nn.functional as F
17 | import numpy as np
18 | 
19 | from lib.models.backbones.backbone_selector import BackboneSelector
20 | from lib.models.tools.module_helper import ModuleHelper
21 | from lib.utils.helpers.offset_helper import DTOffsetConfig
22 | from lib.models.backbones.hrnet.hrnet_backbone import BasicBlock
23 | 
24 | 
class SegFix_HRNet(nn.Module):
    """SegFix boundary-refinement model on an HRNet backbone.

    Concatenates all HRNet branch outputs at the finest resolution and
    predicts two maps: a 2-class boundary mask and a per-pixel boundary
    direction (DTOffsetConfig.num_classes bins).
    """

    def __init__(self, configer):
        super(SegFix_HRNet, self).__init__()
        self.configer = configer
        self.backbone = BackboneSelector(configer).get_backbone()

        backbone_name = self.configer.get('network', 'backbone')
        # Backbone names end with the channel width, e.g. '...48'.
        width = int(backbone_name[-2:])
        # 'hrnet2x' variants expose 31*width concatenated channels,
        # plain variants 15*width.
        in_channels = width * (31 if 'hrnet2x' in backbone_name else 15)

        num_masks = 2
        num_directions = DTOffsetConfig.num_classes
        mid_channels = 256
        bn_type = self.configer.get('network', 'bn_type')

        def _make_head(out_channels):
            # 1x1 reduce -> BN+ReLU -> 1x1 predict.
            return nn.Sequential(
                nn.Conv2d(in_channels,
                          mid_channels,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=False),
                ModuleHelper.BNReLU(mid_channels, bn_type=bn_type),
                nn.Conv2d(mid_channels,
                          out_channels,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=False))

        self.dir_head = _make_head(num_directions)
        self.mask_head = _make_head(num_masks)

    def forward(self, x_):
        stages = self.backbone(x_)
        _, _, h, w = stages[0].size()

        # Upsample every coarser branch to the finest branch's resolution.
        upsampled = [stages[0]] + [
            F.interpolate(f, size=(h, w), mode='bilinear', align_corners=True)
            for f in stages[1:]
        ]

        fused = torch.cat(upsampled, 1)
        return self.mask_head(fused), self.dir_head(fused)
90 | 


--------------------------------------------------------------------------------
/lib/models/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/models/tools/__init__.py


--------------------------------------------------------------------------------
/lib/utils/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | 


--------------------------------------------------------------------------------
/lib/utils/distributed.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.nn as nn
 3 | import subprocess
 4 | import sys
 5 | import os
 6 | 
 7 | def is_distributed():
 8 |     return torch.distributed.is_initialized()
 9 | 
def get_world_size():
    """Number of processes in the default group; 1 when not distributed."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_world_size()
    return 1
14 | 
def get_rank():
    """Rank of this process in the default group; 0 when not distributed."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_rank()
    return 0
19 | 
def handle_distributed(args, main_file):
    """Dispatch between single-process and distributed execution.

    Three mutually exclusive paths:
      1. Not distributed: restrict CUDA_VISIBLE_DEVICES to args.gpu and return.
      2. Distributed worker (args.local_rank >= 0, set by the launcher):
         join the process group and return.
      3. Distributed parent: re-launch this script under
         torch.distributed.launch with one process per visible GPU, wait
         for it, and exit with its return code.
    """
    if not args.distributed:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpu))        
        return

    if args.local_rank >= 0:
        # We are a worker already spawned by torch.distributed.launch.
        _setup_process_group(args)
        return

    current_env = os.environ.copy()
    if current_env.get('CUDA_VISIBLE_DEVICES') is None:
        current_env['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpu))
        world_size = len(args.gpu)
    else:
        # Respect an externally-set device mask: one process per visible GPU.
        world_size = len(current_env['CUDA_VISIBLE_DEVICES'].split(','))

    current_env['WORLD_SIZE'] = str(world_size)

    print('World size:', world_size)
    # Logic for spawner
    python_exec = sys.executable
    command_args = sys.argv
    # NOTE(review): assumes the process was invoked as `... main.py <args>`;
    # index() raises ValueError otherwise — confirm against the entry scripts.
    main_index = command_args.index('main.py')
    command_args = command_args[main_index+1:]
    print(command_args)
    command_args = [
        python_exec, '-u',
        '-m', 'torch.distributed.launch',
        '--nproc_per_node', str(world_size),
        main_file,
    ] + command_args
    process = subprocess.Popen(command_args, env=current_env)
    process.wait()
    if process.returncode != 0:
        raise subprocess.CalledProcessError(returncode=process.returncode,
                                            cmd=command_args)    
    sys.exit(process.returncode)
57 | 
def _setup_process_group(args):
    """Bind this worker to its GPU and join the NCCL process group.

    With init_method='env://', MASTER_ADDR / MASTER_PORT / WORLD_SIZE are
    read from environment variables set by the launcher.
    """
    rank = args.local_rank
    torch.cuda.set_device(rank)
    torch.distributed.init_process_group('nccl', init_method='env://', rank=rank)


--------------------------------------------------------------------------------
/lib/utils/helpers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/utils/helpers/__init__.py


--------------------------------------------------------------------------------
/lib/utils/helpers/dc_helper.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Donny You(youansheng@gmail.com)
 4 | 
 5 | 
 6 | from __future__ import absolute_import
 7 | from __future__ import division
 8 | from __future__ import print_function
 9 | 
10 | import itertools
11 | 
12 | from lib.extensions.parallel.data_container import DataContainer
13 | 
14 | 
class DCHelper(object):
    """Utilities for packing/unpacking DataContainer batches."""

    @staticmethod
    def tolist(dc):
        """Flatten a DataContainer's per-GPU nested lists into one list."""
        flat = []
        for chunk in dc.data:
            flat.extend(chunk)
        return flat

    @staticmethod
    def todc(data_list, gpu_list, cpu_only=False):
        """Split data_list evenly across the GPUs and wrap the chunks.

        Requires len(data_list) to be a multiple of len(gpu_list).
        """
        assert len(data_list) % len(gpu_list) == 0
        per_gpu = len(data_list) // len(gpu_list)
        chunks = [data_list[i:i + per_gpu]
                  for i in range(0, len(data_list), per_gpu)]
        return DataContainer(chunks, cpu_only=cpu_only)
30 | 


--------------------------------------------------------------------------------
/lib/utils/helpers/file_helper.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Donny You (youansheng@gmail.com)
 4 | # Repackage some file operations.
 5 | 
 6 | 
 7 | from __future__ import absolute_import
 8 | from __future__ import division
 9 | from __future__ import print_function
10 | 
11 | import os
12 | 
13 | 
class FileHelper(object):
    """Small collection of filesystem utility wrappers."""

    @staticmethod
    def make_dirs(dir_path, is_file=False):
        """Create dir_path (or its parent directory when is_file=True).

        Uses makedirs(..., exist_ok=True) instead of the previous
        exists()/makedirs() pair, which was racy when several processes
        created the same directory concurrently. Also skips creation when
        the resolved directory name is empty (a bare filename with
        is_file=True), where makedirs('') would raise.
        """
        dir_path = os.path.expanduser(dir_path)
        dir_name = FileHelper.dir_name(dir_path) if is_file else dir_path
        if dir_name:
            os.makedirs(dir_name, exist_ok=True)

    @staticmethod
    def dir_name(file_path):
        """Return the directory component of file_path."""
        return os.path.dirname(file_path)

    @staticmethod
    def abs_path(file_path):
        """Return the absolute path of file_path."""
        return os.path.abspath(file_path)

    @staticmethod
    def shotname(file_name):
        """Return file_name without its extension."""
        shotname, extension = os.path.splitext(file_name)
        return shotname

    @staticmethod
    def scandir(dir_path, suffix=None):
        """Yield names of regular files in dir_path, optionally filtered
        by suffix. Non-recursive."""
        for entry in os.scandir(dir_path):
            if not entry.is_file():
                continue
            filename = entry.name
            if suffix is None or filename.endswith(suffix):
                yield filename

    @staticmethod
    def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
        """Raise FileNotFoundError when filename is not a regular file."""
        if not os.path.isfile(filename):
            raise FileNotFoundError(msg_tmpl.format(filename))

    @staticmethod
    def list_dir(dir_name, prefix=''):
        """Recursively list files under dir_name, as paths relative to it."""
        filename_list = list()
        items = os.listdir(os.path.join(dir_name, prefix))
        for item in items:
            fi_d = os.path.join(dir_name, prefix, item)
            if os.path.isdir(fi_d):
                prefix_temp = '{}/{}'.format(prefix, item).lstrip('/')
                filename_list += FileHelper.list_dir(dir_name, prefix_temp)
            else:
                filename_list.append('{}/{}'.format(prefix, item).lstrip('/'))

        return filename_list
65 | 
66 | 
67 | if __name__ == "__main__":
68 |     print (FileHelper.list_dir('/home/donny/Projects'))


--------------------------------------------------------------------------------
/lib/utils/helpers/json_helper.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Donny You (youansheng@gmail.com)
 4 | # Repackage some json operations.
 5 | 
 6 | 
 7 | from __future__ import absolute_import
 8 | from __future__ import division
 9 | from __future__ import print_function
10 | 
11 | import json
12 | import os
13 | 
14 | from lib.utils.tools.logger import Logger as Log
15 | 
16 | 
class JsonHelper(object):
    """Helpers for loading and saving JSON files.

    json2xml / xml2json are unfinished stubs: they only validate inputs and
    create output directories, no conversion is performed.
    """

    @staticmethod
    def load_file(json_file):
        """Parse json_file and return the result; exits when it is missing."""
        if not os.path.exists(json_file):
            Log.error('Json file: {} not exists.'.format(json_file))
            exit(1)

        with open(json_file, 'r') as read_stream:
            json_dict = json.load(read_stream)

        return json_dict

    @staticmethod
    def save_file(json_dict, json_file):
        """Serialize json_dict to json_file, creating parent dirs as needed.

        Bug fix: guard against an empty dirname (json_file is a bare
        filename) — os.makedirs('') raises FileNotFoundError. Also uses
        exist_ok=True to avoid the check-then-create race.
        """
        dir_name = os.path.dirname(json_file)
        if dir_name and not os.path.exists(dir_name):
            Log.info('Json Dir: {} not exists.'.format(dir_name))
            os.makedirs(dir_name, exist_ok=True)

        with open(json_file, 'w') as write_stream:
            write_stream.write(json.dumps(json_dict))

    @staticmethod
    def json2xml(json_file, xml_file):
        """Stub: validates json_file and creates the xml output directory.

        NOTE(review): the actual json->xml conversion was never implemented.
        """
        if not os.path.exists(json_file):
            Log.error('Json file: {} not exists.'.format(json_file))
            exit(1)

        xml_dir_name = os.path.dirname(xml_file)
        if not os.path.exists(xml_dir_name):
            Log.info('Xml Dir: {} not exists.'.format(xml_dir_name))
            os.makedirs(xml_dir_name)

    @staticmethod
    def xml2json(xml_file, json_file):
        """Stub: validates xml_file and creates the json output directory.

        NOTE(review): the actual xml->json conversion was never implemented.
        """
        if not os.path.exists(xml_file):
            Log.error('Xml file: {} not exists.'.format(xml_file))
            exit(1)

        json_dir_name = os.path.dirname(json_file)
        if not os.path.exists(json_dir_name):
            Log.info('Json Dir: {} not exists.'.format(json_dir_name))
            os.makedirs(json_dir_name)


--------------------------------------------------------------------------------
/lib/utils/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/utils/tools/__init__.py


--------------------------------------------------------------------------------
/lib/utils/tools/average_meter.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | #-*- coding:utf-8 -*-
 3 | # Author: Donny You (youansheng@gmail.com)
 4 | # Utils to store the average and current value.
 5 | 
 6 | 
 7 | from __future__ import absolute_import
 8 | from __future__ import division
 9 | from __future__ import print_function
10 | 
11 | 
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0.
        self.sum = 0.
        self.count = 0
        self.avg = 0.

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
28 | 


--------------------------------------------------------------------------------
/lib/utils/tools/timer.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding:utf-8 -*-
 3 | # Author: Donny You(youansheng@gmail.com)
 4 | 
 5 | 
 6 | from __future__ import absolute_import
 7 | from __future__ import division
 8 | from __future__ import print_function
 9 | from __future__ import unicode_literals
10 | 
11 | import time
12 | 
13 | 
class Timer(object):
    """Accumulating stopwatch: tic() starts an interval, toc() ends it and
    folds it into the running total/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # time.time() rather than time.clock(): the latter does not
        # normalize for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """End the current interval.

        Returns the average interval length, or the last interval when
        average=False.
        """
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff


--------------------------------------------------------------------------------
/lib/vis/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/vis/__init__.py


--------------------------------------------------------------------------------
/lib/vis/color150.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/vis/color150.mat


--------------------------------------------------------------------------------
/lib/vis/color60.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/lib/vis/color60.mat


--------------------------------------------------------------------------------
/lib/vis/log_visualizer.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | #-*- coding:utf-8 -*-
 3 | # Author: Donny You(youansheng@gmail.com)
 4 | # Visualize the log files.
 5 | 
 6 | 
 7 | from __future__ import absolute_import
 8 | from __future__ import division
 9 | from __future__ import print_function
10 | 
11 | import re
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | 
15 | 
class LogVisualizer(object):
    """Parse openseg-style training log files and plot loss/accuracy curves."""

    def vis_loss(self, log_file):
        """Plot the train and test loss curves parsed from `log_file`.

        Lines of the form `...Iteration:<n>Learning...` set the current
        iteration; `...TrainLoss = <v>` and `...TestLoss = <v>` lines supply
        the y-values. Shows the plot via matplotlib.
        """
        train_ax = []
        train_ay = []
        test_ax = []
        test_ay = []
        test_mark = 0

        with open(log_file, 'r') as file_stream:
            # Iterate the stream lazily instead of readlines().
            for line in file_stream:
                if 'Iteration' in line:
                    m = re.match(r'.*Iteration:(.*)Learning.*', line)
                    if m is None:
                        continue  # malformed line: skip rather than crash
                    iteration = int(m.group(1))  # renamed: don't shadow builtin `iter`
                    train_ax.append(iteration)
                    test_mark = iteration

                elif 'TrainLoss' in line:
                    m = re.match(r'.*TrainLoss = (.*)', line)
                    if m is None:
                        continue
                    train_ay.append(float(m.group(1)))

                elif 'TestLoss' in line:
                    m = re.match(r'.*TestLoss = (.*)', line)
                    if m is None:
                        continue
                    # Test loss is plotted at the most recent train iteration.
                    test_ax.append(test_mark)
                    test_ay.append(float(m.group(1)))

        plt.plot(np.array(train_ax), np.array(train_ay), label='Train Loss')
        plt.plot(np.array(test_ax), np.array(test_ay), label='Test Loss')
        plt.legend()
        plt.show()

    def vis_acc(self, log_file):
        """Plot the accuracy curve parsed from `log_file`.

        Iteration markers come from train lines (`Iteration:<n>Learning`);
        `...Accuracy = <v>` lines supply the y-values, plotted at the most
        recent iteration. Shows the plot via matplotlib.
        """
        acc_ax = []
        acc_ay = []
        test_mark = 0

        with open(log_file, 'r') as file_stream:
            for line in file_stream:
                if 'Iteration' in line and 'Train' in line:
                    m = re.match(r'.*Iteration:(.*)Learning.*', line)
                    if m is None:
                        continue
                    test_mark = int(m.group(1))

                # Deliberately a separate `if`: a line could carry both an
                # iteration marker and an accuracy value.
                if 'Accuracy' in line:
                    m = re.match(r'.*Accuracy = (.*)', line)
                    if m is None:
                        continue
                    acc_ax.append(test_mark)
                    acc_ay.append(float(m.group(1)))

        plt.plot(acc_ax, acc_ay, label='Acc')
        plt.legend()
        plt.show()
80 | 
81 | 
if __name__ == "__main__":
    # Visualize the training loss curve of the flower-classifier log.
    LogVisualizer().vis_loss('../../log/cls/fc_flower_cls.log')
89 | 


--------------------------------------------------------------------------------
/lib/vis/tensor_visualizer.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | #-*- coding:utf-8 -*-
 3 | # Author: Donny You(youansheng@gmail.com)
 4 | # Visualize the tensor of the computer vision.
 5 | 
 6 | 
 7 | import os
 8 | 
 9 | import cv2
10 | import numpy as np
11 | 
12 | from lib.datasets.tools.transforms import DeNormalize
13 | from lib.utils.tools.logger import Logger as Log
14 | 
15 | TENSOR_DIR = 'vis/results/tensor'
16 | 
17 | 
class TensorVisualizer(object):
    """Dump tensors / images to disk as JPEG files for visual inspection.

    Output files are written under <project_dir>/vis/results/tensor/<sub_dir>/.
    """

    def __init__(self, configer):
        # Project-wide configuration accessor (paths, input size, normalization).
        self.configer = configer

    def vis_tensor(self, tensor, name='default', sub_dir=''):
        """Save `tensor` as <base_dir>/<name>.jpg, resized to the configured input size.

        Args:
            tensor: an HWC numpy array, or a 3-D (CHW) torch tensor which is
                converted to an HWC numpy array here.
            name: output file basename (without extension).
            sub_dir: optional subdirectory under the tensor results dir.
        """
        base_dir = os.path.join(self.configer.get('project_dir'), TENSOR_DIR, sub_dir)

        if not isinstance(tensor, np.ndarray):
            if len(tensor.size()) != 3:
                Log.error('Tensor size is not valid.')
                exit(1)

            # CHW tensor -> HWC numpy array, the layout OpenCV expects.
            tensor = tensor.data.cpu().numpy().transpose(1, 2, 0)

        if not os.path.exists(base_dir):
            Log.error('Dir:{} not exists!'.format(base_dir))
            os.makedirs(base_dir)

        tensor_img = cv2.resize(tensor, tuple(self.configer.get('data', 'input_size')))
        # BUG FIX: cv2.imwrite signature is (filename, img); the arguments
        # were swapped, so no file was ever written.
        cv2.imwrite(os.path.join(base_dir, '{}.jpg'.format(name)), tensor_img)

    def vis_img(self, image_in, name='default', sub_dir='images'):
        """Save `image_in` as <base_dir>/<name>.jpg, resized to the configured input size.

        Args:
            image_in: an HWC numpy array (copied), or a 3-D normalized torch
                tensor which is de-normalized and converted here.
            name: output file basename (without extension).
            sub_dir: optional subdirectory under the tensor results dir.
        """
        base_dir = os.path.join(self.configer.get('project_dir'), TENSOR_DIR, sub_dir)

        if not isinstance(image_in, np.ndarray):
            if len(image_in.size()) != 3:
                Log.error('Image size is not valid.')
                exit(1)

            # Undo the training-time normalization before saving.
            image = DeNormalize(div_value=self.configer.get('normalize', 'div_value'),
                                mean=self.configer.get('normalize', 'mean'),
                                std=self.configer.get('normalize', 'std'))(image_in.clone())
            image = image.data.cpu().numpy().transpose(1, 2, 0)
        else:
            image = image_in.copy()

        if not os.path.exists(base_dir):
            Log.error('Dir:{} not exists!'.format(base_dir))
            os.makedirs(base_dir)

        img = cv2.resize(image, tuple(self.configer.get('data', 'input_size')))
        # BUG FIX: same swapped-argument bug as in vis_tensor.
        cv2.imwrite(os.path.join(base_dir, '{}.jpg'.format(name)), img)
61 | 
62 | 
if __name__ == "__main__":
    # Test the visualizer.
    # NOTE(review): no standalone test is implemented yet; the module is
    # only exercised via imports elsewhere in the project.
    pass


--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | cython
 2 | numpy
 3 | cffi
 4 | opencv-python
 5 | scipy
 6 | easydict
 7 | matplotlib
 8 | Pillow>=6.2.2
 9 | torchcontrib
10 | yacs
11 | pyyaml
12 | visdom
13 | bs4
14 | html5lib
15 | ninja
16 | torch==0.4.1
17 | torchvision==0.2.1


--------------------------------------------------------------------------------
/scripts/ade20k/hrnet/run_h_48_d_4_train.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Train / resume / debug / validate / test HRNet-W48 on ADE20K.
# Usage: run_h_48_d_4_train.sh {train|resume|debug|val|test} <run-tag>
#   $1 selects the phase; $2 is appended to the checkpoint and log names.
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
cd $SCRIPTPATH
cd ../../../
. config.profile

# check the environment info
nvidia-smi
${PYTHON} -m pip install yacs

export PYTHONPATH="$PWD":$PYTHONPATH

DATA_DIR="${DATA_ROOT}/ADE20K"
BACKBONE="hrnet48"
CONFIGS="configs/ade20k/H_48_D_4.json"

MODEL_NAME="hrnet_w48"
LOSS_TYPE="fs_ce_loss"
CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
PRETRAINED_MODEL="./pretrained_model/hrnetv2_w48_imagenet_pretrained.pth"
MAX_ITERS=150000

LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
echo "Logging to $LOG_FILE"
mkdir -p `dirname $LOG_FILE`

if [ "$1"x == "train"x ]; then
  ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
                       --phase train --gathered n --loss_balance y --log_to_file n \
                       --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
                       --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
                       --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} \
                       2>&1 | tee ${LOG_FILE}

elif [ "$1"x == "resume"x ]; then
  ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
                       --phase train --gathered n --loss_balance y --log_to_file n \
                       --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
                       --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
                       --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
                       --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}

elif [ "$1"x == "debug"x ]; then
  ${PYTHON} -u main.py --configs ${CONFIGS} \
                       --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}

elif [ "$1"x == "val"x ]; then
  # Run inference on the val split, then score the predictions.
  ${PYTHON} -u main.py --configs ${CONFIGS} \
                       --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
                       --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
                       --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
  cd lib/metrics
  ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
                                   --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
                                   --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1

elif [ "$1"x == "test"x ]; then
  ${PYTHON} -u main.py --configs ${CONFIGS} \
                       --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
                       --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
                       --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}

else
  echo "$1"x" is invalid..."
fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/isa/run_wideb5_isanet_ade20k.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | SAVE_DIR="${DATA_ROOT}/seg_result/ade20k/"
16 | BACKBONE="wide_resnet38_dilated8"
17 | CONFIGS="configs/ade20k/${BACKBONE}.json"
18 | TEST_CONFIGS="configs/ade20k/${BACKBONE}_test.json"
19 | 
20 | MODEL_NAME="wideb5_isanet"
21 | LOSS_TYPE="fs_auxce_loss"
22 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
23 | PRETRAINED_MODEL="./pretrained_model/wide_resnet38_ipabn_lr_256.pth.tar"
24 | MAX_ITERS=150000
25 | 
26 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
27 | echo "Logging to $LOG_FILE"
28 | mkdir -p `dirname $LOG_FILE`
29 | 
30 | if [ "$1"x == "train"x ]; then
31 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
32 |                        --phase train --gathered n --loss_balance y --log_to_file n \
33 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
34 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
35 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
36 | 
37 | elif [ "$1"x == "resume"x ]; then
38 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
39 |                        --phase train --gathered n --loss_balance y --log_to_file n \
40 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
41 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
42 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
43 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
44 | 
45 | elif [ "$1"x == "debug"x ]; then
46 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
47 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
48 | 
49 | elif [ "$1"x == "val"x ]; then
50 |   # ${PYTHON} -u main.py --configs ${TEST_CONFIGS} --data_dir ${DATA_DIR} \
51 |   #                      --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME}_mscrop \
52 |   #                      --phase test --gpu 0 1 2 3 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
53 |   #                      --test_dir ${DATA_DIR}/val/image --log_to_file n \
54 |   #                      --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms
55 | 
56 |   cd lib/metrics
57 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${TEST_CONFIGS} \
58 |                                    --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms \
59 |                                    --gt_dir ${DATA_DIR}/val/label  
60 | 
61 | elif [ "$1"x == "test"x ]; then
62 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
63 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
64 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
65 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
66 | 
67 | else
68 |   echo "$1"x" is invalid..."
69 | fi
70 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocnet/run_res101d8_aspocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | BACKBONE="deepbase_resnet101_dilated8"
16 | CONFIGS="configs/ade20k/${BACKBONE}.json"
17 | 
18 | MODEL_NAME="asp_ocnet"
19 | LOSS_TYPE="fs_auxce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet101-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
34 | 
35 | elif [ "$1"x == "resume"x ]; then
36 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
37 |                        --phase train --gathered n --loss_balance y --log_to_file n \
38 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
39 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
40 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
41 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
42 | 
43 | elif [ "$1"x == "debug"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "val"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
50 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
51 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
52 |   cd lib/metrics
53 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
54 |                                    --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
55 |                                    --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1
56 | 
57 | elif [ "$1"x == "test"x ]; then
58 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
59 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
60 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
61 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
62 | 
63 | else
64 |   echo "$1"x" is invalid..."
65 | fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocnet/run_res101d8_aspp_baseocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | BACKBONE="deepbase_resnet101_dilated8"
16 | CONFIGS="configs/ade20k/${BACKBONE}.json"
17 | 
18 | MODEL_NAME="aspp_base_ocnet"
19 | LOSS_TYPE="fs_auxce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet101-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
34 | 
35 | elif [ "$1"x == "resume"x ]; then
36 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
37 |                        --phase train --gathered n --loss_balance y --log_to_file n \
38 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
39 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
40 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
41 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
42 | 
43 | elif [ "$1"x == "debug"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "val"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
50 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
51 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
52 |   cd lib/metrics
53 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
54 |                                    --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
55 |                                    --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1
56 | 
57 | elif [ "$1"x == "test"x ]; then
58 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
59 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
60 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
61 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
62 | 
63 | else
64 |   echo "$1"x" is invalid..."
65 | fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocnet/run_res101d8_baseocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | SAVE_DIR="${DATA_ROOT}/seg_result/ade20k/"
16 | BACKBONE="deepbase_resnet101_dilated8"
17 | CONFIGS="configs/ade20k/${BACKBONE}.json"
18 | TEST_CONFIGS="configs/ade20k/${BACKBONE}_test.json"
19 | 
20 | MODEL_NAME="base_ocnet"
21 | LOSS_TYPE="fs_auxce_loss"
22 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
23 | PRETRAINED_MODEL="./pretrained_model/resnet101-imagenet.pth"
24 | MAX_ITERS=150000
25 | 
26 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
27 | echo "Logging to $LOG_FILE"
28 | mkdir -p `dirname $LOG_FILE`
29 | 
30 | if [ "$1"x == "train"x ]; then
31 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
32 |                        --phase train --gathered n --loss_balance y --log_to_file n \
33 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
34 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
35 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
36 | 
37 | elif [ "$1"x == "resume"x ]; then
38 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
39 |                        --phase train --gathered n --loss_balance y --log_to_file n \
40 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
41 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
42 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
43 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
44 | 
45 | elif [ "$1"x == "debug"x ]; then
46 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
47 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
48 | 
49 | elif [ "$1"x == "val"x ]; then
50 |   # ${PYTHON} -u main.py --configs ${TEST_CONFIGS} --data_dir ${DATA_DIR} \
51 |   #                      --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME}_mscrop \
52 |   #                      --phase test --gpu 0 1 2 3 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
53 |   #                      --test_dir ${DATA_DIR}/val/image --log_to_file n \
54 |   #                      --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms
55 | 
56 |   cd lib/metrics
57 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${TEST_CONFIGS} \
58 |                                    --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms \
59 |                                    --gt_dir ${DATA_DIR}/val/label  
60 | 
61 | elif [ "$1"x == "test"x ]; then
62 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
63 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
64 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
65 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
66 | 
67 | else
68 |   echo "$1"x" is invalid..."
69 | fi
70 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocnet/run_res50d8_aspocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | BACKBONE="deepbase_resnet50_dilated8"
16 | CONFIGS="configs/ade20k/${BACKBONE}.json"
17 | 
18 | MODEL_NAME="asp_ocnet"
19 | LOSS_TYPE="fs_auxce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet50-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
34 | 
35 | elif [ "$1"x == "resume"x ]; then
36 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
37 |                        --phase train --gathered n --loss_balance y --log_to_file n \
38 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
39 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
40 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
41 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
42 | 
43 | elif [ "$1"x == "debug"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "val"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
50 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
51 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
52 |   cd lib/metrics
53 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
54 |                                    --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
55 |                                    --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1
56 | 
57 | elif [ "$1"x == "test"x ]; then
58 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
59 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
60 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
61 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
62 | 
63 | else
64 |   echo "$1"x" is invalid..."
65 | fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocnet/run_res50d8_aspp_baseocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | BACKBONE="deepbase_resnet50_dilated8"
16 | CONFIGS="configs/ade20k/${BACKBONE}.json"
17 | 
18 | MODEL_NAME="aspp_base_ocnet"
19 | LOSS_TYPE="fs_auxce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet50-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
34 | 
35 | elif [ "$1"x == "resume"x ]; then
36 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
37 |                        --phase train --gathered n --loss_balance y --log_to_file n \
38 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
39 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
40 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
41 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
42 | 
43 | elif [ "$1"x == "debug"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "val"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
50 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
51 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
52 |   cd lib/metrics
53 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
54 |                                    --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
55 |                                    --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1
56 | 
57 | elif [ "$1"x == "test"x ]; then
58 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
59 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
60 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
61 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
62 | 
63 | else
64 |   echo "$1"x" is invalid..."
65 | fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocnet/run_res50d8_baseocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | BACKBONE="deepbase_resnet50_dilated8"
16 | CONFIGS="configs/ade20k/${BACKBONE}.json"
17 | 
18 | MODEL_NAME="base_ocnet"
19 | LOSS_TYPE="fs_auxce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet50-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
34 | 
35 | elif [ "$1"x == "resume"x ]; then
36 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
37 |                        --phase train --gathered n --loss_balance y --log_to_file n \
38 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
39 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
40 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
41 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
42 | 
43 | elif [ "$1"x == "debug"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "val"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
50 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
51 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
52 |   cd lib/metrics
53 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
54 |                                    --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
55 |                                    --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1
56 | 
57 | elif [ "$1"x == "test"x ]; then
58 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
59 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
60 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
61 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
62 | 
63 | else
64 |   echo "$1"x" is invalid..."
65 | fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocrnet/run_res101d8_fastaspocnet_ade20k_seg_test.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | SAVE_DIR="${DATA_ROOT}/seg_result/ade20k/"
16 | BACKBONE="deepbase_resnet101_dilated8"
17 | CONFIGS="configs/ade20k/${BACKBONE}_test.json"
18 | 
19 | MODEL_NAME="fast_asp_ocnet"
20 | LOSS_TYPE="fs_auxce_loss"
21 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
22 | PRETRAINED_MODEL="./pretrained_model/resnet101-imagenet.pth"
23 | MAX_ITERS=150000
24 | 
25 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
26 | echo "Logging to $LOG_FILE"
27 | mkdir -p `dirname $LOG_FILE`
28 | 
29 | 
30 | if [ "$1"x == "val"x ]; then
31 |   ${PYTHON} -u main.py --configs ${CONFIGS} --data_dir ${DATA_DIR} \
32 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME}_mscrop \
33 |                        --phase test --gpu 0 1 2 3 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
34 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n \
35 |                        --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms
36 | 
37 |   cd lib/metrics
38 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
39 |                                    --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms \
40 |                                    --gt_dir ${DATA_DIR}/val/label  
41 | 
42 | 
43 | elif [ "$1"x == "test"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
46 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
47 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 
48 |                        # 2>&1 | tee -a ${LOG_FILE}
49 | 
50 | else
51 |   echo "$1"x" is invalid..."
52 | fi


--------------------------------------------------------------------------------
/scripts/ade20k/ocrnet/run_res101d8_fastbaseocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | SAVE_DIR="${DATA_ROOT}/seg_result/ade20k/"
16 | BACKBONE="deepbase_resnet101_dilated8"
17 | CONFIGS="configs/ade20k/${BACKBONE}.json"
18 | CONFIGS="configs/ade20k/${BACKBONE}_test.json"
19 | 
20 | MODEL_NAME="fast_base_ocnet"
21 | LOSS_TYPE="fs_auxce_loss"
22 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
23 | PRETRAINED_MODEL="./pretrained_model/resnet101-imagenet.pth"
24 | MAX_ITERS=150000
25 | 
26 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
27 | echo "Logging to $LOG_FILE"
28 | mkdir -p `dirname $LOG_FILE`
29 | 
30 | if [ "$1"x == "train"x ]; then
31 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
32 |                        --phase train --gathered n --loss_balance y --log_to_file n \
33 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
34 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
35 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
36 | 
37 | elif [ "$1"x == "resume"x ]; then
38 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
39 |                        --phase train --gathered n --loss_balance y --log_to_file n \
40 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
41 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
42 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
43 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
44 | 
45 | elif [ "$1"x == "debug"x ]; then
46 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
47 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
48 | 
49 | 
50 | elif [ "$1"x == "val"x ]; then
51 |   ${PYTHON} -u main.py --configs ${CONFIGS} --data_dir ${DATA_DIR} \
52 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME}_mscrop \
53 |                        --phase test --gpu 0 1 2 3 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
54 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n \
55 |                        --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms_flip
56 | 
57 |   cd lib/metrics
58 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
59 |                                    --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms_flip/label \
60 |                                    --gt_dir ${DATA_DIR}/val/label  
61 | 
62 | elif [ "$1"x == "test"x ]; then
63 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
64 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
65 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
66 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
67 | 
68 | else
69 |   echo "$1"x" is invalid..."
70 | fi
71 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocrnet/run_res101d8_fastbaseocnet_ade20k_seg_ohem.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | BACKBONE="deepbase_resnet101_dilated8"
16 | CONFIGS="configs/ade20k/${BACKBONE}_ohem.json"
17 | 
18 | MODEL_NAME="fast_base_ocnet"
19 | LOSS_TYPE="fs_auxohemce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_ohem_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet101-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
34 | 
35 | elif [ "$1"x == "resume"x ]; then
36 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
37 |                        --phase train --gathered n --loss_balance y --log_to_file n \
38 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
39 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
40 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
41 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
42 | 
43 | elif [ "$1"x == "debug"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "val"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
50 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
51 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
52 |   cd lib/metrics
53 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
54 |                                    --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
55 |                                    --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1
56 | 
57 | elif [ "$1"x == "test"x ]; then
58 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
59 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
60 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
61 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
62 | 
63 | else
64 |   echo "$1"x" is invalid..."
65 | fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocrnet/run_res50d8_fastaspocnet_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | BACKBONE="deepbase_resnet50_dilated8"
16 | CONFIGS="configs/ade20k/${BACKBONE}.json"
17 | 
18 | MODEL_NAME="fast_asp_ocnet"
19 | LOSS_TYPE="fs_auxce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet50-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
34 | 
35 | elif [ "$1"x == "resume"x ]; then
36 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
37 |                        --phase train --gathered n --loss_balance y --log_to_file n \
38 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
39 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
40 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
41 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
42 | 
43 | elif [ "$1"x == "debug"x ]; then
44 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
45 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "val"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
50 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
51 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n --out_dir val 2>&1 | tee -a ${LOG_FILE}
52 |   cd lib/metrics
53 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
54 |                                    --pred_dir ../../results/ade20k/test_dir/${CHECKPOINTS_NAME}/val/label \
55 |                                    --gt_dir ${DATA_DIR}/val/label  >> "../../"${LOG_FILE} 2>&1
56 | 
57 | elif [ "$1"x == "test"x ]; then
58 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
59 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
60 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
61 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
62 | 
63 | else
64 |   echo "$1"x" is invalid..."
65 | fi
66 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocrnet/run_res50d8_ideal_ocr_ade20k.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | DATA_DIR="${DATA_ROOT}/ade20k"
13 | SAVE_DIR="${DATA_ROOT}/seg_result/ade20k/"
14 | BACKBONE="deepbase_resnet50_dilated8"
15 | CONFIGS="configs/ade20k/${BACKBONE}.json"
16 | CONFIGS="configs/ade20k/deepbase_resnet101_dilated8_test.json"
17 | 
18 | MODEL_NAME="ideal_spatial_ocrnet"
19 | LOSS_TYPE="fs_auxce_loss"
20 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
21 | PRETRAINED_MODEL="./pretrained_model/resnet50-imagenet.pth"
22 | MAX_ITERS=150000
23 | 
24 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
25 | echo "Logging to $LOG_FILE"
26 | mkdir -p `dirname $LOG_FILE`
27 | 
28 | if [ "$1"x == "train"x ]; then
29 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
30 |                        --phase train --gathered n --loss_balance y --log_to_file n \
31 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
32 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
33 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} \
34 |                        --use_ground_truth \
35 |                        2>&1 | tee ${LOG_FILE}
36 | 
37 | elif [ "$1"x == "resume"x ]; then
38 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
39 |                        --phase train --gathered n --loss_balance y --log_to_file n \
40 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
41 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
42 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
43 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  \
44 |                        --use_ground_truth \
45 |                        2>&1 | tee -a ${LOG_FILE}
46 | 
47 | elif [ "$1"x == "debug"x ]; then
48 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
49 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
50 | 
51 | elif [ "$1"x == "val"x ]; then
52 |   ${PYTHON} -u main.py --configs ${CONFIGS} --data_dir ${DATA_DIR} \
53 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME}_mscrop \
54 |                        --phase test --gpu 0 1 2 3 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
55 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n \
56 |                        --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms_flip
57 |                        # 2>&1 | tee -a ${LOG_FILE}
58 | 
59 |   cd lib/metrics
60 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
61 |                                    --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms_flip/label \
62 |                                    --gt_dir ${DATA_DIR}/val/label  
63 |                                    # >> "../../"${LOG_FILE} 2>&1
64 | 
65 | elif [ "$1"x == "test"x ]; then
66 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
67 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
68 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
69 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
70 | 
71 | else
72 |   echo "$1"x" is invalid..."
73 | fi
74 | 


--------------------------------------------------------------------------------
/scripts/ade20k/ocrnet/run_res50d8_ocr_ade20k.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | export PYTHONPATH="$PWD":$PYTHONPATH
11 | 
12 | cd ../../
13 | 
14 | DATA_DIR="${DATA_ROOT}/ADE20K"
15 | SAVE_DIR="${DATA_ROOT}/seg_result/ade20k/"
16 | BACKBONE="deepbase_resnet50_dilated8"
17 | # CONFIGS="configs/ade20k/${BACKBONE}.json"
18 | CONFIGS="configs/ade20k/deepbase_resnet101_dilated8_test.json"
19 | 
20 | MODEL_NAME="spatial_ocrnet"
21 | LOSS_TYPE="fs_auxce_loss"
22 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
23 | PRETRAINED_MODEL="./pretrained_model/resnet50-imagenet.pth"
24 | MAX_ITERS=150000
25 | 
26 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
27 | echo "Logging to $LOG_FILE"
28 | mkdir -p `dirname $LOG_FILE`
29 | 
30 | if [ "$1"x == "train"x ]; then
31 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
32 |                        --phase train --gathered n --loss_balance y --log_to_file n \
33 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
34 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
35 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} 2>&1 | tee ${LOG_FILE}
36 | 
37 | elif [ "$1"x == "resume"x ]; then
38 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
39 |                        --phase train --gathered n --loss_balance y --log_to_file n \
40 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
41 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
42 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
43 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
44 | 
45 | elif [ "$1"x == "debug"x ]; then
46 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
47 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
48 | 
49 | elif [ "$1"x == "val"x ]; then
50 |   ${PYTHON} -u main.py --configs ${CONFIGS} --data_dir ${DATA_DIR} \
51 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME}_mscrop \
52 |                        --phase test --gpu 0 1 2 3 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
53 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n \
54 |                        --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms_flip
55 |                        # 2>&1 | tee -a ${LOG_FILE}
56 | 
57 |   cd lib/metrics
58 |   ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
59 |                                    --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms_flip/label \
60 |                                    --gt_dir ${DATA_DIR}/val/label  
61 |                                    # >> "../../"${LOG_FILE} 2>&1
62 | 
63 | elif [ "$1"x == "test"x ]; then
64 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
65 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
66 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
67 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
68 | 
69 | else
70 |   echo "$1"x" is invalid..."
71 | fi
72 | 


--------------------------------------------------------------------------------
/scripts/ade20k/run_res101d8_fcn_ade20k_seg.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | 
 7 | # check the enviroment info
 8 | nvidia-smi
 9 | 
10 | ${PYTHON} -m pip install torchcontrib
11 | ${PYTHON} -m pip install pydensecrf
12 | 
13 | export PYTHONPATH="$PWD":$PYTHONPATH
14 | 
15 | cd ../../
16 | 
17 | DATA_DIR="${DATA_ROOT}/ade20k"
18 | SAVE_DIR="${DATA_ROOT}/seg_result/ade20k/"
19 | BACKBONE="deepbase_resnet101_dilated8"
20 | CONFIGS="configs/ade20k/${BACKBONE}.json"
21 | CONFIGS_TEST="configs/ade20k/deepbase_resnet101_dilated8_test.json"
22 | 
23 | MODEL_NAME="fcnet"
24 | LOSS_TYPE="fs_auxce_loss"
25 | CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$2
26 | PRETRAINED_MODEL="./pretrained_model/resnet101-imagenet.pth"
27 | MAX_ITERS=150000
28 | 
29 | LOG_FILE="./log/ade20k/${CHECKPOINTS_NAME}.log"
30 | echo "Logging to $LOG_FILE"
31 | mkdir -p `dirname $LOG_FILE`
32 | 
33 | if [ "$1"x == "train"x ]; then
34 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
35 |                        --phase train --gathered n --loss_balance y --log_to_file n \
36 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --gpu 0 1 2 3 \
37 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --max_iters ${MAX_ITERS} \
38 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL} \
39 |                        2>&1 | tee ${LOG_FILE}
40 | 
41 | elif [ "$1"x == "resume"x ]; then
42 |   ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
43 |                        --phase train --gathered n --loss_balance y --log_to_file n \
44 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --max_iters ${MAX_ITERS} \
45 |                        --data_dir ${DATA_DIR} --loss_type ${LOSS_TYPE} --gpu 0 1 2 3 \
46 |                        --resume_continue y --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
47 |                        --checkpoints_name ${CHECKPOINTS_NAME} --pretrained ${PRETRAINED_MODEL}  2>&1 | tee -a ${LOG_FILE}
48 | 
49 | elif [ "$1"x == "debug"x ]; then
50 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
51 |                        --phase debug --gpu 0 --log_to_file n 2>&1 | tee ${LOG_FILE}
52 | 
53 | 
54 | elif [ "$1"x == "val"x ]; then
55 |   ${PYTHON} -u main.py --configs ${CONFIGS_TEST} --data_dir ${DATA_DIR} \
56 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
57 |                        --phase test --gpu 0 1 2 3 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
58 |                        --test_dir ${DATA_DIR}/val/image --log_to_file n \
59 |                        --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms
60 | 
61 |   # cd lib/metrics
62 |   # ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS} \
63 |   #                                  --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms \
64 |   #                                  --gt_dir ${DATA_DIR}/val/label  
65 | 
66 | elif [ "$1"x == "test"x ]; then
67 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
68 |                        --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
69 |                        --phase test --gpu 0 --resume ./checkpoints/ade20k/${CHECKPOINTS_NAME}_latest.pth \
70 |                        --test_dir ${DATA_DIR}/test --log_to_file n --out_dir test 2>&1 | tee -a ${LOG_FILE}
71 | 
72 | else
73 |   echo "$1"x" is invalid..."
74 | fi
75 | 


--------------------------------------------------------------------------------
/scripts/mapillary/run_h_48_d_4_ocr_b.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 3 | cd $SCRIPTPATH
 4 | cd ../../../
 5 | . config.profile
 6 | # check the enviroment info
 7 | nvidia-smi
 8 | ${PYTHON} -m pip install yacs
 9 | ${PYTHON} -m pip install torchcontrib
10 | ${PYTHON} -m pip install pydensecrf
11 | 
12 | export PYTHONPATH="$PWD":$PYTHONPATH
13 | 
14 | DATA_DIR="${DATA_ROOT}/mapillary-vista-v1.1"
15 | SAVE_DIR="${DATA_ROOT}/seg_result/mapillary/"
16 | BACKBONE="hrnet48"
17 | 
18 | CONFIGS="configs/mapillary/H_48_D_4_1024x1024.json"
19 | 
20 | MODEL_NAME="hrnet_w48_ocr_b"
21 | LOSS_TYPE="fs_auxce_loss"
22 | LOG_FILE="./log/mapillary/${CHECKPOINTS_NAME}.log"
23 | LOG_FILE="./log/mapillary/${CHECKPOINTS_NAME}.log"
24 | echo "Logging to $LOG_FILE"
25 | mkdir -p `dirname $LOG_FILE`
26 | 
27 | PRETRAINED_MODEL="./pretrained_model/hrnetv2_w48_imagenet_pretrained.pth"
28 | MAX_ITERS=500000
29 | BATCH_SIZE=16
30 | 
31 | if [ "$1"x == "train"x ]; then
32 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
33 |                        --drop_last y \
34 |                        --phase train \
35 |                        --gathered n \
36 |                        --loss_balance y \
37 |                        --log_to_file n \
38 |                        --backbone ${BACKBONE} \
39 |                        --model_name ${MODEL_NAME} \
40 |                        --gpu 0 1 2 3 \
41 |                        --data_dir ${DATA_DIR} \
42 |                        --loss_type ${LOSS_TYPE} \
43 |                        --max_iters ${MAX_ITERS} \
44 |                        --checkpoints_name ${CHECKPOINTS_NAME} \
45 |                        --pretrained ${PRETRAINED_MODEL} \
46 |                        --train_batch_size ${BATCH_SIZE}
47 |                        --base_lr 0.02 \
48 |                        --test_interval 10000 \
49 |                        2>&1 | tee ${LOG_FILE}
50 | 
51 | 
52 | elif [ "$1"x == "resume"x ]; then
53 |   ${PYTHON} -u main.py --configs ${CONFIGS} \
54 |                        --drop_last y \
55 |                        --include_val y  \
56 |                        --phase train \
57 |                        --gathered n \
58 |                        --loss_balance y \
59 |                        --log_to_file n \
60 |                        --backbone ${BACKBONE} \
61 |                        --model_name ${MODEL_NAME} \
62 |                        --max_iters ${MAX_ITERS} \
63 |                        --data_dir ${DATA_DIR} \
64 |                        --loss_type ${LOSS_TYPE} \
65 |                        --gpu 0 1 2 3 \
66 |                        --resume_continue y \
67 |                        --resume ./checkpoints/cityscapes/${CHECKPOINTS_NAME}_latest.pth \
68 |                        --checkpoints_name ${CHECKPOINTS_NAME} \
69 |                        --train_batch_size ${BATCH_SIZE}
70 |                         2>&1 | tee -a ${LOG_FILE}


--------------------------------------------------------------------------------
/segmentor/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/segmentor/__init__.py


--------------------------------------------------------------------------------
/segmentor/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openseg-group/openseg.pytorch/aefc75517b09068d7131a69420bc5f66cb41f0ee/segmentor/tools/__init__.py


--------------------------------------------------------------------------------
/segmentor/tools/evaluator/__init__.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | 
 3 | from lib.utils.tools.logger import Logger as Log
 4 | from . import standard
 5 | 
 6 | evaluators = {
 7 |     'standard': standard.StandardEvaluator
 8 | }
 9 | 
10 | 
11 | def get_evaluator(configer, trainer, name=None):
12 |     name = os.environ.get('evaluator', 'standard')
13 | 
14 |     if not name in evaluators:
15 |         raise RuntimeError('Unknown evaluator name: {}'.format(name))
16 |     klass = evaluators[name]
17 |     Log.info('Using evaluator: {}'.format(klass.__name__))
18 | 
19 |     return klass(configer, trainer)
20 | 


--------------------------------------------------------------------------------
/segmentor/tools/evaluator/base.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | 
 3 | import torch
 4 | import torch.nn as nn
 5 | import torch.nn.functional as F
 6 | import torch.backends.cudnn as cudnn
 7 | 
 8 | from lib.utils.tools.logger import Logger as Log
 9 | from lib.metrics import running_score as rslib
10 | from lib.metrics import F1_running_score as fscore_rslib
11 | 
12 | 
13 | class _BaseEvaluator:
14 | 
15 |     def __init__(self, configer, trainer):
16 |         self.configer = configer
17 |         self.trainer = trainer
18 |         self._init_running_scores()
19 |         self.conditions = configer.conditions
20 | 
21 |     def use_me(self):
22 |         raise NotImplementedError
23 | 
24 |     def _init_running_scores(self):
25 |         raise NotImplementedError
26 | 
27 |     def update_score(self, *args, **kwargs):
28 |         raise NotImplementedError
29 | 
30 |     def print_scores(self, show_miou=True):
31 |         for key, rs in self.running_scores.items():
32 |             Log.info('Result for {}'.format(key))
33 |             if isinstance(rs, fscore_rslib.F1RunningScore):
34 |                 FScore, FScore_cls = rs.get_scores()
35 |                 Log.info('Mean FScore: {}'.format(FScore))
36 |                 Log.info(
37 |                     'Class-wise FScore: {}'.format(
38 |                         ', '.join(
39 |                             '{:.3f}'.format(x)
40 |                             for x in FScore_cls
41 |                         )
42 |                     )
43 |                 )
44 |             elif isinstance(rs, rslib.SimpleCounterRunningScore):
45 |                 Log.info('ACC: {}\n'.format(rs.get_mean_acc()))
46 |             else:
47 |                 if show_miou and hasattr(rs, 'get_mean_iou'):
48 |                     Log.info('Mean IOU: {}\n'.format(rs.get_mean_iou()))
49 |                 Log.info('Pixel ACC: {}\n'.format(rs.get_pixel_acc()))
50 | 
51 |                 if hasattr(rs, 'n_classes') and rs.n_classes == 2:
52 |                     Log.info(
53 |                         'F1 Score: {} Precision: {} Recall: {}\n'
54 |                         .format(*rs.get_F1_score())
55 |                     )
56 | 
57 |     def prepare_validaton(self):
58 |         """
59 |         Replicate models if using diverse size validation.
60 |         """
61 |         device_ids = list(range(len(self.configer.get('gpu'))))
62 |         if self.conditions.diverse_size:
63 |             cudnn.benchmark = False
64 |             assert self.configer.get('val', 'batch_size') <= len(device_ids)
65 |             replicas = nn.parallel.replicate(
66 |                 self.trainer.seg_net.module, device_ids)
67 |             return replicas
68 | 
69 |     def update_performance(self):
70 | 
71 |         try:
72 |             rs = self.running_scores[self.save_net_main_key]
73 |             if self.save_net_metric == 'miou':
74 |                 perf = rs.get_mean_iou()
75 |             elif self.save_net_metric == 'acc':
76 |                 perf = rs.get_pixel_acc()
77 | 
78 |             max_perf = self.configer.get('max_performance')
79 |             self.configer.update(['performance'], perf)
80 |             if perf > max_perf:
81 |                 Log.info('Performance {} -> {}'.format(max_perf, perf))
82 |         except Exception as e:
83 |             Log.warn(e)
84 | 
85 |     def reset(self):
86 |         for rs in self.running_scores.values():
87 |             rs.reset()
88 | 


--------------------------------------------------------------------------------