├── docs
│   ├── Readme.md
│   └── figure1.png
├── mmseg
│   ├── models
│   │   ├── Readme.md
│   │   ├── necks
│   │   │   └── __init__.py
│   │   ├── losses
│   │   │   ├── __init__.py
│   │   │   ├── accuracy.py
│   │   │   ├── utils.py
│   │   │   ├── dice_loss.py
│   │   │   └── cross_entropy_loss.py
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   └── res_layer.py
│   │   ├── backbones
│   │   │   └── __init__.py
│   │   ├── segmentors
│   │   │   ├── __init__.py
│   │   │   └── cascade_encoder_decoder.py
│   │   ├── decode_heads
│   │   │   ├── __init__.py
│   │   │   └── aff_head.py
│   │   ├── __init__.py
│   │   └── builder.py
│   ├── apis
│   │   ├── __init__.py
│   │   ├── inference.py
│   │   └── train.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── collect_env.py
│   │   ├── logger.py
│   │   ├── misc.py
│   │   └── set_env.py
│   ├── core
│   │   ├── seg
│   │   │   ├── sampler
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_pixel_sampler.py
│   │   │   │   └── ohem_pixel_sampler.py
│   │   │   ├── __init__.py
│   │   │   └── builder.py
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   └── misc.py
│   │   ├── evaluation
│   │   │   ├── __init__.py
│   │   │   └── eval_hooks.py
│   │   └── __init__.py
│   ├── datasets
│   │   ├── pipelines
│   │   │   ├── formating.py
│   │   │   ├── __init__.py
│   │   │   ├── compose.py
│   │   │   ├── test_time_aug.py
│   │   │   └── loading.py
│   │   ├── __init__.py
│   │   ├── coco_stuff.py
│   │   ├── builder.py
│   │   ├── ade.py
│   │   └── cityscapes.py
│   ├── ops
│   │   ├── __init__.py
│   │   ├── wrappers.py
│   │   └── encoding.py
│   ├── version.py
│   └── __init__.py
├── tools
│   ├── dist_train.sh
│   ├── dist_test.sh
│   ├── cmd.sh
│   ├── get_flops_fps.py
│   └── train.py
├── configs
│   ├── _base_
│   │   ├── default_runtime.py
│   │   ├── schedules
│   │   │   ├── schedule_20k.py
│   │   │   ├── schedule_40k.py
│   │   │   ├── schedule_80k.py
│   │   │   └── schedule_160k.py
│   │   ├── models
│   │   │   └── afformer.py
│   │   └── datasets
│   │       ├── cityscapes_640x1024.py
│   │       ├── cityscapes_768x1024.py
│   │       ├── cityscapes_1024x1024.py
│   │       ├── cityscapes.py
│   │       ├── coco_stuff10k.py
│   │       ├── ade20k_std.py
│   │       └── ade20k.py
│   └── AFFormer
│       ├── AFFormer_base_ade20k.py
│       ├── AFFormer_tiny_ade20k.py
│       ├── AFFormer_small_ade20k.py
│       ├── AFFormer_base_cityscapes.py
│       ├── AFFormer_small_cityscapes.py
│       └── AFFormer_tiny_cityscapes.py
└── README.md

/docs/Readme.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mmseg/models/Readme.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/docs/figure1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dongbo811/AFFormer/HEAD/docs/figure1.png
--------------------------------------------------------------------------------
/mmseg/models/utils/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
'''
from .res_layer import ResLayer


__all__ = [
    'ResLayer'
]
--------------------------------------------------------------------------------
/mmseg/core/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluation import *  # noqa: F401, F403
from .seg import *  # noqa: F401, F403
from .utils import *  # noqa: F401, F403
--------------------------------------------------------------------------------
/mmseg/models/decode_heads/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
from .aff_head import CLS
from .decode_head import BaseDecodeHead

__all__ = ['CLS', 'BaseDecodeHead']
--------------------------------------------------------------------------------
/mmseg/models/necks/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/necks/__init__.py
'''
from .fpn import FPN

__all__ = ['FPN']
--------------------------------------------------------------------------------
/mmseg/core/utils/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/utils/__init__.py
'''
from .misc import add_prefix

__all__ = ['add_prefix']
--------------------------------------------------------------------------------
/mmseg/models/backbones/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/backbone/__init__.py
'''

from .resnet import ResNet

__all__ = [
    'ResNet'
]
--------------------------------------------------------------------------------
/mmseg/ops/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/ops/__init__.py
'''
from .encoding import Encoding
from .wrappers import Upsample, resize

__all__ = ['Upsample', 'resize', 'Encoding']
--------------------------------------------------------------------------------
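The `resize` and `Upsample` helpers exported by `mmseg.ops` above are thin wrappers around `torch.nn.functional.interpolate`, used by decode heads to bring multi-scale features to a common resolution. A minimal usage sketch follows; the keyword names are assumed from upstream mmsegmentation's `wrappers.py` and the shapes are illustrative only:

    import torch
    from mmseg.ops import resize

    feat = torch.randn(2, 256, 32, 32)   # N, C, H, W feature map
    out = resize(feat,
                 size=(128, 128),        # target spatial size
                 mode='bilinear',
                 align_corners=False)    # mmseg passes this flag explicitly
    print(out.shape)                     # torch.Size([2, 256, 128, 128])
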
/mmseg/core/seg/sampler/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/sampler/__init__.py
'''
from .base_pixel_sampler import BasePixelSampler
from .ohem_pixel_sampler import OHEMPixelSampler

__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
--------------------------------------------------------------------------------
/mmseg/core/seg/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/__init__.py
'''
from .builder import build_pixel_sampler
from .sampler import BasePixelSampler, OHEMPixelSampler

__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
--------------------------------------------------------------------------------
/configs/_base_/default_runtime.py:
--------------------------------------------------------------------------------
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_20k.py:
--------------------------------------------------------------------------------
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=20000)
checkpoint_config = dict(by_epoch=False, interval=2000)
evaluation = dict(interval=2000, metric='mIoU')
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_40k.py:
--------------------------------------------------------------------------------
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, interval=4000)
evaluation = dict(interval=4000, metric='mIoU')
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_80k.py:
--------------------------------------------------------------------------------
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=80000)
checkpoint_config = dict(by_epoch=False, interval=8000)
evaluation = dict(interval=8000, metric='mIoU')
--------------------------------------------------------------------------------
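All of the schedule configs above rely on MMCV's 'poly' learning-rate policy with iteration-based decay. The sketch below reproduces the decay rule as implemented (to the author's recollection) by MMCV's poly LR hook, ignoring warmup, and should be read as an illustration rather than the library code itself:

    def poly_lr(base_lr, cur_iter, max_iters, power=0.9, min_lr=1e-4):
        """Polynomial decay matching lr_config in the schedule_*.py files above."""
        coeff = (1 - cur_iter / max_iters) ** power
        return (base_lr - min_lr) * coeff + min_lr

    # e.g. with schedule_80k.py settings (lr=0.01, power=0.9, min_lr=1e-4):
    poly_lr(0.01, 0, 80000)       # 0.01 at the start
    poly_lr(0.01, 40000, 80000)   # ~0.0054 halfway through
    poly_lr(0.01, 80000, 80000)   # 0.0001 at the end
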
/mmseg/models/segmentors/__init__.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/__init__.py
'''
from .base import BaseSegmentor
from .cascade_encoder_decoder import CascadeEncoderDecoder
from .encoder_decoder import EncoderDecoder

__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder']
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_160k.py:
--------------------------------------------------------------------------------
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, interval=16000)
evaluation = dict(interval=5000, metric='mIoU', pre_eval=True)
--------------------------------------------------------------------------------
/mmseg/core/seg/builder.py:
--------------------------------------------------------------------------------
'''
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/builder.py
'''
from mmcv.utils import Registry, build_from_cfg

PIXEL_SAMPLERS = Registry('pixel sampler')


def build_pixel_sampler(cfg, **default_args):
    """Build pixel sampler for segmentation map."""
    return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
--------------------------------------------------------------------------------
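`build_pixel_sampler` above resolves a config dict against the `PIXEL_SAMPLERS` registry, so a decode head can construct its sampler from configuration alone. A hedged sketch, not taken from this repo: the `thres`/`min_kept` values are illustrative, the `context` keyword is assumed from upstream mmsegmentation's OHEMPixelSampler, and `decode_head` is a hypothetical head instance:

    from mmseg.core.seg import build_pixel_sampler

    # Typically written in a model config and forwarded by the decode head:
    sampler_cfg = dict(type='OHEMPixelSampler', thres=0.7, min_kept=100000)
    sampler = build_pixel_sampler(sampler_cfg, context=decode_head)  # decode_head: hypothetical
    # sampler.sample(seg_logit, seg_label) then yields per-pixel weights for the loss.
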
3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/__init__.py 5 | ''' 6 | from .base import BaseSegmentor 7 | from .cascade_encoder_decoder import CascadeEncoderDecoder 8 | from .encoder_decoder import EncoderDecoder 9 | 10 | __all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder'] 11 | -------------------------------------------------------------------------------- /configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=160000) 8 | checkpoint_config = dict(by_epoch=False, interval=16000) 9 | evaluation = dict(interval=5000, metric='mIoU', pre_eval=True) 10 | -------------------------------------------------------------------------------- /mmseg/core/seg/builder.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/builder.py 5 | ''' 6 | from mmcv.utils import Registry, build_from_cfg 7 | 8 | PIXEL_SAMPLERS = Registry('pixel sampler') 9 | 10 | 11 | def build_pixel_sampler(cfg, **default_args): 12 | """Build pixel sampler for segmentation map.""" 13 | return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) 14 | -------------------------------------------------------------------------------- /mmseg/datasets/pipelines/formating.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/formating.py 5 | ''' 6 | import warnings 7 | 8 | from .formatting import * 9 | 10 | warnings.warn('DeprecationWarning: mmseg.datasets.pipelines.formating will be ' 11 | 'deprecated in 2021, please replace it with ' 12 | 'mmseg.datasets.pipelines.formatting.') 13 | -------------------------------------------------------------------------------- /mmseg/utils/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/__init__.py 5 | ''' 6 | from .collect_env import collect_env 7 | from .logger import get_root_logger 8 | from .misc import find_latest_checkpoint 9 | from .set_env import setup_multi_processes 10 | 11 | __all__ = [ 12 | 'get_root_logger', 'collect_env', 'find_latest_checkpoint', 13 | 'setup_multi_processes' 14 | ] 15 | -------------------------------------------------------------------------------- /tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | 2 | CONFIG=$1 3 | GPUS=$2 4 | NNODES=${NNODES:-1} 5 | NODE_RANK=${NODE_RANK:-0} 6 | PORT=${PORT:-29500} 7 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 8 | 9 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 10 | python -m torch.distributed.launch \ 11 | --nnodes=$NNODES \ 12 | --node_rank=$NODE_RANK \ 13 | --master_addr=$MASTER_ADDR \ 14 | --nproc_per_node=$GPUS \ 15 | --master_port=$PORT \ 16 | $(dirname "$0")/train.py \ 17 | $CONFIG \ 18 | --seed 0 \ 19 | --launcher pytorch ${@:3} 20 | -------------------------------------------------------------------------------- /mmseg/apis/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .inference import inference_segmentor, init_segmentor, show_result_pyplot 3 | from .test import multi_gpu_test, single_gpu_test 4 | from .train import (get_root_logger, init_random_seed, set_random_seed, 5 | train_segmentor) 6 | 7 | __all__ = [ 8 | 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', 9 | 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', 10 | 'show_result_pyplot', 'init_random_seed' 11 | ] 12 | -------------------------------------------------------------------------------- /tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | 2 | CONFIG=$1 3 | CHECKPOINT=$2 4 | GPUS=$3 5 | NNODES=${NNODES:-1} 6 | NODE_RANK=${NODE_RANK:-0} 7 | PORT=${PORT:-29500} 8 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 9 | 10 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 11 | python -m torch.distributed.launch \ 12 | --nnodes=$NNODES \ 13 | --node_rank=$NODE_RANK \ 14 | --master_addr=$MASTER_ADDR \ 15 | --nproc_per_node=$GPUS \ 16 | --master_port=$PORT \ 17 | $(dirname "$0")/test.py \ 18 | $CONFIG \ 19 | $CHECKPOINT \ 20 | --launcher pytorch \ 21 | ${@:4} -------------------------------------------------------------------------------- /mmseg/core/seg/sampler/base_pixel_sampler.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/sampler/base_pixel_smaplesr.py 5 | ''' 6 | from abc import ABCMeta, abstractmethod 7 | 8 | 9 | class BasePixelSampler(metaclass=ABCMeta): 10 | """Base class of pixel sampler.""" 11 | 12 | def __init__(self, **kwargs): 13 | pass 14 | 15 | @abstractmethod 16 | def sample(self, seg_logit, seg_label): 17 | """Placeholder for sample function.""" 18 | -------------------------------------------------------------------------------- /mmseg/models/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
3 | ''' 4 | from .backbones import * # noqa: F401,F403 5 | from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, 6 | build_head, build_loss, build_segmentor) 7 | from .decode_heads import * # noqa: F401,F403 8 | from .losses import * # noqa: F401,F403 9 | from .necks import * # noqa: F401,F403 10 | from .segmentors import * # noqa: F401,F403 11 | 12 | __all__ = [ 13 | 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', 14 | 'build_head', 'build_loss', 'build_segmentor' 15 | ] 16 | -------------------------------------------------------------------------------- /mmseg/core/utils/misc.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/utils/misc.py 5 | ''' 6 | def add_prefix(inputs, prefix): 7 | """Add prefix for dict. 8 | 9 | Args: 10 | inputs (dict): The input dict with str keys. 11 | prefix (str): The prefix to add. 12 | 13 | Returns: 14 | 15 | dict: The dict with keys updated with ``prefix``. 16 | """ 17 | 18 | outputs = dict() 19 | for name, value in inputs.items(): 20 | outputs[f'{prefix}.{name}'] = value 21 | 22 | return outputs 23 | -------------------------------------------------------------------------------- /mmseg/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/apis/train.py 5 | ''' 6 | from .class_names import get_classes, get_palette 7 | from .eval_hooks import DistEvalHook, EvalHook 8 | from .metrics import (eval_metrics, intersect_and_union, mean_dice, 9 | mean_fscore, mean_iou, pre_eval_to_metrics) 10 | 11 | __all__ = [ 12 | 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore', 13 | 'eval_metrics', 'get_classes', 'get_palette', 'pre_eval_to_metrics', 14 | 'intersect_and_union' 15 | ] 16 | -------------------------------------------------------------------------------- /mmseg/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/colect_env.py 5 | ''' 6 | from mmcv.utils import collect_env as collect_base_env 7 | from mmcv.utils import get_git_hash 8 | 9 | import mmseg 10 | 11 | 12 | def collect_env(): 13 | """Collect the information of the running environments.""" 14 | env_info = collect_base_env() 15 | env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' 16 | 17 | return env_info 18 | 19 | 20 | if __name__ == '__main__': 21 | for name, val in collect_env().items(): 22 | print('{}: {}'.format(name, val)) 23 | -------------------------------------------------------------------------------- /mmseg/version.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/version.py 5 | ''' 6 | 7 | __version__ = '0.21.1' 8 | 9 | 10 | def parse_version_info(version_str): 11 | version_info = [] 12 | for x in version_str.split('.'): 13 | if x.isdigit(): 14 | version_info.append(int(x)) 15 | elif x.find('rc') != -1: 16 | patch_version = x.split('rc') 17 | version_info.append(int(patch_version[0])) 18 | version_info.append(f'rc{patch_version[1]}') 19 | return tuple(version_info) 20 | 21 | 22 | version_info = parse_version_info(__version__) 23 | -------------------------------------------------------------------------------- /mmseg/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/__init__.py 5 | ''' 6 | from .accuracy import Accuracy, accuracy 7 | from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, 8 | cross_entropy, mask_cross_entropy) 9 | from .dice_loss import DiceLoss 10 | from .focal_loss import FocalLoss 11 | from .lovasz_loss import LovaszLoss 12 | from .utils import reduce_loss, weight_reduce_loss, weighted_loss 13 | 14 | __all__ = [ 15 | 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', 16 | 'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', 17 | 'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss', 18 | 'FocalLoss' 19 | ] 20 | -------------------------------------------------------------------------------- /configs/_base_/models/afformer.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', requires_grad=True) 3 | ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | type='EncoderDecoder', 6 | pretrained=None, 7 | backbone=dict( 8 | type='afformer_base', 9 | strides=[4, 2, 2, 2]), 10 | decode_head=dict( 11 | type='CLS', 12 | in_channels=256, 13 | in_index=[0, 1, 2 ,3], 14 | channels=512, 15 | aff_channels=512, 16 | dropout_ratio=0.1, 17 | num_classes=150, 18 | norm_cfg=ham_norm_cfg, 19 | align_corners=False, 20 | loss_decode=dict( 21 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 22 | # model training and testing settings 23 | train_cfg=dict(), 24 | test_cfg=dict(mode='whole')) 25 | 26 | -------------------------------------------------------------------------------- /mmseg/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/__inti__.py 5 | ''' 6 | from .ade import ADE20KDataset 7 | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset 8 | from .cityscapes import CityscapesDataset 9 | from .coco_stuff import COCOStuffDataset 10 | from .custom import CustomDataset 11 | from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, 12 | RepeatDataset) 13 | 14 | 15 | __all__ = [ 16 | 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 17 | 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', 18 | 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', 19 | 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', 20 | 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', 21 | 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', 22 | 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset' 23 | ] 24 | -------------------------------------------------------------------------------- /configs/AFFormer/AFFormer_base_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/afformer.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | pretrained='./pretained_weight/AFFormer_base_ImageNet1k.pth', 7 | backbone=dict( 8 | type='afformer_base', 9 | strides=[4, 2, 2, 2]), 10 | decode_head=dict( 11 | in_channels=[216], 12 | in_index=[3], 13 | channels=256, 14 | aff_channels=256, 15 | aff_kwargs=dict(MD_R=16), 16 | num_classes=150 17 | ) 18 | ) 19 | 20 | # AdamW optimizer, no weight decay for position embedding & layer norm in backbone 21 | optimizer = dict(_delete_=True, type='AdamW', lr=0.0003, betas=(0.9, 0.999), weight_decay=0.01) 22 | 23 | lr_config = dict(_delete_=True, policy='poly', 24 | warmup='linear', 25 | warmup_iters=1500, 26 | warmup_ratio=1e-6, 27 | power=1.0, min_lr=0.0, by_epoch=False) 28 | 29 | # By default, models are trained on 2 GPUs with 8 images per GPU 30 | data=dict(samples_per_gpu=8, workers_per_gpu=8) 31 | find_unused_parameters=True 32 | -------------------------------------------------------------------------------- /configs/AFFormer/AFFormer_tiny_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/afformer.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | pretrained='./pretained_weight/AFFormer_tiny_ImageNet1k.pth', 7 | backbone=dict( 8 | type='afformer_tiny', 9 | strides=[4, 2, 2, 2]), 10 | decode_head=dict( 11 | in_channels=[216], 12 | in_index=[3], 13 | channels=256, 14 | aff_channels=256, 15 | aff_kwargs=dict(MD_R=16), 16 | num_classes=150 17 | ) 18 | ) 19 | 20 | # AdamW optimizer, no weight decay for position embedding & layer norm in backbone 21 | optimizer = dict(_delete_=True, type='AdamW', lr=0.0003, betas=(0.9, 0.999), weight_decay=0.01) 22 | 23 | lr_config = dict(_delete_=True, policy='poly', 24 | warmup='linear', 25 | warmup_iters=1500, 26 | warmup_ratio=1e-6, 27 | power=1.0, min_lr=0.0, by_epoch=False) 28 | 29 | # By default, models are trained on 2 GPUs with 8 images per GPU 30 | data=dict(samples_per_gpu=8, workers_per_gpu=8) 31 | find_unused_parameters=True 32 | -------------------------------------------------------------------------------- 
/configs/AFFormer/AFFormer_small_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/afformer.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | pretrained='./pretained_weight/AFFormer_small_ImageNet1k.pth', 7 | backbone=dict( 8 | type='afformer_small', 9 | strides=[4, 2, 2, 2]), 10 | decode_head=dict( 11 | in_channels=[216], 12 | in_index=[3], 13 | channels=256, 14 | aff_channels=256, 15 | aff_kwargs=dict(MD_R=16), 16 | num_classes=150 17 | ) 18 | ) 19 | 20 | # AdamW optimizer, no weight decay for position embedding & layer norm in backbone 21 | optimizer = dict(_delete_=True, type='AdamW', lr=0.0003, betas=(0.9, 0.999), weight_decay=0.01) 22 | 23 | lr_config = dict(_delete_=True, policy='poly', 24 | warmup='linear', 25 | warmup_iters=1500, 26 | warmup_ratio=1e-6, 27 | power=1.0, min_lr=0.0, by_epoch=False) 28 | 29 | # By default, models are trained on 2 GPUs with 8 images per GPU 30 | data=dict(samples_per_gpu=8, workers_per_gpu=8) 31 | find_unused_parameters=True 32 | -------------------------------------------------------------------------------- /mmseg/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/__init__.py 5 | ''' 6 | from .compose import Compose 7 | from .formatting import (Collect, ImageToTensor, ToDataContainer, ToTensor, 8 | Transpose, to_tensor) 9 | from .loading import LoadAnnotations, LoadImageFromFile 10 | from .test_time_aug import MultiScaleFlipAug 11 | from .transforms import (CLAHE, AdjustGamma, Normalize, Pad, 12 | PhotoMetricDistortion, RandomCrop, RandomCutOut, 13 | RandomFlip, RandomMosaic, RandomRotate, Rerange, 14 | Resize, RGB2Gray, SegRescale) 15 | 16 | __all__ = [ 17 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 18 | 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', 19 | 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 20 | 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', 21 | 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', 'RandomCutOut', 22 | 'RandomMosaic' 23 | ] 24 | -------------------------------------------------------------------------------- /configs/AFFormer/AFFormer_base_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/afformer.py', '../_base_/datasets/cityscapes_1024x1024.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | pretrained='./pretained_weight/AFFormer_base_ImageNet1k.pth', 7 | backbone=dict( 8 | type='afformer_base', 9 | strides=[4, 2, 2, 2]), 10 | decode_head=dict( 11 | in_channels=[216], 12 | in_index=[3], 13 | channels=256, 14 | aff_channels=256, 15 | aff_kwargs=dict(MD_R=16), 16 | num_classes=19 17 | ) 18 | ) 19 | 20 | # AdamW optimizer, no weight decay for position embedding & layer norm in backbone 21 | optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01) 22 | 23 | lr_config = dict(_delete_=True, policy='poly', 24 | warmup='linear', 25 | warmup_iters=1500, 26 | warmup_ratio=1e-6, 27 | power=1.0, min_lr=0.0, by_epoch=False) 28 | 29 | # By default, models are trained on 
2 GPUs with 4 images per GPU 30 | data=dict(samples_per_gpu=4, workers_per_gpu=4) 31 | find_unused_parameters=True 32 | -------------------------------------------------------------------------------- /configs/AFFormer/AFFormer_small_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/afformer.py', '../_base_/datasets/cityscapes_1024x1024.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | pretrained='./pretained_weight/AFFormer_small_ImageNet1k.pth', 7 | backbone=dict( 8 | type='afformer_small', 9 | strides=[4, 2, 2, 2]), 10 | decode_head=dict( 11 | in_channels=[216], 12 | in_index=[3], 13 | channels=256, 14 | aff_channels=256, 15 | aff_kwargs=dict(MD_R=16), 16 | num_classes=19 17 | ) 18 | ) 19 | 20 | # AdamW optimizer, no weight decay for position embedding & layer norm in backbone 21 | optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01) 22 | 23 | lr_config = dict(_delete_=True, policy='poly', 24 | warmup='linear', 25 | warmup_iters=1500, 26 | warmup_ratio=1e-6, 27 | power=1.0, min_lr=0.0, by_epoch=False) 28 | 29 | # By default, models are trained on 2 GPUs with 4 images per GPU 30 | data=dict(samples_per_gpu=4, workers_per_gpu=4) 31 | find_unused_parameters=True 32 | -------------------------------------------------------------------------------- /configs/AFFormer/AFFormer_tiny_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/afformer.py', '../_base_/datasets/cityscapes_1024x1024.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | pretrained='./pretained_weight/AFFormer_tiny_ImageNet1k.pth', 7 | backbone=dict( 8 | type='afformer_tiny', 9 | strides=[4, 2, 2, 2]), 10 | decode_head=dict( 11 | in_channels=[216], 12 | in_index=[3], 13 | channels=256, 14 | aff_channels=256, 15 | aff_kwargs=dict(MD_R=16), 16 | num_classes=19 17 | ) 18 | ) 19 | 20 | # AdamW optimizer, no weight decay for position embedding & layer norm in backbone 21 | optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01) 22 | 23 | lr_config = dict(_delete_=True, policy='poly', 24 | warmup='linear', 25 | warmup_iters=1500, 26 | warmup_ratio=1e-6, 27 | power=1.0, min_lr=0.0, by_epoch=False) 28 | 29 | # By default, models are trained on 2 GPUs with 4 images per GPU 30 | data=dict(samples_per_gpu=4, workers_per_gpu=4) 31 | find_unused_parameters=True 32 | -------------------------------------------------------------------------------- /mmseg/utils/logger.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/logger.py 5 | ''' 6 | import logging 7 | 8 | from mmcv.utils import get_logger 9 | 10 | 11 | def get_root_logger(log_file=None, log_level=logging.INFO): 12 | """Get the root logger. 13 | 14 | The logger will be initialized if it has not been initialized. By default a 15 | StreamHandler will be added. If `log_file` is specified, a FileHandler will 16 | also be added. The name of the root logger is the top-level package name, 17 | e.g., "mmseg". 18 | 19 | Args: 20 | log_file (str | None): The log filename. If specified, a FileHandler 21 | will be added to the root logger. 
22 | log_level (int): The root logger level. Note that only the process of 23 | rank 0 is affected, while other processes will set the level to 24 | "Error" and be silent most of the time. 25 | 26 | Returns: 27 | logging.Logger: The root logger. 28 | """ 29 | 30 | logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level) 31 | 32 | return logger 33 | -------------------------------------------------------------------------------- /tools/cmd.sh: -------------------------------------------------------------------------------- 1 | # base 2 | bash dist_train.sh ./configs/AFFormer/AFFormer_base_ade20k.py 4 3 | bash dist_train.sh ./configs/AFFormer/AFFormer_base_cityscapes.py 2 4 | 5 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_base_ade20k.py ./pretained_weight/AFFormer_base_ade20k.pth 8 --eval mIoU 6 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_base_cityscapes.py ./pretained_weight/AFFormer_base_cityscapes.pth 8 --eval mIoU 7 | 8 | # ----------------------------------------------------------------- 9 | # small 10 | bash dist_train.sh ./configs/AFFormer/AFFormer_base_ade20k.py 4 11 | bash dist_train.sh ./configs/AFFormer/AFFormer_base_cityscapes.py 2 12 | 13 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_small_ade20k.py ./pretained_weight/AFFormer_small_ade20k.pth 8 --eval mIoU 14 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_small_cityscapes.py ./pretained_weight/AFFormer_small_cityscapes.pth 8 --eval mIoU 15 | 16 | 17 | # ----------------------------------------------------------------- 18 | # tiny 19 | bash dist_train.sh ./configs/AFFormer/AFFormer_base_ade20k.py 4 20 | bash dist_train.sh ./configs/AFFormer/AFFormer_base_cityscapes.py 2 21 | 22 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_tiny_ade20k.py ./pretained_weight/AFFormer_tiny_ade20k.pth 8 --eval mIoU 23 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_tiny_cityscapes.py ./pretained_weight/AFFormer_tiny_cityscapes.pth 8 --eval mIoU 24 | 25 | -------------------------------------------------------------------------------- /mmseg/models/decode_heads/aff_head.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2021 Alibaba Group Holding Limited. 
3 | ''' 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | from mmcv.cnn import ConvModule 8 | 9 | from mmseg.ops import resize 10 | from ..builder import HEADS 11 | from .decode_head import BaseDecodeHead 12 | 13 | 14 | @HEADS.register_module() 15 | class CLS(BaseDecodeHead): 16 | def __init__(self, 17 | aff_channels=512, 18 | aff_kwargs=dict(), 19 | **kwargs): 20 | super(CLS, self).__init__( 21 | input_transform='multiple_select', **kwargs) 22 | self.aff_channels = aff_channels 23 | 24 | self.squeeze = ConvModule( 25 | sum(self.in_channels), 26 | self.channels, 27 | 1, 28 | conv_cfg=self.conv_cfg, 29 | norm_cfg=self.norm_cfg, 30 | act_cfg=self.act_cfg) 31 | 32 | 33 | self.align = ConvModule( 34 | self.aff_channels, 35 | self.channels, 36 | 1, 37 | conv_cfg=self.conv_cfg, 38 | norm_cfg=self.norm_cfg, 39 | act_cfg=self.act_cfg) 40 | 41 | def forward(self, inputs): 42 | """Forward function.""" 43 | inputs = self._transform_inputs(inputs)[0] 44 | 45 | x = self.squeeze(inputs) 46 | 47 | output = self.cls_seg(x) 48 | return output 49 | -------------------------------------------------------------------------------- /configs/_base_/datasets/cityscapes_640x1024.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2021 Alibaba Group Holding Limited. 3 | ''' 4 | _base_ = './cityscapes.py' 5 | img_norm_cfg = dict( 6 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 7 | crop_size = (640, 1280) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations'), 11 | dict(type='Resize', img_scale=(1280, 640), ratio_range=(0.5, 2.0)), 12 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 13 | dict(type='RandomFlip', prob=0.5), 14 | dict(type='PhotoMetricDistortion'), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 19 | ] 20 | test_pipeline = [ 21 | dict(type='LoadImageFromFile'), 22 | dict( 23 | type='MultiScaleFlipAug', 24 | img_scale=(1280, 640), 25 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 26 | flip=False, 27 | transforms=[ 28 | dict(type='Resize', keep_ratio=True), 29 | dict(type='RandomFlip'), 30 | dict(type='Normalize', **img_norm_cfg), 31 | dict(type='ImageToTensor', keys=['img']), 32 | dict(type='Collect', keys=['img']), 33 | ]) 34 | ] 35 | data = dict( 36 | train=dict(pipeline=train_pipeline), 37 | val=dict(pipeline=test_pipeline), 38 | test=dict(pipeline=test_pipeline)) 39 | -------------------------------------------------------------------------------- /configs/_base_/datasets/cityscapes_768x1024.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2021 Alibaba Group Holding Limited. 
3 | ''' 4 | _base_ = './cityscapes.py' 5 | img_norm_cfg = dict( 6 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 7 | crop_size = (768, 768) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations'), 11 | dict(type='Resize', img_scale=(1536, 768), ratio_range=(0.5, 2.0)), 12 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 13 | dict(type='RandomFlip', prob=0.5), 14 | dict(type='PhotoMetricDistortion'), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 19 | ] 20 | test_pipeline = [ 21 | dict(type='LoadImageFromFile'), 22 | dict( 23 | type='MultiScaleFlipAug', 24 | img_scale=(1536, 768), 25 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 26 | flip=False, 27 | transforms=[ 28 | dict(type='Resize', keep_ratio=True), 29 | dict(type='RandomFlip'), 30 | dict(type='Normalize', **img_norm_cfg), 31 | dict(type='ImageToTensor', keys=['img']), 32 | dict(type='Collect', keys=['img']), 33 | ]) 34 | ] 35 | data = dict( 36 | train=dict(pipeline=train_pipeline), 37 | val=dict(pipeline=test_pipeline), 38 | test=dict(pipeline=test_pipeline)) 39 | -------------------------------------------------------------------------------- /mmseg/models/builder.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | ''' 4 | import warnings 5 | 6 | from mmcv.cnn import MODELS as MMCV_MODELS 7 | from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION 8 | from mmcv.utils import Registry 9 | 10 | MODELS = Registry('models', parent=MMCV_MODELS) 11 | ATTENTION = Registry('attention', parent=MMCV_ATTENTION) 12 | 13 | BACKBONES = MODELS 14 | NECKS = MODELS 15 | HEADS = MODELS 16 | LOSSES = MODELS 17 | SEGMENTORS = MODELS 18 | 19 | 20 | def build_backbone(cfg): 21 | """Build backbone.""" 22 | return BACKBONES.build(cfg) 23 | 24 | 25 | def build_neck(cfg): 26 | """Build neck.""" 27 | return NECKS.build(cfg) 28 | 29 | 30 | def build_head(cfg): 31 | """Build head.""" 32 | return HEADS.build(cfg) 33 | 34 | 35 | def build_loss(cfg): 36 | """Build loss.""" 37 | return LOSSES.build(cfg) 38 | 39 | 40 | def build_segmentor(cfg, train_cfg=None, test_cfg=None): 41 | """Build segmentor.""" 42 | if train_cfg is not None or test_cfg is not None: 43 | warnings.warn( 44 | 'train_cfg and test_cfg is deprecated, ' 45 | 'please specify them in model', UserWarning) 46 | assert cfg.get('train_cfg') is None or train_cfg is None, \ 47 | 'train_cfg specified in both outer field and model field ' 48 | assert cfg.get('test_cfg') is None or test_cfg is None, \ 49 | 'test_cfg specified in both outer field and model field ' 50 | return SEGMENTORS.build( 51 | cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) 52 | -------------------------------------------------------------------------------- /configs/_base_/datasets/cityscapes_1024x1024.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes_1024x1024.py 5 | ''' 6 | _base_ = './cityscapes.py' 7 | img_norm_cfg = dict( 8 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 9 | crop_size = (1024, 1024) 10 | train_pipeline = [ 11 | dict(type='LoadImageFromFile'), 12 | dict(type='LoadAnnotations'), 13 | dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), 14 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 15 | dict(type='RandomFlip', prob=0.5), 16 | dict(type='PhotoMetricDistortion'), 17 | dict(type='Normalize', **img_norm_cfg), 18 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 19 | dict(type='DefaultFormatBundle'), 20 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 21 | ] 22 | test_pipeline = [ 23 | dict(type='LoadImageFromFile'), 24 | dict( 25 | type='MultiScaleFlipAug', 26 | img_scale=(2048, 1024), 27 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 28 | flip=False, 29 | transforms=[ 30 | dict(type='Resize', keep_ratio=True), 31 | dict(type='RandomFlip'), 32 | dict(type='Normalize', **img_norm_cfg), 33 | dict(type='ImageToTensor', keys=['img']), 34 | dict(type='Collect', keys=['img']), 35 | ]) 36 | ] 37 | data = dict( 38 | train=dict(pipeline=train_pipeline), 39 | val=dict(pipeline=test_pipeline), 40 | test=dict(pipeline=test_pipeline)) 41 | -------------------------------------------------------------------------------- /mmseg/utils/misc.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/misc.py 5 | ''' 6 | import glob 7 | import os.path as osp 8 | import warnings 9 | 10 | 11 | def find_latest_checkpoint(path, suffix='pth'): 12 | """This function is for finding the latest checkpoint. 13 | 14 | It will be used when automatically resume, modified from 15 | https://github.com/open-mmlab/mmdetection/blob/dev-v2.20.0/mmdet/utils/misc.py 16 | 17 | Args: 18 | path (str): The path to find checkpoints. 19 | suffix (str): File extension for the checkpoint. Defaults to pth. 20 | 21 | Returns: 22 | latest_path(str | None): File path of the latest checkpoint. 23 | """ 24 | if not osp.exists(path): 25 | warnings.warn("The path of the checkpoints doesn't exist.") 26 | return None 27 | if osp.exists(osp.join(path, f'latest.{suffix}')): 28 | return osp.join(path, f'latest.{suffix}') 29 | 30 | checkpoints = glob.glob(osp.join(path, f'*.{suffix}')) 31 | if len(checkpoints) == 0: 32 | warnings.warn('The are no checkpoints in the path') 33 | return None 34 | latest = -1 35 | latest_path = '' 36 | for checkpoint in checkpoints: 37 | if len(checkpoint) < len(latest_path): 38 | continue 39 | # `count` is iteration number, as checkpoints are saved as 40 | # 'iter_xx.pth' or 'epoch_xx.pth' and xx is iteration number. 41 | count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0]) 42 | if count > latest: 43 | latest = count 44 | latest_path = checkpoint 45 | return latest_path 46 | -------------------------------------------------------------------------------- /mmseg/datasets/pipelines/compose.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/compose.py 5 | ''' 6 | import collections 7 | 8 | from mmcv.utils import build_from_cfg 9 | 10 | from ..builder import PIPELINES 11 | 12 | 13 | @PIPELINES.register_module() 14 | class Compose(object): 15 | """Compose multiple transforms sequentially. 16 | 17 | Args: 18 | transforms (Sequence[dict | callable]): Sequence of transform object or 19 | config dict to be composed. 20 | """ 21 | 22 | def __init__(self, transforms): 23 | assert isinstance(transforms, collections.abc.Sequence) 24 | self.transforms = [] 25 | for transform in transforms: 26 | if isinstance(transform, dict): 27 | transform = build_from_cfg(transform, PIPELINES) 28 | self.transforms.append(transform) 29 | elif callable(transform): 30 | self.transforms.append(transform) 31 | else: 32 | raise TypeError('transform must be callable or a dict') 33 | 34 | def __call__(self, data): 35 | """Call function to apply transforms sequentially. 36 | 37 | Args: 38 | data (dict): A result dict contains the data to transform. 39 | 40 | Returns: 41 | dict: Transformed data. 42 | """ 43 | 44 | for t in self.transforms: 45 | data = t(data) 46 | if data is None: 47 | return None 48 | return data 49 | 50 | def __repr__(self): 51 | format_string = self.__class__.__name__ + '(' 52 | for t in self.transforms: 53 | format_string += '\n' 54 | format_string += f' {t}' 55 | format_string += '\n)' 56 | return format_string 57 | -------------------------------------------------------------------------------- /configs/_base_/datasets/cityscapes.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2021 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes.py 5 | ''' 6 | # dataset settings 7 | dataset_type = 'CityscapesDataset' 8 | data_root = 'data/cityscapes/' 9 | img_norm_cfg = dict( 10 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 11 | crop_size = (512, 1024) 12 | train_pipeline = [ 13 | dict(type='LoadImageFromFile'), 14 | dict(type='LoadAnnotations'), 15 | dict(type='Resize', img_scale=(1024, 512), ratio_range=(0.5, 2.0)), 16 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 17 | dict(type='RandomFlip', prob=0.5), 18 | dict(type='PhotoMetricDistortion'), 19 | dict(type='Normalize', **img_norm_cfg), 20 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 21 | dict(type='DefaultFormatBundle'), 22 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 23 | ] 24 | test_pipeline = [ 25 | dict(type='LoadImageFromFile'), 26 | dict( 27 | type='MultiScaleFlipAug', 28 | img_scale=(1024, 512), 29 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 30 | flip=False, 31 | transforms=[ 32 | dict(type='Resize', keep_ratio=True), 33 | dict(type='RandomFlip'), 34 | dict(type='Normalize', **img_norm_cfg), 35 | dict(type='ImageToTensor', keys=['img']), 36 | dict(type='Collect', keys=['img']), 37 | ]) 38 | ] 39 | data = dict( 40 | samples_per_gpu=2, 41 | workers_per_gpu=2, 42 | train=dict( 43 | type=dataset_type, 44 | data_root=data_root, 45 | img_dir='leftImg8bit/train', 46 | ann_dir='gtFine/train', 47 | pipeline=train_pipeline), 48 | val=dict( 49 | type=dataset_type, 50 | data_root=data_root, 51 | img_dir='leftImg8bit/val', 52 | ann_dir='gtFine/val', 53 | pipeline=test_pipeline), 54 | test=dict( 55 | type=dataset_type, 56 | 
data_root=data_root, 57 | img_dir='leftImg8bit/val', 58 | ann_dir='gtFine/val', 59 | pipeline=test_pipeline)) 60 | -------------------------------------------------------------------------------- /mmseg/ops/wrappers.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/ops/wrappers.py 5 | ''' 6 | import warnings 7 | 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | 12 | def resize(input, 13 | size=None, 14 | scale_factor=None, 15 | mode='nearest', 16 | align_corners=None, 17 | warning=True): 18 | if warning: 19 | if size is not None and align_corners: 20 | input_h, input_w = tuple(int(x) for x in input.shape[2:]) 21 | output_h, output_w = tuple(int(x) for x in size) 22 | if output_h > input_h or output_w > output_h: 23 | if ((output_h > 1 and output_w > 1 and input_h > 1 24 | and input_w > 1) and (output_h - 1) % (input_h - 1) 25 | and (output_w - 1) % (input_w - 1)): 26 | warnings.warn( 27 | f'When align_corners={align_corners}, ' 28 | 'the output would more aligned if ' 29 | f'input size {(input_h, input_w)} is `x+1` and ' 30 | f'out size {(output_h, output_w)} is `nx+1`') 31 | return F.interpolate(input, size, scale_factor, mode, align_corners) 32 | 33 | 34 | class Upsample(nn.Module): 35 | 36 | def __init__(self, 37 | size=None, 38 | scale_factor=None, 39 | mode='nearest', 40 | align_corners=None): 41 | super(Upsample, self).__init__() 42 | self.size = size 43 | if isinstance(scale_factor, tuple): 44 | self.scale_factor = tuple(float(factor) for factor in scale_factor) 45 | else: 46 | self.scale_factor = float(scale_factor) if scale_factor else None 47 | self.mode = mode 48 | self.align_corners = align_corners 49 | 50 | def forward(self, x): 51 | if not self.size: 52 | size = [int(t * self.scale_factor) for t in x.shape[-2:]] 53 | else: 54 | size = self.size 55 | return resize(x, size, None, self.mode, self.align_corners) 56 | -------------------------------------------------------------------------------- /mmseg/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/__init__.py 5 | ''' 6 | import warnings 7 | 8 | import mmcv 9 | from packaging.version import parse 10 | 11 | from .version import __version__, version_info 12 | 13 | MMCV_MIN = '1.3.13' 14 | MMCV_MAX = '1.5.0' 15 | 16 | 17 | def digit_version(version_str: str, length: int = 4): 18 | """Convert a version string into a tuple of integers. 19 | 20 | This method is usually used for comparing two versions. For pre-release 21 | versions: alpha < beta < rc. 22 | 23 | Args: 24 | version_str (str): The version string. 25 | length (int): The maximum number of version levels. Default: 4. 26 | 27 | Returns: 28 | tuple[int]: The version info in digits (integers). 
29 | """ 30 | version = parse(version_str) 31 | assert version.release, f'failed to parse version {version_str}' 32 | release = list(version.release) 33 | release = release[:length] 34 | if len(release) < length: 35 | release = release + [0] * (length - len(release)) 36 | if version.is_prerelease: 37 | mapping = {'a': -3, 'b': -2, 'rc': -1} 38 | val = -4 39 | # version.pre can be None 40 | if version.pre: 41 | if version.pre[0] not in mapping: 42 | warnings.warn(f'unknown prerelease version {version.pre[0]}, ' 43 | 'version checking may go wrong') 44 | else: 45 | val = mapping[version.pre[0]] 46 | release.extend([val, version.pre[-1]]) 47 | else: 48 | release.extend([val, 0]) 49 | 50 | elif version.is_postrelease: 51 | release.extend([1, version.post]) 52 | else: 53 | release.extend([0, 0]) 54 | return tuple(release) 55 | 56 | 57 | mmcv_min_version = digit_version(MMCV_MIN) 58 | mmcv_max_version = digit_version(MMCV_MAX) 59 | mmcv_version = digit_version(mmcv.__version__) 60 | 61 | 62 | assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \ 63 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 64 | f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.' 65 | 66 | __all__ = ['__version__', 'version_info', 'digit_version'] 67 | -------------------------------------------------------------------------------- /configs/_base_/datasets/coco_stuff10k.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | ''' 3 | Copyright (C) 2010-2021 Alibaba Group Holding Limited. 4 | This file is modified from: 5 | https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/coco_stuff10k.py 6 | ''' 7 | dataset_type = 'COCOStuffDataset' 8 | data_root = 'data/coco_stuff10k' 9 | img_norm_cfg = dict( 10 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 11 | crop_size = (512, 512) 12 | train_pipeline = [ 13 | dict(type='LoadImageFromFile'), 14 | dict(type='LoadAnnotations', reduce_zero_label=True), 15 | dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), 16 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 17 | dict(type='RandomFlip', prob=0.5), 18 | dict(type='PhotoMetricDistortion'), 19 | dict(type='Normalize', **img_norm_cfg), 20 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 21 | dict(type='DefaultFormatBundle'), 22 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 23 | ] 24 | test_pipeline = [ 25 | dict(type='LoadImageFromFile'), 26 | dict( 27 | type='MultiScaleFlipAug', 28 | img_scale=(2048, 512), 29 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 30 | flip=False, 31 | transforms=[ 32 | dict(type='Resize', keep_ratio=True), 33 | dict(type='RandomFlip'), 34 | dict(type='Normalize', **img_norm_cfg), 35 | dict(type='ImageToTensor', keys=['img']), 36 | dict(type='Collect', keys=['img']), 37 | ]) 38 | ] 39 | data = dict( 40 | samples_per_gpu=4, 41 | workers_per_gpu=4, 42 | train=dict( 43 | type=dataset_type, 44 | data_root=data_root, 45 | reduce_zero_label=True, 46 | img_dir='images/train2014', 47 | ann_dir='annotations/train2014', 48 | pipeline=train_pipeline), 49 | val=dict( 50 | type=dataset_type, 51 | data_root=data_root, 52 | reduce_zero_label=True, 53 | img_dir='images/test2014', 54 | ann_dir='annotations/test2014', 55 | pipeline=test_pipeline), 56 | test=dict( 57 | type=dataset_type, 58 | data_root=data_root, 59 | reduce_zero_label=True, 60 | img_dir='images/test2014', 61 | ann_dir='annotations/test2014', 62 | 
pipeline=test_pipeline)) -------------------------------------------------------------------------------- /mmseg/utils/set_env.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/set_env.py 5 | ''' 6 | import os 7 | import platform 8 | 9 | import cv2 10 | import torch.multiprocessing as mp 11 | 12 | from ..utils import get_root_logger 13 | 14 | 15 | def setup_multi_processes(cfg): 16 | """Setup multi-processing environment variables.""" 17 | logger = get_root_logger() 18 | 19 | # set multi-process start method 20 | if platform.system() != 'Windows': 21 | mp_start_method = cfg.get('mp_start_method', None) 22 | current_method = mp.get_start_method(allow_none=True) 23 | if mp_start_method in ('fork', 'spawn', 'forkserver'): 24 | logger.info( 25 | f'Multi-processing start method `{mp_start_method}` is ' 26 | f'different from the previous setting `{current_method}`.' 27 | f'It will be force set to `{mp_start_method}`.') 28 | mp.set_start_method(mp_start_method, force=True) 29 | else: 30 | logger.info( 31 | f'Multi-processing start method is `{mp_start_method}`') 32 | 33 | # disable opencv multithreading to avoid system being overloaded 34 | opencv_num_threads = cfg.get('opencv_num_threads', None) 35 | if isinstance(opencv_num_threads, int): 36 | logger.info(f'OpenCV num_threads is `{opencv_num_threads}`') 37 | cv2.setNumThreads(opencv_num_threads) 38 | else: 39 | logger.info(f'OpenCV num_threads is `{cv2.getNumThreads}') 40 | 41 | if cfg.data.workers_per_gpu > 1: 42 | # setup OMP threads 43 | # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa 44 | omp_num_threads = cfg.get('omp_num_threads', None) 45 | if 'OMP_NUM_THREADS' not in os.environ: 46 | if isinstance(omp_num_threads, int): 47 | logger.info(f'OMP num threads is {omp_num_threads}') 48 | os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) 49 | else: 50 | logger.info(f'OMP num threads is {os.environ["OMP_NUM_THREADS"] }') 51 | 52 | # setup MKL threads 53 | if 'MKL_NUM_THREADS' not in os.environ: 54 | mkl_num_threads = cfg.get('mkl_num_threads', None) 55 | if isinstance(mkl_num_threads, int): 56 | logger.info(f'MKL num threads is {mkl_num_threads}') 57 | os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) 58 | else: 59 | logger.info(f'MKL num threads is {os.environ["MKL_NUM_THREADS"]}') 60 | -------------------------------------------------------------------------------- /configs/_base_/datasets/ade20k_std.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
3 | This file is modified from: 4 | https://github.com/Gsunshine/Enjoy-Hamburger/blob/main/seg_light_ham/configs/_base_/datasets/ade20k_std.py 5 | ''' 6 | # dataset settings 7 | dataset_type = 'ADE20KDataset' 8 | data_root = 'data/ade/ADEChallengeData2016' 9 | img_norm_cfg = dict( 10 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 11 | crop_size = (512, 512) 12 | train_pipeline = [ 13 | dict(type='LoadImageFromFile'), 14 | dict(type='LoadAnnotations', reduce_zero_label=True), 15 | dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), 16 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 17 | dict(type='RandomFlip', prob=0.5), 18 | dict(type='PhotoMetricDistortion'), 19 | dict(type='Normalize', **img_norm_cfg), 20 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 21 | dict(type='DefaultFormatBundle'), 22 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 23 | ] 24 | val_pipeline = [ 25 | dict(type='LoadImageFromFile'), 26 | dict( 27 | type='MultiScaleFlipAug', 28 | img_scale=(2048, 512), 29 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 30 | flip=False, 31 | transforms=[ 32 | dict(type='Resize', keep_ratio=True), 33 | dict(type='RandomFlip'), 34 | dict(type='Normalize', **img_norm_cfg), 35 | dict(type='ImageToTensor', keys=['img']), 36 | dict(type='Collect', keys=['img']), 37 | ]) 38 | ] 39 | test_pipeline = [ 40 | dict(type='LoadImageFromFile'), 41 | dict( 42 | type='MultiScaleFlipAug', 43 | img_scale=(2048, 512), 44 | img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 45 | flip=False, 46 | transforms=[ 47 | dict(type='Resize', keep_ratio=True), 48 | dict(type='RandomFlip'), 49 | dict(type='Normalize', **img_norm_cfg), 50 | dict(type='ImageToTensor', keys=['img']), 51 | dict(type='Collect', keys=['img']), 52 | ]) 53 | ] 54 | data = dict( 55 | samples_per_gpu=4, 56 | workers_per_gpu=4, 57 | train=dict( 58 | type='RepeatDataset', 59 | times=50, 60 | dataset=dict( 61 | type=dataset_type, 62 | data_root=data_root, 63 | img_dir='images/training', 64 | ann_dir='annotations/training', 65 | pipeline=train_pipeline)), 66 | val=dict( 67 | type=dataset_type, 68 | data_root=data_root, 69 | img_dir='images/validation', 70 | ann_dir='annotations/validation', 71 | pipeline=val_pipeline), 72 | test=dict( 73 | type=dataset_type, 74 | data_root=data_root, 75 | img_dir='images/validation', 76 | ann_dir='annotations/validation', 77 | pipeline=test_pipeline)) 78 | -------------------------------------------------------------------------------- /configs/_base_/datasets/ade20k.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | ''' 3 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 
4 | This file is modified from: 5 | https://github.com/Gsunshine/Enjoy-Hamburger/blob/main/seg_light_ham/configs/_base_/datasets/ade20k.py 6 | ''' 7 | dataset_type = 'ADE20KDataset' 8 | data_root = 'data/ade/ADEChallengeData2016' 9 | img_norm_cfg = dict( 10 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 11 | crop_size = (512, 512) 12 | train_pipeline = [ 13 | dict(type='LoadImageFromFile'), 14 | dict(type='LoadAnnotations', reduce_zero_label=True), 15 | dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), 16 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), 17 | dict(type='RandomFlip', prob=0.5), 18 | dict(type='PhotoMetricDistortion'), 19 | dict(type='Normalize', **img_norm_cfg), 20 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), 21 | dict(type='DefaultFormatBundle'), 22 | dict(type='Collect', keys=['img', 'gt_semantic_seg']), 23 | ] 24 | val_pipeline = [ 25 | dict(type='LoadImageFromFile'), 26 | dict( 27 | type='MultiScaleFlipAug', 28 | img_scale=(2048, 512), 29 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 30 | flip=False, 31 | transforms=[ 32 | dict(type='Resize', keep_ratio=True), 33 | dict(type='RandomFlip'), 34 | dict(type='Normalize', **img_norm_cfg), 35 | dict(type='ImageToTensor', keys=['img']), 36 | dict(type='Collect', keys=['img']), 37 | ]) 38 | ] 39 | test_pipeline = [ 40 | dict(type='LoadImageFromFile'), 41 | dict( 42 | type='MultiScaleFlipAug', 43 | img_scale=(2048, 512), 44 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], 45 | flip=False, 46 | transforms=[ 47 | # dict(type='AlignResize', keep_ratio=True, size_divisor=32), 48 | dict(type='Resize', keep_ratio=True), 49 | dict(type='ResizeToMultiple', size_divisor=32), 50 | dict(type='RandomFlip'), 51 | dict(type='Normalize', **img_norm_cfg), 52 | dict(type='ImageToTensor', keys=['img']), 53 | dict(type='Collect', keys=['img']), 54 | ]) 55 | ] 56 | data = dict( 57 | samples_per_gpu=4, 58 | workers_per_gpu=4, 59 | train=dict( 60 | type='RepeatDataset', 61 | times=50, 62 | dataset=dict( 63 | type=dataset_type, 64 | data_root=data_root, 65 | img_dir='images/training', 66 | ann_dir='annotations/training', 67 | pipeline=train_pipeline)), 68 | val=dict( 69 | type=dataset_type, 70 | data_root=data_root, 71 | img_dir='images/validation', 72 | ann_dir='annotations/validation', 73 | pipeline=val_pipeline), 74 | test=dict( 75 | type=dataset_type, 76 | data_root=data_root, 77 | img_dir='images/validation', 78 | ann_dir='annotations/validation', 79 | pipeline=test_pipeline)) 80 | -------------------------------------------------------------------------------- /mmseg/ops/encoding.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/ops/encoding.py 5 | ''' 6 | import torch 7 | from torch import nn 8 | from torch.nn import functional as F 9 | 10 | 11 | class Encoding(nn.Module): 12 | """Encoding Layer: a learnable residual encoder. 13 | 14 | Input is of shape (batch_size, channels, height, width). 15 | Output is of shape (batch_size, num_codes, channels). 
16 | 17 | Args: 18 | channels: dimension of the features or feature channels 19 | num_codes: number of code words 20 | """ 21 | 22 | def __init__(self, channels, num_codes): 23 | super(Encoding, self).__init__() 24 | # init codewords and smoothing factor 25 | self.channels, self.num_codes = channels, num_codes 26 | std = 1. / ((num_codes * channels)**0.5) 27 | # [num_codes, channels] 28 | self.codewords = nn.Parameter( 29 | torch.empty(num_codes, channels, 30 | dtype=torch.float).uniform_(-std, std), 31 | requires_grad=True) 32 | # [num_codes] 33 | self.scale = nn.Parameter( 34 | torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0), 35 | requires_grad=True) 36 | 37 | @staticmethod 38 | def scaled_l2(x, codewords, scale): 39 | num_codes, channels = codewords.size() 40 | batch_size = x.size(0) 41 | reshaped_scale = scale.view((1, 1, num_codes)) 42 | expanded_x = x.unsqueeze(2).expand( 43 | (batch_size, x.size(1), num_codes, channels)) 44 | reshaped_codewords = codewords.view((1, 1, num_codes, channels)) 45 | 46 | scaled_l2_norm = reshaped_scale * ( 47 | expanded_x - reshaped_codewords).pow(2).sum(dim=3) 48 | return scaled_l2_norm 49 | 50 | @staticmethod 51 | def aggregate(assignment_weights, x, codewords): 52 | num_codes, channels = codewords.size() 53 | reshaped_codewords = codewords.view((1, 1, num_codes, channels)) 54 | batch_size = x.size(0) 55 | 56 | expanded_x = x.unsqueeze(2).expand( 57 | (batch_size, x.size(1), num_codes, channels)) 58 | encoded_feat = (assignment_weights.unsqueeze(3) * 59 | (expanded_x - reshaped_codewords)).sum(dim=1) 60 | return encoded_feat 61 | 62 | def forward(self, x): 63 | assert x.dim() == 4 and x.size(1) == self.channels 64 | # [batch_size, channels, height, width] 65 | batch_size = x.size(0) 66 | # [batch_size, height x width, channels] 67 | x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous() 68 | # assignment_weights: [batch_size, channels, num_codes] 69 | assignment_weights = F.softmax( 70 | self.scaled_l2(x, self.codewords, self.scale), dim=2) 71 | # aggregate 72 | encoded_feat = self.aggregate(assignment_weights, x, self.codewords) 73 | return encoded_feat 74 | 75 | def __repr__(self): 76 | repr_str = self.__class__.__name__ 77 | repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}' \ 78 | f'x{self.channels})' 79 | return repr_str 80 | -------------------------------------------------------------------------------- /mmseg/models/segmentors/cascade_encoder_decoder.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/cascade_encoder_decoder.py 5 | ''' 6 | from torch import nn 7 | 8 | from mmseg.core import add_prefix 9 | from mmseg.ops import resize 10 | from .. import builder 11 | from ..builder import SEGMENTORS 12 | from .encoder_decoder import EncoderDecoder 13 | 14 | 15 | @SEGMENTORS.register_module() 16 | class CascadeEncoderDecoder(EncoderDecoder): 17 | """Cascade Encoder Decoder segmentors. 18 | 19 | CascadeEncoderDecoder almost the same as EncoderDecoder, while decoders of 20 | CascadeEncoderDecoder are cascaded. The output of previous decoder_head 21 | will be the input of next decoder_head. 
22 | """ 23 | 24 | def __init__(self, 25 | num_stages, 26 | backbone, 27 | decode_head, 28 | neck=None, 29 | auxiliary_head=None, 30 | train_cfg=None, 31 | test_cfg=None, 32 | pretrained=None, 33 | init_cfg=None): 34 | self.num_stages = num_stages 35 | super(CascadeEncoderDecoder, self).__init__( 36 | backbone=backbone, 37 | decode_head=decode_head, 38 | neck=neck, 39 | auxiliary_head=auxiliary_head, 40 | train_cfg=train_cfg, 41 | test_cfg=test_cfg, 42 | pretrained=pretrained, 43 | init_cfg=init_cfg) 44 | 45 | def _init_decode_head(self, decode_head): 46 | """Initialize ``decode_head``""" 47 | assert isinstance(decode_head, list) 48 | assert len(decode_head) == self.num_stages 49 | self.decode_head = nn.ModuleList() 50 | for i in range(self.num_stages): 51 | self.decode_head.append(builder.build_head(decode_head[i])) 52 | self.align_corners = self.decode_head[-1].align_corners 53 | self.num_classes = self.decode_head[-1].num_classes 54 | 55 | def encode_decode(self, img, img_metas): 56 | """Encode images with backbone and decode into a semantic segmentation 57 | map of the same size as input.""" 58 | x = self.extract_feat(img) 59 | out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg) 60 | for i in range(1, self.num_stages): 61 | out = self.decode_head[i].forward_test(x, out, img_metas, 62 | self.test_cfg) 63 | out = resize( 64 | input=out, 65 | size=img.shape[2:], 66 | mode='bilinear', 67 | align_corners=self.align_corners) 68 | return out 69 | 70 | def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): 71 | """Run forward function and calculate loss for decode head in 72 | training.""" 73 | losses = dict() 74 | 75 | loss_decode = self.decode_head[0].forward_train( 76 | x, img_metas, gt_semantic_seg, self.train_cfg) 77 | 78 | losses.update(add_prefix(loss_decode, 'decode_0')) 79 | 80 | for i in range(1, self.num_stages): 81 | # forward test again, maybe unnecessary for most methods. 82 | prev_outputs = self.decode_head[i - 1].forward_test( 83 | x, img_metas, self.test_cfg) 84 | loss_decode = self.decode_head[i].forward_train( 85 | x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg) 86 | losses.update(add_prefix(loss_decode, f'decode_{i}')) 87 | 88 | return losses 89 | -------------------------------------------------------------------------------- /mmseg/models/losses/accuracy.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | This file is modified from: 4 | https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/accuracy.py 5 | ''' 6 | import torch.nn as nn 7 | 8 | 9 | def accuracy(pred, target, topk=1, thresh=None, ignore_index=None): 10 | """Calculate accuracy according to the prediction and target. 11 | 12 | Args: 13 | pred (torch.Tensor): The model prediction, shape (N, num_class, ...) 14 | target (torch.Tensor): The target of each prediction, shape (N, , ...) 15 | ignore_index (int | None): The label index to be ignored. Default: None 16 | topk (int | tuple[int], optional): If the predictions in ``topk`` 17 | matches the target, the predictions will be regarded as 18 | correct ones. Defaults to 1. 19 | thresh (float, optional): If not None, predictions with scores under 20 | this threshold are considered incorrect. Default to None. 21 | 22 | Returns: 23 | float | tuple[float]: If the input ``topk`` is a single integer, 24 | the function will return a single float as accuracy. 
If 25 | ``topk`` is a tuple containing multiple integers, the 26 | function will return a tuple containing accuracies of 27 | each ``topk`` number. 28 | """ 29 | assert isinstance(topk, (int, tuple)) 30 | if isinstance(topk, int): 31 | topk = (topk, ) 32 | return_single = True 33 | else: 34 | return_single = False 35 | 36 | maxk = max(topk) 37 | if pred.size(0) == 0: 38 | accu = [pred.new_tensor(0.) for i in range(len(topk))] 39 | return accu[0] if return_single else accu 40 | assert pred.ndim == target.ndim + 1 41 | assert pred.size(0) == target.size(0) 42 | assert maxk <= pred.size(1), \ 43 | f'maxk {maxk} exceeds pred dimension {pred.size(1)}' 44 | pred_value, pred_label = pred.topk(maxk, dim=1) 45 | # transpose to shape (maxk, N, ...) 46 | pred_label = pred_label.transpose(0, 1) 47 | correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label)) 48 | if thresh is not None: 49 | # Only prediction values larger than thresh are counted as correct 50 | correct = correct & (pred_value > thresh).t() 51 | correct = correct[:, target != ignore_index] 52 | res = [] 53 | for k in topk: 54 | correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) 55 | res.append( 56 | correct_k.mul_(100.0 / target[target != ignore_index].numel())) 57 | return res[0] if return_single else res 58 | 59 | 60 | class Accuracy(nn.Module): 61 | """Accuracy calculation module.""" 62 | 63 | def __init__(self, topk=(1, ), thresh=None, ignore_index=None): 64 | """Module to calculate the accuracy. 65 | 66 | Args: 67 | topk (tuple, optional): The criterion used to calculate the 68 | accuracy. Defaults to (1,). 69 | thresh (float, optional): If not None, predictions with scores 70 | under this threshold are considered incorrect. Default to None. 71 | """ 72 | super().__init__() 73 | self.topk = topk 74 | self.thresh = thresh 75 | self.ignore_index = ignore_index 76 | 77 | def forward(self, pred, target): 78 | """Forward function to calculate accuracy. 79 | 80 | Args: 81 | pred (torch.Tensor): Prediction of models. 82 | target (torch.Tensor): Target for each prediction. 83 | 84 | Returns: 85 | tuple[float]: The accuracies under different topk criterions. 86 | """ 87 | return accuracy(pred, target, self.topk, self.thresh, 88 | self.ignore_index) 89 | -------------------------------------------------------------------------------- /mmseg/models/utils/res_layer.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited. 3 | ''' 4 | from mmcv.cnn import build_conv_layer, build_norm_layer 5 | from mmcv.runner import Sequential 6 | from torch import nn as nn 7 | 8 | 9 | class ResLayer(Sequential): 10 | """ResLayer to build ResNet style backbone. 11 | 12 | Args: 13 | block (nn.Module): block used to build ResLayer. 14 | inplanes (int): inplanes of block. 15 | planes (int): planes of block. 16 | num_blocks (int): number of blocks. 17 | stride (int): stride of the first block. Default: 1 18 | avg_down (bool): Use AvgPool instead of stride conv when 19 | downsampling in the bottleneck. Default: False 20 | conv_cfg (dict): dictionary to construct and config conv layer. 21 | Default: None 22 | norm_cfg (dict): dictionary to construct and config norm layer. 23 | Default: dict(type='BN') 24 | multi_grid (int | None): Multi grid dilation rates of last 25 | stage. 
Default: None 26 | contract_dilation (bool): Whether contract first dilation of each layer 27 | Default: False 28 | """ 29 | 30 | def __init__(self, 31 | block, 32 | inplanes, 33 | planes, 34 | num_blocks, 35 | stride=1, 36 | dilation=1, 37 | avg_down=False, 38 | conv_cfg=None, 39 | norm_cfg=dict(type='BN'), 40 | multi_grid=None, 41 | contract_dilation=False, 42 | **kwargs): 43 | self.block = block 44 | 45 | downsample = None 46 | if stride != 1 or inplanes != planes * block.expansion: 47 | downsample = [] 48 | conv_stride = stride 49 | if avg_down: 50 | conv_stride = 1 51 | downsample.append( 52 | nn.AvgPool2d( 53 | kernel_size=stride, 54 | stride=stride, 55 | ceil_mode=True, 56 | count_include_pad=False)) 57 | downsample.extend([ 58 | build_conv_layer( 59 | conv_cfg, 60 | inplanes, 61 | planes * block.expansion, 62 | kernel_size=1, 63 | stride=conv_stride, 64 | bias=False), 65 | build_norm_layer(norm_cfg, planes * block.expansion)[1] 66 | ]) 67 | downsample = nn.Sequential(*downsample) 68 | 69 | layers = [] 70 | if multi_grid is None: 71 | if dilation > 1 and contract_dilation: 72 | first_dilation = dilation // 2 73 | else: 74 | first_dilation = dilation 75 | else: 76 | first_dilation = multi_grid[0] 77 | layers.append( 78 | block( 79 | inplanes=inplanes, 80 | planes=planes, 81 | stride=stride, 82 | dilation=first_dilation, 83 | downsample=downsample, 84 | conv_cfg=conv_cfg, 85 | norm_cfg=norm_cfg, 86 | **kwargs)) 87 | inplanes = planes * block.expansion 88 | for i in range(1, num_blocks): 89 | layers.append( 90 | block( 91 | inplanes=inplanes, 92 | planes=planes, 93 | stride=1, 94 | dilation=dilation if multi_grid is None else multi_grid[i], 95 | conv_cfg=conv_cfg, 96 | norm_cfg=norm_cfg, 97 | **kwargs)) 98 | super(ResLayer, self).__init__(*layers) 99 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Head-Free Lightweight Semantic Segmentation with Linear Transformer 2 | 3 | This repository contains the official Pytorch implementation of training & evaluation code and the pretrained models for [AFFormer](https://arxiv.org/pdf/2301.04648.pdf).🔥🔥 4 | 5 | 6 | 7 |
9 | ![Figure 1: Performance of AFFormer.](docs/figure1.png) 12 |
13 | 14 | AFFormer is a head-free, lightweight and powerful semantic segmentation method, as shown in Figure 1. 15 | 16 | We use [MMSegmentation v0.21.1](https://github.com/open-mmlab/mmsegmentation/tree/v0.21.1) as the codebase. 17 | 18 | 19 | 20 | ## Installation 21 | 22 | For installation and data preparation, please refer to the guidelines in [MMSegmentation v0.21.1](https://github.com/open-mmlab/mmsegmentation/tree/v0.21.1). 23 | 24 | An example environment that works for me: ```CUDA 11.3``` and ```PyTorch 1.10.1``` 25 | 26 | ``` 27 | pip install mmcv-full==1.5.0 28 | pip install torchvision 29 | pip install timm 30 | pip install opencv-python 31 | pip install einops 32 | ``` 33 | 34 | ## Evaluation 35 | 36 | Download the pretrained `weights` 37 | ( 38 | [google drive](https://drive.google.com/drive/folders/1Mru24qPdta9o8aLn1RwT8EapiQCih1Sw?usp=share_link) | 39 | [alidrive](https://www.aliyundrive.com/s/Ha2xMsG9ufy) 40 | ) 41 | 42 | Example: evaluate ```AFFormer-base``` on ```ADE20K```: 43 | 44 | ``` 45 | # Single-gpu testing 46 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_base_ade20k.py /path/to/checkpoint_file.pth 1 --eval mIoU 47 | 48 | # Multi-gpu testing 49 | bash tools/dist_test.sh ./configs/AFFormer/AFFormer_base_ade20k.py /path/to/checkpoint_file.pth