├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   └── bug-report.md
│   └── pictures
│       └── alipay.JPG
├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── README.md
├── csseg
│   ├── __init__.py
│   ├── configs
│   │   ├── _base_
│   │   │   ├── __init__.py
│   │   │   ├── dataloaders
│   │   │   │   ├── __init__.py
│   │   │   │   └── default_dataloader_bs24.py
│   │   │   ├── datasets
│   │   │   │   ├── __init__.py
│   │   │   │   ├── ade20k_512x512.py
│   │   │   │   └── vocaug_512x512.py
│   │   │   ├── parallel
│   │   │   │   ├── __init__.py
│   │   │   │   └── default_parallel.py
│   │   │   └── schedulers
│   │   │       ├── __init__.py
│   │   │       └── default_scheduler_poly.py
│   │   ├── ilt
│   │   │   ├── base_cfg.py
│   │   │   ├── ilt_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py
│   │   │   ├── ilt_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py
│   │   │   ├── ilt_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py
│   │   │   ├── ilt_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
│   │   │   ├── ilt_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py
│   │   │   └── ilt_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py
│   │   ├── mib
│   │   │   ├── base_cfg.py
│   │   │   ├── mib_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py
│   │   │   ├── mib_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py
│   │   │   ├── mib_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py
│   │   │   ├── mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
│   │   │   ├── mib_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py
│   │   │   └── mib_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py
│   │   ├── plop
│   │   │   ├── base_cfg.py
│   │   │   ├── plop_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py
│   │   │   ├── plop_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py
│   │   │   ├── plop_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py
│   │   │   ├── plop_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
│   │   │   ├── plop_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py
│   │   │   └── plop_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py
│   │   ├── rcil
│   │   │   ├── base_cfg.py
│   │   │   ├── rcil_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py
│   │   │   ├── rcil_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py
│   │   │   ├── rcil_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py
│   │   │   ├── rcil_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
│   │   │   ├── rcil_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py
│   │   │   └── rcil_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py
│   │   └── ucd
│   │       ├── base_cfg.py
│   │       ├── mib+ucd_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py
│   │       ├── mib+ucd_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py
│   │       ├── mib+ucd_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py
│   │       ├── mib+ucd_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
│   │       ├── mib+ucd_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py
│   │       └── mib+ucd_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py
│   ├── modules
│   │   ├── __init__.py
│   │   ├── datasets
│   │   │   ├── __init__.py
│   │   │   ├── ade20k.py
│   │   │   ├── base.py
│   │   │   ├── builder.py
│   │   │   ├── pipelines
│   │   │   │   ├── __init__.py
│   │   │   │   ├── evaluators.py
│   │   │   │   └── transforms.py
│   │   │   └── voc.py
│   │   ├── models
│   │   │   ├── __init__.py
│   │   │   ├── decoders
│   │   │   │   ├── __init__.py
│   │   │   │   ├── aspphead.py
│   │   │   │   ├── builder.py
│   │   │   │   └── rcilaspphead.py
│   │   │   ├── encoders
│   │   │   │   ├── __init__.py
│   │   │   │   ├── bricks
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── activation
│   │   │   │   │   │   ├── __init__.py
│   │   │   │   │   │   └── builder.py
│   │   │   │   │   └── normalization
│   │   │   │   │       ├── __init__.py
│   │   │   │   │       └── builder.py
│   │   │   │   ├── builder.py
│   │   │   │   ├── resnet.py
│   │   │   │   ├── resnetilt.py
│   │   │   │   ├── resnetplop.py
│   │   │   │   └── resnetrcil.py
│   │   │   ├── losses
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   ├── celoss.py
│   │   │   │   ├── csloss.py
│   │   │   │   ├── klloss.py
│   │   │   │   └── mseloss.py
│   │   │   ├── optimizers
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── paramsconstructor.py
│   │   │   ├── schedulers
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base.py
│   │   │   │   ├── builder.py
│   │   │   │   └── poly.py
│   │   │   └── segmentors
│   │   │       ├── __init__.py
│   │   │       ├── base.py
│   │   │       ├── builder.py
│   │   │       ├── ilt.py
│   │   │       ├── mib.py
│   │   │       ├── plop.py
│   │   │       └── ucd.py
│   │   ├── parallel
│   │   │   ├── __init__.py
│   │   │   ├── dataloader.py
│   │   │   └── model.py
│   │   ├── runners
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── builder.py
│   │   │   ├── caf.py
│   │   │   ├── ewf.py
│   │   │   ├── ilt.py
│   │   │   ├── mib.py
│   │   │   ├── plop.py
│   │   │   ├── rcil.py
│   │   │   ├── reminder.py
│   │   │   ├── sdr.py
│   │   │   └── ucd.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── configparser.py
│   │       ├── env.py
│   │       ├── io.py
│   │       ├── logger.py
│   │       ├── misc.py
│   │       └── modulebuilder.py
│   ├── test.py
│   └── train.py
├── docs
│   ├── DatasetPreparation.md
│   ├── GetStarted.md
│   ├── Makefile
│   ├── ModelZoo.md
│   ├── QuickRun.md
│   ├── Tutorials.md
│   ├── conf.py
│   ├── index.rst
│   ├── logo.png
│   ├── make.bat
│   ├── modelzoo
│   │   ├── caf
│   │   │   └── README.md
│   │   ├── ewf
│   │   │   └── README.md
│   │   ├── ilt
│   │   │   └── README.md
│   │   ├── mib
│   │   │   └── README.md
│   │   ├── plop
│   │   │   └── README.md
│   │   ├── rcil
│   │   │   └── README.md
│   │   ├── reminder
│   │   │   └── README.md
│   │   ├── sdr
│   │   │   └── README.md
│   │   └── ucd
│   │       └── README.md
│   └── requirements.txt
├── requirements.txt
├── requirements
│   ├── bricks.txt
│   ├── io.txt
│   ├── misc.txt
│   ├── nn.txt
│   ├── science.txt
│   └── terminal.txt
├── scripts
│   ├── collect_env.py
│   ├── dist_test.sh
│   ├── dist_train.sh
│   └── prepare_dataset.sh
└── setup.py
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | patreon: CharlesPikachu
2 | ko_fi: charlespikachu
3 | custom: https://github.com/CharlesPikachu/Games/tree/master/.github/pictures/alipay.JPG
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Report a bug you encountered
4 | title: "[BUG]"
5 | labels: ''
6 | assignees: ''
7 | ---
8 |
9 | **Environment (使用环境)**
10 |
11 | - Installation method (安装方式):
12 | - The version of cssegmentation (版本号):
13 | - Operating system (操作系统):
14 | - Python version (Python版本):
15 |
16 | **Question description (问题描述)**
17 |
18 | **Screenshot (报错截图)**
19 |
20 | **Advice (修复建议)**
21 |
--------------------------------------------------------------------------------
/.github/pictures/alipay.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/c12517594ee91e22ba99974e30f066c0b081728f/.github/pictures/alipay.JPG
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | **/.DS_Store
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | pip-wheel-metadata/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | .python-version
88 |
89 | # pipenv
90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
93 | # install all needed dependencies.
94 | #Pipfile.lock
95 |
96 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97 | __pypackages__/
98 |
99 | # Celery stuff
100 | celerybeat-schedule
101 | celerybeat.pid
102 |
103 | # SageMath parsed files
104 | *.sage.py
105 |
106 | # Environments
107 | .env
108 | .venv
109 | env/
110 | venv/
111 | ENV/
112 | env.bak/
113 | venv.bak/
114 |
115 | # Spyder project settings
116 | .spyderproject
117 | .spyproject
118 |
119 | # Rope project settings
120 | .ropeproject
121 |
122 | # mkdocs documentation
123 | /site
124 |
125 | # mypy
126 | .mypy_cache/
127 | .dmypy.json
128 | dmypy.json
129 |
130 | # Pyre type checker
131 | .pyre/
132 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # required
2 | version: 2
3 |
4 | # set the OS, python version and other tools you might need
5 | build:
6 |   os: ubuntu-22.04
7 |   tools:
8 |     python: "3.11"
9 |
10 | # build documentation in the "docs/" directory with Sphinx
11 | sphinx:
12 | configuration: docs/conf.py
13 |
14 | # explicitly set the version of Python and its requirements
15 | python:
16 |   install:
17 |     - requirements: docs/requirements.txt
--------------------------------------------------------------------------------
/csseg/__init__.py:
--------------------------------------------------------------------------------
1 | '''version'''
2 | __version__ = '0.1.0'
3 | '''author'''
4 | __author__ = 'Zhenchao Jin'
5 | '''title'''
6 | __title__ = 'CSSegmentation'
7 | '''description'''
8 | __description__ = 'CSSegmentation: An Open Source Continual Semantic Segmentation Toolbox Based on PyTorch'
9 | '''url'''
10 | __url__ = 'https://github.com/SegmentationBLWX/cssegmentation'
11 | '''email'''
12 | __email__ = 'charlesblwx@gmail.com'
13 | '''license'''
14 | __license__ = 'Apache License 2.0'
15 | '''copyright'''
16 | __copyright__ = 'Copyright 2023-2030 Zhenchao Jin'
--------------------------------------------------------------------------------
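The metadata above is exposed as ordinary module attributes, so it can be queried at runtime. A minimal check (assuming the package is importable on the current PYTHONPATH):

import csseg

# the dunder fields from csseg/__init__.py are plain attributes
print(csseg.__version__)  # '0.1.0'
print(csseg.__title__)    # 'CSSegmentation'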
/csseg/configs/_base_/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .parallel import PARALLEL_CFG
3 | from .schedulers import SCHEDULER_CFG_POLY
4 | from .dataloaders import DATALOADER_CFG_BS24
5 | from .datasets import DATASET_CFG_VOCAUG_512x512
--------------------------------------------------------------------------------
/csseg/configs/_base_/dataloaders/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .default_dataloader_bs24 import DATALOADER_CFG_BS24
--------------------------------------------------------------------------------
/csseg/configs/_base_/dataloaders/default_dataloader_bs24.py:
--------------------------------------------------------------------------------
1 | '''default_dataloader_bs24'''
2 | import os
3 |
4 |
5 | '''DATALOADER_CFG_BS24'''
6 | DATALOADER_CFG_BS24 = {
7 |     'total_train_bs_for_auto_check': 24,
8 |     'auto_align_train_bs': True,
9 |     'train': {
10 |         'batch_size_per_gpu': 12, 'num_workers_per_gpu': 4, 'shuffle': True, 'pin_memory': True, 'drop_last': True
11 |     },
12 |     'test': {
13 |         'batch_size_per_gpu': 12, 'num_workers_per_gpu': 4, 'shuffle': False, 'pin_memory': True, 'drop_last': False
14 |     },
15 | }
--------------------------------------------------------------------------------
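The pairing of 'total_train_bs_for_auto_check' with 'auto_align_train_bs' suggests the runner keeps the effective global batch size at 24 whatever the GPU count. A minimal sketch of such a check; the helper below is illustrative, not the repository's actual implementation:

def align_train_batch_size(dataloader_cfg, num_gpus):
    # hypothetical helper: rescale the per-GPU batch size so that
    # num_gpus * batch_size_per_gpu == total_train_bs_for_auto_check
    total_bs = dataloader_cfg['total_train_bs_for_auto_check']
    if dataloader_cfg['auto_align_train_bs']:
        assert total_bs % num_gpus == 0, 'global batch size must divide evenly across GPUs'
        dataloader_cfg['train']['batch_size_per_gpu'] = total_bs // num_gpus
    return dataloader_cfg

# with the config above on 2 GPUs: 24 // 2 == 12, matching 'batch_size_per_gpu'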
/csseg/configs/_base_/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .vocaug_512x512 import DATASET_CFG_VOCAUG_512x512
--------------------------------------------------------------------------------
/csseg/configs/_base_/datasets/ade20k_512x512.py:
--------------------------------------------------------------------------------
1 | '''ade20k_512x512'''
2 | import os
3 |
4 |
5 | '''DATASET_CFG_ADE20K_512x512'''
6 | DATASET_CFG_ADE20K_512x512 = {
7 |     'type': 'ADE20kDataset',
8 |     'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
9 |     'overlap': True,
10 |     'masking_value': 0,
11 |     'train': {
12 |         'set': 'train',
13 |         'transforms': [
14 |             ('RandomResizedCrop', {'output_size': 512, 'scale': (0.5, 2.0)}),
15 |             ('RandomHorizontalFlip', {}),
16 |             ('ToTensor', {}),
17 |             ('Normalize', {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}),
18 |         ],
19 |     },
20 |     'test': {
21 |         'set': 'val',
22 |         'transforms': [
23 |             ('Resize', {'output_size': 512}),
24 |             ('CenterCrop', {'output_size': 512}),
25 |             ('ToTensor', {}),
26 |             ('Normalize', {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}),
27 |         ],
28 |     },
29 | }
--------------------------------------------------------------------------------
/csseg/configs/_base_/datasets/vocaug_512x512.py:
--------------------------------------------------------------------------------
1 | '''vocaug_512x512'''
2 | import os
3 |
4 |
5 | '''DATASET_CFG_VOCAUG_512x512'''
6 | DATASET_CFG_VOCAUG_512x512 = {
7 |     'type': 'VOCDataset',
8 |     'rootdir': os.path.join(os.getcwd(), 'VOCdevkit/VOC2012'),
9 |     'overlap': True,
10 |     'masking_value': 0,
11 |     'train': {
12 |         'set': 'trainaug',
13 |         'transforms': [
14 |             ('RandomResizedCrop', {'output_size': 512, 'scale': (0.5, 2.0)}),
15 |             ('RandomHorizontalFlip', {}),
16 |             ('ToTensor', {}),
17 |             ('Normalize', {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}),
18 |         ],
19 |     },
20 |     'test': {
21 |         'set': 'val',
22 |         'transforms': [
23 |             ('Resize', {'output_size': 512}),
24 |             ('CenterCrop', {'output_size': 512}),
25 |             ('ToTensor', {}),
26 |             ('Normalize', {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}),
27 |         ],
28 |     },
29 | }
--------------------------------------------------------------------------------
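Each 'transforms' entry is a (type, kwargs) pair rather than an instantiated object, so a builder can look the name up in a registry and chain the results. A minimal sketch, assuming a registry dict mapping these names to transform classes (the Compose class below is illustrative, not the repository's pipeline code):

class Compose:
    '''Apply (name, kwargs) transform specs in order.'''
    def __init__(self, transform_specs, registry):
        self.transforms = [registry[name](**kwargs) for name, kwargs in transform_specs]
    def __call__(self, sample):
        for transform in self.transforms:
            sample = transform(sample)
        return sample

# e.g. Compose(DATASET_CFG_VOCAUG_512x512['train']['transforms'], registry)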
/csseg/configs/_base_/parallel/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .default_parallel import PARALLEL_CFG
--------------------------------------------------------------------------------
/csseg/configs/_base_/parallel/default_parallel.py:
--------------------------------------------------------------------------------
1 | '''default_parallel'''
2 | import os
3 |
4 |
5 | '''PARALLEL_CFG'''
6 | PARALLEL_CFG = {
7 |     'backend': 'nccl', 'init_method': 'env://', 'model_cfg': {}
8 | }
--------------------------------------------------------------------------------
/csseg/configs/_base_/schedulers/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .default_scheduler_poly import SCHEDULER_CFG_POLY
--------------------------------------------------------------------------------
/csseg/configs/_base_/schedulers/default_scheduler_poly.py:
--------------------------------------------------------------------------------
1 | '''default_scheduler_poly'''
2 | import os
3 |
4 |
5 | '''SCHEDULER_CFG_POLY'''
6 | SCHEDULER_CFG_POLY = {
7 |     'type': 'PolyScheduler', 'iters_per_epoch': -1, 'max_epochs': -1, 'lr': 0.01, 'min_lr': 0.0, 'power': 0.9,
8 |     'optimizer_cfg': {
9 |         'type': 'SGD', 'momentum': 0.9, 'nesterov': True, 'weight_decay': 1e-4, 'lr': None,
10 |         'paramwise_cfg': {'type': 'DefaultParamsConstructor'}, 'filter_params': True,
11 |     }
12 | }
--------------------------------------------------------------------------------
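'PolyScheduler' names the polynomial decay that is standard in semantic segmentation; 'iters_per_epoch' and 'max_epochs' are filled in later by each task's config. A minimal sketch of the schedule implied by these fields:

def poly_lr(cur_iter, iters_per_epoch, max_epochs, lr=0.01, min_lr=0.0, power=0.9):
    # polynomial decay: lr * (1 - t / T) ** power, floored at min_lr
    max_iters = iters_per_epoch * max_epochs
    return max(lr * (1 - cur_iter / max_iters) ** power, min_lr)

# e.g. poly_lr(0, 100, 30) == 0.01, decaying smoothly to 0.0 at iteration 3000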
/csseg/configs/ilt/base_cfg.py:
--------------------------------------------------------------------------------
1 | '''BASE_CFG for ILT'''
2 | # SEGMENTOR_CFG
3 | SEGMENTOR_CFG = {
4 |     'type': 'ILTSegmentor',
5 |     'num_known_classes_list': None,
6 |     'selected_indices': (0,),
7 |     'align_corners': False,
8 |     'encoder_cfg': {
9 |         'type': 'ResNetILT',
10 |         'depth': 101,
11 |         'outstride': 16,
12 |         'out_indices': (3,),
13 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
14 |         'act_cfg': None,
15 |         'shortcut_norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'identity'},
16 |         'shortcut_act_cfg': {'type': 'LeakyReLU', 'inplace': True, 'negative_slope': 0.01},
17 |         'pretrained': True,
18 |         'structure_type': 'resnet101inplaceabn',
19 |     },
20 |     'decoder_cfg': {
21 |         'type': 'ASPPHead',
22 |         'in_channels': 2048,
23 |         'feats_channels': 256,
24 |         'out_channels': 256,
25 |         'dilations': (1, 6, 12, 18),
26 |         'pooling_size': 32,
27 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
28 |         'act_cfg': None,
29 |     },
30 |     'losses_cfgs': {
31 |         'segmentation_init': {
32 |             'loss_seg': {'CrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
33 |         },
34 |         'segmentation_cl': {
35 |             'loss_seg': {'CrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
36 |         },
37 |         'distillation_logits': {'scale_factor': 100, 'alpha': 1.0},
38 |         'distillation_features': {'type': 'MSELoss', 'scale_factor': 100, 'reduction': 'mean'},
39 |     },
40 | }
41 | # RUNNER_CFG
42 | RUNNER_CFG = {
43 |     'type': 'ILTRunner',
44 |     'algorithm': 'ILT',
45 |     'task_name': '',
46 |     'task_id': -1,
47 |     'num_tasks': -1,
48 |     'work_dir': '',
49 |     'benchmark': True,
50 |     'save_interval_epochs': 10,
51 |     'eval_interval_epochs': 10,
52 |     'log_interval_iterations': 10,
53 |     'choose_best_segmentor_by_metric': 'mean_iou',
54 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': ''},
55 |     'num_total_classes': -1,
56 |     'fp16_cfg': {'type': 'apex', 'initialize': {'opt_level': 'O1'}, 'scale_loss': {}},
57 |     'segmentor_cfg': SEGMENTOR_CFG,
58 | }
--------------------------------------------------------------------------------
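ILT constrains the current model against a frozen copy of the previous-step model with two terms: 'distillation_logits' on the classifier outputs and 'distillation_features' as an MSELoss on encoder features, each weighted by its 'scale_factor'. A rough sketch of the feature term only (the repository's exact losses live under csseg/modules/models/losses):

import torch.nn.functional as F

def feature_distillation(feats_new, feats_old, scale_factor=100, reduction='mean'):
    # pull the current encoder's features toward the frozen previous-task encoder
    return scale_factor * F.mse_loss(feats_new, feats_old.detach(), reduction=reduction)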
/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''ilt_r101iabnd16_aspp_512x512_vocaug10-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
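One subtlety in these derived configs: dict.copy() is shallow, so the eleven scheduler entries have independent top-level keys (the per-task 'max_epochs' and 'lr' writes above are therefore safe) but share the single nested 'optimizer_cfg' dict from SCHEDULER_CFG_POLY. A small demonstration:

import copy

base = {'lr': 0.01, 'optimizer_cfg': {'momentum': 0.9}}
cfgs = [base.copy() for _ in range(2)]
cfgs[0]['lr'] = 0.001                       # independent: only cfgs[0] changes
cfgs[0]['optimizer_cfg']['momentum'] = 0.0  # shared: visible through cfgs[1] too
assert cfgs[1]['lr'] == 0.01 and cfgs[1]['optimizer_cfg']['momentum'] == 0.0
cfgs = [copy.deepcopy(base) for _ in range(2)]  # deepcopy if nested keys must differ per task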
/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''ilt_r101iabnd16_aspp_512x512_vocaug10-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''ilt_r101iabnd16_aspp_512x512_vocaug15-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''ilt_r101iabnd16_aspp_512x512_vocaug15-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py:
--------------------------------------------------------------------------------
1 | '''ilt_r101iabnd16_aspp_512x512_vocaug15-5_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(2)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 2):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5',
26 |     'num_tasks': 2,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py:
--------------------------------------------------------------------------------
1 | '''ilt_r101iabnd16_aspp_512x512_vocaug15-5_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(2)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 2):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5',
26 |     'num_tasks': 2,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/mib/base_cfg.py:
--------------------------------------------------------------------------------
1 | '''BASE_CFG for MIB'''
2 | # SEGMENTOR_CFG
3 | SEGMENTOR_CFG = {
4 |     'type': 'MIBSegmentor',
5 |     'num_known_classes_list': None,
6 |     'selected_indices': (0,),
7 |     'align_corners': False,
8 |     'encoder_cfg': {
9 |         'type': 'ResNetILT',
10 |         'depth': 101,
11 |         'outstride': 16,
12 |         'out_indices': (3,),
13 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
14 |         'act_cfg': None,
15 |         'shortcut_norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'identity'},
16 |         'shortcut_act_cfg': {'type': 'LeakyReLU', 'inplace': True, 'negative_slope': 0.01},
17 |         'pretrained': True,
18 |         'structure_type': 'resnet101inplaceabn',
19 |     },
20 |     'decoder_cfg': {
21 |         'type': 'ASPPHead',
22 |         'in_channels': 2048,
23 |         'feats_channels': 256,
24 |         'out_channels': 256,
25 |         'dilations': (1, 6, 12, 18),
26 |         'pooling_size': 32,
27 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
28 |         'act_cfg': None,
29 |     },
30 |     'losses_cfgs': {
31 |         'segmentation_init': {
32 |             'loss_seg': {'CrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
33 |         },
34 |         'segmentation_cl': {
35 |             'loss_seg': {'MIBUnbiasedCrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
36 |         },
37 |         'distillation': {'scale_factor': 10, 'alpha': 1.0},
38 |     },
39 | }
40 | # RUNNER_CFG
41 | RUNNER_CFG = {
42 |     'type': 'MIBRunner',
43 |     'algorithm': 'MIB',
44 |     'task_name': '',
45 |     'task_id': -1,
46 |     'num_tasks': -1,
47 |     'work_dir': '',
48 |     'benchmark': True,
49 |     'save_interval_epochs': 10,
50 |     'eval_interval_epochs': 10,
51 |     'log_interval_iterations': 10,
52 |     'choose_best_segmentor_by_metric': 'mean_iou',
53 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': ''},
54 |     'num_total_classes': -1,
55 |     'fp16_cfg': {'type': 'apex', 'initialize': {'opt_level': 'O1'}, 'scale_loss': {}},
56 |     'segmentor_cfg': SEGMENTOR_CFG,
57 | }
--------------------------------------------------------------------------------
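'MIBUnbiasedCrossEntropyLoss' refers to the unbiased objective of MIB (Cermelli et al., "Modeling the Background for Incremental Learning in Semantic Segmentation"): since pixels of old classes are labeled background in the current step, the background probability is modeled as the total mass of background plus all previously seen classes. A sketch following the paper, assuming channel 0 is background and channels [1, num_old) are the old classes:

import torch
import torch.nn.functional as F

def mib_unbiased_ce(logits, targets, num_old, ignore_index=255, reduction='mean'):
    # log p(background) becomes log sum_{c < num_old} p(c); new classes keep their own log-probs
    log_den = torch.logsumexp(logits, dim=1)                                 # (B, H, W)
    log_probs = torch.zeros_like(logits)
    log_probs[:, 0] = torch.logsumexp(logits[:, :num_old], dim=1) - log_den  # merged bg + old
    log_probs[:, num_old:] = logits[:, num_old:] - log_den                   # new classes
    labels = targets.clone()
    labels[targets < num_old] = 0  # any old-class label collapses onto background
    return F.nll_loss(log_probs, labels, ignore_index=ignore_index, reduction=reduction)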
/csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''mib_r101iabnd16_aspp_512x512_vocaug10-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
31 | RUNNER_CFG['segmentor_cfg']['losses_cfgs']['distillation']['scale_factor'] = 100
--------------------------------------------------------------------------------
/csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''mib_r101iabnd16_aspp_512x512_vocaug10-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
31 | RUNNER_CFG['segmentor_cfg']['losses_cfgs']['distillation']['scale_factor'] = 100
--------------------------------------------------------------------------------
/csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''mib_r101iabnd16_aspp_512x512_vocaug15-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
31 | RUNNER_CFG['segmentor_cfg']['losses_cfgs']['distillation']['scale_factor'] = 100
--------------------------------------------------------------------------------
/csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
31 | RUNNER_CFG['segmentor_cfg']['losses_cfgs']['distillation']['scale_factor'] = 100
--------------------------------------------------------------------------------
/csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py:
--------------------------------------------------------------------------------
1 | '''mib_r101iabnd16_aspp_512x512_vocaug15-5_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(2)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 2):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5',
26 |     'num_tasks': 2,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py:
--------------------------------------------------------------------------------
1 | '''mib_r101iabnd16_aspp_512x512_vocaug15-5_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(2)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 2):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5',
26 |     'num_tasks': 2,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/plop/base_cfg.py:
--------------------------------------------------------------------------------
1 | '''BASE_CFG for PLOP'''
2 | # SEGMENTOR_CFG
3 | SEGMENTOR_CFG = {
4 |     'type': 'PLOPSegmentor',
5 |     'num_known_classes_list': None,
6 |     'selected_indices': (3,),
7 |     'align_corners': False,
8 |     'encoder_cfg': {
9 |         'type': 'ResNetPLOP',
10 |         'depth': 101,
11 |         'outstride': 16,
12 |         'out_indices': (0, 1, 2, 3),
13 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
14 |         'act_cfg': None,
15 |         'shortcut_norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'identity'},
16 |         'shortcut_act_cfg': {'type': 'LeakyReLU', 'inplace': True, 'negative_slope': 0.01},
17 |         'pretrained': True,
18 |         'structure_type': 'resnet101inplaceabn',
19 |     },
20 |     'decoder_cfg': {
21 |         'type': 'ASPPHead',
22 |         'in_channels': 2048,
23 |         'feats_channels': 256,
24 |         'out_channels': 256,
25 |         'dilations': (1, 6, 12, 18),
26 |         'pooling_size': 32,
27 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
28 |         'act_cfg': None,
29 |     },
30 |     'losses_cfgs': {
31 |         'segmentation_init': {
32 |             'loss_seg': {'CrossEntropyLoss': {'scale_factor': None, 'reduction': 'none', 'ignore_index': 255}}
33 |         },
34 |         'segmentation_cl': {
35 |             'loss_seg': {'CrossEntropyLoss': {'scale_factor': None, 'reduction': 'none', 'ignore_index': 255}}
36 |         },
37 |         'distillation': {'pod_factor': 0.01, 'pod_factor_last_scale': 0.0005, 'spp_scales': [1, 2, 4]},
38 |     }
39 | }
40 | # RUNNER_CFG
41 | RUNNER_CFG = {
42 |     'type': 'PLOPRunner',
43 |     'algorithm': 'PLOP',
44 |     'task_name': '',
45 |     'task_id': -1,
46 |     'num_tasks': -1,
47 |     'work_dir': '',
48 |     'benchmark': True,
49 |     'save_interval_epochs': 10,
50 |     'eval_interval_epochs': 10,
51 |     'log_interval_iterations': 10,
52 |     'choose_best_segmentor_by_metric': 'mean_iou',
53 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': ''},
54 |     'num_total_classes': -1,
55 |     'pseudolabeling_minimal_threshold': 0.001,
56 |     'fp16_cfg': {'type': 'apex', 'initialize': {'opt_level': 'O1'}, 'scale_loss': {}},
57 |     'segmentor_cfg': SEGMENTOR_CFG,
58 | }
--------------------------------------------------------------------------------
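The 'distillation' block configures PLOP's local POD: features from every encoder stage (hence out_indices (0, 1, 2, 3)) are average-pooled along width and along height inside an s x s grid for each scale in 'spp_scales', and the concatenated embeddings of the current and previous models are matched with an L2 distance, weighted by 'pod_factor' ('pod_factor_last_scale' presumably applying to the final stage). A rough sketch for a single pair of (B, C, H, W) feature maps:

import torch

def local_pod(f_new, f_old, spp_scales=(1, 2, 4)):
    # collect width- and height-pooled embeddings per grid cell at each scale
    embs_new, embs_old = [], []
    _, _, h, w = f_new.shape
    for s in spp_scales:
        ph, pw = h // s, w // s
        for i in range(s):
            for j in range(s):
                for feats, embs in ((f_new, embs_new), (f_old, embs_old)):
                    cell = feats[:, :, i * ph:(i + 1) * ph, j * pw:(j + 1) * pw]
                    embs.append(cell.mean(dim=3).flatten(1))  # pooled over width
                    embs.append(cell.mean(dim=2).flatten(1))  # pooled over height
    diff = torch.cat(embs_new, dim=1) - torch.cat(embs_old, dim=1)
    return torch.norm(diff, p=2, dim=1).mean()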
/csseg/configs/plop/plop_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''plop_r101iabnd16_aspp_512x512_vocaug10-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/plop/plop_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''plop_r101iabnd16_aspp_512x512_vocaug10-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/plop/plop_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''plop_r101iabnd16_aspp_512x512_vocaug15-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/plop/plop_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''plop_r101iabnd16_aspp_512x512_vocaug15-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/plop/plop_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py:
--------------------------------------------------------------------------------
1 | '''plop_r101iabnd16_aspp_512x512_vocaug15-5_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(2)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 2):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5',
26 |     'num_tasks': 2,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/plop/plop_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py:
--------------------------------------------------------------------------------
1 | '''plop_r101iabnd16_aspp_512x512_vocaug15-5_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(2)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.01
18 | for i in range(1, 2):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5',
26 |     'num_tasks': 2,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logger_handle_cfg': {'type': 'LocalLoggerHandle', 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log"},
30 | })
--------------------------------------------------------------------------------
/csseg/configs/rcil/base_cfg.py:
--------------------------------------------------------------------------------
1 | '''BASE_CFG for RCIL'''
2 | # SEGMENTOR_CFG
3 | SEGMENTOR_CFG = {
4 |     'type': 'RCILSegmentor',
5 |     'num_known_classes_list': None,
6 |     'selected_indices': (3,),
7 |     'align_corners': False,
8 |     'encoder_cfg': {
9 |         'type': 'ResNetRCIL',
10 |         'depth': 101,
11 |         'outstride': 16,
12 |         'out_indices': (0, 1, 2, 3),
13 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 1.0},
14 |         'act_cfg': None,
15 |         'pretrained': True,
16 |     },
17 |     'decoder_cfg': {
18 |         'type': 'RCILASPPHead',
19 |         'in_channels': 2048,
20 |         'feats_channels': 256,
21 |         'out_channels': 256,
22 |         'dilations': (1, 6, 12, 18),
23 |         'pooling_size': 32,
24 |         'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 1.0},
25 |     },
26 |     'losses_cfgs': {
27 |         'segmentation_init': {
28 |             'loss_seg': {'CrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
29 |         },
30 |         'segmentation_cl': {
31 |             'loss_seg': {'MIBUnbiasedCrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
32 |         },
33 |         'distillation_rcil': {'scale_factor': 1.0, 'spp_scales': [4, 8, 12, 16, 20, 24]},
34 |         'distillation_mib': {'scale_factor': 100, 'alpha': 1.0},
35 |     }
36 | }
37 | # RUNNER_CFG
38 | RUNNER_CFG = {
39 |     'type': 'RCILRunner',
40 |     'algorithm': 'RCIL',
41 |     'task_name': '',
42 |     'task_id': -1,
43 |     'num_tasks': -1,
44 |     'work_dir': '',
45 |     'benchmark': True,
46 |     'save_interval_epochs': 10,
47 |     'eval_interval_epochs': 10,
48 |     'log_interval_iterations': 10,
49 |     'choose_best_segmentor_by_metric': 'mean_iou',
50 |     'logfilepath': '',
51 |     'num_total_classes': -1,
52 |     'pseudolabeling_minimal_threshold': 0.001,
53 |     'random_seed': 42,
54 |     'segmentor_cfg': SEGMENTOR_CFG,
55 | }
--------------------------------------------------------------------------------
/csseg/configs/rcil/rcil_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''rcil_r101iabnd16_aspp_512x512_vocaug10-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg (the SGD optimizer settings are nested in SCHEDULER_CFG_POLY['optimizer_cfg'])
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
30 | })
--------------------------------------------------------------------------------
/csseg/configs/rcil/rcil_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''rcil_r101iabnd16_aspp_512x512_vocaug10-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg (the SGD optimizer settings are nested in SCHEDULER_CFG_POLY['optimizer_cfg'])
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(11)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
18 | for i in range(1, 11):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '10-1',
26 |     'num_tasks': 11,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
30 | })
--------------------------------------------------------------------------------
/csseg/configs/rcil/rcil_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''rcil_r101iabnd16_aspp_512x512_vocaug15-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg (the SGD optimizer settings are nested in SCHEDULER_CFG_POLY['optimizer_cfg'])
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
30 | })
--------------------------------------------------------------------------------
/csseg/configs/rcil/rcil_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''rcil_r101iabnd16_aspp_512x512_vocaug15-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add scheduler_cfg (the SGD optimizer settings are nested in SCHEDULER_CFG_POLY['optimizer_cfg'])
13 | RUNNER_CFG['scheduler_cfg'] = [
14 |     SCHEDULER_CFG_POLY.copy() for _ in range(6)
15 | ]
16 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
17 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
18 | for i in range(1, 6):
19 |     RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
20 |     RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
21 | # add parallel_cfg
22 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
23 | # modify RUNNER_CFG
24 | RUNNER_CFG.update({
25 |     'task_name': '15-5s',
26 |     'num_tasks': 6,
27 |     'num_total_classes': 21,
28 |     'work_dir': os.path.split(__file__)[-1].split('.')[0],
29 |     'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
30 | })
--------------------------------------------------------------------------------
/csseg/configs/rcil/rcil_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py:
--------------------------------------------------------------------------------
1 | '''rcil_r101iabnd16_aspp_512x512_vocaug15-5_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(2)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 2):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '15-5',
28 | 'num_tasks': 2,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/configs/rcil/rcil_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py:
--------------------------------------------------------------------------------
1 | '''rcil_r101iabnd16_aspp_512x512_vocaug15-5_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(2)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 2):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '15-5',
28 | 'num_tasks': 2,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/configs/ucd/base_cfg.py:
--------------------------------------------------------------------------------
1 | '''BASE_CFG for UCD'''
2 | # SEGMENTOR_CFG
3 | SEGMENTOR_CFG = {
4 | 'type': 'UCDSegmentor',
5 | 'num_known_classes_list': None,
6 | 'selected_indices': (0,),
7 | 'align_corners': False,
8 | 'encoder_cfg': {
9 | 'type': 'ResNetUCD',
10 | 'depth': 101,
11 | 'outstride': 16,
12 | 'out_indices': (3,),
13 | 'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
14 | 'act_cfg': None,
15 | 'pretrained': True,
16 | },
17 | 'decoder_cfg': {
18 | 'type': 'BASECLASPPHead',
19 | 'in_channels': 2048,
20 | 'feats_channels': 256,
21 | 'out_channels': 256,
22 | 'dilations': (1, 6, 12, 18),
23 | 'pooling_size': 32,
24 | 'norm_cfg': {'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
25 | },
26 | 'losses_cfgs': {
27 | 'segmentation_init': {
28 | 'loss_seg': {'CrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
29 | },
30 | 'segmentation_cl' : {
31 | 'loss_seg': {'MIBUnbiasedCrossEntropyLoss': {'scale_factor': 1.0, 'reduction': 'mean', 'ignore_index': 255}}
32 | },
33 | 'distillation': {'scale_factor': 10, 'alpha': 1.0},
34 | 'contrastive': {'scale_factor': 0.01, 'reduction': 'mean'},
35 | },
36 | }
37 | # RUNNER_CFG
38 | RUNNER_CFG = {
39 | 'type': 'UCDMIBRunner',
40 | 'algorithm': 'UCD',
41 | 'task_name': '',
42 | 'task_id': -1,
43 | 'num_tasks': -1,
44 | 'work_dir': '',
45 | 'benchmark': True,
46 | 'save_interval_epochs': 10,
47 | 'eval_interval_epochs': 10,
48 | 'log_interval_iterations': 10,
49 | 'choose_best_segmentor_by_metric': 'mean_iou',
50 | 'logfilepath': '',
51 | 'num_total_classes': -1,
52 | 'random_seed': 42,
53 | 'segmentor_cfg': SEGMENTOR_CFG,
54 | }
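Because every leaf config does "from .base_cfg import RUNNER_CFG" and then updates that dictionary in place, the base RUNNER_CFG is shared module state: importing two leaf configs in the same interpreter would layer both sets of updates onto one object, with the later import winning on conflicting keys, so each config module is meant to be loaded on its own. A standalone sketch of the pitfall (the functions are illustrative stand-ins for importing a leaf config):

    shared_cfg = {'task_name': ''}

    def load_leaf_a(cfg):
        cfg.update({'task_name': '15-5'})

    def load_leaf_b(cfg):
        cfg.update({'task_name': '10-1'})

    load_leaf_a(shared_cfg)
    load_leaf_b(shared_cfg)
    assert shared_cfg['task_name'] == '10-1'  # the second load silently wins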
--------------------------------------------------------------------------------
/csseg/configs/ucd/mib+ucd_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''mib+ucd_r101iabnd16_aspp_512x512_vocaug10-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(11)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 11):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '10-1',
28 | 'num_tasks': 11,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/configs/ucd/mib+ucd_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''mib+ucd_r101iabnd16_aspp_512x512_vocaug10-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(11)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 11):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 10
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '10-1',
28 | 'num_tasks': 11,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/configs/ucd/mib+ucd_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py:
--------------------------------------------------------------------------------
1 | '''mib+ucd_r101iabnd16_aspp_512x512_vocaug15-1_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(6)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 6):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '15-5s',
28 | 'num_tasks': 6,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/configs/ucd/mib+ucd_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py:
--------------------------------------------------------------------------------
1 | '''mib+ucd_r101iabnd16_aspp_512x512_vocaug15-1_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(6)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 6):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '15-5s',
28 | 'num_tasks': 6,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/configs/ucd/mib+ucd_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py:
--------------------------------------------------------------------------------
1 | '''mib+ucd_r101iabnd16_aspp_512x512_vocaug15-5_disjoint'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = False
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(2)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 2):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '15-5',
28 | 'num_tasks': 2,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/configs/ucd/mib+ucd_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py:
--------------------------------------------------------------------------------
1 | '''mib+ucd_r101iabnd16_aspp_512x512_vocaug15-5_overlap'''
2 | import os
3 | from .base_cfg import RUNNER_CFG
4 | from .._base_ import DATASET_CFG_VOCAUG_512x512, OPTIMIZER_CFG_SGD, SCHEDULER_CFG_POLY, DATALOADER_CFG_BS24, PARALLEL_CFG
5 |
6 |
7 | # add dataset_cfg
8 | RUNNER_CFG['dataset_cfg'] = DATASET_CFG_VOCAUG_512x512.copy()
9 | RUNNER_CFG['dataset_cfg']['overlap'] = True
10 | # add dataloader_cfg
11 | RUNNER_CFG['dataloader_cfg'] = DATALOADER_CFG_BS24.copy()
12 | # add optimizer_cfg
13 | RUNNER_CFG['optimizer_cfg'] = OPTIMIZER_CFG_SGD.copy()
14 | # add scheduler_cfg
15 | RUNNER_CFG['scheduler_cfg'] = [
16 | SCHEDULER_CFG_POLY.copy() for _ in range(2)
17 | ]
18 | RUNNER_CFG['scheduler_cfg'][0]['max_epochs'] = 30
19 | RUNNER_CFG['scheduler_cfg'][0]['lr'] = 0.02
20 | for i in range(1, 2):
21 | RUNNER_CFG['scheduler_cfg'][i]['max_epochs'] = 30
22 | RUNNER_CFG['scheduler_cfg'][i]['lr'] = 0.001
23 | # add parallel_cfg
24 | RUNNER_CFG['parallel_cfg'] = PARALLEL_CFG.copy()
25 | # modify RUNNER_CFG
26 | RUNNER_CFG.update({
27 | 'task_name': '15-5',
28 | 'num_tasks': 2,
29 | 'num_total_classes': 21,
30 | 'work_dir': os.path.split(__file__)[-1].split('.')[0],
31 | 'logfilepath': f"{os.path.split(__file__)[-1].split('.')[0]}/{os.path.split(__file__)[-1].split('.')[0]}.log",
32 | })
--------------------------------------------------------------------------------
/csseg/modules/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .runners import BuildRunner, RunnerBuilder
3 | from .parallel import BuildDistributedDataloader, BuildDistributedModel
4 | from .datasets import (
5 | SegmentationEvaluator, BuildDataTransform, DataTransformBuilder, BuildDataset, DatasetBuilder
6 | )
7 | from .utils import (
8 | setrandomseed, saveckpts, loadckpts, touchdir, saveaspickle, loadpicklefile, symlink, loadpretrainedweights,
9 | BaseModuleBuilder, EnvironmentCollector, ConfigParser, LoggerHandleBuilder, BuildLoggerHandle
10 | )
11 | from .models import (
12 | BuildLoss, LossBuilder, BuildDecoder, DecoderBuilder, BuildOptimizer, OptimizerBuilder, BuildParamsConstructor, ParamsConstructorBuilder,
13 | BuildEncoder, EncoderBuilder, BuildActivation, ActivationBuilder, BuildNormalization, NormalizationBuilder, BuildScheduler, SchedulerBuilder,
14 | BuildSegmentor, SegmentorBuilder
15 | )
--------------------------------------------------------------------------------
/csseg/modules/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import DatasetBuilder, BuildDataset
3 | from .pipelines import SegmentationEvaluator, BuildDataTransform, DataTransformBuilder
--------------------------------------------------------------------------------
/csseg/modules/datasets/ade20k.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ADE20kDataset
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import os
8 | import pandas as pd
9 | from .base import _BaseDataset, BaseDataset
10 |
11 |
12 | '''_ADE20kDataset'''
13 | class _ADE20kDataset(_BaseDataset):
14 | num_classes = 151
15 | classnames = [
16 | '__background__', 'wall', 'building, edifice', 'sky', 'floor, flooring', 'tree', 'ceiling', 'road, route', 'bed', 'windowpane, window', 'grass',
17 | 'cabinet', 'sidewalk, pavement', 'person, individual, someone, somebody, mortal, soul', 'earth, ground', 'door, double door', 'table', 'mountain, mount',
18 | 'plant, flora, plant life', 'curtain, drape, drapery, mantle, pall', 'chair', 'car, auto, automobile, machine, motorcar', 'water', 'painting, picture',
19 | 'sofa, couch, lounge', 'shelf', 'house', 'sea', 'mirror', 'rug, carpet, carpeting', 'field', 'armchair', 'seat', 'fence, fencing', 'desk', 'rock, stone',
20 | 'wardrobe, closet, press', 'lamp', 'bathtub, bathing tub, bath, tub', 'railing, rail', 'cushion', 'base, pedestal, stand', 'box', 'column, pillar', 'signboard, sign',
21 | 'chest of drawers, chest, bureau, dresser', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace, hearth, open fireplace', 'refrigerator, icebox',
22 | 'grandstand, covered stand', 'path', 'stairs, steps', 'runway', 'case, display case, showcase, vitrine', 'pool table, billiard table, snooker table', 'pillow',
23 | 'screen door, screen', 'stairway, staircase', 'river', 'bridge, span', 'bookcase', 'blind, screen', 'coffee table, cocktail table', 'toilet, can, commode, crapper, pot, potty, stool, throne',
24 | 'flower', 'book', 'hill', 'bench', 'countertop', 'stove, kitchen stove, range, kitchen range, cooking stove', 'palm, palm tree', 'kitchen island',
25 | 'computer, computing machine, computing device, data processor, electronic computer, information processing system', 'swivel chair', 'boat', 'bar', 'arcade machine',
26 | 'hovel, hut, hutch, shack, shanty', 'bus, autobus, coach, charabanc, double-decker, jitney, motorbus, motorcoach, omnibus, passenger vehicle',
27 | 'towel', 'light, light source', 'truck, motortruck', 'tower', 'chandelier, pendant, pendent', 'awning, sunshade, sunblind', 'streetlight, street lamp', 'booth, cubicle, stall, kiosk',
28 | 'television receiver, television, television set, tv, tv set, idiot box, boob tube, telly, goggle box', 'airplane, aeroplane, plane', 'dirt track',
29 | 'apparel, wearing apparel, dress, clothes', 'pole', 'land, ground, soil', 'bannister, banister, balustrade, balusters, handrail', 'escalator, moving staircase, moving stairway',
30 | 'ottoman, pouf, pouffe, puff, hassock', 'bottle', 'buffet, counter, sideboard', 'poster, posting, placard, notice, bill, card', 'stage', 'van', 'ship', 'fountain',
31 | 'conveyer belt, conveyor belt, conveyer, conveyor, transporter', 'canopy', 'washer, automatic washer, washing machine', 'plaything, toy', 'swimming pool, swimming bath, natatorium',
32 | 'stool', 'barrel, cask', 'basket, handbasket', 'waterfall, falls', 'tent, collapsible shelter', 'bag', 'minibike, motorbike', 'cradle', 'oven', 'ball', 'food, solid food', 'step, stair', 'tank, storage tank',
33 | 'trade name, brand name, brand, marque', 'microwave, microwave oven', 'pot, flowerpot', 'animal, animate being, beast, brute, creature, fauna', 'bicycle, bike, wheel, cycle', 'lake',
34 | 'dishwasher, dish washer, dishwashing machine', 'screen, silver screen, projection screen', 'blanket, cover', 'sculpture', 'hood, exhaust hood', 'sconce', 'vase',
35 | 'traffic light, traffic signal, stoplight', 'tray', 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
36 | 'fan', 'pier, wharf, wharfage, dock', 'crt screen', 'plate', 'monitor, monitoring device', 'bulletin board, notice board', 'shower', 'radiator', 'glass, drinking glass', 'clock', 'flag'
37 | ]
38 | assert num_classes == len(classnames)
39 | def __init__(self, mode, dataset_cfg):
40 | super(_ADE20kDataset, self).__init__(mode=mode, dataset_cfg=dataset_cfg)
41 | # set directory
42 | rootdir = dataset_cfg['rootdir']
43 | setmap_dict = {'train': 'training', 'val': 'validation', 'test': 'testing'}
44 | self.image_dir = os.path.join(rootdir, 'ADEChallengeData2016/images', setmap_dict[dataset_cfg['set']])
45 | self.ann_dir = os.path.join(rootdir, 'ADEChallengeData2016/annotations', setmap_dict[dataset_cfg['set']])
46 | # obtain imageids
47 | df = pd.read_csv(os.path.join(rootdir, 'ADEChallengeData2016', dataset_cfg['set']+'.txt'), names=['imageids'])
48 | self.imageids = df['imageids'].values
49 | self.imageids = [str(_id) for _id in self.imageids]
50 |
51 |
52 | '''ADE20kDataset'''
53 | class ADE20kDataset(BaseDataset):
54 | tasks = {
55 | 'offline': {
56 | 0: [x for x in range(151)]
57 | },
58 | '100-50': {
59 | 0: [x for x in range(0, 101)], 1: [x for x in range(101, 151)]
60 | },
61 | '100-10': {
62 | 0: [x for x in range(0, 101)], 1: [x for x in range(101, 111)], 2: [x for x in range(111, 121)],
63 | 3: [x for x in range(121, 131)], 4: [x for x in range(131, 141)], 5: [x for x in range(141, 151)]
64 | },
65 | '100-5': {
66 | 0: [x for x in range(0, 101)], 1: [x for x in range(101, 106)], 2: [x for x in range(106, 111)],
67 | 3: [x for x in range(111, 116)], 4: [x for x in range(116, 121)], 5: [x for x in range(121, 126)],
68 | 6: [x for x in range(126, 131)], 7: [x for x in range(131, 136)], 8: [x for x in range(136, 141)],
69 | 9: [x for x in range(141, 146)], 10: [x for x in range(146, 151)]
70 | },
71 | '50': {
72 | 0: [x for x in range(0, 51)], 1: [x for x in range(51, 101)], 2: [x for x in range(101, 151)]
73 | },
74 | }
75 | def __init__(self, mode, task_name, task_id, dataset_cfg):
76 | super(ADE20kDataset, self).__init__(
77 | mode=mode, task_name=task_name, task_id=task_id, dataset_cfg=dataset_cfg
78 | )
79 | '''builddatagenerator'''
80 | def builddatagenerator(self, mode, dataset_cfg):
81 | data_generator = _ADE20kDataset(mode, dataset_cfg)
82 | return data_generator
--------------------------------------------------------------------------------
/csseg/modules/datasets/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of DatasetBuilder and BuildDataset
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | from .voc import VOCDataset
9 | from .ade20k import ADE20kDataset
10 | from ..utils import BaseModuleBuilder
11 |
12 |
13 | '''DatasetBuilder'''
14 | class DatasetBuilder(BaseModuleBuilder):
15 | REGISTERED_MODULES = {
16 | 'VOCDataset': VOCDataset, 'ADE20kDataset': ADE20kDataset,
17 | }
18 | '''build'''
19 | def build(self, mode, task_name, task_id, dataset_cfg):
20 | dataset_cfg = copy.deepcopy(dataset_cfg)
21 | train_cfg, test_cfg = dataset_cfg.pop('train'), dataset_cfg.pop('test')
22 | dataset_cfg.update(train_cfg if mode == 'TRAIN' else test_cfg)
23 | dataset_type = dataset_cfg.pop('type')
24 | module_cfg = {
25 | 'mode': mode, 'task_name': task_name, 'task_id': task_id, 'dataset_cfg': dataset_cfg, 'type': dataset_type
26 | }
27 | return super().build(module_cfg)
28 |
29 |
30 | '''BuildDataset'''
31 | BuildDataset = DatasetBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/datasets/pipelines/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .evaluators import SegmentationEvaluator
3 | from .transforms import DataTransformBuilder, BuildDataTransform, Compose
--------------------------------------------------------------------------------
/csseg/modules/datasets/pipelines/evaluators.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of Evaluators
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | import numpy as np
9 | import torch.distributed as dist
10 |
11 |
12 | '''SegmentationEvaluator'''
13 | class SegmentationEvaluator():
14 | def __init__(self, num_classes, eps=1e-6):
15 | self.eps = eps
16 | self.num_classes = num_classes
17 | self.reset()
18 | '''reset'''
19 | def reset(self):
20 | self.confusion_matrix = np.zeros((self.num_classes, self.num_classes))
21 | self.total_samples = 0
22 | '''synchronize'''
23 | def synchronize(self, device=None):
24 | confusion_matrix = torch.tensor(self.confusion_matrix).to(device)
25 | total_samples = torch.tensor(self.total_samples).to(device)
26 | dist.reduce(confusion_matrix, dst=0)
27 | dist.reduce(total_samples, dst=0)
28 | self.confusion_matrix = confusion_matrix.cpu().numpy()
29 | self.total_samples = total_samples.cpu().numpy()
30 | '''update'''
31 | def update(self, seg_targets, seg_preds):
32 | for st, sp in zip(seg_targets, seg_preds):
33 | self.confusion_matrix += self.fasthist(st.flatten(), sp.flatten())
34 | self.total_samples += len(seg_targets)
35 | '''fasthist'''
36 | def fasthist(self, seg_target, seg_pred):
37 | mask = (seg_target >= 0) & (seg_target < self.num_classes)
38 | hist = np.bincount(
39 | self.num_classes * seg_target[mask].astype(int) + seg_pred[mask], minlength=self.num_classes**2
40 | ).reshape(self.num_classes, self.num_classes)
41 | return hist
42 | '''evaluate'''
43 | def evaluate(self):
44 | # obtain variables
45 | eps = self.eps
46 | hist = self.confusion_matrix
47 | # evaluate
48 | all_accuracy = np.diag(hist).sum() / hist.sum()
49 | mean_accuracy = np.mean((np.diag(hist) / (hist.sum(axis=1) + eps))[hist.sum(axis=1) != 0])
50 | iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist) + eps)
51 | mean_iou = np.mean(iou[hist.sum(axis=1) != 0])
52 | class_iou = dict(zip(range(self.num_classes), [iou[i] if m else 'INVALID' for i, m in enumerate(hist.sum(axis=1) != 0)]))
53 | class_accuracy = dict(zip(range(self.num_classes), [(np.diag(hist) / (hist.sum(axis=1) + eps))[i] if m else 'INVALID' for i, m in enumerate(hist.sum(axis=1) != 0)]))
54 | # summarize
55 | results = {
56 | 'all_accuracy': all_accuracy, 'mean_accuracy': mean_accuracy, 'mean_iou': mean_iou,
57 | 'class_iou': class_iou, 'class_accuracy': class_accuracy
58 | }
59 | # return
60 | return results
--------------------------------------------------------------------------------
/csseg/modules/datasets/voc.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of VOCDataset
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import os
8 | import pandas as pd
9 | from .base import _BaseDataset, BaseDataset
10 |
11 |
12 | '''_VOCDataset'''
13 | class _VOCDataset(_BaseDataset):
14 | num_classes = 21
15 | classnames = [
16 | '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
17 | 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
18 | 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
19 | ]
20 | assert num_classes == len(classnames)
21 | def __init__(self, mode, dataset_cfg):
22 | super(_VOCDataset, self).__init__(mode=mode, dataset_cfg=dataset_cfg)
23 | # set directory
24 | rootdir = dataset_cfg['rootdir']
25 | self.image_dir = os.path.join(rootdir, 'JPEGImages')
26 | self.ann_dir = os.path.join(rootdir, 'SegmentationClass')
27 | # obtain imageids
28 | set_dir = os.path.join(rootdir, 'ImageSets', 'Segmentation')
29 | df = pd.read_csv(os.path.join(set_dir, dataset_cfg['set']+'.txt'), names=['imageids'])
30 | self.imageids = df['imageids'].values
31 | self.imageids = [str(_id) for _id in self.imageids]
32 |
33 |
34 | '''VOCDataset'''
35 | class VOCDataset(BaseDataset):
36 | tasks = {
37 | 'offline': {
38 | 0: list(range(21)),
39 | },
40 | '19-1': {
41 | 0: list(range(20)), 1: [20],
42 | },
43 | '15-5': {
44 | 0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
45 | 1: [16, 17, 18, 19, 20]
46 | },
47 | '15-5s': {
48 | 0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
49 | 1: [16], 2: [17], 3: [18], 4: [19], 5: [20]
50 | },
51 | '15-5s_b': {
52 | 0: [0, 12, 9, 20, 7, 15, 8, 14, 16, 5, 19, 4, 1, 13, 2, 11],
53 | 1: [17], 2: [3], 3: [6], 4: [18], 5: [10]
54 | },
55 | '15-5s_c': {
56 | 0: [0, 13, 19, 15, 17, 9, 8, 5, 20, 4, 3, 10, 11, 18, 16, 7],
57 | 1: [12], 2: [14], 3: [6], 4: [1], 5: [2]
58 | },
59 | '15-5s_d': {
60 | 0: [0, 15, 3, 2, 12, 14, 18, 20, 16, 11, 1, 19, 8, 10, 7, 17],
61 | 1: [6], 2: [5], 3: [13], 4: [9], 5: [4]
62 | },
63 | '15-5s_e': {
64 | 0: [0, 7, 5, 3, 9, 13, 12, 14, 19, 10, 2, 1, 4, 16, 8, 17],
65 | 1: [15], 2: [18], 3: [6], 4: [11], 5: [20]
66 | },
67 | '15-5s_f': {
68 | 0: [0, 7, 13, 5, 11, 9, 2, 15, 12, 14, 3, 20, 1, 16, 4, 18],
69 | 1: [8], 2: [6], 3: [10], 4: [19], 5: [17]
70 | },
71 | '15-5s_g': {
72 | 0: [0, 7, 5, 9, 1, 15, 18, 14, 3, 20, 10, 4, 19, 11, 17, 16],
73 | 1: [12], 2: [8], 3: [6], 4: [2], 5: [13]
74 | },
75 | '15-5s_h': {
76 | 0: [0, 12, 9, 19, 6, 4, 10, 5, 18, 14, 15, 16, 3, 8, 7, 11],
77 | 1: [13], 2: [2], 3: [20], 4: [17], 5: [1]
78 | },
79 | '15-5s_i': {
80 | 0: [0, 13, 10, 15, 8, 7, 19, 4, 3, 16, 12, 14, 11, 5, 20, 6],
81 | 1: [2], 2: [18], 3: [9], 4: [17], 5: [1]
82 | },
83 | '15-5s_j': {
84 | 0: [0, 1, 14, 9, 5, 2, 15, 8, 20, 6, 16, 18, 7, 11, 10, 19],
85 | 1: [3], 2: [4], 3: [17], 4: [12], 5: [13]
86 | },
87 | '15-5s_k': {
88 | 0: [0, 16, 13, 1, 11, 12, 18, 6, 14, 5, 3, 7, 9, 20, 19, 15],
89 | 1: [4], 2: [2], 3: [10], 4: [8], 5: [17]
90 | },
91 | '15-5s_l': {
92 | 0: [0, 10, 7, 6, 19, 16, 8, 17, 1, 14, 4, 9, 3, 15, 11, 12],
93 | 1: [2], 2: [18], 3: [20], 4: [13], 5: [5]
94 | },
95 | '15-5s_m': {
96 | 0: [0, 18, 4, 14, 17, 12, 10, 7, 3, 9, 1, 8, 15, 6, 13, 2],
97 | 1: [5], 2: [11], 3: [20], 4: [16], 5: [19]
98 | },
99 | '15-5s_n': {
100 | 0: [0, 5, 4, 13, 18, 14, 10, 19, 15, 7, 9, 3, 2, 8, 16, 20],
101 | 1: [1], 2: [12], 3: [11], 4: [6], 5: [17]
102 | },
103 | '15-5s_o': {
104 | 0: [0, 9, 12, 13, 18, 7, 1, 15, 17, 10, 8, 4, 5, 20, 16, 6],
105 | 1: [14], 2: [19], 3: [11], 4: [2], 5: [3]
106 | },
107 | '15-5s_p': {
108 | 0: [0, 9, 12, 13, 18, 2, 11, 15, 17, 10, 8, 4, 5, 20, 16, 6],
109 | 1: [14], 2: [19], 3: [1], 4: [7], 5: [3]
110 | },
111 | '15-5s_q': {
112 | 0: [0, 3, 14, 13, 18, 2, 11, 15, 17, 10, 8, 4, 5, 20, 16, 6],
113 | 1: [12], 2: [19], 3: [1], 4: [7], 5: [9]
114 | },
115 | '15-5s_r': {
116 | 0: [0, 3, 14, 13, 1, 2, 11, 15, 17, 7, 8, 4, 5, 9, 16, 19],
117 | 1: [12], 2: [6], 3: [18], 4: [10], 5: [20]
118 | },
119 | '15-5s_s': {
120 | 0: [0, 3, 14, 6, 1, 2, 11, 12, 17, 7, 20, 4, 5, 9, 16, 19],
121 | 1: [15], 2: [13], 3: [18], 4: [10], 5: [8]
122 | },
123 | '15-5s_t': {
124 | 0: [0, 3, 15, 13, 1, 2, 11, 18, 17, 7, 20, 8, 5, 9, 16, 19],
125 | 1: [14], 2: [6], 3: [12], 4: [10], 5: [4]
126 | },
127 | '15-5s_u': {
128 | 0: [0, 3, 15, 13, 14, 6, 11, 18, 17, 7, 20, 8, 4, 9, 16, 10],
129 | 1: [1], 2: [2], 3: [12], 4: [19], 5: [5]
130 | },
131 | '15-5s_v': {
132 | 0: [0, 1, 2, 12, 14, 6, 19, 18, 17, 5, 20, 8, 4, 9, 16, 10],
133 | 1: [3], 2: [15], 3: [13], 4: [11], 5: [7]
134 | },
135 | '15-5s_w': {
136 | 0: [0, 1, 2, 12, 14, 13, 19, 18, 7, 11, 20, 8, 4, 9, 16, 10],
137 | 1: [3], 2: [15], 3: [6], 4: [5], 5: [17]
138 | },
139 | '10-1': {
140 | 0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
141 | 1: [11], 2: [12], 3: [13], 4: [14], 5: [15], 6: [16], 7: [17], 8: [18], 9: [19], 10: [20]
142 | },
143 | }
144 | def __init__(self, mode, task_name, task_id, dataset_cfg):
145 | super(VOCDataset, self).__init__(
146 | mode=mode, task_name=task_name, task_id=task_id, dataset_cfg=dataset_cfg
147 | )
148 | '''builddatagenerator'''
149 | def builddatagenerator(self, mode, dataset_cfg):
150 | data_generator = _VOCDataset(mode, dataset_cfg)
151 | return data_generator
--------------------------------------------------------------------------------
/csseg/modules/models/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .losses import BuildLoss, LossBuilder
3 | from .decoders import BuildDecoder, DecoderBuilder
4 | from .segmentors import BuildSegmentor, SegmentorBuilder
5 | from .schedulers import BuildScheduler, SchedulerBuilder
6 | from .optimizers import BuildOptimizer, OptimizerBuilder, ParamsConstructorBuilder, BuildParamsConstructor
7 | from .encoders import (
8 | BuildEncoder, EncoderBuilder, BuildActivation, ActivationBuilder, BuildNormalization, NormalizationBuilder
9 | )
--------------------------------------------------------------------------------
/csseg/modules/models/decoders/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildDecoder, DecoderBuilder
--------------------------------------------------------------------------------
/csseg/modules/models/decoders/aspphead.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ASPPHead
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | import torch.nn as nn
9 | import torch.nn.functional as F
10 | from ..encoders import BuildNormalization, BuildActivation
11 |
12 |
13 | '''ASPPHead'''
14 | class ASPPHead(nn.Module):
15 | def __init__(self, in_channels, feats_channels, out_channels, dilations, pooling_size=32, norm_cfg=None, act_cfg=None):
16 | super(ASPPHead, self).__init__()
17 | # set attributes
18 | self.in_channels = in_channels
19 | self.feats_channels = feats_channels
20 | self.out_channels = out_channels
21 | self.dilations = dilations
22 | self.pooling_size = (pooling_size, pooling_size) if isinstance(pooling_size, int) else pooling_size
23 | self.norm_cfg = norm_cfg
24 | self.act_cfg = act_cfg
25 | # parallel convolutions
26 | self.parallel_convs = nn.ModuleList()
27 | for dilation in dilations:
28 | if dilation == 1:
29 | conv = nn.Conv2d(in_channels, feats_channels, kernel_size=1, stride=1, padding=0, dilation=dilation, bias=False)
30 | else:
31 | conv = nn.Conv2d(in_channels, feats_channels, kernel_size=3, stride=1, padding=dilation, dilation=dilation, bias=False)
32 | self.parallel_convs.append(conv)
33 | self.parallel_bn = BuildNormalization(placeholder=feats_channels * len(dilations), norm_cfg=norm_cfg)
34 | self.parallel_act = BuildActivation(act_cfg)
35 | # global branch
36 | self.global_branch = nn.Sequential(
37 | nn.Conv2d(in_channels, feats_channels, kernel_size=1, stride=1, padding=0, bias=False),
38 | BuildNormalization(placeholder=feats_channels, norm_cfg=norm_cfg),
39 | BuildActivation(act_cfg),
40 | nn.Conv2d(feats_channels, feats_channels, kernel_size=1, stride=1, padding=0, bias=False),
41 | )
42 | # output project
43 | self.out_project = nn.Sequential(
44 | nn.Conv2d(feats_channels * len(dilations), out_channels, kernel_size=1, stride=1, padding=0, bias=False),
45 | BuildNormalization(placeholder=out_channels, norm_cfg=norm_cfg),
46 | BuildActivation(act_cfg),
47 | )
48 | '''forward'''
49 | def forward(self, x):
50 | # feed to parallel convolutions
51 | outputs = torch.cat([conv(x) for conv in self.parallel_convs], dim=1)
52 | outputs = self.parallel_bn(outputs)
53 | outputs = self.parallel_act(outputs)
54 | outputs = self.out_project[0](outputs)
55 | # feed to global branch
56 | global_feats = self.globalpooling(x)
57 | global_feats = self.global_branch(global_feats)
58 | if self.training or self.pooling_size is None:
59 | global_feats = global_feats.repeat(1, 1, x.size(2), x.size(3))
60 | # shortcut
61 | outputs = outputs + global_feats
62 | outputs = self.out_project[1:](outputs)
63 | # return
64 | return outputs
65 | '''globalpooling'''
66 | def globalpooling(self, x):
67 | if self.training or self.pooling_size is None:
68 | global_feats = x.view(x.size(0), x.size(1), -1).mean(dim=-1)
69 | global_feats = global_feats.view(x.size(0), x.size(1), 1, 1)
70 | else:
71 | pooling_size = (min(self.pooling_size[0], x.shape[2]), min(self.pooling_size[1], x.shape[3]))
72 | padding = (
73 | (pooling_size[1] - 1) // 2, (pooling_size[1] - 1) // 2 if pooling_size[1] % 2 == 1 else (pooling_size[1] - 1) // 2 + 1,
74 | (pooling_size[0] - 1) // 2, (pooling_size[0] - 1) // 2 if pooling_size[0] % 2 == 1 else (pooling_size[0] - 1) // 2 + 1,
75 | )
76 | global_feats = F.avg_pool2d(x, pooling_size, stride=1)
77 | global_feats = F.pad(global_feats, pad=padding, mode='replicate')
78 | return global_feats
--------------------------------------------------------------------------------
/csseg/modules/models/decoders/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BuildDecoder
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .aspphead import ASPPHead
8 | from .rcilaspphead import RCILASPPHead
9 | from ...utils import BaseModuleBuilder
10 |
11 |
12 | '''DecoderBuilder'''
13 | class DecoderBuilder(BaseModuleBuilder):
14 | REGISTERED_MODULES = {
15 | 'ASPPHead': ASPPHead, 'RCILASPPHead': RCILASPPHead,
16 | }
17 | '''build'''
18 | def build(self, decoder_cfg):
19 | return super().build(decoder_cfg)
20 |
21 |
22 | '''BuildDecoder'''
23 | BuildDecoder = DecoderBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/models/decoders/rcilaspphead.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of RCILASPPHead
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | import torch.nn as nn
9 | import torch.nn.functional as F
10 | from ..encoders import BuildNormalization
11 |
12 |
13 | '''RCILASPPHead'''
14 | class RCILASPPHead(nn.Module):
15 | def __init__(self, in_channels, feats_channels, out_channels, dilations, pooling_size=32, norm_cfg=None):
16 | super(RCILASPPHead, self).__init__()
17 | # assert
18 | assert norm_cfg['type'] in ['ABN', 'InPlaceABN', 'InPlaceABNSync']
20 | # set attributes
21 | self.in_channels = in_channels
22 | self.feats_channels = feats_channels
23 | self.out_channels = out_channels
24 | self.pooling_size = (pooling_size, pooling_size) if isinstance(pooling_size, int) else pooling_size
25 | # parallel convolutions
26 | self.parallel_convs_branch1 = nn.ModuleList()
27 | self.parallel_convs_branch2 = nn.ModuleList()
28 | for dilation in dilations:
29 | if dilation == 1:
30 | conv_branch1 = nn.Conv2d(in_channels, feats_channels, kernel_size=1, stride=1, padding=0, dilation=dilation, bias=False)
31 | conv_branch2 = nn.Conv2d(in_channels, feats_channels, kernel_size=1, stride=1, padding=0, dilation=dilation, bias=False)
32 | else:
33 | conv_branch1 = nn.Conv2d(in_channels, feats_channels, kernel_size=3, stride=1, padding=dilation, dilation=dilation, bias=False)
34 | conv_branch2 = nn.Conv2d(in_channels, feats_channels, kernel_size=3, stride=1, padding=dilation, dilation=dilation, bias=False)
35 | self.parallel_convs_branch1.append(conv_branch1)
36 | self.parallel_convs_branch2.append(conv_branch2)
37 | self.parallel_bn_branch1 = BuildNormalization(placeholder=feats_channels * len(dilations), norm_cfg=norm_cfg)
38 | self.parallel_bn_branch2 = BuildNormalization(placeholder=feats_channels * len(dilations), norm_cfg=norm_cfg)
39 | # global branch
40 | self.global_branch = nn.Sequential(
41 | nn.Conv2d(in_channels, feats_channels, kernel_size=1, stride=1, padding=0, bias=False),
42 | BuildNormalization(placeholder=feats_channels, norm_cfg=norm_cfg),
43 | nn.LeakyReLU(0.01),
44 | nn.Conv2d(feats_channels, feats_channels, kernel_size=1, stride=1, padding=0, bias=False),
45 | )
46 | # output project
47 | self.bottleneck_conv = nn.Conv2d(feats_channels * len(dilations), out_channels, kernel_size=1, stride=1, padding=0, bias=False)
48 | self.bottleneck_bn = BuildNormalization(placeholder=out_channels, norm_cfg=norm_cfg)
49 | # initialize parameters (the bottleneck ABN is expected to carry activation_param 1.0)
50 | assert self.bottleneck_bn.activation_param == 1.0
51 | self.initparams(self.bottleneck_bn.activation, self.bottleneck_bn.activation_param)
51 | '''initparams'''
52 | def initparams(self, nonlinearity, param=None):
53 | gain = nn.init.calculate_gain(nonlinearity, param)
54 | for module in self.modules():
55 | if isinstance(module, nn.Conv2d):
56 | nn.init.xavier_normal_(module.weight.data, gain)
57 | if hasattr(module, 'bias') and module.bias is not None:
58 | nn.init.constant_(module.bias, 0)
59 | elif isinstance(module, nn.BatchNorm2d):
60 | if hasattr(module, 'weight') and module.weight is not None:
61 | nn.init.constant_(module.weight, 1)
62 | if hasattr(module, 'bias') and module.bias is not None:
63 | nn.init.constant_(module.bias, 0)
64 | '''forward'''
65 | def forward(self, x):
66 | # feed to parallel convolutions branch1 and branch2
67 | outputs_branch1 = torch.cat([conv(x) for conv in self.parallel_convs_branch1], dim=1)
68 | outputs_branch1 = self.parallel_bn_branch1(outputs_branch1)
69 | outputs_branch2 = torch.cat([conv(x) for conv in self.parallel_convs_branch2], dim=1)
70 | outputs_branch2 = self.parallel_bn_branch2(outputs_branch2)
71 | # merge
72 | r = torch.rand(1, outputs_branch1.shape[1], 1, 1, dtype=torch.float32)
73 | if not self.training: r[:, :, :, :] = 1.0
74 | weight_branch1, weight_branch2 = torch.zeros_like(r), torch.zeros_like(r)
75 | weight_branch1[r < 0.33] = 2.
76 | weight_branch1[(r < 0.66) & (r >= 0.33)] = 0.
77 | weight_branch1[r >= 0.66] = 1.
78 | weight_branch2[r < 0.33] = 0.
79 | weight_branch2[(r < 0.66) & (r >= 0.33)] = 2.
80 | weight_branch2[r >= 0.66] = 1.
81 | outputs = outputs_branch1 * weight_branch1.type_as(outputs_branch1) * 0.5 + outputs_branch2 * weight_branch2.type_as(outputs_branch2) * 0.5
82 | outputs = F.leaky_relu(outputs, negative_slope=0.01)
83 | outputs = self.bottleneck_conv(outputs)
84 | # feed to global branch
85 | global_feats = self.globalpooling(x)
86 | global_feats = self.global_branch(global_feats)
87 | if self.training or self.pooling_size is None:
88 | global_feats = global_feats.repeat(1, 1, x.size(2), x.size(3))
89 | # shortcut
90 | outputs = outputs + global_feats
91 | outputs = self.bottleneck_bn(outputs)
92 | outputs = F.leaky_relu(outputs, negative_slope=0.01)
93 | # return
94 | return outputs
95 | '''globalpooling'''
96 | def globalpooling(self, x):
97 | if self.training or self.pooling_size is None:
98 | global_feats = x.view(x.size(0), x.size(1), -1).mean(dim=-1)
99 | global_feats = global_feats.view(x.size(0), x.size(1), 1, 1)
100 | else:
101 | pooling_size = (min(self.pooling_size[0], x.shape[2]), min(self.pooling_size[1], x.shape[3]))
102 | padding = (
103 | (pooling_size[1] - 1) // 2, (pooling_size[1] - 1) // 2 if pooling_size[1] % 2 == 1 else (pooling_size[1] - 1) // 2 + 1,
104 | (pooling_size[0] - 1) // 2, (pooling_size[0] - 1) // 2 if pooling_size[0] % 2 == 1 else (pooling_size[0] - 1) // 2 + 1,
105 | )
106 | global_feats = F.avg_pool2d(x, pooling_size, stride=1)
107 | global_feats = F.pad(global_feats, pad=padding, mode='replicate')
108 | return global_feats
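The merge block above is RCIL's channel-wise branch dropout. Per channel, r selects one of three cases: keep only branch 1 (weights 2/0), keep only branch 2 (0/2), or average both (1/1); the trailing 0.5 factor keeps the expected output equal to the plain 0.5/0.5 average, and at inference r is forced to 1.0 so both branches always contribute 0.5 each. A standalone sketch of the three cases:

    import torch

    r = torch.tensor([0.2, 0.5, 0.9])  # one draw per channel
    w1, w2 = torch.zeros_like(r), torch.zeros_like(r)
    w1[r < 0.33] = 2.                        # only branch 1 (2 * 0.5 = 1)
    w2[(r >= 0.33) & (r < 0.66)] = 2.        # only branch 2
    w1[r >= 0.66] = 1.; w2[r >= 0.66] = 1.   # both branches, 0.5 each
    print(w1.tolist(), w2.tolist())          # [2.0, 0.0, 1.0] [0.0, 2.0, 1.0]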
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildEncoder, EncoderBuilder
3 | from .bricks import (
4 | NormalizationBuilder, BuildNormalization, ActivationBuilder, BuildActivation
5 | )
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/bricks/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .activation import BuildActivation, ActivationBuilder
3 | from .normalization import BuildNormalization, NormalizationBuilder
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/bricks/activation/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildActivation, ActivationBuilder
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/bricks/activation/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ActivationBuilder and BuildActivation
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch.nn as nn
8 | from .....utils import BaseModuleBuilder
9 |
10 |
11 | '''ActivationBuilder'''
12 | class ActivationBuilder(BaseModuleBuilder):
13 | REGISTERED_MODULES = {
14 | 'ReLU': nn.ReLU, 'GELU': nn.GELU, 'ReLU6': nn.ReLU6, 'PReLU': nn.PReLU, 'Sigmoid': nn.Sigmoid, 'LeakyReLU': nn.LeakyReLU,
15 | }
16 | for act_type in ['ELU', 'Hardshrink', 'Hardtanh', 'LogSigmoid', 'RReLU', 'SELU', 'CELU', 'SiLU', 'GLU',
17 | 'Mish', 'Softplus', 'Softshrink', 'Softsign', 'Tanh', 'Tanhshrink', 'Threshold']:
18 | if hasattr(nn, act_type):
19 | REGISTERED_MODULES[act_type] = getattr(nn, act_type)
20 | '''build'''
21 | def build(self, act_cfg):
22 | if act_cfg is None: return nn.Identity()
23 | return super().build(act_cfg)
24 |
25 |
26 | '''BuildActivation'''
27 | BuildActivation = ActivationBuilder().build
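Hedged usage, assuming BaseModuleBuilder.build pops 'type' and forwards the remaining keys as keyword arguments (the same convention the dataset builder relies on):

    from csseg.modules.models.encoders import BuildActivation

    act = BuildActivation({'type': 'LeakyReLU', 'negative_slope': 0.01})
    identity = BuildActivation(None)  # act_cfg=None yields nn.Identity()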
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/bricks/normalization/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildNormalization, NormalizationBuilder
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/bricks/normalization/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of NormalizationBuilder and BuildNormalization
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import torch.nn as nn
9 | import torch.distributed as dist
10 | from .....utils import BaseModuleBuilder
11 | from inplace_abn import ABN, InPlaceABN, InPlaceABNSync
12 |
13 |
14 | '''NormalizationBuilder'''
15 | class NormalizationBuilder(BaseModuleBuilder):
16 | REGISTERED_MODULES = {
17 | 'ABN': ABN, 'InPlaceABN': InPlaceABN, 'InPlaceABNSync': InPlaceABNSync, 'GroupNorm': nn.GroupNorm, 'LayerNorm': nn.LayerNorm,
18 | 'BatchNorm1d': nn.BatchNorm1d, 'BatchNorm2d': nn.BatchNorm2d, 'BatchNorm3d': nn.BatchNorm3d, 'SyncBatchNorm': nn.SyncBatchNorm,
19 | 'InstanceNorm1d': nn.InstanceNorm1d, 'InstanceNorm2d': nn.InstanceNorm2d, 'InstanceNorm3d': nn.InstanceNorm3d,
20 | }
21 | for norm_type in ['LazyBatchNorm1d', 'LazyBatchNorm2d', 'LazyBatchNorm3d', 'LazyInstanceNorm1d', 'LazyInstanceNorm2d', 'LazyInstanceNorm3d']:
22 | if hasattr(nn, norm_type):
23 | REGISTERED_MODULES[norm_type] = getattr(nn, norm_type)
24 | '''build'''
25 | def build(self, placeholder, norm_cfg):
26 | if norm_cfg is None: return nn.Identity()
27 | norm_cfg = copy.deepcopy(norm_cfg)
28 | norm_type = norm_cfg.pop('type')
29 | if norm_type in ['GroupNorm']:
30 | normalization = self.REGISTERED_MODULES[norm_type](num_channels=placeholder, **norm_cfg)
31 | elif norm_type in ['InPlaceABNSync']:
32 | norm_cfg['group'] = dist.group.WORLD
33 | normalization = self.REGISTERED_MODULES[norm_type](placeholder, **norm_cfg)
34 | else:
35 | normalization = self.REGISTERED_MODULES[norm_type](placeholder, **norm_cfg)
36 | return normalization
37 | '''isnorm'''
38 | @staticmethod
39 | def isnorm(module, norm_list=None):
40 | if norm_list is None:
41 | norm_list = (
42 | nn.GroupNorm, nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
43 | nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.SyncBatchNorm,
44 | ABN, InPlaceABN, InPlaceABNSync,
45 | )
46 | return isinstance(module, norm_list)
47 |
48 |
49 | '''BuildNormalization'''
50 | BuildNormalization = NormalizationBuilder().build
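Note the 'InPlaceABNSync' branch injects the default process group, so torch.distributed must be initialized before building it; single-process sketches can stick to the plain torch norms:

    from csseg.modules.models.encoders import BuildNormalization

    bn = BuildNormalization(placeholder=256, norm_cfg={'type': 'BatchNorm2d'})
    gn = BuildNormalization(placeholder=256, norm_cfg={'type': 'GroupNorm', 'num_groups': 32})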
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of EncoderBuilder and BuildEncoder
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .resnet import ResNet
8 | from .resnetilt import ResNetILT
9 | from .resnetplop import ResNetPLOP
10 | from .resnetrcil import ResNetRCIL
11 | from ...utils import BaseModuleBuilder
12 |
13 |
14 | '''EncoderBuilder'''
15 | class EncoderBuilder(BaseModuleBuilder):
16 | REGISTERED_MODULES = {
17 | 'ResNet': ResNet, 'ResNetILT': ResNetILT, 'ResNetPLOP': ResNetPLOP, 'ResNetRCIL': ResNetRCIL,
18 | }
19 | '''build'''
20 | def build(self, encoder_cfg):
21 | return super().build(encoder_cfg)
22 |
23 |
24 | '''BuildEncoder'''
25 | BuildEncoder = EncoderBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/resnetilt.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ResNetILT
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .resnet import ResNet, BasicBlock, Bottleneck
8 |
9 |
10 | '''BasicBlockILT'''
11 | class BasicBlockILT(BasicBlock):
12 | expansion = 1
13 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, norm_cfg=None, act_cfg=None, shortcut_norm_cfg=None, shortcut_act_cfg=None):
14 | super(BasicBlockILT, self).__init__(
15 | inplanes=inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample,
16 | norm_cfg=norm_cfg, act_cfg=act_cfg, shortcut_norm_cfg=shortcut_norm_cfg, shortcut_act_cfg=shortcut_act_cfg,
17 | )
18 |
19 |
20 | '''BottleneckILT'''
21 | class BottleneckILT(Bottleneck):
22 | expansion = 4
23 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, norm_cfg=None, act_cfg=None, shortcut_norm_cfg=None, shortcut_act_cfg=None):
24 | super(BottleneckILT, self).__init__(
25 | inplanes=inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample,
26 | norm_cfg=norm_cfg, act_cfg=act_cfg, shortcut_norm_cfg=shortcut_norm_cfg, shortcut_act_cfg=shortcut_act_cfg,
27 | )
28 |
29 |
30 | '''ResNetILT'''
31 | class ResNetILT(ResNet):
32 | def __init__(self, structure_type, in_channels=3, base_channels=64, stem_channels=64, depth=101, outstride=16, contract_dilation=False, deep_stem=False,
33 | out_indices=(3,), use_avg_for_downsample=False, norm_cfg={'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01}, act_cfg=None,
34 | shortcut_norm_cfg=None, shortcut_act_cfg=None, pretrained=True, pretrained_model_path=None, user_defined_block=None, use_inplaceabn_style=True):
35 | if user_defined_block is None:
36 | user_defined_block = BasicBlockILT if depth in [18, 34] else BottleneckILT
37 | super(ResNetILT, self).__init__(
38 | in_channels=in_channels, base_channels=base_channels, stem_channels=stem_channels, depth=depth, outstride=outstride, contract_dilation=contract_dilation,
39 | deep_stem=deep_stem, out_indices=out_indices, use_avg_for_downsample=use_avg_for_downsample, norm_cfg=norm_cfg, act_cfg=act_cfg, shortcut_norm_cfg=shortcut_norm_cfg,
40 | shortcut_act_cfg=shortcut_act_cfg, pretrained=pretrained, pretrained_model_path=pretrained_model_path, user_defined_block=user_defined_block,
41 | use_inplaceabn_style=use_inplaceabn_style, structure_type=structure_type,
42 | )
--------------------------------------------------------------------------------
/csseg/modules/models/encoders/resnetplop.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ResNetPLOP
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .resnet import ResNet, BasicBlock, Bottleneck
8 |
9 |
10 | '''BasicBlockPLOP'''
11 | class BasicBlockPLOP(BasicBlock):
12 | expansion = 1
13 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, norm_cfg=None, act_cfg=None, shortcut_norm_cfg=None, shortcut_act_cfg=None):
14 | super(BasicBlockPLOP, self).__init__(
15 | inplanes=inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample,
16 | norm_cfg=norm_cfg, act_cfg=act_cfg, shortcut_norm_cfg=shortcut_norm_cfg, shortcut_act_cfg=shortcut_act_cfg,
17 | )
18 | '''forward'''
19 | def forward(self, x):
20 | if isinstance(x, tuple): x = x[0]
21 | identity = x
22 | out = self.conv1(x)
23 | out = self.bn1(out)
24 | out = self.relu(out)
25 | out = self.conv2(out)
26 | out = self.bn2(out)
27 | if self.downsample is not None: identity = self.downsample(x)
28 | out = out + identity
29 | distillation = out
30 | out = self.shortcut_relu(out)
31 | return out, distillation
32 |
33 |
34 | '''BottleneckPLOP'''
35 | class BottleneckPLOP(Bottleneck):
36 | expansion = 4
37 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, norm_cfg=None, act_cfg=None, shortcut_norm_cfg=None, shortcut_act_cfg=None):
38 | super(BottleneckPLOP, self).__init__(
39 | inplanes=inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample,
40 | norm_cfg=norm_cfg, act_cfg=act_cfg, shortcut_norm_cfg=shortcut_norm_cfg, shortcut_act_cfg=shortcut_act_cfg,
41 | )
42 | '''forward'''
43 | def forward(self, x):
44 | if isinstance(x, tuple): x = x[0]
45 | identity = x
46 | out = self.conv1(x)
47 | out = self.bn1(out)
48 | out = self.relu(out)
49 | out = self.conv2(out)
50 | out = self.bn2(out)
51 | out = self.relu(out)
52 | out = self.conv3(out)
53 | out = self.bn3(out)
54 | if self.downsample is not None: identity = self.downsample(x)
55 | out = out + identity
56 | distillation = out
57 | out = self.shortcut_relu(out)
58 | return out, distillation
59 |
60 |
61 | '''ResNetPLOP'''
62 | class ResNetPLOP(ResNet):
63 | def __init__(self, structure_type, in_channels=3, base_channels=64, stem_channels=64, depth=101, outstride=16, contract_dilation=False, deep_stem=False,
64 | out_indices=(0, 1, 2, 3), use_avg_for_downsample=False, norm_cfg={'type': 'InPlaceABNSync', 'activation': 'leaky_relu', 'activation_param': 0.01},
65 | act_cfg=None, shortcut_norm_cfg=None, shortcut_act_cfg=None, pretrained=True, pretrained_model_path=None, user_defined_block=None,
66 | use_inplaceabn_style=True):
67 | if user_defined_block is None:
68 | user_defined_block = BasicBlockPLOP if depth in [18, 34] else BottleneckPLOP
69 | super(ResNetPLOP, self).__init__(
70 | in_channels=in_channels, base_channels=base_channels, stem_channels=stem_channels, depth=depth, outstride=outstride, contract_dilation=contract_dilation,
71 | deep_stem=deep_stem, out_indices=out_indices, use_avg_for_downsample=use_avg_for_downsample, norm_cfg=norm_cfg, act_cfg=act_cfg, shortcut_norm_cfg=shortcut_norm_cfg,
72 | shortcut_act_cfg=shortcut_act_cfg, pretrained=pretrained, pretrained_model_path=pretrained_model_path, user_defined_block=user_defined_block,
73 | use_inplaceabn_style=use_inplaceabn_style, structure_type=structure_type,
74 | )
75 | '''forward'''
76 | def forward(self, x):
77 | outs, distillation_feats = [], []
78 | if self.deep_stem:
79 | x = self.stem(x)
80 | else:
81 | x = self.conv1(x)
82 | x = self.bn1(x)
83 | x = self.relu(x)
84 | x = self.maxpool(x)
85 | x1, distillation1 = self.layer1(x)
86 | x2, distillation2 = self.layer2(x1)
87 | x3, distillation3 = self.layer3(x2)
88 | x4, distillation4 = self.layer4(x3)
89 | for i, feats in enumerate([(x1, distillation1), (x2, distillation2), (x3, distillation3), (x4, distillation4)]):
90 | if i in self.out_indices:
91 | outs.append(feats[0])
92 | distillation_feats.append(feats[1])
93 | return tuple(outs), tuple(distillation_feats)
--------------------------------------------------------------------------------
/csseg/modules/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildLoss, LossBuilder
--------------------------------------------------------------------------------
/csseg/modules/models/losses/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BuildLoss and LossBuilder
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .mseloss import MSELoss
8 | from .klloss import KLDivLoss
9 | from ...utils import BaseModuleBuilder
10 | from .csloss import CosineSimilarityLoss
11 | from .celoss import CrossEntropyLoss, MIBUnbiasedCrossEntropyLoss
12 |
13 |
14 | '''LossBuilder'''
15 | class LossBuilder(BaseModuleBuilder):
16 | REGISTERED_MODULES = {
17 | 'MSELoss': MSELoss, 'KLDivLoss': KLDivLoss, 'CrossEntropyLoss': CrossEntropyLoss, 'CosineSimilarityLoss': CosineSimilarityLoss,
18 | 'MIBUnbiasedCrossEntropyLoss': MIBUnbiasedCrossEntropyLoss,
19 | }
20 | '''build'''
21 | def build(self, loss_cfg):
22 | return super().build(loss_cfg)
23 |
24 |
25 | '''BuildLoss'''
26 | BuildLoss = LossBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/models/losses/celoss.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of CrossEntropyLoss
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | import torch.nn as nn
9 | import torch.nn.functional as F
10 |
11 |
12 | '''CrossEntropyLoss'''
13 | class CrossEntropyLoss(nn.Module):
14 | def __init__(self, reduction='mean', ignore_index=255, scale_factor=1.0, weight=None, label_smoothing=None):
15 | super(CrossEntropyLoss, self).__init__()
16 | self.weight = weight
17 | self.reduction = reduction
18 | self.scale_factor = scale_factor
19 | self.ignore_index = ignore_index
20 | self.label_smoothing = label_smoothing
21 | '''forward'''
22 | def forward(self, prediction, target):
23 | # construct config
24 | ce_args = {
25 | 'weight': self.weight, 'ignore_index': self.ignore_index, 'reduction': self.reduction,
26 | }
27 | if self.label_smoothing is not None:
28 | ce_args.update({'label_smoothing': self.label_smoothing})
29 | # calculate loss according to config
30 | if prediction.dim() == target.dim():
31 | ce_args.pop('ignore_index')
32 | loss = F.cross_entropy(prediction, target, **ce_args)
33 | else:
34 | loss = F.cross_entropy(prediction, target.long(), **ce_args)
35 | loss = loss * self.scale_factor
36 | # return
37 | return loss
38 |
39 |
40 | '''MIBUnbiasedCrossEntropyLoss'''
41 | class MIBUnbiasedCrossEntropyLoss(nn.Module):
42 | def __init__(self, num_history_known_classes=None, reduction='mean', ignore_index=255, scale_factor=1.0):
43 | super(MIBUnbiasedCrossEntropyLoss, self).__init__()
44 | self.reduction = reduction
45 | self.scale_factor = scale_factor
46 | self.ignore_index = ignore_index
47 | self.num_history_known_classes = num_history_known_classes
48 | '''forward'''
49 | def forward(self, prediction, target):
50 | # calculate loss according to config
51 | num_history_known_classes = self.num_history_known_classes
52 | outputs = torch.zeros_like(prediction)
53 | den = torch.logsumexp(prediction, dim=1)
54 | outputs[:, 0] = torch.logsumexp(prediction[:, :num_history_known_classes], dim=1) - den
55 | outputs[:, num_history_known_classes:] = prediction[:, num_history_known_classes:] - den.unsqueeze(dim=1)
56 | labels = target.clone()
57 | labels[target < num_history_known_classes] = 0
58 | loss = F.nll_loss(outputs, labels, ignore_index=self.ignore_index, reduction=self.reduction)
59 | loss = loss * self.scale_factor
60 | # return
61 | return loss
--------------------------------------------------------------------------------
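As the code above shows, `MIBUnbiasedCrossEntropyLoss` folds the probability mass of all history classes into channel 0 (background) and collapses history labels to 0, so the loss never penalizes predicting an old class where the current annotation says background. A hedged toy example, assuming 3 history classes (background included) and 2 newly added classes:

```python
import torch
from csseg.modules.models.losses import BuildLoss

# assumed setup: 3 history classes (incl. background) + 2 newly added classes
criterion = BuildLoss({'type': 'MIBUnbiasedCrossEntropyLoss', 'num_history_known_classes': 3})
logits = torch.randn(1, 5, 4, 4)            # 5 channels = 3 old + 2 new
targets = torch.randint(0, 5, (1, 4, 4))    # pixels labeled 0..2 collapse to 0
loss = criterion(prediction=logits, target=targets)
```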
/csseg/modules/models/losses/csloss.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of CosineSimilarityLoss
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
11 | '''CosineSimilarityLoss'''
12 | class CosineSimilarityLoss(nn.Module):
13 | def __init__(self, scale_factor=1.0, reduction='mean'):
14 | super(CosineSimilarityLoss, self).__init__()
15 | self.reduction = reduction
16 | self.scale_factor = scale_factor
17 | '''forward'''
18 | def forward(self, prediction, target):
19 | # assert
20 | assert prediction.size() == target.size()
21 | # calculate loss according to config
22 | loss = 1 - F.cosine_similarity(prediction, target, dim=1)
23 | if self.reduction == 'mean':
24 | loss = loss.mean()
25 | elif self.reduction == 'sum':
26 | loss = loss.sum()
27 | loss = loss * self.scale_factor
28 | # return
29 | return loss
--------------------------------------------------------------------------------
/csseg/modules/models/losses/klloss.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of KLDivLoss
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
11 | '''KLDivLoss'''
12 | class KLDivLoss(nn.Module):
13 | def __init__(self, log_target=None, reduction='batchmean', temperature=1.0, scale_factor=1.0):
14 | super(KLDivLoss, self).__init__()
15 | self.reduction = reduction
16 | self.log_target = log_target
17 | self.temperature = temperature
18 | self.scale_factor = scale_factor
19 | '''forward'''
20 | def forward(self, prediction, target):
21 | # assert
22 | assert prediction.size() == target.size()
23 | # construct config
24 | kl_args = {
25 | 'reduction': self.reduction,
26 | }
27 | if self.log_target is not None:
28 | kl_args.update({'log_target': self.log_target})
29 | # calculate loss according to config
30 | src_distribution = nn.LogSoftmax(dim=1)(prediction / self.temperature)
31 | tgt_distribution = nn.Softmax(dim=1)(target / self.temperature)
32 | loss = (self.temperature ** 2) * nn.KLDivLoss(**kl_args)(src_distribution, tgt_distribution)
33 | loss = loss * self.scale_factor
34 | # return
35 | return loss
--------------------------------------------------------------------------------
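Both arguments to `KLDivLoss` are raw logits: the module applies the temperature-scaled log-softmax/softmax internally and multiplies by `temperature ** 2`, the usual correction that keeps gradient magnitudes comparable across temperatures. A minimal sketch:

```python
import torch
from csseg.modules.models.losses import BuildLoss

kd_criterion = BuildLoss({'type': 'KLDivLoss', 'temperature': 2.0, 'reduction': 'batchmean'})
student_logits = torch.randn(2, 21, 8, 8)   # current model outputs (raw logits)
teacher_logits = torch.randn(2, 21, 8, 8)   # frozen history model outputs (raw logits)
loss = kd_criterion(prediction=student_logits, target=teacher_logits)
```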
/csseg/modules/models/losses/mseloss.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of MSELoss
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
11 | '''MSELoss'''
12 | class MSELoss(nn.Module):
13 | def __init__(self, scale_factor=1.0, reduction='mean'):
14 | super(MSELoss, self).__init__()
15 | self.reduction = reduction
16 | self.scale_factor = scale_factor
17 | '''forward'''
18 | def forward(self, prediction, target):
19 | # assert
20 | assert prediction.size() == target.size()
21 | # calculate loss according to config
22 | loss = F.mse_loss(prediction, target, reduction=self.reduction)
23 | loss = loss * self.scale_factor
24 | # return
25 | return loss
--------------------------------------------------------------------------------
/csseg/modules/models/optimizers/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildOptimizer, OptimizerBuilder
3 | from .paramsconstructor import ParamsConstructorBuilder, BuildParamsConstructor
--------------------------------------------------------------------------------
/csseg/modules/models/optimizers/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BuildOptimizer and OptimizerBuilder
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import torch.optim as optim
9 | from ...utils import BaseModuleBuilder
10 | from .paramsconstructor import BuildParamsConstructor
11 |
12 |
13 | '''OptimizerBuilder'''
14 | class OptimizerBuilder(BaseModuleBuilder):
15 | REGISTERED_MODULES = {
16 | 'SGD': optim.SGD, 'Adam': optim.Adam, 'AdamW': optim.AdamW, 'Adadelta': optim.Adadelta,
17 | 'Adagrad': optim.Adagrad, 'Rprop': optim.Rprop, 'RMSprop': optim.RMSprop,
18 | }
19 | '''build'''
20 | def build(self, model, optimizer_cfg):
21 | # parse config
22 | optimizer_cfg = copy.deepcopy(optimizer_cfg)
23 | optimizer_type = optimizer_cfg.pop('type')
24 | paramwise_cfg, filter_params = optimizer_cfg.pop('paramwise_cfg', {}), optimizer_cfg.pop('filter_params', False)
25 | # build params_constructor
26 | params_constructor = BuildParamsConstructor(paramwise_cfg=paramwise_cfg, filter_params=filter_params, optimizer_cfg=optimizer_cfg)
27 | # obtain params
28 | optimizer_cfg['params'] = params_constructor(model=model)
29 | # build optimizer
30 | optimizer = self.REGISTERED_MODULES[optimizer_type](**optimizer_cfg)
31 | # return
32 | return optimizer
33 |
34 |
35 | '''BuildOptimizer'''
36 | BuildOptimizer = OptimizerBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/models/optimizers/paramsconstructor.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ParamsConstructors
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | from ...utils import BaseModuleBuilder
9 | from ..encoders import NormalizationBuilder
10 |
11 |
12 | '''DefaultParamsConstructor'''
13 | class DefaultParamsConstructor():
14 | def __init__(self, paramwise_cfg={}, filter_params=False, optimizer_cfg=None):
15 | self.paramwise_cfg = paramwise_cfg
16 | self.filter_params = filter_params
17 | self.optimizer_cfg = optimizer_cfg
18 | '''call'''
19 | def __call__(self, model):
20 | # fetch attributes
21 | paramwise_cfg, filter_params, optimizer_cfg = self.paramwise_cfg, self.filter_params, self.optimizer_cfg
22 | # without specific parameter rules
23 | if not paramwise_cfg:
24 | params = model.parameters() if not filter_params else filter(lambda p: p.requires_grad, model.parameters())
25 | return params
26 | # with specific parameter rules
27 | params = []
28 | self.groupparams(model, paramwise_cfg, filter_params, optimizer_cfg, params)
29 | return params
30 | '''groupparams'''
31 | def groupparams(self, model, paramwise_cfg, filter_params, optimizer_cfg, params, prefix=''):
32 | # fetch base_setting
33 | optimizer_cfg = copy.deepcopy(optimizer_cfg)
34 | if 'base_setting' in optimizer_cfg:
35 | base_setting = optimizer_cfg.pop('base_setting')
36 | else:
37 | base_setting = {
38 | 'bias_lr_multiplier': 1.0, 'bias_wd_multiplier': 1.0, 'norm_wd_multiplier': 1.0,
39 | 'lr_multiplier': 1.0, 'wd_multiplier': 1.0
40 | }
41 | # iter to group current parameters
42 | sorted_rule_keys = sorted(sorted(paramwise_cfg.keys()), key=len, reverse=True)
43 | for name, param in model.named_parameters(recurse=False):
44 | param_group = {'params': [param]}
45 | # --if `parameter requires gradient` is False
46 | if not param.requires_grad:
47 | if not filter_params:
48 | params.append(param_group)
49 | continue
50 | # --find parameters with specific rules
51 | set_base_setting = True
52 | for rule_key in sorted_rule_keys:
53 | if rule_key not in f'{prefix}.{name}': continue
54 | set_base_setting = False
55 | param_group['lr'] = paramwise_cfg[rule_key].get('lr_multiplier', 1.0) * optimizer_cfg['lr']
56 | param_group['name'] = f'{prefix}.{name}' if prefix else name
57 | param_group['rule_key'] = rule_key
58 | if 'weight_decay' in optimizer_cfg:
59 | param_group['weight_decay'] = paramwise_cfg[rule_key].get('wd_multiplier', 1.0) * optimizer_cfg['weight_decay']
60 | for k, v in paramwise_cfg[rule_key].items():
61 | assert k not in param_group, 'construct param_group error'
62 | param_group[k] = v
63 | params.append(param_group)
64 | break
65 | if not set_base_setting: continue
66 | # --set base setting
67 | param_group['lr'] = optimizer_cfg['lr']
68 | param_group['name'] = f'{prefix}.{name}' if prefix else name
69 | param_group['rule_key'] = 'base_setting'
70 | if name == 'bias' and (not NormalizationBuilder.isnorm(model)):
71 | param_group['lr'] = param_group['lr'] * base_setting.get('bias_lr_multiplier', 1.0)
72 | param_group['lr_multiplier'] = base_setting.get('bias_lr_multiplier', 1.0)
73 | else:
74 | param_group['lr'] = param_group['lr'] * base_setting.get('lr_multiplier', 1.0)
75 | param_group['lr_multiplier'] = base_setting.get('lr_multiplier', 1.0)
76 | if 'weight_decay' in optimizer_cfg:
77 | param_group['weight_decay'] = optimizer_cfg['weight_decay']
78 | if NormalizationBuilder.isnorm(model):
79 | param_group['weight_decay'] = param_group['weight_decay'] * base_setting.get('norm_wd_multiplier', 1.0)
80 | elif name == 'bias':
81 | param_group['weight_decay'] = param_group['weight_decay'] * base_setting.get('bias_wd_multiplier', 1.0)
82 | else:
83 | param_group['weight_decay'] = param_group['weight_decay'] * base_setting.get('wd_multiplier', 1.0)
84 | params.append(param_group)
85 | # iter to group children parameters
86 | for child_name, child_model in model.named_children():
87 | if prefix:
88 | child_prefix = f'{prefix}.{child_name}'
89 | else:
90 | child_prefix = child_name
91 | self.groupparams(child_model, paramwise_cfg, filter_params, optimizer_cfg, params, prefix=child_prefix)
92 | '''isin'''
93 | def isin(self, param_group, param_group_list):
94 | param = set(param_group['params'])
95 | param_set = set()
96 | for group in param_group_list:
97 | param_set.update(set(group['params']))
98 | return not param.isdisjoint(param_set)
99 |
100 |
101 | '''ParamsConstructorBuilder'''
102 | class ParamsConstructorBuilder(BaseModuleBuilder):
103 | REGISTERED_MODULES = {
104 | 'DefaultParamsConstructor': DefaultParamsConstructor,
105 | }
106 | '''build'''
107 | def build(self, paramwise_cfg={}, filter_params=False, optimizer_cfg={}):
108 | constructor_type = paramwise_cfg.pop('type', 'DefaultParamsConstructor')
109 | module_cfg = {
110 | 'paramwise_cfg': paramwise_cfg, 'filter_params': filter_params, 'optimizer_cfg': optimizer_cfg, 'type': constructor_type
111 | }
112 | return super().build(module_cfg)
113 |
114 |
115 | '''BuildParamsConstructor'''
116 | BuildParamsConstructor = ParamsConstructorBuilder().build
--------------------------------------------------------------------------------
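A hedged sketch of how the pieces above fit together: rule keys in `paramwise_cfg` are matched as substrings of the fully qualified parameter name, so every parameter under a module named `decoder` receives a 10x learning rate below. The two-layer model is a stand-in for illustration, not a segmentor from this repository.

```python
import torch.nn as nn
from csseg.modules.models.optimizers import BuildOptimizer

model = nn.Sequential()
model.add_module('encoder', nn.Conv2d(3, 8, kernel_size=3))
model.add_module('decoder', nn.Conv2d(8, 2, kernel_size=1))

optimizer_cfg = {
    'type': 'SGD', 'lr': 0.01, 'momentum': 0.9, 'weight_decay': 1e-4,
    'filter_params': True,
    # parameters whose qualified name contains 'decoder' get lr = 10 * 0.01
    'paramwise_cfg': {'decoder': {'lr_multiplier': 10.0, 'wd_multiplier': 1.0}},
}
optimizer = BuildOptimizer(model, optimizer_cfg)
```

The `lr_multiplier` stored in each param group is later reused by the scheduler (see `PolyScheduler.updatelr`), so per-group ratios survive learning-rate updates.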
/csseg/modules/models/schedulers/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildScheduler, SchedulerBuilder
--------------------------------------------------------------------------------
/csseg/modules/models/schedulers/base.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BaseScheduler
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from torch.nn.utils import clip_grad
8 |
9 |
10 | '''BaseScheduler'''
11 | class BaseScheduler():
12 | def __init__(self, optimizer=None, lr=0.01, min_lr=None, warmup_cfg=None, clipgrad_cfg=None, max_epochs=-1, iters_per_epoch=-1, paramwise_cfg=dict()):
13 | # set attrs
14 | self.lr = lr
15 | self.min_lr = min_lr if min_lr is not None else lr * 0.01
16 | self.optimizer = optimizer
17 | self.max_epochs = max_epochs
18 | self.warmup_cfg = warmup_cfg
19 | self.clipgrad_cfg = clipgrad_cfg
20 | self.paramwise_cfg = paramwise_cfg
21 | self.max_iters = max_epochs * iters_per_epoch
22 | # initialize some variables
23 | self.cur_epoch = 0
24 | self.cur_iter = 0
25 | '''step'''
26 | def step(self, grad_scaler=None):
27 | if self.clipgrad_cfg is not None:
28 | for param_group in self.optimizer.param_groups:
29 | self.clipgradients(params=param_group['params'], **self.clipgrad_cfg)
30 | if grad_scaler is None:
31 | self.optimizer.step()
32 | else:
33 | grad_scaler.step(self.optimizer)
34 | grad_scaler.update()
35 | self.cur_iter += 1
36 | '''updatelr'''
37 | def updatelr(self):
38 |         raise NotImplementedError('updatelr should be implemented in subclasses')
39 | '''zerograd'''
40 | def zerograd(self):
41 | self.optimizer.zero_grad()
42 | '''getwarmuplr'''
43 | def getwarmuplr(self, cur_iter, warmup_cfg, regular_lr):
44 | warmup_type, warmup_ratio, warmup_iters = warmup_cfg['type'], warmup_cfg['ratio'], warmup_cfg['iters']
45 | if warmup_type == 'constant':
46 | warmup_lr = regular_lr * warmup_ratio
47 | elif warmup_type == 'linear':
48 | k = (1 - cur_iter / warmup_iters) * (1 - warmup_ratio)
49 | warmup_lr = (1 - k) * regular_lr
50 | elif warmup_type == 'exp':
51 | k = warmup_ratio**(1 - cur_iter / warmup_iters)
52 | warmup_lr = k * regular_lr
53 | return warmup_lr
54 | '''clipgradients'''
55 | def clipgradients(self, params, max_norm=35, norm_type=2):
56 | params = list(filter(lambda p: p.requires_grad and p.grad is not None, params))
57 | if len(params) > 0:
58 | clip_grad.clip_grad_norm_(params, max_norm=max_norm, norm_type=norm_type)
59 | '''state'''
60 | def state(self):
61 | state_dict = {
62 | 'cur_epoch': self.cur_epoch, 'cur_iter': self.cur_iter,
63 | 'optimizer': self.optimizer.state_dict()
64 | }
65 | return state_dict
66 | '''setstate'''
67 | def setstate(self, state_dict):
68 | self.cur_epoch = state_dict['cur_epoch']
69 | self.cur_iter = state_dict['cur_iter']
--------------------------------------------------------------------------------
/csseg/modules/models/schedulers/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of SchedulerBuilder and BuildScheduler
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | from .poly import PolyScheduler
9 | from ...utils import BaseModuleBuilder
10 |
11 |
12 | '''SchedulerBuilder'''
13 | class SchedulerBuilder(BaseModuleBuilder):
14 | REGISTERED_MODULES = {
15 | 'PolyScheduler': PolyScheduler,
16 | }
17 | '''build'''
18 | def build(self, optimizer, scheduler_cfg):
19 | scheduler_cfg = copy.deepcopy(scheduler_cfg)
20 | scheduler_type = scheduler_cfg.pop('type')
21 | scheduler_cfg.pop('optimizer_cfg')
22 | scheduler = self.REGISTERED_MODULES[scheduler_type](optimizer=optimizer, **scheduler_cfg)
23 | return scheduler
24 |
25 |
26 | '''BuildScheduler'''
27 | BuildScheduler = SchedulerBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/models/schedulers/poly.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of PolyScheduler
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .base import BaseScheduler
8 |
9 |
10 | '''PolyScheduler'''
11 | class PolyScheduler(BaseScheduler):
12 | def __init__(self, power=0.9, optimizer=None, lr=0.01, min_lr=None, warmup_cfg=None, clipgrad_cfg=None, max_epochs=-1, iters_per_epoch=-1, paramwise_cfg=dict()):
13 | super(PolyScheduler, self).__init__(
14 | optimizer=optimizer, lr=lr, min_lr=min_lr, warmup_cfg=warmup_cfg, clipgrad_cfg=clipgrad_cfg,
15 | max_epochs=max_epochs, iters_per_epoch=iters_per_epoch, paramwise_cfg=paramwise_cfg,
16 | )
17 | self.power = power
18 | '''updatelr'''
19 | def updatelr(self):
20 | # obtain variables
21 | base_lr, min_lr, cur_iter, max_iters, power = self.lr, self.min_lr, self.cur_iter, self.max_iters, self.power
22 | optimizer, warmup_cfg, paramwise_cfg = self.optimizer, self.warmup_cfg, self.paramwise_cfg
23 | # calculate target learning rate
24 | coeff = (1 - cur_iter / max_iters) ** power
25 | target_lr = coeff * (base_lr - min_lr) + min_lr
26 | if (warmup_cfg is not None) and (warmup_cfg['iters'] >= cur_iter):
27 | target_lr = self.getwarmuplr(cur_iter, warmup_cfg, target_lr)
28 | # update learning rate
29 | for param_group in optimizer.param_groups:
30 | if paramwise_cfg and paramwise_cfg.get('type', 'DefaultParamsConstructor') == 'DefaultParamsConstructor':
31 | param_group['lr'] = param_group.get('lr_multiplier', 1.0) * target_lr
32 | else:
33 | param_group['lr'] = target_lr
34 | # return
35 | return target_lr
--------------------------------------------------------------------------------
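A standalone sketch of the decay rule implemented by `updatelr` above (with the default `min_lr = 0.01 * lr` from `BaseScheduler`), useful for checking schedules offline:

```python
def poly_lr(cur_iter, max_iters, base_lr=0.01, min_lr=0.0001, power=0.9):
    '''lr(t) = (1 - t / T) ** power * (base_lr - min_lr) + min_lr'''
    coeff = (1 - cur_iter / max_iters) ** power
    return coeff * (base_lr - min_lr) + min_lr

print(poly_lr(0, 1000))     # 0.01, starts at base_lr
print(poly_lr(500, 1000))   # ~0.0054, smooth polynomial decay
print(poly_lr(1000, 1000))  # 0.0001, ends at min_lr
```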
/csseg/modules/models/segmentors/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildSegmentor, SegmentorBuilder
--------------------------------------------------------------------------------
/csseg/modules/models/segmentors/base.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BaseSegmentor
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | import numbers
9 | import collections
10 | import torch.nn as nn
11 | import torch.nn.functional as F
12 | import torch.distributed as dist
13 | from ..losses import BuildLoss
14 | from ..encoders import BuildEncoder
15 | from ..decoders import BuildDecoder
16 |
17 |
18 | '''BaseSegmentor'''
19 | class BaseSegmentor(nn.Module):
20 | def __init__(self, selected_indices=(0, 1, 2, 3), num_known_classes_list=[], align_corners=False, encoder_cfg={}, decoder_cfg={}):
21 | super(BaseSegmentor, self).__init__()
22 | # assert
23 | assert isinstance(selected_indices, (numbers.Number, collections.abc.Sequence))
24 | # set attributes
25 | self.align_corners = align_corners
26 | self.selected_indices = selected_indices
27 | self.num_known_classes_list = num_known_classes_list
28 | # build encoder and decoder
29 | self.encoder = BuildEncoder(encoder_cfg)
30 | self.decoder = BuildDecoder(decoder_cfg)
31 | # build classifier
32 | self.convs_cls = nn.ModuleList([
33 | nn.Conv2d(self.decoder.out_channels, num_classes, kernel_size=1, stride=1, padding=0) for num_classes in num_known_classes_list
34 | ])
35 | '''forward'''
36 | def forward(self, x):
37 | # feed to encoder
38 | encoder_outputs = self.encoder(x)
39 | # select encoder outputs
40 | selected_feats = self.transforminputs(encoder_outputs, self.selected_indices)
41 | # feed to decoder
42 | decoder_outputs = self.decoder(selected_feats)
43 | # feed to classifier
44 | seg_logits = [conv(decoder_outputs) for conv in self.convs_cls]
45 | seg_logits = torch.cat(seg_logits, dim=1)
46 | # construct outputs
47 | outputs = {'seg_logits': seg_logits}
48 | # return
49 | return outputs
50 | '''calculatesegloss'''
51 | def calculatesegloss(self, seg_logits, seg_targets, losses_cfg):
52 | loss = 0
53 | for loss_type, loss_cfg in losses_cfg.items():
54 | loss_cfg = loss_cfg.copy()
55 | loss_cfg['type'] = loss_type
56 | loss += BuildLoss(loss_cfg)(prediction=seg_logits, target=seg_targets)
57 | return loss.mean()
58 | '''calculateseglosses'''
59 | def calculateseglosses(self, seg_logits, seg_targets, losses_cfgs):
60 | # interpolate seg_logits
61 | if seg_logits.shape[-2:] != seg_targets.shape[-2:]:
62 | seg_logits = F.interpolate(seg_logits, size=seg_targets.shape[-2:], mode='bilinear', align_corners=self.align_corners)
63 | # iter to calculate losses
64 | losses_log_dict, loss_total = {}, 0
65 | for losses_name, losses_cfg in losses_cfgs.items():
66 | losses_log_dict[losses_name] = self.calculatesegloss(
67 | seg_logits=seg_logits, seg_targets=seg_targets, losses_cfg=losses_cfg
68 | )
69 | loss_total += losses_log_dict[losses_name]
70 | losses_log_dict.update({'loss_total': loss_total})
71 | # syn losses_log_dict
72 | for key, value in losses_log_dict.items():
73 | value = value.data.clone()
74 | dist.all_reduce(value.div_(dist.get_world_size()))
75 | losses_log_dict[key] = value.item()
76 | # return
77 | return loss_total, losses_log_dict
78 | '''transforminputs'''
79 | def transforminputs(self, inputs, selected_indices):
80 | if isinstance(selected_indices, numbers.Number):
81 | selected_indices = [selected_indices]
82 | outputs = [inputs[idx] for idx in selected_indices]
83 | return outputs if len(selected_indices) > 1 else outputs[0]
--------------------------------------------------------------------------------
/csseg/modules/models/segmentors/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BuildSegmentor and SegmentorBuilder
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .ilt import ILTSegmentor
8 | from .ucd import UCDSegmentor
9 | from .mib import MIBSegmentor
10 | from .base import BaseSegmentor
11 | from .plop import PLOPSegmentor
12 | from ...utils import BaseModuleBuilder
13 |
14 |
15 | '''SegmentorBuilder'''
16 | class SegmentorBuilder(BaseModuleBuilder):
17 | REGISTERED_MODULES = {
18 | 'MIBSegmentor': MIBSegmentor, 'ILTSegmentor': ILTSegmentor, 'BaseSegmentor': BaseSegmentor, 'PLOPSegmentor': PLOPSegmentor, 'UCDSegmentor': UCDSegmentor,
19 | }
20 | '''build'''
21 | def build(self, segmentor_cfg):
22 | return super().build(segmentor_cfg)
23 |
24 |
25 | '''BuildSegmentor'''
26 | BuildSegmentor = SegmentorBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/models/segmentors/ilt.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ILTSegmentor
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | from .base import BaseSegmentor
9 |
10 |
11 | '''ILTSegmentor'''
12 | class ILTSegmentor(BaseSegmentor):
13 | def __init__(self, selected_indices=(0,), num_known_classes_list=[], align_corners=False, encoder_cfg={}, decoder_cfg={}):
14 | super(ILTSegmentor, self).__init__(
15 | selected_indices=selected_indices, num_known_classes_list=num_known_classes_list,
16 | align_corners=align_corners, encoder_cfg=encoder_cfg, decoder_cfg=decoder_cfg,
17 | )
18 | '''forward'''
19 | def forward(self, x):
20 | # feed to encoder
21 | encoder_outputs = self.encoder(x)
22 | # select encoder outputs
23 | selected_feats = self.transforminputs(encoder_outputs, self.selected_indices)
24 | # feed to decoder
25 | decoder_outputs = self.decoder(selected_feats)
26 | # feed to classifier
27 | seg_logits = [conv(decoder_outputs) for conv in self.convs_cls]
28 | seg_logits = torch.cat(seg_logits, dim=1)
29 | # construct outputs
30 | outputs = {'seg_logits': seg_logits, 'distillation_feats': selected_feats}
31 | # return
32 | return outputs
--------------------------------------------------------------------------------
/csseg/modules/models/segmentors/mib.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of MIBSegmentor
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | from .base import BaseSegmentor
9 |
10 |
11 | '''MIBSegmentor'''
12 | class MIBSegmentor(BaseSegmentor):
13 | def __init__(self, selected_indices=(0,), num_known_classes_list=[], align_corners=False, encoder_cfg={}, decoder_cfg={}):
14 | super(MIBSegmentor, self).__init__(
15 | selected_indices=selected_indices, num_known_classes_list=num_known_classes_list,
16 | align_corners=align_corners, encoder_cfg=encoder_cfg, decoder_cfg=decoder_cfg,
17 | )
18 | '''initaddedclassifier'''
19 | def initaddedclassifier(self, device=None):
20 | conv_cls = self.convs_cls[-1]
21 | imprinting_w = self.convs_cls[0].weight[0]
22 | bkg_bias = self.convs_cls[0].bias[0]
23 | bias_diff = torch.log(torch.FloatTensor([self.num_known_classes_list[-1] + 1])).to(device)
24 | new_bias = (bkg_bias - bias_diff)
25 | conv_cls.weight.data.copy_(imprinting_w)
26 | conv_cls.bias.data.copy_(new_bias)
27 | self.convs_cls[0].bias[0].data.copy_(new_bias.squeeze(0))
--------------------------------------------------------------------------------
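A hedged reading of `initaddedclassifier`: this is the MiB-style weight-imprinting initialization, where each new classifier copies the background weights, and both the new biases and the background bias are shifted down by log(|C_new| + 1), so that at task start the old background probability mass is split evenly between the background and the |C_new| new classes:

```latex
w_c = w_{\mathrm{bkg}} \quad \forall c \in C_{\mathrm{new}}, \qquad
b_c = b_{\mathrm{bkg}} - \log\bigl(\lvert C_{\mathrm{new}} \rvert + 1\bigr)
\quad \forall c \in C_{\mathrm{new}} \cup \{\mathrm{bkg}\}
```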
/csseg/modules/models/segmentors/plop.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of PLOPSegmentor
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | from .mib import MIBSegmentor
9 |
10 |
11 | '''PLOPSegmentor'''
12 | class PLOPSegmentor(MIBSegmentor):
13 | def __init__(self, selected_indices=(0, 1, 2, 3), num_known_classes_list=[], align_corners=False, encoder_cfg={}, decoder_cfg={}):
14 | super(PLOPSegmentor, self).__init__(
15 | selected_indices=selected_indices, num_known_classes_list=num_known_classes_list,
16 | align_corners=align_corners, encoder_cfg=encoder_cfg, decoder_cfg=decoder_cfg,
17 | )
18 | '''forward'''
19 | def forward(self, x):
20 | # feed to encoder
21 | encoder_outputs, distillation_feats = self.encoder(x)
22 | # select encoder outputs
23 | selected_feats = self.transforminputs(encoder_outputs, self.selected_indices)
24 | # feed to decoder
25 | decoder_outputs = self.decoder(selected_feats)
26 | # feed to classifier
27 | seg_logits = [conv(decoder_outputs) for conv in self.convs_cls]
28 | seg_logits = torch.cat(seg_logits, dim=1)
29 | # construct outputs
30 | outputs = {'seg_logits': seg_logits, 'distillation_feats': list(distillation_feats) + [decoder_outputs]}
31 | # return
32 | return outputs
--------------------------------------------------------------------------------
/csseg/modules/models/segmentors/ucd.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of UCDSegmentor
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | from .mib import MIBSegmentor
9 |
10 |
11 | '''UCDSegmentor'''
12 | class UCDSegmentor(MIBSegmentor):
13 | def __init__(self, selected_indices=(0,), num_known_classes_list=[], align_corners=False, encoder_cfg={}, decoder_cfg={}):
14 | super(UCDSegmentor, self).__init__(
15 | selected_indices=selected_indices, num_known_classes_list=num_known_classes_list,
16 | align_corners=align_corners, encoder_cfg=encoder_cfg, decoder_cfg=decoder_cfg,
17 | )
18 | '''forward'''
19 | def forward(self, x, **kwargs):
20 | # feed to encoder
21 | encoder_outputs = self.encoder(x)
22 | # select encoder outputs
23 | selected_feats = self.transforminputs(encoder_outputs, self.selected_indices)
24 | # feed to decoder
25 | decoder_outputs = self.decoder(selected_feats)
26 | # feed to classifier
27 | seg_logits = [conv(decoder_outputs) for conv in self.convs_cls]
28 | seg_logits = torch.cat(seg_logits, dim=1)
29 | # construct outputs
30 | if kwargs.get('task_id', 0) > 0:
31 | outputs = {'seg_logits': seg_logits, 'decoder_outputs': self.attention(decoder_outputs)}
32 | else:
33 | outputs = {'seg_logits': seg_logits}
34 | # return
35 | return outputs
36 | '''attention'''
37 | def attention(self, x):
38 | attn = torch.sum(x ** 2, dim=1)
39 | for i in range(attn.shape[0]):
40 | attn[i] = attn[i] / torch.norm(attn[i])
41 | attn = torch.unsqueeze(attn, 1)
42 | x = attn.detach() * x
43 | return x
--------------------------------------------------------------------------------
/csseg/modules/parallel/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .model import BuildDistributedModel
3 | from .dataloader import BuildDistributedDataloader
--------------------------------------------------------------------------------
/csseg/modules/parallel/dataloader.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BuildDistributedDataloader
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import torch
9 |
10 |
11 | '''BuildDistributedDataloader'''
12 | def BuildDistributedDataloader(dataset, dataloader_cfg):
13 | dataloader_cfg = copy.deepcopy(dataloader_cfg)
14 | # parse
15 | dataloader_cfg = dataloader_cfg[dataset.mode.lower()]
16 | shuffle = dataloader_cfg.pop('shuffle')
17 | dataloader_cfg['shuffle'] = False
18 | dataloader_cfg['batch_size'] = dataloader_cfg.pop('batch_size_per_gpu')
19 | dataloader_cfg['num_workers'] = dataloader_cfg.pop('num_workers_per_gpu')
20 | # sampler
21 | sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=shuffle)
22 | dataloader_cfg['sampler'] = sampler
23 | # dataloader
24 | dataloader = torch.utils.data.DataLoader(dataset, **dataloader_cfg)
25 | # return
26 | return dataloader
--------------------------------------------------------------------------------
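For reference, a hedged sketch of the per-mode dataloader config the function above expects. The key names come directly from the pops in the code (`shuffle`, `batch_size_per_gpu`, `num_workers_per_gpu`); the mode keys (`train`/`test`) are assumed to match `dataset.mode.lower()`; any remaining keys are passed straight to `torch.utils.data.DataLoader`.

```python
# assumed shape of dataloader_cfg consumed by BuildDistributedDataloader
dataloader_cfg = {
    'train': {'batch_size_per_gpu': 4, 'num_workers_per_gpu': 2, 'shuffle': True, 'pin_memory': True, 'drop_last': True},
    'test': {'batch_size_per_gpu': 1, 'num_workers_per_gpu': 2, 'shuffle': False, 'pin_memory': True, 'drop_last': False},
}
```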
/csseg/modules/parallel/model.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BuildDistributedModel
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch.nn as nn
8 |
9 |
10 | '''BuildDistributedModel'''
11 | def BuildDistributedModel(model, model_cfg):
12 | return nn.parallel.DistributedDataParallel(model, **model_cfg)
--------------------------------------------------------------------------------
/csseg/modules/runners/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .builder import BuildRunner, RunnerBuilder
--------------------------------------------------------------------------------
/csseg/modules/runners/builder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BuildRunner and RunnerBuilder
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from .ilt import ILTRunner
8 | from .mib import MIBRunner
9 | from .caf import CAFRunner
10 | from .sdr import SDRRunner
11 | from .plop import PLOPRunner
12 | from .rcil import RCILRunner
13 | from .ucd import UCDMIBRunner
14 | from .reminder import REMINDERRunner
15 | from ..utils import BaseModuleBuilder
16 |
17 |
18 | '''RunnerBuilder'''
19 | class RunnerBuilder(BaseModuleBuilder):
20 | REGISTERED_MODULES = {
21 | 'UCDMIBRunner': UCDMIBRunner, 'ILTRunner': ILTRunner, 'MIBRunner': MIBRunner, 'PLOPRunner': PLOPRunner,
22 | 'RCILRunner': RCILRunner, 'REMINDERRunner': REMINDERRunner, 'CAFRunner': CAFRunner, 'SDRRunner': SDRRunner,
23 | }
24 | '''build'''
25 | def build(self, mode, cmd_args, runner_cfg):
26 | module_cfg = {
27 | 'mode': mode, 'cmd_args': cmd_args, 'runner_cfg': runner_cfg, 'type': runner_cfg['type'],
28 | }
29 | return super().build(module_cfg)
30 |
31 |
32 | '''BuildRunner'''
33 | BuildRunner = RunnerBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/runners/caf.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of "Continual attentive fusion for incremental learning in semantic segmentation"
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import torch
9 | import functools
10 | import torch.nn.functional as F
11 | import torch.distributed as dist
12 | from .base import BaseRunner
13 |
14 |
15 | '''CAFRunner'''
16 | class CAFRunner(BaseRunner):
17 | def __init__(self, mode, cmd_args, runner_cfg):
18 | super(CAFRunner, self).__init__(
19 | mode=mode, cmd_args=cmd_args, runner_cfg=runner_cfg
20 | )
--------------------------------------------------------------------------------
/csseg/modules/runners/ewf.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/c12517594ee91e22ba99974e30f066c0b081728f/csseg/modules/runners/ewf.py
--------------------------------------------------------------------------------
/csseg/modules/runners/ilt.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of "Incremental learning techniques for semantic segmentation"
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import torch
9 | import torch.nn.functional as F
10 | import torch.distributed as dist
11 | from .base import BaseRunner
12 | from ..models import BuildLoss
13 |
14 |
15 | '''ILTRunner'''
16 | class ILTRunner(BaseRunner):
17 | def __init__(self, mode, cmd_args, runner_cfg):
18 | super(ILTRunner, self).__init__(
19 | mode=mode, cmd_args=cmd_args, runner_cfg=runner_cfg
20 | )
21 | '''call'''
22 | def __call__(self, images, seg_targets):
23 | # initialize
24 | losses_cfgs = copy.deepcopy(self.losses_cfgs)
25 | # feed to history_segmentor
26 | if self.history_segmentor is not None:
27 | with torch.no_grad():
28 | history_outputs = self.history_segmentor(images)
29 | # feed to segmentor
30 | outputs = self.segmentor(images)
31 | # calculate segmentation losses
32 | seg_losses_cfgs = copy.deepcopy(losses_cfgs['segmentation_cl']) if self.history_segmentor is not None else copy.deepcopy(losses_cfgs['segmentation_init'])
33 | seg_total_loss, seg_losses_log_dict = self.segmentor.module.calculateseglosses(
34 | seg_logits=outputs['seg_logits'], seg_targets=seg_targets, losses_cfgs=seg_losses_cfgs,
35 | )
36 | # calculate distillation losses
37 | kd_total_loss, kd_losses_log_dict = 0, {}
38 | if self.history_segmentor is not None:
39 | kd_loss_logits, kd_losses_log_dict = self.featuresdistillation(
40 | history_distillation_feats=F.interpolate(history_outputs['seg_logits'], size=images.shape[2:], mode="bilinear", align_corners=self.segmentor.module.align_corners),
41 | distillation_feats=F.interpolate(outputs['seg_logits'], size=images.shape[2:], mode="bilinear", align_corners=self.segmentor.module.align_corners),
42 | **losses_cfgs['distillation_logits']
43 | )
44 | kd_loss_feats = BuildLoss(losses_cfgs['distillation_features'])(prediction=outputs['distillation_feats'], target=history_outputs['distillation_feats'])
45 | value = kd_loss_feats.data.clone()
46 | dist.all_reduce(value.div_(dist.get_world_size()))
47 | kd_losses_log_dict['kd_loss_feats'] = value.item()
48 | kd_total_loss = kd_loss_logits + kd_loss_feats
49 | # deal with losses
50 | loss_total = kd_total_loss + seg_total_loss
51 | seg_losses_log_dict.update(kd_losses_log_dict)
52 | seg_losses_log_dict.pop('loss_total')
53 | seg_losses_log_dict['loss_total'] = loss_total.item()
54 | # return
55 | return loss_total, seg_losses_log_dict
56 | '''featuresdistillation'''
57 | @staticmethod
58 | def featuresdistillation(history_distillation_feats, distillation_feats, reduction='mean', alpha=1., scale_factor=100):
59 | distillation_feats = distillation_feats.narrow(1, 0, history_distillation_feats.shape[1])
60 | outputs = torch.log_softmax(distillation_feats, dim=1)
61 | labels = torch.softmax(history_distillation_feats * alpha, dim=1)
62 | loss = (outputs * labels).mean(dim=1)
63 | if reduction == 'mean':
64 | loss = -torch.mean(loss)
65 | elif reduction == 'sum':
66 | loss = -torch.sum(loss)
67 | else:
68 | loss = -loss
69 | loss = loss * scale_factor
70 | value = loss.data.clone()
71 | dist.all_reduce(value.div_(dist.get_world_size()))
72 | kd_losses_log_dict = {'kd_loss_logits': value.item()}
73 | return loss, kd_losses_log_dict
--------------------------------------------------------------------------------
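In math form (a hedged transcription of `featuresdistillation` above): with q the softmax of the alpha-scaled history logits and p the softmax of the current logits restricted to the C_old history channels, the loss is a soft cross-entropy averaged, rather than summed, over channels, then multiplied by `scale_factor`:

```latex
\ell_{\mathrm{kd}} = -\frac{1}{C_{\mathrm{old}}} \sum_{c=1}^{C_{\mathrm{old}}} q_c \log p_c,
\qquad
q = \operatorname{softmax}\bigl(\alpha\, z^{\mathrm{old}}\bigr), \quad
p = \operatorname{softmax}\bigl(z^{\mathrm{new}}_{1:C_{\mathrm{old}}}\bigr)
```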
/csseg/modules/runners/mib.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of "Modeling the Background for Incremental Learning in Semantic Segmentation"
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import torch
9 | import functools
10 | import torch.nn.functional as F
11 | import torch.distributed as dist
12 | from .base import BaseRunner
13 |
14 |
15 | '''MIBRunner'''
16 | class MIBRunner(BaseRunner):
17 | def __init__(self, mode, cmd_args, runner_cfg):
18 | super(MIBRunner, self).__init__(
19 | mode=mode, cmd_args=cmd_args, runner_cfg=runner_cfg
20 | )
21 | '''call'''
22 | def __call__(self, images, seg_targets):
23 | # initialize
24 | losses_cfgs = copy.deepcopy(self.losses_cfgs)
25 | # feed to history_segmentor
26 | if self.history_segmentor is not None:
27 | with torch.no_grad():
28 | history_outputs = self.history_segmentor(images)
29 | # feed to segmentor
30 | outputs = self.segmentor(images)
31 | # calculate segmentation losses
32 | seg_losses_cfgs = copy.deepcopy(losses_cfgs['segmentation_cl']) if self.history_segmentor is not None else copy.deepcopy(losses_cfgs['segmentation_init'])
33 | if self.history_segmentor is not None:
34 | num_history_known_classes = functools.reduce(lambda a, b: a + b, self.runner_cfg['segmentor_cfg']['num_known_classes_list'][:-1])
35 | for _, seg_losses_cfg in seg_losses_cfgs.items():
36 | for loss_type, loss_cfg in seg_losses_cfg.items():
37 | loss_cfg.update({'num_history_known_classes': num_history_known_classes})
38 | seg_total_loss, seg_losses_log_dict = self.segmentor.module.calculateseglosses(
39 | seg_logits=outputs['seg_logits'], seg_targets=seg_targets, losses_cfgs=seg_losses_cfgs,
40 | )
41 | # calculate distillation losses
42 | kd_total_loss, kd_losses_log_dict = 0, {}
43 | if self.history_segmentor is not None:
44 | kd_total_loss, kd_losses_log_dict = self.featuresdistillation(
45 | history_distillation_feats=F.interpolate(history_outputs['seg_logits'], size=images.shape[2:], mode="bilinear", align_corners=self.segmentor.module.align_corners),
46 | distillation_feats=F.interpolate(outputs['seg_logits'], size=images.shape[2:], mode="bilinear", align_corners=self.segmentor.module.align_corners),
47 | **losses_cfgs['distillation']
48 | )
49 | # deal with losses
50 | loss_total = kd_total_loss + seg_total_loss
51 | seg_losses_log_dict.update(kd_losses_log_dict)
52 | seg_losses_log_dict.pop('loss_total')
53 | seg_losses_log_dict['loss_total'] = loss_total.item()
54 | # return
55 | return loss_total, seg_losses_log_dict
56 | '''featuresdistillation'''
57 | @staticmethod
58 | def featuresdistillation(history_distillation_feats, distillation_feats, reduction='mean', alpha=1., scale_factor=10):
59 | new_cl = distillation_feats.shape[1] - history_distillation_feats.shape[1]
60 | history_distillation_feats = history_distillation_feats * alpha
61 | new_bkg_idx = torch.tensor([0] + [x for x in range(history_distillation_feats.shape[1], distillation_feats.shape[1])]).to(distillation_feats.device)
62 | den = torch.logsumexp(distillation_feats, dim=1)
63 |         outputs_no_bkg = distillation_feats[:, 1:-new_cl] - den.unsqueeze(dim=1)
64 |         outputs_bkg = torch.logsumexp(torch.index_select(distillation_feats, index=new_bkg_idx, dim=1), dim=1) - den
65 |         labels = torch.softmax(history_distillation_feats, dim=1)
66 |         loss = (labels[:, 0] * outputs_bkg + (labels[:, 1:] * outputs_no_bkg).sum(dim=1)) / history_distillation_feats.shape[1]
67 | if reduction == 'mean':
68 | loss = -torch.mean(loss)
69 | elif reduction == 'sum':
70 | loss = -torch.sum(loss)
71 | else:
72 | loss = -loss
73 | loss = loss * scale_factor
74 | value = loss.data.clone()
75 | dist.all_reduce(value.div_(dist.get_world_size()))
76 | kd_losses_log_dict = {'loss_kd': value.item()}
77 | return loss, kd_losses_log_dict
--------------------------------------------------------------------------------
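The MiB variant differs from the ILT one above by folding the new-class probability mass into the background term (a hedged transcription of `featuresdistillation`): with q the softmax of the alpha-scaled history logits over the C_old channels and p the softmax of the current logits over all channels,

```latex
\ell_{\mathrm{kd}} = -\frac{1}{C_{\mathrm{old}}}
\left[\, q_0 \log\Bigl(p_0 + \sum_{c \in C_{\mathrm{new}}} p_c\Bigr)
+ \sum_{c=1}^{C_{\mathrm{old}} - 1} q_c \log p_c \right]
```

per pixel, followed by the chosen reduction over pixels and multiplication by `scale_factor`.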
/csseg/modules/runners/sdr.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of "Continual semantic segmentation via repulsion-attraction of sparse and disentangled latent representations"
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import torch
9 | import functools
10 | import torch.nn.functional as F
11 | import torch.distributed as dist
12 | from .base import BaseRunner
13 |
14 |
15 | '''SDRRunner'''
16 | class SDRRunner(BaseRunner):
17 | def __init__(self, mode, cmd_args, runner_cfg):
18 | super(SDRRunner, self).__init__(
19 | mode=mode, cmd_args=cmd_args, runner_cfg=runner_cfg
20 | )
--------------------------------------------------------------------------------
/csseg/modules/utils/__init__.py:
--------------------------------------------------------------------------------
1 | '''initialize'''
2 | from .misc import setrandomseed
3 | from .env import EnvironmentCollector
4 | from .configparser import ConfigParser
5 | from .modulebuilder import BaseModuleBuilder
6 | from .logger import LoggerHandleBuilder, BuildLoggerHandle
7 | from .io import saveckpts, loadckpts, touchdir, saveaspickle, loadpicklefile, symlink, loadpretrainedweights
--------------------------------------------------------------------------------
/csseg/modules/utils/configparser.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of ConfigParser
4 | Author:
5 |     Zhenchao Jin
6 | '''
7 | import os
8 | import sys
9 | import importlib
10 | import importlib.util
11 | import dill as pickle
12 | from .io import touchdir
13 |
14 |
15 | '''ConfigParser'''
16 | class ConfigParser():
17 | def __init__(self, library_name='csseg'):
18 | self.library_name = library_name
19 | '''parsefrompy'''
20 | def parsefrompy(self, cfg_file_path):
21 | # assert
22 | assert cfg_file_path.endswith('.py')
23 | # obtain module path
24 | module_path = cfg_file_path[len(os.getcwd()):].replace('\\', '/')
25 | module_path = module_path.replace('/', '.')
26 | module_path = module_path.strip('.')[:-3]
27 | # load cfg
28 | spec = importlib.util.spec_from_file_location(module_path, cfg_file_path)
29 | cfg = importlib.util.module_from_spec(spec)
30 | sys.modules[module_path] = cfg
31 | spec.loader.exec_module(cfg)
32 | # return cfg
33 | return cfg, cfg_file_path
34 | '''parsefrompkl'''
35 | def parsefrompkl(self, cfg_file_path):
36 | cfg = pickle.load(open(cfg_file_path, 'rb'))
37 | return cfg, cfg_file_path
38 | '''parse'''
39 | def parse(self, cfg_file_path):
40 | # ext to parse method
41 | ext_to_parse_method = {
42 | '.py': self.parsefrompy, '.pkl': self.parsefrompkl,
43 | }
44 | # config ext
45 | cfg_ext = os.path.splitext(cfg_file_path)[-1]
46 | # assert
47 | assert cfg_ext in ext_to_parse_method, f'unable to parse config with extension {cfg_ext}'
48 | # parse
49 | return ext_to_parse_method[cfg_ext](cfg_file_path=cfg_file_path)
50 | '''save'''
51 | def save(self, work_dir=''):
52 | work_dir = os.path.join(work_dir, 'configs')
53 | touchdir(work_dir)
54 | savepath = os.path.join(work_dir, os.path.basename(self.cfg_file_path) + '.pkl')
55 | return pickle.dump(self.cfg, open(savepath, 'wb'))
56 | '''call'''
57 | def __call__(self, cfg_file_path):
58 | cfg_file_path = os.path.abspath(os.path.expanduser(cfg_file_path))
59 | self.cfg, self.cfg_file_path = self.parse(cfg_file_path=cfg_file_path)
60 | return self.cfg, self.cfg_file_path
--------------------------------------------------------------------------------
/csseg/modules/utils/io.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of some utils for io-related operations
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import os
8 | import torch
9 | import pickle
10 | import torch.utils.model_zoo as model_zoo
11 |
12 |
13 | '''touchdir'''
14 | def touchdir(dirname):
15 | if not os.path.exists(dirname):
16 | try:
17 | os.mkdir(dirname)
18 | return True
19 | except:
20 | return False
21 | return False
22 |
23 |
24 | '''saveckpts'''
25 | def saveckpts(ckpts, savepath):
26 | torch.save(ckpts, savepath)
27 | return True
28 |
29 |
30 | '''loadckpts'''
31 | def loadckpts(ckptspath, map_to_cpu=True):
32 | if os.path.islink(ckptspath):
33 | ckptspath = os.readlink(ckptspath)
34 | if map_to_cpu:
35 | ckpts = torch.load(ckptspath, map_location=torch.device('cpu'))
36 | else:
37 | ckpts = torch.load(ckptspath)
38 | return ckpts
39 |
40 |
41 | '''saveaspickle'''
42 | def saveaspickle(data, savepath):
43 | with open(savepath, 'wb') as handle:
44 | pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
45 | return True
46 |
47 |
48 | '''loadpicklefile'''
49 | def loadpicklefile(filepath):
50 | with open(filepath, 'rb') as handle:
51 | data = pickle.load(handle)
52 | return data
53 |
54 |
55 | '''symlink'''
56 | def symlink(src_path, dst_path):
57 | if os.path.islink(dst_path):
58 | os.unlink(dst_path)
59 | os.symlink(src_path, dst_path)
60 | return True
61 |
62 |
63 | '''loadpretrainedweights'''
64 | def loadpretrainedweights(structure_type, pretrained_model_path='', pretrained_weights_table={}, map_to_cpu=True, possible_model_keys=['model', 'state_dict']):
65 | if pretrained_model_path and os.path.exists(pretrained_model_path):
66 | checkpoint = torch.load(pretrained_model_path, map_location='cpu') if map_to_cpu else torch.load(pretrained_model_path)
67 | else:
68 | checkpoint = model_zoo.load_url(pretrained_weights_table[structure_type], map_location='cpu') if map_to_cpu else model_zoo.load_url(pretrained_weights_table[structure_type])
69 | state_dict = checkpoint
70 | for key in possible_model_keys:
71 | if key in checkpoint:
72 | state_dict = checkpoint[key]
73 | break
74 | return state_dict
--------------------------------------------------------------------------------
/csseg/modules/utils/logger.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of LoggerHandles
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import time
8 | from .modulebuilder import BaseModuleBuilder
9 |
10 |
11 | '''LocalLoggerHandle'''
12 | class LocalLoggerHandle():
13 | def __init__(self, logfilepath):
14 | self.logfilepath = logfilepath
15 | '''log'''
16 | def log(self, message, level='INFO', endwithnewline=True):
17 | message = f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} {level} {message}'
18 | print(message)
19 | if not message.endswith('\n') and endwithnewline:
20 | message = message + '\n'
21 | with open(self.logfilepath, 'a') as fp:
22 | fp.write(message)
23 | '''debug'''
24 | def debug(self, message, endwithnewline=True):
25 | self.log(message, 'DEBUG', endwithnewline)
26 | '''info'''
27 | def info(self, message, endwithnewline=True):
28 | self.log(message, 'INFO', endwithnewline)
29 | '''warning'''
30 | def warning(self, message, endwithnewline=True):
31 | self.log(message, 'WARNING', endwithnewline)
32 | '''error'''
33 | def error(self, message, endwithnewline=True):
34 | self.log(message, 'ERROR', endwithnewline)
35 |
36 |
37 | '''LoggerHandleBuilder'''
38 | class LoggerHandleBuilder(BaseModuleBuilder):
39 | REGISTERED_MODULES = {
40 | 'LocalLoggerHandle': LocalLoggerHandle,
41 | }
42 | '''build'''
43 | def build(self, logger_handle_cfg):
44 | return super().build(logger_handle_cfg)
45 |
46 |
47 | '''BuildLoggerHandle'''
48 | BuildLoggerHandle = LoggerHandleBuilder().build
--------------------------------------------------------------------------------
/csseg/modules/utils/misc.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of utils, e.g., setrandomseed
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | import random
9 | import numpy as np
10 |
11 |
12 | '''setrandomseed'''
13 | def setrandomseed(seed=42):
14 | random.seed(seed)
15 | np.random.seed(seed)
16 | torch.manual_seed(seed)
17 | torch.cuda.manual_seed_all(seed)
--------------------------------------------------------------------------------
/csseg/modules/utils/modulebuilder.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of BaseModuleBuilder
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import copy
8 | import collections
9 |
10 |
11 | '''BaseModuleBuilder'''
12 | class BaseModuleBuilder():
13 | REGISTERED_MODULES = collections.OrderedDict()
14 | def __init__(self, requires_register_modules=None, requires_renew_modules=None):
15 | if requires_register_modules is not None and isinstance(requires_register_modules, (dict, collections.OrderedDict)):
16 | for name, module in requires_register_modules.items():
17 | self.register(name, module)
18 | if requires_renew_modules is not None and isinstance(requires_renew_modules, (dict, collections.OrderedDict)):
19 | for name, module in requires_renew_modules.items():
20 | self.renew(name, module)
21 | self.validate()
22 | '''build'''
23 | def build(self, module_cfg):
24 | module_cfg = copy.deepcopy(module_cfg)
25 | module_type = module_cfg.pop('type')
26 | module = self.REGISTERED_MODULES[module_type](**module_cfg)
27 | return module
28 | '''register'''
29 | def register(self, name, module):
30 | assert callable(module)
31 | assert name not in self.REGISTERED_MODULES
32 | self.REGISTERED_MODULES[name] = module
33 | '''renew'''
34 | def renew(self, name, module):
35 | assert callable(module)
36 | assert name in self.REGISTERED_MODULES
37 | self.REGISTERED_MODULES[name] = module
38 | '''validate'''
39 | def validate(self):
40 | for _, module in self.REGISTERED_MODULES.items():
41 | assert callable(module)
42 | '''delete'''
43 | def delete(self, name):
44 | assert name in self.REGISTERED_MODULES
45 | del self.REGISTERED_MODULES[name]
46 | '''pop'''
47 | def pop(self, name):
48 | assert name in self.REGISTERED_MODULES
49 | module = self.REGISTERED_MODULES.pop(name)
50 | return module
51 | '''get'''
52 | def get(self, name):
53 | assert name in self.REGISTERED_MODULES
54 | module = self.REGISTERED_MODULES.get(name)
55 | return module
56 | '''items'''
57 | def items(self):
58 | return self.REGISTERED_MODULES.items()
59 | '''clear'''
60 | def clear(self):
61 | return self.REGISTERED_MODULES.clear()
62 | '''values'''
63 | def values(self):
64 | return self.REGISTERED_MODULES.values()
65 | '''keys'''
66 | def keys(self):
67 | return self.REGISTERED_MODULES.keys()
68 | '''copy'''
69 | def copy(self):
70 | return self.REGISTERED_MODULES.copy()
71 | '''update'''
72 | def update(self, requires_update_modules):
73 | return self.REGISTERED_MODULES.update(requires_update_modules)
--------------------------------------------------------------------------------
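A minimal sketch of extending a builder with a custom module (the `ZeroLoss` below is hypothetical, purely for illustration). Note that `register` mutates the class-level `REGISTERED_MODULES` dict, so the registration is visible to every instance of that builder; `renew` exists for overriding an existing entry.

```python
import torch.nn as nn
from csseg.modules.models.losses import LossBuilder

class ZeroLoss(nn.Module):
    '''a do-nothing loss, used only to illustrate registration'''
    def forward(self, prediction, target):
        return prediction.sum() * 0.0

builder = LossBuilder(requires_register_modules={'ZeroLoss': ZeroLoss})
criterion = builder.build({'type': 'ZeroLoss'})
```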
/csseg/test.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of Tester
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import torch
8 | import warnings
9 | import argparse
10 | import torch.distributed as dist
11 | from modules import BuildRunner, ConfigParser, loadckpts
12 | warnings.filterwarnings('ignore')
13 |
14 |
15 | '''parsecmdargs'''
16 | def parsecmdargs():
17 | parser = argparse.ArgumentParser(description='CSSegmentation: An Open Source Continual Semantic Segmentation Toolbox Based on PyTorch.')
18 | parser.add_argument('--local_rank', '--local-rank', dest='local_rank', help='node rank for distributed training.', default=0, type=int)
19 | parser.add_argument('--nproc_per_node', dest='nproc_per_node', help='number of processes per node.', default=4, type=int)
20 | parser.add_argument('--cfgfilepath', dest='cfgfilepath', help='config file path you want to load.', type=str, required=True)
21 | parser.add_argument('--ckptspath', dest='ckptspath', help='checkpoints path you want to load.', type=str, required=True)
22 | cmd_args = parser.parse_args()
23 | if torch.__version__.startswith('2.'):
24 |         import os; cmd_args.local_rank = int(os.environ['LOCAL_RANK'])  # LOCAL_RANK is exported by the PyTorch 2.x launcher (torchrun)
25 | return cmd_args
26 |
27 |
28 | '''Tester'''
29 | class Tester():
30 | def __init__(self, cmd_args):
31 | self.cmd_args = cmd_args
32 | config_parser = ConfigParser()
33 | self.cfg, _ = config_parser(cmd_args.cfgfilepath)
34 | '''start'''
35 | def start(self):
36 | # initialize
37 | assert torch.cuda.is_available(), 'cuda is not available'
38 | cmd_args, runner_cfg = self.cmd_args, self.cfg.RUNNER_CFG
39 | dist.init_process_group(backend=runner_cfg['parallel_cfg']['backend'], init_method=runner_cfg['parallel_cfg']['init_method'])
40 | torch.cuda.set_device(cmd_args.local_rank)
41 | torch.backends.cudnn.allow_tf32 = False
42 | torch.backends.cuda.matmul.allow_tf32 = False
43 | torch.backends.cudnn.benchmark = runner_cfg['benchmark']
44 | # load ckpts
45 | ckpts = loadckpts(cmd_args.ckptspath)
46 | runner_cfg['task_id'] = ckpts['task_id']
47 | runner_client = BuildRunner(mode='TEST', cmd_args=cmd_args, runner_cfg=runner_cfg)
48 | runner_client.segmentor.load_state_dict(ckpts['segmentor'], strict=True)
49 | # start to test and print results
50 | results = runner_client.test(cur_epoch=ckpts['cur_epoch'])
51 | if cmd_args.local_rank == 0:
52 | runner_client.logger_handle.info(results)
53 |
54 |
55 | '''main'''
56 | if __name__ == '__main__':
57 | cmd_args = parsecmdargs()
58 | tester_client = Tester(cmd_args=cmd_args)
59 | tester_client.start()
--------------------------------------------------------------------------------
/csseg/train.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Implementation of Trainer
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import os
8 | import copy
9 | import torch
10 | import warnings
11 | import argparse
12 | import torch.distributed as dist
13 | from modules import BuildRunner, ConfigParser
14 | warnings.filterwarnings('ignore')
15 |
16 |
17 | '''parsecmdargs'''
18 | def parsecmdargs():
19 | parser = argparse.ArgumentParser(description='CSSegmentation: An Open Source Continual Semantic Segmentation Toolbox Based on PyTorch.')
20 | parser.add_argument('--local_rank', '--local-rank', dest='local_rank', help='node rank for distributed training.', default=0, type=int)
21 | parser.add_argument('--nproc_per_node', dest='nproc_per_node', help='number of processes per node.', default=4, type=int)
22 | parser.add_argument('--cfgfilepath', dest='cfgfilepath', help='config file path you want to load.', type=str, required=True)
23 | parser.add_argument('--starttaskid', dest='starttaskid', help='task id you want to start from.', default=0, type=int)
24 | cmd_args = parser.parse_args()
25 | if torch.__version__.startswith('2.'):
26 | cmd_args.local_rank = int(os.environ['LOCAL_RANK'])
27 | return cmd_args
28 |
29 |
30 | '''Trainer'''
31 | class Trainer():
32 | def __init__(self, cmd_args):
33 | self.cmd_args = cmd_args
34 | config_parser = ConfigParser()
35 | self.cfg, _ = config_parser(cmd_args.cfgfilepath)
36 | '''start'''
37 | def start(self):
38 | # initialize
39 | assert torch.cuda.is_available(), 'cuda is not available'
40 | cmd_args, runner_cfg = self.cmd_args, self.cfg.RUNNER_CFG
41 | dist.init_process_group(backend=runner_cfg['parallel_cfg']['backend'], init_method=runner_cfg['parallel_cfg']['init_method'])
42 | torch.cuda.set_device(cmd_args.local_rank)
43 | torch.backends.cudnn.allow_tf32 = False
44 | torch.backends.cuda.matmul.allow_tf32 = False
45 | torch.backends.cudnn.benchmark = runner_cfg['benchmark']
46 | # iter tasks
47 | for task_id in range(cmd_args.starttaskid, runner_cfg['num_tasks']):
48 | runner_cfg_task = copy.deepcopy(runner_cfg)
49 | runner_cfg_task['task_id'] = task_id
50 | for key in ['segmentor_cfg', 'dataset_cfg', 'dataloader_cfg', 'scheduler_cfg', 'parallel_cfg']:
51 | if isinstance(runner_cfg_task[key], list):
52 | assert len(runner_cfg_task[key]) == runner_cfg_task['num_tasks']
53 | runner_cfg_task[key] = runner_cfg_task[key][task_id]
54 | runner_client = BuildRunner(mode='TRAIN', cmd_args=cmd_args, runner_cfg=runner_cfg_task)
55 | runner_client.start()
56 |
57 |
58 | '''main'''
59 | if __name__ == '__main__':
60 | cmd_args = parsecmdargs()
61 | trainer_client = Trainer(cmd_args=cmd_args)
62 | trainer_client.start()
--------------------------------------------------------------------------------
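Note: `Trainer.start` above allows each of `segmentor_cfg`, `dataset_cfg`, `dataloader_cfg`, `scheduler_cfg` and `parallel_cfg` to be either a single value shared by all tasks or a list holding one entry per task. A minimal, self-contained sketch of that selection convention (the toy config values are hypothetical):

```python
# Toy illustration of the per-task config selection used in Trainer.start.
import copy

runner_cfg = {
    'num_tasks': 2,
    'scheduler_cfg': [{'max_epochs': 30}, {'max_epochs': 15}],  # one entry per task
    'dataset_cfg': {'type': 'VOCDataset'},  # single value, shared across tasks
}
for task_id in range(runner_cfg['num_tasks']):
    cfg_task = copy.deepcopy(runner_cfg)
    cfg_task['task_id'] = task_id
    for key in ['scheduler_cfg', 'dataset_cfg']:
        if isinstance(cfg_task[key], list):
            assert len(cfg_task[key]) == cfg_task['num_tasks']
            cfg_task[key] = cfg_task[key][task_id]  # pick this task's entry
    print(task_id, cfg_task['scheduler_cfg'], cfg_task['dataset_cfg'])
```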
/docs/DatasetPreparation.md:
--------------------------------------------------------------------------------
1 | # Dataset Preparation
2 |
3 |
4 | ## Dataset Notes
5 |
6 | **1.Download Source**
7 |
8 | For easier I/O, some of the supported datasets have been pre-processed, *e.g.*, by creating the train.txt/val.txt/test.txt files used to record the corresponding image ids.
9 | So, it is recommended to adopt the provided script (*i.e.*, `scripts/prepare_dataset.sh`) to download the supported datasets, or to download them from the provided network disk links, rather than from the official websites.
10 |
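For illustration, such a split file presumably lists one image id per line, so it can be consumed like this (a sketch; the path is an assumption about the pre-processed layout):

```python
# Hypothetical sketch: read the image ids recorded in a pre-processed split file.
with open('VOCdevkit/VOC2012/train.txt', 'r') as fp:  # assumed location
    imageids = [line.strip() for line in fp if line.strip()]
print(len(imageids), imageids[:3])
```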
11 |
12 | ## Supported Datasets
13 |
14 | **1.ADE20k**
15 |
16 | - Official Website: [click](https://groups.csail.mit.edu/vision/datasets/ADE20K/),
17 | - Baidu Disk: [click](https://pan.baidu.com/s/1TZbgxPnY0Als6LoiV80Xrw) (access code: fn1i),
18 | - Script Command: `bash scripts/prepare_dataset.sh ade20k`.
19 |
20 | **2.PASCAL VOC**
21 |
22 | - Official Website: [click](http://host.robots.ox.ac.uk/pascal/VOC/),
23 | - Baidu Disk: [click](https://pan.baidu.com/s/1TZbgxPnY0Als6LoiV80Xrw) (access code: fn1i),
24 | - Script Command: `bash scripts/prepare_dataset.sh pascalvoc`.
25 |
26 | **3.CityScapes**
27 |
28 | - Official Website: [click](https://www.cityscapes-dataset.com/),
29 | - Baidu Disk: [click](https://pan.baidu.com/s/1TZbgxPnY0Als6LoiV80Xrw) (access code: fn1i),
30 | - Script Command: `bash scripts/prepare_dataset.sh cityscapes`.
--------------------------------------------------------------------------------
/docs/GetStarted.md:
--------------------------------------------------------------------------------
1 | # Get Started
2 |
3 |
4 | ## Prerequisites
5 |
6 | In this section, we introduce some prerequisites for using CSSegmentation.
7 | If you are experienced with Python and PyTorch and have already installed them, just skip this part.
8 |
9 | **1.Operating System**
10 |
11 | CSSegmentation works on Linux, Windows and macOS. It requires Python 3.6+, CUDA 9.2+ and PyTorch 1.3+.
12 |
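You can check your local environment against these requirements, *e.g.*,

```sh
python --version
python -c "import torch; print(torch.__version__, torch.version.cuda)"
```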
13 | **2.Anaconda**
14 |
15 | For Linux and macOS users, we strongly recommend downloading and installing [Anaconda](https://docs.conda.io/en/latest/miniconda.html).
16 | Then, you can create a conda environment for CSSegmentation and activate it,
17 |
18 | ```sh
19 | conda create --name csseg python=3.8 -y
20 | conda activate csseg
21 | ```
22 |
23 | **3.WGET & Decompression Software**
24 |
25 | If you want to utilize the provided scripts to prepare the datasets, you need to install wget (for downloading datasets), 7z (for processing compressed packages) and tar (for processing compressed packages) in your operating system.
26 | For Windows users, the resources are listed as follows,
27 |
28 | - 7Z: [Download](https://sparanoid.com/lab/7z/download.html),
29 | - RAR: [Download](https://www.win-rar.com/start.html?&L=0),
30 | - WGET: [Download](http://downloads.sourceforge.net/gnuwin32/wget-1.11.4-1-setup.exe?spm=a2c6h.12873639.article-detail.7.3f825677H6sKF2&file=wget-1.11.4-1-setup.exe).
31 |
32 | Besides, [Cmder](https://cmder.app/) is recommended to help Windows users execute the provided scripts successfully.
33 |
34 | ## Installation
35 |
36 | **1.Clone CSSegmentation**
37 |
38 | You can run the following commands to clone the cssegmentation repository,
39 |
40 | ```sh
41 | git clone https://github.com/SegmentationBLWX/cssegmentation.git
42 | cd cssegmentation
43 | ```
44 |
45 | **2.Install Requirements**
46 |
47 | **2.1 Basic Requirements (Necessary)**
48 |
49 | To set up the essential prerequisites for running CSSegmentation, execute the following command,
50 |
51 | ```sh
52 | pip install -r requirements.txt
53 | ```
54 |
55 | This command will automatically install the following packages,
56 |
57 | - `pillow`: set in requirements/io.txt,
58 | - `pandas`: set in requirements/io.txt,
59 | - `opencv-python`: set in requirements/io.txt,
60 | - `inplace-abn`: set in requirements/bricks.txt,
61 | - `numpy`: set in requirements/science.txt,
62 | - `scipy`: set in requirements/science.txt,
63 | - `tqdm`: set in requirements/terminal.txt,
64 | - `argparse`: set in requirements/terminal.txt,
65 | - `cython`: set in requirements/misc.txt.
66 |
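After installation, you can sanity-check that these packages import correctly (note that `inplace-abn` additionally requires PyTorch, which is installed in the next step), *e.g.*,

```sh
python -c "import PIL, pandas, cv2, numpy, scipy, tqdm, cython"
```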
67 | **2.2 PyTorch and Torchvision (Necessary)**
68 |
69 | If you intend to utilize CSSegmentation, it is imperative to install PyTorch and torchvision.
70 | We recommend following the [official instructions](https://pytorch.org/get-started/previous-versions/) to install them, *e.g.*,
71 |
72 | ```sh
73 | # CUDA 11.0
74 | pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html
75 | # CUDA 10.2
76 | pip install torch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0
77 | ```
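After that, you can verify that PyTorch sees your GPUs (both training and testing assert that CUDA is available),

```sh
python -c "import torch; print(torch.cuda.is_available(), torch.cuda.device_count())"
```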
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/ModelZoo.md:
--------------------------------------------------------------------------------
1 | # Model Zoo
2 |
3 |
4 | ## Common Settings
5 |
6 | - We use distributed training with 4 GPUs by default.
7 | - Our ResNet-style backbones are based on [In-Place Activated BatchNorm](https://github.com/mapillary/inplace_abn/).
8 |
9 |
10 | ## Supported Backbones
11 |
12 |
13 | ## Supported Segmentors
14 |
15 |
--------------------------------------------------------------------------------
/docs/QuickRun.md:
--------------------------------------------------------------------------------
1 | # Quick Run
2 |
3 |
4 | ## Train A Segmentor
5 |
6 | CSSegmentation only supports distributed training, which uses DistributedDataParallel.
7 |
8 | All outputs (log files and checkpoints) will be saved to the working directory, which is specified by "work_dir" in the config file.
9 |
10 | #### Train on a single machine
11 |
12 | You can train the segmentors on a single machine as follows,
13 |
14 | ```sh
15 | bash scripts/dist_train.sh ${NGPUS} ${CFGFILEPATH} [optional arguments]
16 | ```
17 |
18 | where "${NGPUS}" denotes the number of GPUs you want to use and "${CFGFILEPATH}" denotes the config file path.
19 | For example, you can train a segmentor on a single machine with the following command,
20 |
21 | ```sh
22 | bash scripts/dist_train.sh 4 csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
23 | ```
24 |
25 | Since continual training proceeds task by task, if you want to resume training from a specific task, you can specify the start task id as follows,
26 | 
27 | ```sh
28 | bash scripts/dist_train.sh 4 csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py --starttaskid 1
29 | ```
30 |
31 | #### Train on multiple machines
32 |
33 | Currently, we only support training on multiple machines via Slurm.
34 | Slurm is a good job scheduling system for computing clusters.
35 | On a cluster managed by Slurm, you can use "slurmtrain.sh" to spawn training jobs.
36 | It supports both single-node and multi-node training.
37 |
38 | Specifically, you can train the segmentors on multiple machines as follows,
39 |
40 | ```sh
41 | bash scripts/slurmtrain.sh ${PARTITION} ${JOBNAME} ${NGPUS} ${CFGFILEPATH} [optional arguments]
42 | ```
43 |
44 | Here is an example of using 16 GPUs to train MIB on the dev partition,
45 |
46 | ```sh
47 | bash scripts/slurmtrain.sh dev mib 16 csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
48 | ```
49 |
50 |
51 | ## Test A Segmentor
52 |
53 | We provide testing scripts to evaluate a whole dataset (Cityscapes, PASCAL VOC, ADE20k, etc.), and also some high-level APIs for easier integration into other projects.
54 |
55 | #### Test on a single machine
56 |
57 | You can test the segmentors on a single machine as follows,
58 |
59 | ```sh
60 | bash scripts/dist_test.sh ${NGPUS} ${CFGFILEPATH} ${CKPTSPATH} [optional arguments]
61 | ```
62 |
63 | For example, you can test a segmentor on a single machine with the following command,
64 |
65 | ```sh
66 | bash scripts/dist_test.sh 4 csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap/epoch_30.pth
67 | ```
68 |
69 | #### Test on multiple machines
70 |
71 | Currently, we only support testing on multiple machines via Slurm.
72 | Slurm is a good job scheduling system for computing clusters.
73 | On a cluster managed by Slurm, you can use "slurmtest.sh" to spawn testing jobs.
74 | It supports both single-node and multi-node testing.
75 |
76 | Specifically, you can test the segmentors on multiple machines as follows,
77 |
78 | ```sh
79 | bash scripts/slurmtest.sh ${PARTITION} ${JOBNAME} ${NGPUS} ${CFGFILEPATH} ${CKPTSPATH} [optional arguments]
80 | ```
81 |
82 | Here is an example of using 16 GPUs to test MIB on the dev partition,
83 |
84 | ```sh
85 | bash scripts/slurmtest.sh dev mib 16 csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap/epoch_30.pth
86 | ```
87 |
88 |
89 | ## Inference A Segmentor
90 |
91 | You can apply the segmentor as follows:
92 |
93 | ```sh
94 | bash scripts/inference.sh ${CFGFILEPATH} ${CKPTSPATH} [optional arguments]
95 | ```
96 |
97 | For example, if you want to run inference on one image, the command can be,
98 |
99 | ```sh
100 | bash scripts/inference.sh csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap/epoch_30.pth --imagepath dog.jpg
101 | ```
102 |
103 | If you want to run inference on the images in one directory, the command can be,
104 |
105 | ```sh
106 | bash scripts/inference.sh csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap/epoch_30.pth --imagedir dogs
107 | ```
--------------------------------------------------------------------------------
/docs/Tutorials.md:
--------------------------------------------------------------------------------
1 | # Tutorials
2 |
3 |
4 | ## Learn about Config
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | # import os
14 | # import sys
15 | # sys.path.insert(0, os.path.abspath('.'))
16 |
17 |
18 | # -- Project information -----------------------------------------------------
19 |
20 | project = 'CSSegmentation'
21 | copyright = '2023-2030, Zhenchao Jin'
22 | author = 'Zhenchao Jin'
23 | release = '0.1.0'
24 |
25 | # -- General configuration ---------------------------------------------------
26 |
27 | # Add any Sphinx extension module names here, as strings. They can be
28 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
29 | # ones.
30 | extensions = [
31 | 'sphinx.ext.autodoc',
32 | 'sphinx.ext.napoleon',
33 | 'sphinx.ext.viewcode',
34 | 'recommonmark',
35 | 'sphinx_markdown_tables',
36 | ]
37 |
38 | # Add any paths that contain templates here, relative to this directory.
39 | templates_path = ['_templates']
40 |
41 | # The suffix(es) of source filenames.
42 | # You can specify multiple suffix as a list of string:
43 | #
44 | source_suffix = {
45 | '.rst': 'restructuredtext',
46 | '.md': 'markdown',
47 | }
48 |
49 | # The master toctree document.
50 | master_doc = 'index'
51 |
52 | # List of patterns, relative to source directory, that match files and
53 | # directories to ignore when looking for source files.
54 | # This pattern also affects html_static_path and html_extra_path.
55 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
56 |
57 |
58 | # -- Options for HTML output -------------------------------------------------
59 |
60 | # The theme to use for HTML and HTML Help pages. See the documentation for
61 | # a list of builtin themes.
62 | #
63 | html_theme = 'sphinx_rtd_theme'
64 | # html_theme = 'pytorch_sphinx_theme'
65 |
66 |
67 | # Add any paths that contain custom static files (such as style sheets) here,
68 | # relative to this directory. They are copied after the builtin static files,
69 | # so a file named "default.css" will overwrite the builtin "default.css".
70 | html_static_path = ['_static']
71 |
72 | # For multi language
73 | # locale_dirs = ['locale/']
74 | # gettext_compact = False
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. cssegmentation documentation master file, created by
2 | sphinx-quickstart on Sat Feb 29 22:07:23 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to CSSegmentation's documentation!
7 | ==========================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 |
12 | GetStarted.md
13 | DatasetPreparation.md
14 | ModelZoo.md
15 | QuickRun.md
16 | Tutorials.md
--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/c12517594ee91e22ba99974e30f066c0b081728f/docs/logo.png
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/modelzoo/caf/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | Official Repo
4 |
5 | Code Snippet
6 |
7 |
8 | CAF (TMM'2022)
9 |
10 | ```latex
11 | @article{yang2022continual,
12 | title={Continual attentive fusion for incremental learning in semantic segmentation},
13 | author={Yang, Guanglei and Fini, Enrico and Xu, Dan and Rota, Paolo and Ding, Mingli and Tang, Hao and Alameda-Pineda, Xavier and Ricci, Elisa},
14 | journal={IEEE Transactions on Multimedia},
15 | year={2022},
16 | publisher={IEEE}
17 | }
18 | ```
19 |
20 |
21 |
22 |
23 | ## Results
24 |
25 | #### PASCAL VOC
26 |
27 | | Backbone | Crop Size | Setting | mIoU | Download |
28 | | :-: | :-: | :-: | :-: | :-: |
29 | | R-101-D16 | 512x512 | 15-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
30 | | R-101-D16 | 512x512 | 15-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
31 | | R-101-D16 | 512x512 | 15-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
32 | | R-101-D16 | 512x512 | 15-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
33 | | R-101-D16 | 512x512 | 10-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
34 | | R-101-D16 | 512x512 | 10-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
35 |
36 | #### ADE20k
37 |
38 | | Backbone | Crop Size | Setting | mIoU | Download |
39 | | :-: | :-: | :-: | :-: | :-: |
40 | | R-101-D16 | 512x512 | 100-50-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
41 | | R-101-D16 | 512x512 | 100-50-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
42 | | R-101-D16 | 512x512 | 100-10-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
43 | | R-101-D16 | 512x512 | 100-10-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
44 | | R-101-D16 | 512x512 | 100-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
45 | | R-101-D16 | 512x512 | 100-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
46 |
47 |
48 | ## More
49 |
50 | You can also download the model weights from the following sources:
51 | - BaiduNetdisk: https://pan.baidu.com/s/1e9LlD6ITuLECstFcVPaCxQ with access code **55mq**
--------------------------------------------------------------------------------
/docs/modelzoo/ewf/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | Official Repo
4 |
5 | Code Snippet
6 |
7 |
8 | EWF (CVPR'2023)
9 |
10 | ```latex
11 | @inproceedings{xiao2023endpoints,
12 | title={Endpoints Weight Fusion for Class Incremental Semantic Segmentation},
13 | author={Xiao, Jia-Wen and Zhang, Chang-Bin and Feng, Jiekang and Liu, Xialei and van de Weijer, Joost and Cheng, Ming-Ming},
14 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
15 | pages={7204--7213},
16 | year={2023}
17 | }
18 | ```
19 |
20 |
21 |
22 |
23 | ## Results
24 |
25 | #### PASCAL VOC
26 |
27 | | Backbone | Crop Size | Setting | mIoU | Download |
28 | | :-: | :-: | :-: | :-: | :-: |
29 | | R-101-D16 | 512x512 | 15-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
30 | | R-101-D16 | 512x512 | 15-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
31 | | R-101-D16 | 512x512 | 15-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
32 | | R-101-D16 | 512x512 | 15-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
33 | | R-101-D16 | 512x512 | 10-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
34 | | R-101-D16 | 512x512 | 10-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
35 |
36 | #### ADE20k
37 |
38 | | Backbone | Crop Size | Setting | mIoU | Download |
39 | | :-: | :-: | :-: | :-: | :-: |
40 | | R-101-D16 | 512x512 | 100-50-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
41 | | R-101-D16 | 512x512 | 100-50-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
42 | | R-101-D16 | 512x512 | 100-10-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
43 | | R-101-D16 | 512x512 | 100-10-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
44 | | R-101-D16 | 512x512 | 100-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
45 | | R-101-D16 | 512x512 | 100-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
46 |
47 |
48 | ## More
49 |
50 | You can also download the model weights from the following sources:
51 | - BaiduNetdisk: https://pan.baidu.com/s/1e9LlD6ITuLECstFcVPaCxQ with access code **55mq**
--------------------------------------------------------------------------------
/docs/modelzoo/ilt/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | Official Repo
4 |
5 | Code Snippet
6 |
7 |
8 | ILT (ICCVW'2019)
9 |
10 | ```latex
11 | @inproceedings{michieli2019incremental,
12 | title={Incremental learning techniques for semantic segmentation},
13 | author={Michieli, Umberto and Zanuttigh, Pietro},
14 | booktitle={Proceedings of the IEEE/CVF international conference on computer vision workshops},
15 | pages={0--0},
16 | year={2019}
17 | }
18 | ```
19 |
20 |
21 |
22 |
23 | ## Results
24 |
25 | #### PASCAL VOC
26 |
29 | | Backbone | Crop Size | Setting | mIoU | Download |
30 | | :-: | :-: | :-: | :-: | :-: |
31 | | R-101-D16 | 512x512 | 15-5-disjoint | | [cfg](https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/main/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.py) | [modellinks-per-step](https://github.com/SegmentationBLWX/modelstore/releases/tag/csseg_ilt) | [log](https://github.com/SegmentationBLWX/modelstore/releases/download/csseg_ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-5_disjoint.log) |
32 | | R-101-D16 | 512x512 | 15-5-overlapped | | [cfg](https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/main/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-5_overlap.py) | [modellinks-per-step](https://github.com/SegmentationBLWX/modelstore/releases/tag/csseg_ilt) | [log](https://github.com/SegmentationBLWX/modelstore/releases/download/csseg_ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-5_overlap.log) |
33 | | R-101-D16 | 512x512 | 15-1-disjoint | 8.6% | [cfg](https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/main/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.py) | [modellinks-per-step](https://github.com/SegmentationBLWX/modelstore/releases/tag/csseg_ilt) | [log](https://github.com/SegmentationBLWX/modelstore/releases/download/csseg_ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-1_disjoint.log) |
34 | | R-101-D16 | 512x512 | 15-1-overlapped | 9.3% | [cfg](https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/main/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py) | [modellinks-per-step](https://github.com/SegmentationBLWX/modelstore/releases/tag/csseg_ilt) | [log](https://github.com/SegmentationBLWX/modelstore/releases/download/csseg_ilt/ilt_r101iabnd16_aspp_512x512_vocaug15-1_overlap.log) |
35 | | R-101-D16 | 512x512 | 10-1-disjoint | 5.4% | [cfg](https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/main/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.py) | [modellinks-per-step](https://github.com/SegmentationBLWX/modelstore/releases/tag/csseg_ilt) | [log](https://github.com/SegmentationBLWX/modelstore/releases/download/csseg_ilt/ilt_r101iabnd16_aspp_512x512_vocaug10-1_disjoint.log) |
36 | | R-101-D16 | 512x512 | 10-1-overlapped | 5.9% | [cfg](https://raw.githubusercontent.com/SegmentationBLWX/cssegmentation/main/csseg/configs/ilt/ilt_r101iabnd16_aspp_512x512_vocaug10-1_overlap.py) | [modellinks-per-step](https://github.com/SegmentationBLWX/modelstore/releases/tag/csseg_ilt) | [log](https://github.com/SegmentationBLWX/modelstore/releases/download/csseg_ilt/ilt_r101iabnd16_aspp_512x512_vocaug10-1_overlap.log) |
37 |
38 | #### ADE20k
39 |
40 | | Backbone | Crop Size | Setting | mIoU | Download |
41 | | :-: | :-: | :-: | :-: | :-: |
42 | | R-101-D16 | 512x512 | 100-50-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
43 | | R-101-D16 | 512x512 | 100-50-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
44 | | R-101-D16 | 512x512 | 100-10-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
45 | | R-101-D16 | 512x512 | 100-10-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
46 | | R-101-D16 | 512x512 | 100-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
47 | | R-101-D16 | 512x512 | 100-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
48 |
49 |
50 | ## More
51 |
52 | You can also download the model weights from the following sources:
53 | - BaiduNetdisk: https://pan.baidu.com/s/1e9LlD6ITuLECstFcVPaCxQ with access code **55mq**
--------------------------------------------------------------------------------
/docs/modelzoo/rcil/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | Official Repo
4 |
5 | Code Snippet
6 |
7 |
8 | RCIL (CVPR'2022)
9 |
10 | ```latex
11 | @inproceedings{zhang2022representation,
12 | title={Representation compensation networks for continual semantic segmentation},
13 | author={Zhang, Chang-Bin and Xiao, Jia-Wen and Liu, Xialei and Chen, Ying-Cong and Cheng, Ming-Ming},
14 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
15 | pages={7053--7064},
16 | year={2022}
17 | }
18 | ```
19 |
20 |
21 |
22 |
23 | ## Results
24 |
25 | #### PASCAL VOC
26 |
27 | | Backbone | Crop Size | Setting | mIoU | Download |
28 | | :-: | :-: | :-: | :-: | :-: |
29 | | R-101-D16 | 512x512 | 15-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
30 | | R-101-D16 | 512x512 | 15-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
31 | | R-101-D16 | 512x512 | 15-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
32 | | R-101-D16 | 512x512 | 15-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
33 | | R-101-D16 | 512x512 | 10-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
34 | | R-101-D16 | 512x512 | 10-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
35 |
36 | #### ADE20k
37 |
38 | | Backbone | Crop Size | Setting | mIoU | Download |
39 | | :-: | :-: | :-: | :-: | :-: |
40 | | R-101-D16 | 512x512 | 100-50-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
41 | | R-101-D16 | 512x512 | 100-50-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
42 | | R-101-D16 | 512x512 | 100-10-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
43 | | R-101-D16 | 512x512 | 100-10-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
44 | | R-101-D16 | 512x512 | 100-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
45 | | R-101-D16 | 512x512 | 100-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
46 |
47 |
48 | ## More
49 |
50 | You can also download the model weights from the following sources:
51 | - BaiduNetdisk: https://pan.baidu.com/s/1e9LlD6ITuLECstFcVPaCxQ with access code **55mq**
--------------------------------------------------------------------------------
/docs/modelzoo/reminder/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | Official Repo
4 |
5 | Code Snippet
6 |
7 |
8 | REMINDER (CVPR'2022)
9 |
10 | ```latex
11 | @inproceedings{phan2022class,
12 | title={Class Similarity Weighted Knowledge Distillation for Continual Semantic Segmentation},
13 | author={Phan, Minh Hieu and Phung, Son Lam and Tran-Thanh, Long and Bouzerdoum, Abdesselam and others},
14 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
15 | pages={16866--16875},
16 | year={2022}
17 | }
18 | ```
19 |
20 |
21 |
22 |
23 | ## Results
24 |
25 | #### PASCAL VOC
26 |
27 | | Backbone | Crop Size | Setting | mIoU | Download |
28 | | :-: | :-: | :-: | :-: | :-: |
29 | | R-101-D16 | 512x512 | 15-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
30 | | R-101-D16 | 512x512 | 15-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
31 | | R-101-D16 | 512x512 | 15-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
32 | | R-101-D16 | 512x512 | 15-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
33 | | R-101-D16 | 512x512 | 10-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
34 | | R-101-D16 | 512x512 | 10-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
35 |
36 | #### ADE20k
37 |
38 | | Backbone | Crop Size | Setting | mIoU | Download |
39 | | :-: | :-: | :-: | :-: | :-: |
40 | | R-101-D16 | 512x512 | 100-50-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
41 | | R-101-D16 | 512x512 | 100-50-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
42 | | R-101-D16 | 512x512 | 100-10-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
43 | | R-101-D16 | 512x512 | 100-10-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
44 | | R-101-D16 | 512x512 | 100-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
45 | | R-101-D16 | 512x512 | 100-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
46 |
47 |
48 | ## More
49 |
50 | You can also download the model weights from the following sources:
51 | - BaiduNetdisk: https://pan.baidu.com/s/1e9LlD6ITuLECstFcVPaCxQ with access code **55mq**
--------------------------------------------------------------------------------
/docs/modelzoo/sdr/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | Official Repo
4 |
5 | Code Snippet
6 |
7 |
8 | SDR (CVPR'2021)
9 |
10 | ```latex
11 | @inproceedings{michieli2021continual,
12 | title={Continual semantic segmentation via repulsion-attraction of sparse and disentangled latent representations},
13 | author={Michieli, Umberto and Zanuttigh, Pietro},
14 | booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
15 | pages={1114--1124},
16 | year={2021}
17 | }
18 | ```
19 |
20 |
21 |
22 |
23 | ## Results
24 |
25 | #### PASCAL VOC
26 |
27 | | Backbone | Crop Size | Setting | mIoU | Download |
28 | | :-: | :-: | :-: | :-: | :-: |
29 | | R-101-D16 | 512x512 | 15-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
30 | | R-101-D16 | 512x512 | 15-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
31 | | R-101-D16 | 512x512 | 15-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
32 | | R-101-D16 | 512x512 | 15-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
33 | | R-101-D16 | 512x512 | 10-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
34 | | R-101-D16 | 512x512 | 10-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
35 |
36 | #### ADE20k
37 |
38 | | Backbone | Crop Size | Setting | mIoU | Download |
39 | | :-: | :-: | :-: | :-: | :-: |
40 | | R-101-D16 | 512x512 | 100-50-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
41 | | R-101-D16 | 512x512 | 100-50-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
42 | | R-101-D16 | 512x512 | 100-10-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
43 | | R-101-D16 | 512x512 | 100-10-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
44 | | R-101-D16 | 512x512 | 100-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
45 | | R-101-D16 | 512x512 | 100-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
46 |
47 |
48 | ## More
49 |
50 | You can also download the model weights from the following sources:
51 | - BaiduNetdisk: https://pan.baidu.com/s/1e9LlD6ITuLECstFcVPaCxQ with access code **55mq**
--------------------------------------------------------------------------------
/docs/modelzoo/ucd/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | Official Repo
4 |
5 | Code Snippet
6 |
7 |
8 | UCD (TPAMI'2022)
9 |
10 | ```latex
11 | @article{yang2022uncertainty,
12 | title={Uncertainty-aware contrastive distillation for incremental semantic segmentation},
13 | author={Yang, Guanglei and Fini, Enrico and Xu, Dan and Rota, Paolo and Ding, Mingli and Nabi, Moin and Alameda-Pineda, Xavier and Ricci, Elisa},
14 | journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
15 | year={2022},
16 | publisher={IEEE}
17 | }
18 | ```
19 |
20 |
21 |
22 |
23 | ## Results
24 |
25 | #### PASCAL VOC
26 |
27 | | Backbone | Crop Size | Setting | mIoU | Download |
28 | | :-: | :-: | :-: | :-: | :-: |
29 | | R-101-D16 | 512x512 | 15-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
30 | | R-101-D16 | 512x512 | 15-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
31 | | R-101-D16 | 512x512 | 15-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
32 | | R-101-D16 | 512x512 | 15-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
33 | | R-101-D16 | 512x512 | 10-1-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
34 | | R-101-D16 | 512x512 | 10-1-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
35 |
36 | #### ADE20k
37 |
38 | | Backbone | Crop Size | Setting | mIoU | Download |
39 | | :-: | :-: | :-: | :-: | :-: |
40 | | R-101-D16 | 512x512 | 100-50-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
41 | | R-101-D16 | 512x512 | 100-50-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
42 | | R-101-D16 | 512x512 | 100-10-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
43 | | R-101-D16 | 512x512 | 100-10-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
44 | | R-101-D16 | 512x512 | 100-5-disjoint | | [cfg]() | [modellinks-per-step]() | [log]() |
45 | | R-101-D16 | 512x512 | 100-5-overlapped | | [cfg]() | [modellinks-per-step]() | [log]() |
46 |
47 |
48 | ## More
49 |
50 | You can also download the model weights from the following sources:
51 | - BaiduNetdisk: https://pan.baidu.com/s/1e9LlD6ITuLECstFcVPaCxQ with access code **55mq**
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | recommonmark
2 | sphinx==4.5.0
3 | sphinx_markdown_tables==0.0.12
4 | sphinx_rtd_theme
--------------------------------------------------------------------------------
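Note: with these pinned dependencies, the documentation can presumably be built locally, *e.g.* (the output directory is an arbitrary choice):

```sh
pip install -r docs/requirements.txt
sphinx-build -b html docs docs/_build/html
```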
/requirements.txt:
--------------------------------------------------------------------------------
1 | -r requirements/io.txt
2 | -r requirements/misc.txt
3 | -r requirements/bricks.txt
4 | -r requirements/science.txt
5 | -r requirements/terminal.txt
--------------------------------------------------------------------------------
/requirements/bricks.txt:
--------------------------------------------------------------------------------
1 | inplace-abn
--------------------------------------------------------------------------------
/requirements/io.txt:
--------------------------------------------------------------------------------
1 | dill
2 | pillow
3 | pandas
4 | opencv-python
--------------------------------------------------------------------------------
/requirements/misc.txt:
--------------------------------------------------------------------------------
1 | cython
--------------------------------------------------------------------------------
/requirements/nn.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
--------------------------------------------------------------------------------
/requirements/science.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
--------------------------------------------------------------------------------
/requirements/terminal.txt:
--------------------------------------------------------------------------------
1 | tqdm
2 | argparse
--------------------------------------------------------------------------------
/scripts/collect_env.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Scripts for collecting develop environment
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | from csseg.modules import EnvironmentCollector
8 |
9 |
10 | '''run'''
11 | if __name__ == '__main__':
12 | for key, value in EnvironmentCollector().collectenv().items():
13 | print(f'{key}: {value}')
--------------------------------------------------------------------------------
/scripts/dist_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | THIS_DIR="$( cd "$( dirname "$0" )" && pwd )"
3 | cd $THIS_DIR
4 | cd ..
5 |
6 | NGPUS=$1
7 | CFGFILEPATH=$2
8 | CKPTSPATH=$3
9 | PORT=${PORT:-$(($RANDOM+6666))}
10 | NNODES=${NNODES:-1}
11 | NODERANK=${NODERANK:-0}
12 | MASTERADDR=${MASTERADDR:-"127.0.0.1"}
13 | TORCHVERSION=`python -c 'import torch; print(torch.__version__)'`
14 |
15 | if [[ $TORCHVERSION == "2."* ]]; then
16 | torchrun --nnodes=$NNODES --nproc_per_node=$NGPUS --master_addr=$MASTERADDR --master_port=$PORT --node_rank=$NODERANK \
17 | csseg/test.py --nproc_per_node $NGPUS --cfgfilepath $CFGFILEPATH --ckptspath $CKPTSPATH ${@:4}
18 | else
19 | python -m torch.distributed.launch \
20 | --nnodes=$NNODES \
21 | --node_rank=$NODERANK \
22 | --master_addr=$MASTERADDR \
23 | --nproc_per_node=$NGPUS \
24 | --master_port=$PORT \
25 | csseg/test.py --nproc_per_node $NGPUS \
26 | --cfgfilepath $CFGFILEPATH \
27 | --ckptspath $CKPTSPATH ${@:4}
28 | fi
--------------------------------------------------------------------------------
/scripts/dist_train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | THIS_DIR="$( cd "$( dirname "$0" )" && pwd )"
3 | cd $THIS_DIR
4 | cd ..
5 |
6 | NGPUS=$1
7 | CFGFILEPATH=$2
8 | PORT=${PORT:-$(($RANDOM+8888))}
9 | NNODES=${NNODES:-1}
10 | NODERANK=${NODERANK:-0}
11 | MASTERADDR=${MASTERADDR:-"127.0.0.1"}
12 | TORCHVERSION=`python -c 'import torch; print(torch.__version__)'`
13 |
14 | if [[ $TORCHVERSION == "2."* ]]; then
15 | torchrun --nnodes=$NNODES --nproc_per_node=$NGPUS --master_addr=$MASTERADDR --master_port=$PORT --node_rank=$NODERANK \
16 | csseg/train.py --nproc_per_node $NGPUS --cfgfilepath $CFGFILEPATH ${@:3}
17 | else
18 | python -m torch.distributed.launch \
19 | --nnodes=$NNODES \
20 | --node_rank=$NODERANK \
21 | --master_addr=$MASTERADDR \
22 | --nproc_per_node=$NGPUS \
23 | --master_port=$PORT \
24 | csseg/train.py --nproc_per_node $NGPUS \
25 | --cfgfilepath $CFGFILEPATH ${@:3}
26 | fi
--------------------------------------------------------------------------------
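Note: as the defaults above show, `dist_train.sh` can be steered through environment variables for multi-node setups; a sketch for node 0 of a two-node run (the address, port and config path are placeholders):

```sh
# Launch node 0 of a 2-node job; run the same command with NODERANK=1 on the other node.
NNODES=2 NODERANK=0 MASTERADDR="10.0.0.1" PORT=29500 \
bash scripts/dist_train.sh 4 csseg/configs/mib/mib_r101iabnd16_aspp_512x512_vocaug15-1_overlap.py
```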
/scripts/prepare_dataset.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | help() {
4 | echo "------------------------------------------------------------------------------------"
5 | echo "$0 - prepare datasets for training and inference of SSSegmentation."
6 | echo "------------------------------------------------------------------------------------"
7 | echo "Usage:"
8 | echo " bash $0 "
9 | echo "Options:"
10 | echo " : The dataset name you want to download and prepare."
11 | echo " The keyword should be in ['ade20k', 'pascalvoc', 'cityscapes']"
12 | echo " <-h> or <--help>: Show this message."
13 | echo "Examples:"
14 | echo " If you want to fetch ADE20k dataset, you can run 'bash $0 ade20k'."
15 | echo " If you want to fetch Cityscapes dataset, you can run 'bash $0 cityscapes'."
16 | echo "------------------------------------------------------------------------------------"
17 | exit 0
18 | }
19 |
20 | DATASET=$1
21 | OPT="$(echo $DATASET | tr '[:upper:]' '[:lower:]')"
22 | if [ "$OPT" == "-h" ] || [ "$OPT" == "--help" ] || [ "$OPT" == "" ]; then
23 | help
24 | elif [[ "$OPT" == "ade20k" ]]; then
25 | {
26 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/ADE20k.tar.gz
27 | tar zxvf ADE20k.tar.gz
28 | } || {
29 | echo "Fail to download ${DATASET} dataset."
30 | exit 0
31 | }
32 | rm -rf ADE20k.tar.gz
33 | elif [[ "$OPT" == "pascalvoc" ]]; then
34 | {
35 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/VOCdevkit.zip.001
36 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/VOCdevkit.zip.002
37 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/VOCdevkit.zip.003
38 | 7z x VOCdevkit.zip.001
39 | } || {
40 | echo "Fail to download ${DATASET} dataset."
41 | exit 0
42 | }
43 | rm -rf VOCdevkit.zip.001 VOCdevkit.zip.002 VOCdevkit.zip.003
44 | elif [[ "$OPT" == "cityscapes" ]]; then
45 | {
46 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.zip
47 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z01
48 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z02
49 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z03
50 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z04
51 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z05
52 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z06
53 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z07
54 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z08
55 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z09
56 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z10
57 | wget https://github.com/SegmentationBLWX/modelstore/releases/download/ssseg_datasets/CityScapes.z11
58 | 7z x CityScapes.zip
59 | } || {
60 | echo "Fail to download ${DATASET} dataset."
61 | exit 0
62 | }
63 | rm -rf CityScapes.zip CityScapes.z01 CityScapes.z02 CityScapes.z03 CityScapes.z04 \
64 |        CityScapes.z05 CityScapes.z06 CityScapes.z07 CityScapes.z08 \
65 |        CityScapes.z09 CityScapes.z10 CityScapes.z11
66 | else
67 | echo "Preparing dataset ${DATASET} is not supported in this script now."
68 | exit 0
69 | fi
70 | echo "Download ${DATASET} done."
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | '''
2 | Function:
3 | Setup cssegmentation
4 | Author:
5 | Zhenchao Jin
6 | '''
7 | import os
8 | import re
9 | import sys
10 | import csseg
11 | from setuptools import setup, find_packages
12 |
13 |
14 | '''readme'''
15 | with open('README.md', 'r', encoding='utf-8') as f:
16 | long_description = f.read()
17 |
18 |
19 | '''parserequirements'''
20 | def parserequirements(fname='requirements.txt', with_version=True):
21 | require_fpath = fname
22 | '''parseline'''
23 | def parseline(line):
24 | if line.startswith('-r '):
25 | target = line.split(' ')[1]
26 | for info in parserequirefile(target):
27 | yield info
28 | else:
29 | info = {'line': line}
30 | if line.startswith('-e '):
31 | info['package'] = line.split('#egg=')[1]
32 | elif '@git+' in line:
33 | info['package'] = line
34 | else:
35 | pat = '(' + '|'.join(['>=', '==', '>']) + ')'
36 | parts = re.split(pat, line, maxsplit=1)
37 | parts = [p.strip() for p in parts]
38 | info['package'] = parts[0]
39 | if len(parts) > 1:
40 | op, rest = parts[1:]
41 | if ';' in rest:
42 | version, platform_deps = map(str.strip, rest.split(';'))
43 | info['platform_deps'] = platform_deps
44 | else:
45 | version = rest
46 | info['version'] = (op, version)
47 | yield info
48 | '''parserequirefile'''
49 | def parserequirefile(fpath):
50 | with open(fpath, 'r') as f:
51 | for line in f.readlines():
52 | line = line.strip()
53 | if line and not line.startswith('#'):
54 | for info in parseline(line):
55 | yield info
56 | '''genpackagesitems'''
57 | def genpackagesitems():
58 | if os.path.exists(require_fpath):
59 | for info in parserequirefile(require_fpath):
60 | parts = [info['package']]
61 | if with_version and 'version' in info:
62 | parts.extend(info['version'])
63 | if not sys.version.startswith('3.4'):
64 | platform_deps = info.get('platform_deps')
65 | if platform_deps is not None:
66 | parts.append(';' + platform_deps)
67 | item = ''.join(parts)
68 | yield item
69 | # parse and return
70 | packages = list(genpackagesitems())
71 | return packages
72 |
73 |
74 | '''setup'''
75 | setup(
76 | name=csseg.__title__,
77 | version=csseg.__version__,
78 | description=csseg.__description__,
79 | long_description=long_description,
80 | long_description_content_type='text/markdown',
81 | classifiers=[
82 | 'License :: OSI Approved :: Apache Software License',
83 | 'Programming Language :: Python :: 3',
84 | 'Intended Audience :: Developers',
85 | 'Operating System :: OS Independent'
86 | ],
87 | author=csseg.__author__,
88 | url=csseg.__url__,
89 | author_email=csseg.__email__,
90 | license=csseg.__license__,
91 | include_package_data=True,
92 | install_requires=parserequirements('requirements.txt'),
93 | zip_safe=True,
94 | packages=find_packages()
95 | )
--------------------------------------------------------------------------------
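Note: given this `setup.py`, the toolbox can presumably be installed from source in the usual way, *e.g.*,

```sh
# regular install
pip install .
# or editable/develop install
pip install -e .
```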