├── .DS_Store
├── LICENSE
├── README.md
├── argparser.py
├── average_csv.py
├── data
│   ├── download_ade.sh
│   ├── download_voc.sh
│   └── voc
│       ├── 10-1-ov
│       │   ├── test_on_val-0.npy
│       │   ├── test_on_val-1.npy
│       │   ├── test_on_val-10.npy
│       │   ├── test_on_val-2.npy
│       │   ├── test_on_val-3.npy
│       │   ├── test_on_val-4.npy
│       │   ├── test_on_val-5.npy
│       │   ├── test_on_val-6.npy
│       │   ├── test_on_val-7.npy
│       │   ├── test_on_val-8.npy
│       │   ├── test_on_val-9.npy
│       │   ├── train-0.npy
│       │   ├── train-1.npy
│       │   ├── train-10.npy
│       │   ├── train-2.npy
│       │   ├── train-3.npy
│       │   ├── train-4.npy
│       │   ├── train-5.npy
│       │   ├── train-6.npy
│       │   ├── train-7.npy
│       │   ├── train-8.npy
│       │   ├── train-9.npy
│       │   ├── val-0.npy
│       │   ├── val-1.npy
│       │   ├── val-10.npy
│       │   ├── val-2.npy
│       │   ├── val-3.npy
│       │   ├── val-4.npy
│       │   ├── val-5.npy
│       │   ├── val-6.npy
│       │   ├── val-7.npy
│       │   ├── val-8.npy
│       │   └── val-9.npy
│       ├── 10-1
│       │   ├── test_on_val-0.npy
│       │   ├── test_on_val-1.npy
│       │   ├── test_on_val-10.npy
│       │   ├── test_on_val-2.npy
│       │   ├── test_on_val-3.npy
│       │   ├── test_on_val-4.npy
│       │   ├── test_on_val-5.npy
│       │   ├── test_on_val-6.npy
│       │   ├── test_on_val-7.npy
│       │   ├── test_on_val-8.npy
│       │   ├── test_on_val-9.npy
│       │   ├── train-0.npy
│       │   ├── train-1.npy
│       │   ├── train-10.npy
│       │   ├── train-2.npy
│       │   ├── train-3.npy
│       │   ├── train-4.npy
│       │   ├── train-5.npy
│       │   ├── train-6.npy
│       │   ├── train-7.npy
│       │   ├── train-8.npy
│       │   ├── train-9.npy
│       │   ├── val-0.npy
│       │   ├── val-1.npy
│       │   ├── val-10.npy
│       │   ├── val-2.npy
│       │   ├── val-3.npy
│       │   ├── val-4.npy
│       │   ├── val-5.npy
│       │   ├── val-6.npy
│       │   ├── val-7.npy
│       │   ├── val-8.npy
│       │   └── val-9.npy
│       ├── 15-5s-ov
│       │   ├── test_on_val-0.npy
│       │   ├── test_on_val-1.npy
│       │   ├── test_on_val-2.npy
│       │   ├── test_on_val-3.npy
│       │   ├── test_on_val-4.npy
│       │   ├── test_on_val-5.npy
│       │   ├── train-0.npy
│       │   ├── train-1.npy
│       │   ├── train-2.npy
│       │   ├── train-3.npy
│       │   ├── train-4.npy
│       │   ├── train-5.npy
│       │   ├── val-0.npy
│       │   ├── val-1.npy
│       │   ├── val-2.npy
│       │   ├── val-3.npy
│       │   ├── val-4.npy
│       │   └── val-5.npy
│       └── 15-5s
│           ├── test_on_val-0.npy
│           ├── test_on_val-1.npy
│           ├── test_on_val-2.npy
│           ├── test_on_val-3.npy
│           ├── test_on_val-4.npy
│           ├── test_on_val-5.npy
│           ├── train-0.npy
│           ├── train-1.npy
│           ├── train-2.npy
│           ├── train-3.npy
│           ├── train-4.npy
│           ├── train-5.npy
│           ├── val-0.npy
│           ├── val-1.npy
│           ├── val-2.npy
│           ├── val-3.npy
│           ├── val-4.npy
│           └── val-5.npy
├── dataset
│   ├── __init__.py
│   ├── ade.py
│   ├── cityscape.py
│   ├── cityscapes_domain.py
│   ├── transform.py
│   ├── utils.py
│   └── voc.py
├── environment.yaml
├── images
│   ├── plop_viz.png
│   └── plop_voc.png
├── logs
│   └── 15-5s-voc
│       ├── RCIL_disjoint
│       │   ├── events.out.tfevents.1666002308.SSADL3860
│       │   ├── events.out.tfevents.1666007523.SSADL3860
│       │   ├── events.out.tfevents.1666008515.SSADL3860
│       │   ├── events.out.tfevents.1666009336.SSADL3860
│       │   ├── events.out.tfevents.1666010448.SSADL3860
│       │   ├── events.out.tfevents.1666011746.SSADL3860
│       │   └── main.txt
│       └── RCIL_overlap
│           ├── events.out.tfevents.1665987569.SSADL3860
│           ├── events.out.tfevents.1665987736.SSADL3860
│           ├── events.out.tfevents.1665993707.SSADL3860
│           ├── events.out.tfevents.1665995034.SSADL3860
│           ├── events.out.tfevents.1665995876.SSADL3860
│           ├── events.out.tfevents.1665997203.SSADL3860
│           ├── events.out.tfevents.1665998541.SSADL3860
│           └── main.txt
├── metrics
│   ├── __init__.py
│   └── stream_metrics.py
├── models
│   ├── __init__.py
│   ├── resnet.py
│   └── util.py
├── modules
│   ├── __init__.py
│   ├── deeplab.py
│   ├── misc.py
│   └── residual.py
├── results
│   ├── 2022-08-26_voc_10-1_RCIL_overlap.csv
│   ├── 2022-08-26_voc_15-5s_RCIL_overlap.csv
│   ├── 2022-08-28_voc_10-1_RCIL_disjoint.csv
│   ├── 2022-08-28_voc_15-5s_RCIL_disjoint.csv
│   ├── 2022-10-04_ade_100-50_RCIL.csv
│   ├── 2022-10-04_ade_100-5_RCIL.csv
│   ├── 2022-10-17_voc_15-5s_RCIL_disjoint.csv
│   └── 2022-10-17_voc_15-5s_RCIL_overlap.csv
├── run.py
├── scripts
│   ├── ade
│   │   ├── RCIL_ade_100-10.sh
│   │   ├── RCIL_ade_100-5.sh
│   │   └── RCIL_ade_100-50.sh
│   ├── cityscapeClL
│   │   └── dev.sh
│   ├── cityscapesDomain
│   │   ├── RCIL_cityscapes_1-1.sh
│   │   ├── RCIL_cityscapes_11-1.sh
│   │   └── RCIL_cityscapes_11-5.sh
│   └── voc
│       ├── RCIL_10-1-disjoint.sh
│       ├── RCIL_10-1-overlap.sh
│       ├── RCIL_15-1-disjoint.sh
│       └── RCIL_15-1-overlap.sh
├── segmentation_module.py
├── tasks.py
├── train.py
└── utils
    ├── __init__.py
    ├── logger.py
    ├── loss.py
    ├── regularizer.py
    ├── scheduler.py
    └── utils.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/.DS_Store
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Jiawen-Schuy1er-Xiao
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RCIL
2 | **[CVPR2022] Representation Compensation Networks for Continual Semantic Segmentation**
3 | Chang-Bin Zhang<sup>1</sup>, Jia-Wen Xiao<sup>1</sup>, Xialei Liu<sup>1</sup>, Ying-Cong Chen<sup>2</sup>, Ming-Ming Cheng<sup>1</sup>
4 | <sup>1</sup> College of Computer Science, Nankai University
5 | <sup>2</sup> The Hong Kong University of Science and Technology
6 |
7 |
8 | [arXiv](https://arxiv.org/abs/2203.05402)
9 |
10 |
11 | [SOTA: overlapped 100-50 on ADE20K](https://paperswithcode.com/sota/overlapped-100-50-on-ade20k?p=representation-compensation-networks-for)
12 | [SOTA: overlapped 100-5 on ADE20K](https://paperswithcode.com/sota/overlapped-100-5-on-ade20k?p=representation-compensation-networks-for)
13 | [SOTA: overlapped 50-50 on ADE20K](https://paperswithcode.com/sota/overlapped-50-50-on-ade20k?p=representation-compensation-networks-for)
14 | [SOTA: overlapped 100-10 on ADE20K](https://paperswithcode.com/sota/overlapped-100-10-on-ade20k?p=representation-compensation-networks-for)
15 | [SOTA: domain 1-1 on Cityscapes](https://paperswithcode.com/sota/domain-1-1-on-cityscapes?p=representation-compensation-networks-for)
16 | [SOTA: domain 11-1 on Cityscapes](https://paperswithcode.com/sota/domain-11-1-on-cityscapes?p=representation-compensation-networks-for)
17 | [SOTA: domain 11-5 on Cityscapes](https://paperswithcode.com/sota/domain-11-5-on-cityscapes?p=representation-compensation-networks-for)
18 | [SOTA: overlapped 15-1 on PASCAL VOC 2012](https://paperswithcode.com/sota/overlapped-15-1-on-pascal-voc-2012?p=representation-compensation-networks-for)
19 | [SOTA: disjoint 15-1 on PASCAL VOC 2012](https://paperswithcode.com/sota/disjoint-15-1-on-pascal-voc-2012?p=representation-compensation-networks-for)
20 | [SOTA: disjoint 10-1 on PASCAL VOC 2012](https://paperswithcode.com/sota/disjoint-10-1-on-pascal-voc-2012?p=representation-compensation-networks-for)
21 | [SOTA: disjoint 15-5 on PASCAL VOC 2012](https://paperswithcode.com/sota/disjoint-15-5-on-pascal-voc-2012?p=representation-compensation-networks-for)
22 | [SOTA: overlapped 10-1 on PASCAL VOC 2012](https://paperswithcode.com/sota/overlapped-10-1-on-pascal-voc-2012?p=representation-compensation-networks-for)
23 | [SOTA: overlapped 15-5 on PASCAL VOC 2012](https://paperswithcode.com/sota/overlapped-15-5-on-pascal-voc-2012?p=representation-compensation-networks-for)
24 |
25 | ## News
26 | - **Our work can be reproduced with recent versions of PyTorch after fixing [the issue](https://github.com/mapillary/inplace_abn/issues/219) caused by InplaceABNSync. Please refer to [our modification](https://github.com/zhangchbin/RCIL/blob/main/segmentation_module.py#L21). 😃😃😃 Thanks to Zhengyuan and Jiawen.**
27 | - Another work of ours, [[EWF]](https://github.com/schuy1er/EWF_official), has been accepted to CVPR 2023.
28 | - Our method RCIL has been re-implemented in the 🔥🔥🔥 [[CSSegmentation]](https://github.com/SegmentationBLWX/cssegmentation) framework.
29 |
30 |
31 | ## Method
32 |
33 |
34 | ## Update
35 | - ~~Support CIL on cityscapes dataset~~
36 | - init code for Classification
37 | - ~~We have fixed bugs in the repository~~
38 | - ~~Add training scripts for ADE20K~~
39 | - ~~09/04/2022 init code for segmentation~~
40 | - ~~09/04/2022 init readme~~
41 |
42 |
43 |
44 | ## Benchmark and Setting
45 | There are two commonly used settings, ```disjoint``` and ```overlapped```.
46 | In the ```disjoint``` setting, it is assumed that all future classes are known in advance, and the images in the current training step contain no classes from future steps. The ```overlapped``` setting allows classes from future steps to appear in the current training images. We refer to each round of training on the newly added dataset as a step. Formally, ```X-Y``` denotes the continual setting in our experiments: ```X``` is the number of classes trained in the first step, and each subsequent step adds a dataset containing ```Y``` new classes.
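As an illustration of the ```X-Y``` notation (a hypothetical helper, not code from this repository), the class IDs trained at each step can be enumerated like so:

```python
# Hypothetical sketch: enumerate the class IDs trained at each step of an
# X-Y continual setting (class 0 is the background and is never listed).
def make_steps(num_classes, x, y):
    steps = [list(range(1, x + 1))]  # the first step trains X classes
    start = x + 1
    while start <= num_classes:      # each later step adds Y new classes
        steps.append(list(range(start, min(start + y, num_classes + 1))))
        start += y
    return steps

# PASCAL VOC 2012 has 20 foreground classes, so 15-1 yields 6 steps:
# [1..15], [16], [17], [18], [19], [20]
print(make_steps(20, 15, 1))
```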
47 |
48 | The settings reported in our paper are listed below; you can also try any other custom setting.
49 | - Continual Class Segmentation:
50 | 1. PASCAL VOC 2012 dataset:
51 | - 15-5 overlapped
52 | - 15-5 disjoint
53 | - 15-1 overlapped
54 | - 15-1 disjoint
55 | - 10-1 overlapped
56 | - 10-1 disjoint
57 | 2. ADE20K dataset:
58 | - 100-50 overlapped
59 | - 100-10 overlapped
60 | - 50-50 overlapped
61 | - 100-5 overlapped
62 | - Continual Domain Segmentation:
63 | 1. Cityscapes:
64 | - 11-5
65 | - 11-1
66 | - 1-1
67 |
68 | - Extension Experiments on Continual Classification
69 | 1. ImageNet-100
70 | - 50-10
71 |
72 | ## Performance
73 | - Continual Class Segmentation on PASCAL VOC 2012
74 |
75 | | Method | Pub. | 15-5 disjoint | 15-5 overlapped | 15-1 disjoint | 15-1 overlapped | 10-1 disjoint | 10-1 overlapped | 5-3 overlapped | 5-3 disjoint |
76 | | ------ | ---------- | ------------- | --------------- | ------------- | --------------- | ------------- | --------------- | -------------- | ------------ |
77 | | LWF | TPAMI 2017 | 54.9 | 55.0 | 5.3 | 5.5 | 4.3 | 4.8 | | |
78 | | ILT | ICCVW 2019 | 58.9 | 61.3 | 7.9 | 9.2 | 5.4 | 5.5 | | |
79 | | MiB | CVPR 2020 | 65.9 | 70.0 | 39.9 | 32.2 | 6.9 | 20.1 | | |
80 | | SDR | CVPR 2021 | 67.3 | 70.1 | 48.7 | 39.5 | 14.3 | 25.1 | | |
81 | | PLOP | CVPR 2021 | 64.3 | 70.1 | 46.5 | 54.6 | 8.4 | 30.5 | | |
82 | | Ours | CVPR 2022 | 67.3 | 72.4 | 54.7 | 59.4 | 18.2 | 34.3 | 42.88 | |
83 |
84 |
85 | - Continual Class Segmentation on ADE20K
86 |
87 | | Method | Pub. | 100-50 overlapped | 100-10 overlapped | 50-50 overlapped | 100-5 overlapped |
88 | | ------ | ---------- | ----------------- | ----------------- | ---------------- | ---------------- |
89 | | ILT | ICCVW 2019 | 17.0 | 1.1 | 9.7 | 0.5 |
90 | | MiB | CVPR 2020 | 32.8 | 29.2 | 29.3 | 25.9 |
91 | | PLOP | CVPR 2021 | 32.9 | 31.6 | 30.4 | 28.7 |
92 | | Ours | CVPR 2022 | 34.5 | 32.1 | 32.5 | 29.6 |
93 |
94 |
95 | - Continual Domain Segmentation on Cityscapes
96 |
97 | | Method | Pub. | 11-5 | 11-1 | 1-1 |
98 | | ------ | ---------- | ---- | ---- | ---- |
99 | | LWF | TPAMI 2017 | 59.7 | 57.3 | 33.0 |
100 | | LWF-MC | CVPR 2017 | 58.7 | 57.0 | 31.4 |
101 | | ILT | ICCVW 2019 | 59.1 | 57.8 | 30.1 |
102 | | MiB | CVPR 2020 | 61.5 | 60.0 | 42.2 |
103 | | PLOP | CVPR 2021 | 63.5 | 62.1 | 45.2 |
104 | | Ours | CVPR 2022 | 64.3 | 63.0 | 48.9 |
105 |
106 |
107 |
108 | ## Dataset Prepare
109 | - PASCAL VOC 2012
110 | ```sh data/download_voc.sh```
111 | - ADE20K
112 | ```sh data/download_ade.sh```
113 | - Cityscapes
114 | ```sh data/download_cityscapes.sh```
115 |
116 |
117 | ## Environment
118 | 1. ```conda install --yes --file requirements.txt``` (A more recent PyTorch version should also work.)
119 | 2. Install [inplace-abn](https://github.com/mapillary/inplace_abn)
120 |
121 |
122 |
123 | ## Training
124 | 1. Download the pretrained model from [ResNet-101_iabn](https://github.com/arthurdouillard/CVPR2021_PLOP/releases/download/v1.0/resnet101_iabn_sync.pth.tar) into ```pretrained/```
125 | 2. We have prepared some training scripts in ```scripts/```. You can train the model by
126 | ```
127 | sh scripts/voc/RCIL_10-1-overlap.sh
128 | ```
129 |
130 | ## Inference
131 | You can simply modify the bash file by adding the ```--test``` flag, like
132 | ```
133 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --data xxx ... --test
134 | ```
135 |
136 |
137 |
138 |
139 |
140 |
141 | ## Reference
142 | If this work is useful to you, please cite us:
143 | ```
144 | @inproceedings{zhang2022representation,
145 | title={Representation Compensation Networks for Continual Semantic Segmentation},
146 | author={Zhang, Chang-Bin and Xiao, Jia-Wen and Liu, Xialei and Chen, Ying-Cong and Cheng, Ming-Ming},
147 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
148 | pages={7053--7064},
149 | year={2022}
150 | }
151 | ```
152 |
153 | ## Contact
154 | If you have any questions about this work, please feel free to contact us (zhangchbin ^ mail.nankai.edu.cn or zhangchbin ^ gmail.com).
155 |
156 |
157 |
158 | ## Thanks
159 | This code is heavily borrowed from [[MiB]](https://github.com/fcdl94/MiB) and [[PLOP]](https://github.com/arthurdouillard/CVPR2021_PLOP).
160 |
161 |
162 |
163 |
164 | ## Awesome Continual Segmentation
165 | This is a collection of AWESOME things about continual semantic segmentation, including papers, code, demos, etc. Feel free to open a pull request and star the repo.
166 |
167 | ### 2022
168 | - Representation Compensation Networks for Continual Semantic Segmentation [[CVPR 2022]](https://arxiv.org/abs/2203.05402) [[PyTorch]](https://github.com/zhangchbin/RCIL)
169 | - Self-training for Class-incremental Semantic Segmentation [[TNNLS 2022]](https://arxiv.org/abs/2012.03362) [PyTorch]
170 | - Uncertainty-aware Contrastive Distillation for Incremental Semantic Segmentation [[TPAMI 2022]](https://arxiv.org/pdf/2203.14098.pdf) [[PyTorch]]
171 |
172 |
173 | ### 2021
174 | - PLOP: Learning without Forgetting for Continual Semantic Segmentation [[CVPR 2021]](https://arxiv.org/abs/2011.11390) [[PyTorch]](https://github.com/arthurdouillard/CVPR2021_PLOP)
175 | - Continual Semantic Segmentation via Repulsion-Attraction of Sparse and Disentangled Latent Representations [[CVPR2021]](https://arxiv.org/abs/2103.06342) [[PyTorch]](https://github.com/LTTM/SDR)
176 | - An EM Framework for Online Incremental Learning of Semantic Segmentation [[ACM MM 2021]](https://arxiv.org/pdf/2108.03613.pdf) [[PyTorch]](https://github.com/Rhyssiyan/Online.Inc.Seg-Pytorch)
177 | - SSUL: Semantic Segmentation with Unknown Label for Exemplar-based Class-Incremental Learning [[NeurIPS 2021]](https://proceedings.neurips.cc/paper/2021/file/5a9542c773018268fc6271f7afeea969-Paper.pdf) [[PyTorch]](https://github.com/clovaai/SSUL)
178 |
179 |
180 | ### 2020
181 | - Modeling the Background for Incremental Learning in Semantic Segmentation [[CVPR 2020]](https://arxiv.org/abs/2002.00718) [[PyTorch]](https://github.com/fcdl94/MiB)
182 |
183 | ### 2019
184 | - Incremental Learning Techniques for Semantic Segmentation [[ICCV Workshop 2019]](https://arxiv.org/abs/1907.13372) [[PyTorch]](https://github.com/LTTM/IL-SemSegm)
185 |
186 |
187 |
188 |
189 |
190 |
--------------------------------------------------------------------------------
/average_csv.py:
--------------------------------------------------------------------------------
1 | """Compute summary mIoU statistics from a results CSV.
2 |
3 | Each row is one continual-learning step: column 0 is the step name, the
4 | last column is the overall mean IoU, and the columns in between are
5 | per-class IoUs, with 'x' marking classes that have not been learned yet.
6 | """
7 | import sys
8 |
9 | path = sys.argv[1]
10 |
11 | c, s = 0, 0.  # number of rows and running sum of the overall mIoU column
12 |
13 | avg_first = 0.  # mean IoU over the classes of the first step
14 | avg_last = 0.   # mean IoU over the classes added in later steps
15 | col_index = -1  # first column holding an 'x' in the first row
16 |
17 | with open(path, 'r') as f:
18 |     for line_index, line in enumerate(f):
19 |         split = line.split(',')
20 |         a = float(split[-1])  # overall mean IoU of this step
21 |         s += a
22 |         c += 1
23 |         step = split[0]
24 |
25 |         if line_index == 0:
26 |             # In the first row, find where the not-yet-learned classes
27 |             # ('x' entries) begin: earlier columns belong to the first step.
28 |             for col_index in range(1, len(split)):
29 |                 if split[col_index] == "x":
30 |                     break
31 |         elif col_index > -1:
32 |             # Recomputed on every row; only the last row's values are printed.
33 |             if len(split[1:col_index]) == 0:
34 |                 avg_first = 0.
35 |             else:
36 |                 avg_first = sum(float(i) for i in split[1:col_index]
37 |                                 if i not in ('x', 'X')) / len(split[1:col_index])
38 |             if len(split[col_index:-1]) == 0:
39 |                 avg_last = 0.
40 |             else:
41 |                 avg_last = sum(float(i) for i in split[col_index:-1]
42 |                                if i not in ('x', 'X')) / len(split[col_index:-1])
43 |
44 | print(f"Last Step: {step}")
45 | print(f"Final Mean IoU {round(100 * a, 2)}")
46 | print(f"Average Mean IoU {round(100 * s / c, 2)}")
47 | print(f"Mean IoU first {round(100 * avg_first, 2)}")
48 | print(f"Mean IoU last {round(100 * avg_last, 2)}")
49 |
--------------------------------------------------------------------------------
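A toy example of the CSV layout that `average_csv.py` expects, inferred from its parsing logic above; the file name `toy.csv` and all numbers are made up:

```python
# Toy input for average_csv.py: column 0 is the step name, the last column
# is the overall mIoU, and 'x' marks a class not learned yet. Here two
# classes are trained in step 0 and a third is added in step 1.
rows = [
    "0,0.90,0.80,x,0.85",
    "1,0.88,0.78,0.40,0.69",
]
with open("toy.csv", "w") as f:
    f.write("\n".join(rows) + "\n")
# `python average_csv.py toy.csv` would then report the final overall mIoU
# (69.0), the per-step average (77.0), and first/last class means (83.0/40.0).
```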
/data/download_ade.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # use this script in the destination folder.
4 |
5 | wget http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip
6 | unzip ADEChallengeData2016.zip
--------------------------------------------------------------------------------
/data/download_voc.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # use this script in the destination folder.
4 |
5 | wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
6 | tar -xf VOCtrainval_11-May-2012.tar
7 | mkdir PascalVOC12
8 | mv VOCdevkit/VOC2012/* PascalVOC12
9 | cd PascalVOC12
10 | wget http://cs.jhu.edu/~cxliu/data/SegmentationClassAug.zip
11 | wget http://cs.jhu.edu/~cxliu/data/SegmentationClassAug_Visualization.zip
12 | wget http://cs.jhu.edu/~cxliu/data/list.zip
13 | unzip SegmentationClassAug.zip
14 | unzip SegmentationClassAug_Visualization.zip
15 | unzip list.zip
16 | cp -r list splits
17 |
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-0.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-1.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-10.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-10.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-2.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-3.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-4.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-5.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-6.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-6.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-7.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-7.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-8.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-8.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/test_on_val-9.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/test_on_val-9.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-0.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-1.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-10.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-10.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-2.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-3.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-4.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-5.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-6.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-6.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-7.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-7.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-8.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-8.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/train-9.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/train-9.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-0.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-1.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-10.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-10.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-2.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-3.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-4.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-5.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-6.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-6.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-7.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-7.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-8.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-8.npy
--------------------------------------------------------------------------------
/data/voc/10-1-ov/val-9.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1-ov/val-9.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-0.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-1.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-10.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-10.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-2.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-3.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-4.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-5.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-6.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-6.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-7.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-7.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-8.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-8.npy
--------------------------------------------------------------------------------
/data/voc/10-1/test_on_val-9.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/test_on_val-9.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-0.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-1.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-10.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-10.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-2.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-3.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-4.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-5.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-6.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-6.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-7.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-7.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-8.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-8.npy
--------------------------------------------------------------------------------
/data/voc/10-1/train-9.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/train-9.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-0.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-1.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-10.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-10.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-2.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-3.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-4.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-5.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-6.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-6.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-7.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-7.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-8.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-8.npy
--------------------------------------------------------------------------------
/data/voc/10-1/val-9.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/10-1/val-9.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/test_on_val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/test_on_val-0.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/test_on_val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/test_on_val-1.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/test_on_val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/test_on_val-2.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/test_on_val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/test_on_val-3.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/test_on_val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/test_on_val-4.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/test_on_val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/test_on_val-5.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/train-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/train-0.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/train-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/train-1.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/train-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/train-2.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/train-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/train-3.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/train-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/train-4.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/train-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/train-5.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/val-0.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/val-1.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/val-2.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/val-3.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/val-4.npy
--------------------------------------------------------------------------------
/data/voc/15-5s-ov/val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s-ov/val-5.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/test_on_val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/test_on_val-0.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/test_on_val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/test_on_val-1.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/test_on_val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/test_on_val-2.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/test_on_val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/test_on_val-3.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/test_on_val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/test_on_val-4.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/test_on_val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/test_on_val-5.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/train-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/train-0.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/train-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/train-1.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/train-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/train-2.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/train-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/train-3.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/train-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/train-4.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/train-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/train-5.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/val-0.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/val-0.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/val-1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/val-1.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/val-2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/val-2.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/val-3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/val-3.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/val-4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/val-4.npy
--------------------------------------------------------------------------------
/data/voc/15-5s/val-5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/data/voc/15-5s/val-5.npy
--------------------------------------------------------------------------------
/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from .ade import AdeSegmentation, AdeSegmentationIncremental
2 | from .cityscapes_domain import (CityscapesSegmentationDomain,
3 | CityscapesSegmentationIncrementalDomain)
4 | from .voc import VOCSegmentation, VOCSegmentationIncremental
5 | from .cityscape import CityscapeSegmentationIncremental
6 |
--------------------------------------------------------------------------------
/dataset/ade.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 |
4 | import numpy as np
5 | import torch.utils.data as data
6 | import torchvision as tv
7 | from PIL import Image
8 | from torch import distributed
9 |
10 | from .utils import Subset, filter_images, group_images
11 |
12 | classes = [
13 | "void", "wall", "building", "sky", "floor", "tree", "ceiling", "road", "bed ", "windowpane",
14 | "grass", "cabinet", "sidewalk", "person", "earth", "door", "table", "mountain", "plant",
15 | "curtain", "chair", "car", "water", "painting", "sofa", "shelf", "house", "sea", "mirror",
16 | "rug", "field", "armchair", "seat", "fence", "desk", "rock", "wardrobe", "lamp", "bathtub",
17 | "railing", "cushion", "base", "box", "column", "signboard", "chest of drawers", "counter",
18 | "sand", "sink", "skyscraper", "fireplace", "refrigerator", "grandstand", "path", "stairs",
19 | "runway", "case", "pool table", "pillow", "screen door", "stairway", "river", "bridge",
20 | "bookcase", "blind", "coffee table", "toilet", "flower", "book", "hill", "bench", "countertop",
21 | "stove", "palm", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine",
22 | "hovel", "bus", "towel", "light", "truck", "tower", "chandelier", "awning", "streetlight",
23 | "booth", "television receiver", "airplane", "dirt track", "apparel", "pole", "land",
24 | "bannister", "escalator", "ottoman", "bottle", "buffet", "poster", "stage", "van", "ship",
25 | "fountain", "conveyer belt", "canopy", "washer", "plaything", "swimming pool", "stool",
26 | "barrel", "basket", "waterfall", "tent", "bag", "minibike", "cradle", "oven", "ball", "food",
27 | "step", "tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher",
28 | "screen", "blanket", "sculpture", "hood", "sconce", "vase", "traffic light", "tray", "ashcan",
29 | "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator",
30 | "glass", "clock", "flag"
31 | ]
32 |
33 |
34 | class AdeSegmentation(data.Dataset):
35 |
36 | def __init__(self, root, train=True, transform=None):
37 |
38 | root = os.path.expanduser(root)
39 | base_dir = "ADEChallengeData2016"
40 | ade_root = os.path.join(root, base_dir)
41 | if train:
42 | split = 'training'
43 | else:
44 | split = 'validation'
45 | annotation_folder = os.path.join(ade_root, 'annotations', split)
46 | image_folder = os.path.join(ade_root, 'images', split)
47 |
48 | self.images = []
49 | fnames = sorted(os.listdir(image_folder))
50 | self.images = [
51 | (os.path.join(image_folder, x), os.path.join(annotation_folder, x[:-3] + "png"))
52 | for x in fnames
53 | ]
54 |
55 | self.transform = transform
56 |
57 | def __getitem__(self, index):
58 | """
59 | Args:
60 | index (int): Index
61 | Returns:
62 | tuple: (image, target) where target is the image segmentation.
63 | """
64 | img = Image.open(self.images[index][0]).convert('RGB')
65 | target = Image.open(self.images[index][1])
66 |
67 | if self.transform is not None:
68 | img, target = self.transform(img, target)
69 |
70 | return img, target
71 |
72 | def __len__(self):
73 | return len(self.images)
74 |
75 |
76 | class AdeSegmentationIncremental(data.Dataset):
77 |
78 | def __init__(
79 | self,
80 | root,
81 | train=True,
82 | transform=None,
83 | labels=None,
84 | labels_old=None,
85 | idxs_path=None,
86 | masking=True,
87 | overlap=True,
88 | data_masking="current",
89 | ignore_test_bg=False,
90 | **kwargs
91 | ):
92 |
93 | full_data = AdeSegmentation(root, train)
94 |
95 | self.labels = []
96 | self.labels_old = []
97 |
98 | if labels is not None:
99 | # store the labels
100 | labels_old = labels_old if labels_old is not None else []
101 |
102 | self.__strip_zero(labels)
103 | self.__strip_zero(labels_old)
104 |
105 | assert not any(
106 | l in labels_old for l in labels
107 | ), "labels and labels_old must be disjoint sets"
108 |
109 | self.labels = labels
110 | self.labels_old = labels_old
111 |
112 | self.order = [0] + labels_old + labels
113 |
114 | # take index of images with at least one class in labels and all classes in labels+labels_old+[255]
115 | if idxs_path is not None and os.path.exists(idxs_path):
116 | idxs = np.load(idxs_path).tolist()
117 | else:
118 | idxs = filter_images(full_data, labels, labels_old, overlap=overlap)
119 | if idxs_path is not None and distributed.get_rank() == 0:
120 | np.save(idxs_path, np.array(idxs, dtype=int))
121 |
122 | #if train:
123 | # masking_value = 0
124 | #else:
125 | # masking_value = 255
126 |
127 | #self.inverted_order = {label: self.order.index(label) for label in self.order}
128 | #self.inverted_order[0] = masking_value
129 |
130 | self.inverted_order = {label: self.order.index(label) for label in self.order}
131 | if ignore_test_bg:
132 | masking_value = 255
133 | self.inverted_order[0] = masking_value
134 | else:
135 | masking_value = 0 # Future classes will be considered as background.
136 | self.inverted_order[255] = 255
137 |
138 | reorder_transform = tv.transforms.Lambda(
139 | lambda t: t.apply_(
140 | lambda x: self.inverted_order[x] if x in self.inverted_order else masking_value
141 | )
142 | )
143 |
144 | if masking:
145 | target_transform = tv.transforms.Lambda(
146 | lambda t: t.
147 | apply_(lambda x: self.inverted_order[x] if x in self.labels else masking_value)
148 | )
149 | else:
150 | target_transform = reorder_transform
151 |
152 | # make the subset of the dataset
153 | self.dataset = Subset(full_data, idxs, transform, target_transform)
154 | else:
155 | self.dataset = full_data
156 |
157 | def __getitem__(self, index):
158 | """
159 | Args:
160 | index (int): Index
161 | Returns:
162 | tuple: (image, target) where target is the image segmentation.
163 | """
164 |
165 | return self.dataset[index]
166 |
167 | @staticmethod
168 | def __strip_zero(labels):
169 | while 0 in labels:
170 | labels.remove(0)
171 |
172 | def __len__(self):
173 | return len(self.dataset)
174 |
--------------------------------------------------------------------------------
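A minimal sketch of how `AdeSegmentationIncremental` might be instantiated for the first two steps of an ADE20K 100-50 run. The paths are illustrative; a joint (image, mask) transform that converts the mask to a torch tensor must be supplied in practice (see `dataset/transform.py`), and building the index files from scratch assumes `torch.distributed` is initialized:

```python
# Hypothetical usage; parameter values are inferred from the constructor
# above, not taken from the repository's documentation.
from dataset.ade import AdeSegmentationIncremental

step0 = AdeSegmentationIncremental(
    root="./data",                 # directory containing ADEChallengeData2016/
    train=True,
    labels=list(range(1, 101)),    # classes 1..100 in the first step
    labels_old=[],
    idxs_path="data/ade/100-50/train-0.npy",
    overlap=True,
)
step1 = AdeSegmentationIncremental(
    root="./data",
    train=True,
    labels=list(range(101, 151)),  # 50 new classes in the second step
    labels_old=list(range(1, 101)),
    idxs_path="data/ade/100-50/train-1.npy",
    overlap=True,
)
print(len(step0), len(step1))      # number of images selected for each step
```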
/dataset/cityscape.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import copy
4 |
5 | import numpy as np
6 | import torch.utils.data as data
7 | import torchvision as tv
8 | from PIL import Image
9 | from torch import distributed
10 | import glob
11 | from .utils import Subset, filter_images, group_images
12 |
13 | # Converting the id to the train_id. Many objects have a train id at
14 | # 255 (unknown / ignored).
15 | # See there for more information:
16 | # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
17 | id_to_trainid = {
18 | 0: 255,
19 | 1: 255,
20 | 2: 255,
21 | 3: 255,
22 | 4: 255,
23 | 5: 255,
24 | 6: 255,
25 | 7: 1, # road
26 | 8: 2, # sidewalk
27 | 9: 255,
28 | 10: 255,
29 | 11: 3, # building
30 | 12: 4, # wall
31 | 13: 5, # fence
32 | 14: 255,
33 | 15: 255,
34 | 16: 255,
35 | 17: 6, # pole
36 | 18: 255,
37 | 19: 7, # traffic light
38 | 20: 8, # traffic sign
39 | 21: 9, # vegetation
40 | 22: 10, # terrain
41 | 23: 11, # sky
42 | 24: 12, # person
43 | 25: 13, # rider
44 | 26: 14, # car
45 | 27: 15, # truck
46 | 28: 16, # bus
47 | 29: 255,
48 | 30: 255,
49 | 31: 17, # train
50 | 32: 18, # motorcycle
51 | 33: 19, # bicycle
52 | -1: 255
53 | }
54 |
55 | class CityscapeSegmentation(data.Dataset):
56 | """`Pascal VOC `_ Segmentation Dataset.
57 | Args:
58 | root (string): Root directory of the VOC Dataset.
59 | image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
60 | is_aug (bool, optional): If you want to use the augmented train set or not (default is True)
61 | transform (callable, optional): A function/transform that takes in an PIL image
62 | and returns a transformed version. E.g, ``transforms.RandomCrop``
63 | """
64 |
65 | def __init__(self, root, train=True, is_aug=True, transform=None):
66 |
67 | self.root = os.path.expanduser(root)
68 | annotation_folder = os.path.join(self.root, 'gtFine')
69 | image_folder = os.path.join(self.root, 'leftImg8bit')
70 |
71 | self.train = train
72 | self.transform = transform
73 | if self.train:
74 | self.images = [ # Add train cities
75 | (
76 | path,
77 | os.path.join(
78 | annotation_folder,
79 | "train",
80 | path.split("/")[-2],
81 | path.split("/")[-1][:-15] + "gtFine_labelIds.png"
82 | )
83 | ) for path in sorted(glob.glob(os.path.join(image_folder, "train/*/*.png")))
84 | ]
85 | else:
86 | self.images = [ # Add validation cities
87 | (
88 | path,
89 | os.path.join(
90 | annotation_folder,
91 | "val",
92 | path.split("/")[-2],
93 | path.split("/")[-1][:-15] + "gtFine_labelIds.png"
94 | )
95 | ) for path in sorted(glob.glob(os.path.join(image_folder, "val/*/*.png")))
96 | ]
97 |
98 | def __getitem__(self, index):
99 | """
100 | Args:
101 | index (int): Index
102 | Returns:
103 | tuple: (image, target) where target is the image segmentation.
104 | """
105 | img = Image.open(self.images[index][0]).convert('RGB')
106 | target = Image.open(self.images[index][1])
107 | if self.transform is not None:
108 | img, target = self.transform(img, target)
109 |
110 | #for idx, map_id in id_to_trainid.items():
111 | # target[target == idx] = map_id
112 |
113 | return img, target
114 |
115 | def viz_getter(self, index):
116 | try:
117 | img = Image.open(self.images[index][0]).convert('RGB')
118 | target = Image.open(self.images[index][1])
119 | except Exception as e:
120 | raise Exception(f"Index: {index}, len: {len(self)}, message: {str(e)}")
121 |
122 | if self.transform is not None:
123 | img, target = self.transform(img, target)
124 |
125 | return img, target
126 |
127 | def __len__(self):
128 | return len(self.images)
129 |
130 |
131 | class CityscapeSegmentationIncremental(data.Dataset):
132 |
133 | def __init__(
134 | self,
135 | root,
136 | train=True,
137 | transform=None,
138 | labels=None,
139 | labels_old=None,
140 | idxs_path=None,
141 | masking=True,
142 | overlap=True,
143 | data_masking="current",
144 | test_on_val=False,
145 | **kwargs
146 | ):
147 |
148 | full_data = CityscapeSegmentation(root, train)
149 |
150 | self.labels = []
151 | self.labels_old = []
152 |
153 | # filter images
154 | cls2img = [[] for i in range(20)]
155 | for i in range(len(full_data)):
156 | cls = np.unique(np.array(full_data[i][1]))
157 | for k in range(cls.shape[0]):
158 | cls[k] = id_to_trainid[cls[k]]
159 | if cls[k] < 20:
160 | cls2img[cls[k]].append(i)
161 |
162 | cls_order = [17, 16, 15, 18, 4, 13, 5, 7, 9, 1, 2, 3, 6, 8, 10, 11, 12, 14, 19]
163 | mp_used = [0 for i in range(len(full_data))]
164 | per_cls_idx = [[] for i in range(20)]
165 |
166 | print('start select!')
167 | for cls_num in cls_order:
168 | selected = 0
169 | for kk in cls2img[cls_num]:
170 | if mp_used[kk] == 1:
171 | continue
172 | selected += 1
173 | mp_used[kk] = 1
174 | per_cls_idx[cls_num].append(kk)
175 | if cls_num != 19 and selected >= 150:
176 | break
177 |
178 | print('select done!')
179 |
180 | if labels is not None:
181 | # store the labels
182 | labels_old = labels_old if labels_old is not None else []
183 |
184 | self.__strip_zero(labels)
185 | self.__strip_zero(labels_old)
186 |
187 | assert not any(
188 | l in labels_old for l in labels
189 | ), "labels and labels_old must be disjoint sets"
190 |
191 | self.labels = [0] + labels
192 | self.labels_old = [0] + labels_old
193 |
194 | self.order = [0] + labels_old + labels
195 |
196 | # take index of images with at least one class in labels and all classes in labels+labels_old+[0,255]
197 | if idxs_path is not None and os.path.exists(idxs_path):
198 | idxs = np.load(idxs_path).tolist()
199 | else:
200 | idxs = []
201 | for cls in labels:
202 | idxs = idxs + per_cls_idx[cls]
203 |
204 | if idxs_path is not None and distributed.get_rank() == 0:
205 | np.save(idxs_path, np.array(idxs, dtype=int))
206 |
207 | if test_on_val:
208 | rnd = np.random.RandomState(1)
209 | rnd.shuffle(idxs)
210 | train_len = int(0.8 * len(idxs))
211 | if train:
212 | idxs = idxs[:train_len]
213 | else:
214 | idxs = idxs[train_len:]
215 |
216 | #if train:
217 | # masking_value = 0
218 | #else:
219 | # masking_value = 255
220 |
221 | #self.inverted_order = {label: self.order.index(label) for label in self.order}
222 | #self.inverted_order[255] = masking_value
223 |
224 | masking_value = 0 # Future classes will be considered as background.
225 | self.inverted_order = {label: self.order.index(label) for label in self.order}
226 | self.inverted_order[255] = 255
227 | self.inverted_order[-1] = 255
228 |
229 | reorder_transform = tv.transforms.Lambda(
230 | lambda t: t.apply_(
231 | lambda x: self.inverted_order[x] if x in self.inverted_order else masking_value
232 | )
233 | )
234 |
235 | if masking:
236 | if data_masking == "current":
237 | tmp_labels = self.labels + [255]
238 | elif data_masking == "current+old":
239 | tmp_labels = labels_old + self.labels + [255]
240 | elif data_masking == "all":
241 | raise NotImplementedError(
242 | f"data_masking={data_masking} not yet implemented sorry not sorry."
243 | )
244 | elif data_masking == "new":
245 | tmp_labels = self.labels
246 | masking_value = 255
247 |
248 | target_transform = tv.transforms.Lambda(
249 |                 lambda t: t.apply_(
250 |                     lambda x: self.inverted_order[x] if x in tmp_labels else masking_value)
251 | )
252 | else:
253 |             assert False  # the no-masking path is disabled; reorder_transform below is unreachable
254 | target_transform = reorder_transform
255 |
256 | # make the subset of the dataset
257 | self.dataset = Subset(full_data, idxs, transform, target_transform, id_to_trainid)
258 | else:
259 |             self.dataset = full_data
260 |
261 | def __getitem__(self, index):
262 | """
263 | Args:
264 | index (int): Index
265 | Returns:
266 | tuple: (image, target) where target is the image segmentation.
267 | """
268 |
269 | return self.dataset[index]
270 |
271 | def viz_getter(self, index):
272 | return self.dataset.viz_getter(index)
273 |
274 | def __len__(self):
275 | return len(self.dataset)
276 |
277 | @staticmethod
278 | def __strip_zero(labels):
279 | while 0 in labels:
280 | labels.remove(0)
281 |
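The commented-out loop in `CityscapeSegmentation.__getitem__` hints at where the raw `gtFine_labelIds` values are meant to be converted; with the `id_to_trainid` table above the conversion is a one-liner on a label tensor (illustrative only):

    import torch

    target = torch.tensor([7, 8, 26, 0, -1])        # raw Cityscapes label ids
    target.apply_(lambda x: id_to_trainid.get(x, 255))
    print(target)                                   # tensor([  1,   2,  14, 255, 255])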
--------------------------------------------------------------------------------
/dataset/cityscapes_domain.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import glob
3 | import os
4 |
5 | import numpy as np
6 | import torch.utils.data as data
7 | import torchvision as tv
8 | from PIL import Image
9 | from torch import distributed
10 |
11 | from .utils import Subset, group_images
12 |
13 |
14 | # Converting the id to the train_id. Many objects have a train id at
15 | # 255 (unknown / ignored).
16 | # See there for more information:
17 | # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
18 | id_to_trainid = {
19 | 0: 255,
20 | 1: 255,
21 | 2: 255,
22 | 3: 255,
23 | 4: 255,
24 | 5: 255,
25 | 6: 255,
26 | 7: 0, # road
27 | 8: 1, # sidewalk
28 | 9: 255,
29 | 10: 255,
30 | 11: 2, # building
31 | 12: 3, # wall
32 | 13: 4, # fence
33 | 14: 255,
34 | 15: 255,
35 | 16: 255,
36 | 17: 5, # pole
37 | 18: 255,
38 | 19: 6, # traffic light
39 | 20: 7, # traffic sign
40 | 21: 8, # vegetation
41 | 22: 9, # terrain
42 | 23: 10, # sky
43 | 24: 11, # person
44 | 25: 12, # rider
45 | 26: 13, # car
46 | 27: 14, # truck
47 | 28: 15, # bus
48 | 29: 255,
49 | 30: 255,
50 | 31: 16, # train
51 | 32: 17, # motorcycle
52 | 33: 18, # bicycle
53 | -1: 255
54 | }
55 |
56 | city_to_id = {
57 | "aachen": 0, "bremen": 1, "darmstadt": 2, "erfurt": 3, "hanover": 4,
58 | "krefeld": 5, "strasbourg": 6, "tubingen": 7, "weimar": 8, "bochum": 9,
59 | "cologne": 10, "dusseldorf": 11, "hamburg": 12, "jena": 13,
60 | "monchengladbach": 14, "stuttgart": 15, "ulm": 16, "zurich": 17,
61 | "frankfurt": 18, "lindau": 19, "munster": 20
62 | }
63 |
64 |
65 | def filter_images(dataset, labels):
66 | # Filter images without any label in LABELS (using labels not reordered)
67 | idxs = []
68 |
69 | print(f"Filtering images...")
70 | for i in range(len(dataset)):
71 | domain_id = dataset.__getitem__(i, get_domain=True) # taking domain id
72 | if domain_id in labels:
73 | idxs.append(i)
74 | if i % 1000 == 0:
75 | print(f"\t{i}/{len(dataset)} ...")
76 | return idxs
77 |
78 |
79 | class CityscapesSegmentationDomain(data.Dataset):
80 |
81 | def __init__(self, root, train=True, transform=None, domain_transform=None):
82 | root = os.path.expanduser(root)
83 | annotation_folder = os.path.join(root, 'gtFine')
84 | image_folder = os.path.join(root, 'leftImg8bit')
85 |
86 | self.images = [ # Add train cities
87 | (
88 | path,
89 | os.path.join(
90 | annotation_folder,
91 | "train",
92 | path.split("/")[-2],
93 | path.split("/")[-1][:-15] + "gtFine_labelIds.png"
94 | ),
95 | city_to_id[path.split("/")[-2]]
96 | ) for path in sorted(glob.glob(os.path.join(image_folder, "train/*/*.png")))
97 | ]
98 | self.images += [ # Add validation cities
99 | (
100 | path,
101 | os.path.join(
102 | annotation_folder,
103 | "val",
104 | path.split("/")[-2],
105 | path.split("/")[-1][:-15] + "gtFine_labelIds.png"
106 | ),
107 | city_to_id[path.split("/")[-2]]
108 | ) for path in sorted(glob.glob(os.path.join(image_folder, "val/*/*.png")))
109 | ]
110 |
111 | self.transform = transform
112 | self.domain_transform = domain_transform
113 |
114 | def __getitem__(self, index, get_domain=False):
115 | """
116 | Args:
117 | index (int): Index
118 | Returns:
119 | tuple: (image, target) where target is the image segmentation.
120 | """
121 | if get_domain:
122 | domain = self.images[index][2]
123 | if self.domain_transform is not None:
124 | domain = self.domain_transform(domain)
125 | return domain
126 |
127 | try:
128 | img = Image.open(self.images[index][0]).convert('RGB')
129 | target = Image.open(self.images[index][1])
130 | except Exception as e:
131 | raise Exception(f"Index: {index}, len: {len(self)}, message: {str(e)}")
132 |
133 | if self.transform is not None:
134 | img, target = self.transform(img, target)
135 |
136 | return img, target
137 |
138 | def __len__(self):
139 | return len(self.images)
140 |
141 |
142 | class CityscapesSegmentationIncrementalDomain(data.Dataset):
143 | """Labels correspond to domains not classes in this case."""
144 | def __init__(
145 | self,
146 | root,
147 | train=True,
148 | transform=None,
149 | labels=None,
150 | idxs_path=None,
151 | masking=True,
152 | overlap=True,
153 | **kwargs
154 | ):
155 | full_data = CityscapesSegmentationDomain(root, train)
156 |
157 | # take index of images with at least one class in labels and all classes in labels+labels_old+[255]
158 | if idxs_path is not None and os.path.exists(idxs_path):
159 | idxs = np.load(idxs_path).tolist()
160 | else:
161 | idxs = filter_images(full_data, labels)
162 | if idxs_path is not None and distributed.get_rank() == 0:
163 | np.save(idxs_path, np.array(idxs, dtype=int))
164 |
165 | rnd = np.random.RandomState(1)
166 | rnd.shuffle(idxs)
167 | train_len = int(0.8 * len(idxs))
168 | if train:
169 | idxs = idxs[:train_len]
170 | print(f"{len(idxs)} images for train")
171 | else:
172 | idxs = idxs[train_len:]
173 | print(f"{len(idxs)} images for val")
174 |
175 | target_transform = tv.transforms.Lambda(
176 |             lambda t: t.apply_(
177 |                 lambda x: id_to_trainid.get(x, 255))
178 | )
179 | # make the subset of the dataset
180 | self.dataset = Subset(full_data, idxs, transform, target_transform)
181 |
182 | def __getitem__(self, index):
183 | """
184 | Args:
185 | index (int): Index
186 | Returns:
187 | tuple: (image, target) where target is the image segmentation.
188 | """
189 |
190 | return self.dataset[index]
191 |
192 | def __len__(self):
193 | return len(self.dataset)
194 |
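Both the train and the val constructions above reseed `np.random.RandomState(1)` before shuffling the same index list, so the 80/20 split is reproducible and disjoint across processes and runs. The idea in isolation:

    import numpy as np

    idxs = list(range(100))
    rnd = np.random.RandomState(1)              # fixed seed -> same permutation everywhere
    rnd.shuffle(idxs)
    train_len = int(0.8 * len(idxs))
    train_idxs, val_idxs = idxs[:train_len], idxs[train_len:]
    assert not set(train_idxs) & set(val_idxs)  # disjoint by construction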
--------------------------------------------------------------------------------
/dataset/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | def group_images(dataset, labels):
6 | # Group images based on the label in LABELS (using labels not reordered)
7 | idxs = {lab: [] for lab in labels}
8 |
9 | labels_cum = labels + [0, 255]
10 | for i in range(len(dataset)):
11 | cls = np.unique(np.array(dataset[i][1]))
12 | if all(x in labels_cum for x in cls):
13 | for x in cls:
14 | if x in labels:
15 | idxs[x].append(i)
16 | return idxs
17 |
18 |
19 | def filter_images(dataset, labels, labels_old=None, overlap=True):
20 | # Filter images without any label in LABELS (using labels not reordered)
21 | idxs = []
22 |
23 | if 0 in labels:
24 | labels.remove(0)
25 |
26 | print(f"Filtering images...")
27 | if labels_old is None:
28 | labels_old = []
29 | labels_cum = labels + labels_old + [0, 255]
30 |
31 | if overlap:
32 |         fil = lambda c: any(x in labels for x in c)
33 |     else:
34 |         fil = lambda c: any(x in labels for x in c) and all(x in labels_cum for x in c)
35 |
36 | for i in range(len(dataset)):
37 | cls = np.unique(np.array(dataset[i][1]))
38 | if fil(cls):
39 | idxs.append(i)
40 | if i % 1000 == 0:
41 | print(f"\t{i}/{len(dataset)} ...")
42 | return idxs
43 |
44 |
45 | class Subset(torch.utils.data.Dataset):
46 | """
47 | Subset of a dataset at specified indices.
48 | Arguments:
49 | dataset (Dataset): The whole Dataset
50 | indices (sequence): Indices in the whole set selected for subset
51 | transform (callable): way to transform the images and the targets
52 |         target_transform (callable): way to transform the target labels
53 |         id_mapping (dict, optional): raw label id -> train id mapping applied before target_transform
54 |     """
55 |
56 |     def __init__(self, dataset, indices, transform=None, target_transform=None, id_mapping=None):
57 |         self.dataset = dataset
58 |         self.indices = indices
59 |         self.transform = transform
60 |         self.target_transform = target_transform
61 |         self.id_mapping = id_mapping
62 |
63 |     def __getitem__(self, idx):
64 |         try:
65 |             sample, target = self.dataset[self.indices[idx]]
66 |         except Exception as e:
67 |             raise Exception(
68 |                 f"dataset = {len(self.dataset)}, indices = {len(self.indices)}, idx = {idx}, msg = {str(e)}"
69 |             )
70 |         if self.transform is not None:
71 |             sample, target = self.transform(sample, target)
72 |         if self.id_mapping is not None:
73 |             target = target.apply_(lambda x: self.id_mapping.get(x, 255))
74 |         if self.target_transform is not None:
75 |             target = self.target_transform(target)
76 |
77 | def viz_getter(self, idx):
78 | image_path, raw_image, sample, target = self.dataset.viz_getter(self.indices[idx])
79 | if self.transform is not None:
80 | sample, target = self.transform(sample, target)
81 | if self.target_transform is not None:
82 | target = self.target_transform(target)
83 |
84 | return image_path, raw_image, sample, target
85 |
86 | def __len__(self):
87 | return len(self.indices)
88 |
89 |
90 | class MaskLabels:
91 | """
92 | Use this class to mask labels that you don't want in your dataset.
93 | Arguments:
94 | labels_to_keep (list): The list of labels to keep in the target images
95 | mask_value (int): The value to replace ignored values (def: 0)
96 | """
97 |
98 | def __init__(self, labels_to_keep, mask_value=0):
99 | self.labels = labels_to_keep
100 | self.value = torch.tensor(mask_value, dtype=torch.uint8)
101 |
102 | def __call__(self, sample):
103 | # sample must be a tensor
104 | assert isinstance(sample, torch.Tensor), "Sample must be a tensor"
105 |
106 | sample.apply_(lambda t: t.apply_(lambda x: x if x in self.labels else self.value))
107 |
108 | return sample
109 |
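`filter_images` above encodes the two common incremental-segmentation protocols: with overlap=True an image qualifies as soon as it contains a current class, while with overlap=False (disjoint) it must additionally contain no class outside labels + labels_old + [0, 255]. A toy check (fake (image, target) pairs; plain lists suffice because only np.unique(np.array(...)) touches the target):

    fake = [
        (None, [0, 16]),       # current class only -> kept in both settings
        (None, [0, 3, 16]),    # current + unseen class 3 -> kept only if overlap=True
        (None, [0, 5]),        # no current class -> always dropped
    ]
    print(filter_images(fake, labels=[16], labels_old=[1, 2], overlap=True))   # [0, 1]
    print(filter_images(fake, labels=[16], labels_old=[1, 2], overlap=False))  # [0]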
--------------------------------------------------------------------------------
/dataset/voc.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import copy
4 |
5 | import numpy as np
6 | import torch.utils.data as data
7 | import torchvision as tv
8 | from PIL import Image
9 | from torch import distributed
10 |
11 | from .utils import Subset, filter_images, group_images
12 |
13 | classes = {
14 | 0: 'background',
15 | 1: 'aeroplane',
16 | 2: 'bicycle',
17 | 3: 'bird',
18 | 4: 'boat',
19 | 5: 'bottle',
20 | 6: 'bus',
21 | 7: 'car',
22 | 8: 'cat',
23 | 9: 'chair',
24 | 10: 'cow',
25 | 11: 'diningtable',
26 | 12: 'dog',
27 | 13: 'horse',
28 | 14: 'motorbike',
29 | 15: 'person',
30 | 16: 'pottedplant',
31 | 17: 'sheep',
32 | 18: 'sofa',
33 | 19: 'train',
34 | 20: 'tvmonitor'
35 | }
36 |
37 |
38 | class VOCSegmentation(data.Dataset):
39 |     """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
40 |     Args:
41 |         root (string): Root directory of the VOC Dataset.
42 |         image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
43 |         is_aug (bool, optional): If you want to use the augmented train set or not (default is True)
44 |         transform (callable, optional): A function/transform that takes in a PIL image
45 |         and returns a transformed version. E.g, ``transforms.RandomCrop``
46 | """
47 |
48 | def __init__(self, root, image_set='train', is_aug=True, transform=None):
49 |
50 | self.root = os.path.expanduser(root)
51 | self.year = "2012"
52 |
53 | self.transform = transform
54 |
55 | self.image_set = image_set
56 | voc_root = self.root
57 | splits_dir = os.path.join(voc_root, 'list')
58 |
59 | if not os.path.isdir(voc_root):
60 |             raise RuntimeError(
61 |                 'Dataset not found or corrupted. You can use download=True to download it '
62 |                 f'at location = {voc_root}'
63 | )
64 |
65 | if is_aug and image_set == 'train':
66 | mask_dir = os.path.join(voc_root, 'SegmentationClassAug')
67 | assert os.path.exists(mask_dir), "SegmentationClassAug not found"
68 | split_f = os.path.join(splits_dir, 'train_aug.txt')
69 | else:
70 | split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')
71 |
72 | if not os.path.exists(split_f):
73 | raise ValueError(
74 |                 'Wrong image_set entered! Please use image_set="train", '
75 |                 'image_set="trainval" or image_set="val". '
76 |                 f'Split file not found: {split_f}'
77 | )
78 |
79 |         # strip the trailing newline from each line
80 | with open(os.path.join(split_f), "r") as f:
81 | file_names = [x[:-1].split(' ') for x in f.readlines()]
82 |
83 |         # remove the leading slash, otherwise os.path.join would restart from the filesystem root
84 | self.images = [
85 | (
86 | os.path.join(voc_root,
87 | x[0][1:]), os.path.join(voc_root, x[1][1:])
88 | ) for x in file_names
89 | ]
90 |
91 | def __getitem__(self, index):
92 | """
93 | Args:
94 | index (int): Index
95 | Returns:
96 | tuple: (image, target) where target is the image segmentation.
97 | """
98 | img = Image.open(self.images[index][0]).convert('RGB')
99 | target = Image.open(self.images[index][1])
100 | if self.transform is not None:
101 | img, target = self.transform(img, target)
102 |
103 | return img, target
104 |
105 | def viz_getter(self, index):
106 | image_path = self.images[index][0]
107 | raw_image = Image.open(self.images[index][0]).convert('RGB')
108 | target = Image.open(self.images[index][1])
109 | if self.transform is not None:
110 | img, target = self.transform(raw_image, target)
111 | else:
112 | img = copy.deepcopy(raw_image)
113 | return image_path, raw_image, img, target
114 |
115 | def __len__(self):
116 | return len(self.images)
117 |
118 |
119 | class VOCSegmentationIncremental(data.Dataset):
120 |
121 | def __init__(
122 | self,
123 | root,
124 | train=True,
125 | transform=None,
126 | labels=None,
127 | labels_old=None,
128 | idxs_path=None,
129 | masking=True,
130 | overlap=True,
131 | data_masking="current",
132 | test_on_val=False,
133 | **kwargs
134 | ):
135 |
136 | full_voc = VOCSegmentation(root, 'train' if train else 'val', is_aug=True, transform=None)
137 |
138 | self.labels = []
139 | self.labels_old = []
140 |
141 | if labels is not None:
142 | # store the labels
143 | labels_old = labels_old if labels_old is not None else []
144 |
145 | self.__strip_zero(labels)
146 | self.__strip_zero(labels_old)
147 |
148 | assert not any(
149 | l in labels_old for l in labels
150 | ), "labels and labels_old must be disjoint sets"
151 |
152 | self.labels = [0] + labels
153 | self.labels_old = [0] + labels_old
154 |
155 | self.order = [0] + labels_old + labels
156 |
157 | # take index of images with at least one class in labels and all classes in labels+labels_old+[0,255]
158 | if idxs_path is not None and os.path.exists(idxs_path):
159 | idxs = np.load(idxs_path).tolist()
160 | else:
161 | idxs = filter_images(full_voc, labels, labels_old, overlap=overlap)
162 | if idxs_path is not None and distributed.get_rank() == 0:
163 | np.save(idxs_path, np.array(idxs, dtype=int))
164 |
165 | if test_on_val:
166 | rnd = np.random.RandomState(1)
167 | rnd.shuffle(idxs)
168 | train_len = int(0.8 * len(idxs))
169 | if train:
170 | idxs = idxs[:train_len]
171 | else:
172 | idxs = idxs[train_len:]
173 |
174 | #if train:
175 | # masking_value = 0
176 | #else:
177 | # masking_value = 255
178 |
179 | #self.inverted_order = {label: self.order.index(label) for label in self.order}
180 | #self.inverted_order[255] = masking_value
181 |
182 | masking_value = 0 # Future classes will be considered as background.
183 | self.inverted_order = {label: self.order.index(label) for label in self.order}
184 | self.inverted_order[255] = 255
185 |
186 | reorder_transform = tv.transforms.Lambda(
187 | lambda t: t.apply_(
188 | lambda x: self.inverted_order[x] if x in self.inverted_order else masking_value
189 | )
190 | )
191 |
192 | if masking:
193 | if data_masking == "current":
194 | tmp_labels = self.labels + [255]
195 | elif data_masking == "current+old":
196 | tmp_labels = labels_old + self.labels + [255]
197 | elif data_masking == "all":
198 | raise NotImplementedError(
199 | f"data_masking={data_masking} not yet implemented sorry not sorry."
200 | )
201 | elif data_masking == "new":
202 | tmp_labels = self.labels
203 | masking_value = 255
204 |
205 | target_transform = tv.transforms.Lambda(
206 |                 lambda t: t.apply_(
207 |                     lambda x: self.inverted_order[x] if x in tmp_labels else masking_value)
208 | )
209 | else:
210 |             assert False  # the no-masking path is disabled; reorder_transform below is unreachable
211 | target_transform = reorder_transform
212 |
213 | # make the subset of the dataset
214 | self.dataset = Subset(full_voc, idxs, transform, target_transform)
215 | else:
216 | self.dataset = full_voc
217 |
218 | def __getitem__(self, index):
219 | """
220 | Args:
221 | index (int): Index
222 | Returns:
223 | tuple: (image, target) where target is the image segmentation.
224 | """
225 |
226 | return self.dataset[index]
227 |
228 | def viz_getter(self, index):
229 | return self.dataset.viz_getter(index)
230 |
231 | def __len__(self):
232 | return len(self.dataset)
233 |
234 | @staticmethod
235 | def __strip_zero(labels):
236 | while 0 in labels:
237 | labels.remove(0)
238 |
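A hedged usage sketch for one incremental step (the class ids, root path, and omitted paired transform are illustrative, not read from the repo's task definitions):

    train_set = VOCSegmentationIncremental(
        root="./data/PascalVOC12",        # hypothetical dataset root
        train=True,
        labels=[16, 17, 18, 19, 20],      # e.g. the five new classes of a 15-5 step
        labels_old=list(range(1, 16)),    # the fifteen base classes
        masking=True,
        overlap=True,
        data_masking="current",
    )
    # NB: a real run must also pass a paired `transform` that converts the PIL
    # (image, target) pair to tensors, since the label remapping relies on Tensor.apply_.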
--------------------------------------------------------------------------------
/environment.yaml:
--------------------------------------------------------------------------------
1 | name: torch13
2 | channels:
3 | - pytorch
4 | - https://mirrors.ustc.edu.cn/anaconda/cloud/pytorch/
5 | - https://mirrors.ustc.edu.cn/anaconda/cloud/menpo/
6 | - https://mirrors.ustc.edu.cn/anaconda/cloud/bioconda/
7 | - https://mirrors.ustc.edu.cn/anaconda/cloud/msys2/
8 | - https://mirrors.ustc.edu.cn/anaconda/cloud/conda-forge/
9 | - https://mirrors.ustc.edu.cn/anaconda/pkgs/free/
10 | - https://mirrors.ustc.edu.cn/anaconda/pkgs/main/
11 | - defaults
12 | dependencies:
13 | - _libgcc_mutex=0.1=main
14 | - blas=1.0=mkl
15 | - ca-certificates=2020.6.24=0
16 | - certifi=2020.6.20=py36_0
17 | - cffi=1.14.0=py36he30daa8_1
18 | - cudatoolkit=10.1.243=h6bb024c_0
19 | - cycler=0.10.0=py36_0
20 | - dbus=1.13.16=hb2f20db_0
21 | - expat=2.2.9=he6710b0_2
22 | - fontconfig=2.13.0=h9420a91_0
23 | - freetype=2.10.2=h5ab3b9f_0
24 | - glib=2.65.0=h3eb4bd4_0
25 | - gst-plugins-base=1.14.0=hbbd80ab_1
26 | - gstreamer=1.14.0=hb31296c_0
27 | - icu=58.2=he6710b0_3
28 | - intel-openmp=2020.1=217
29 | - jpeg=9b=h024ee3a_2
30 | - kiwisolver=1.2.0=py36hfd86e86_0
31 | - lcms2=2.11=h396b838_0
32 | - ld_impl_linux-64=2.33.1=h53a641e_7
33 | - libedit=3.1.20191231=h14c3975_1
34 | - libffi=3.3=he6710b0_2
35 | - libgcc-ng=9.1.0=hdf63c60_0
36 | - libgfortran-ng=7.3.0=hdf63c60_0
37 | - libpng=1.6.37=hbc83047_0
38 | - libstdcxx-ng=9.1.0=hdf63c60_0
39 | - libtiff=4.1.0=h2733197_1
40 | - libuuid=1.0.3=h1bed415_2
41 | - libxcb=1.14=h7b6447c_0
42 | - libxml2=2.9.10=he19cac6_1
43 | - lz4-c=1.9.2=he6710b0_0
44 | - matplotlib=3.3.1=0
45 | - matplotlib-base=3.3.1=py36h817c723_0
46 | - mkl=2020.1=217
47 | - mkl-service=2.3.0=py36he904b0f_0
48 | - mkl_fft=1.1.0=py36h23d657b_0
49 | - mkl_random=1.1.1=py36h0573a6f_0
50 | - ncurses=6.2=he6710b0_1
51 | - ninja=1.9.0=py36hfd86e86_0
52 | - numpy=1.18.5=py36ha1c710e_0
53 | - numpy-base=1.18.5=py36hde5b4d6_0
54 | - olefile=0.46=py_0
55 | - openssl=1.1.1g=h7b6447c_0
56 | - pandas=1.1.0=py36he6710b0_0
57 | - pcre=8.44=he6710b0_0
58 | - pillow=7.2.0=py36hb39fc2d_0
59 | - pip=20.1.1=py36_1
60 | - pycparser=2.20=py_2
61 | - pyparsing=2.4.7=py_0
62 | - pyqt=5.9.2=py36h05f1152_2
63 | - python=3.6.10=h7579374_2
64 | - python-dateutil=2.8.1=py_0
65 | - pytz=2020.1=py_0
66 | - qt=5.9.7=h5867ecd_1
67 | - readline=8.0=h7b6447c_0
68 | - scipy=1.5.2=py36h0b6359f_0
69 | - seaborn=0.10.1=py_0
70 | - setuptools=49.2.0=py36_0
71 | - sip=4.19.8=py36hf484d3e_0
72 | - six=1.15.0=py_0
73 | - sqlite=3.32.3=h62c20be_0
74 | - tk=8.6.10=hbc83047_0
75 | - tornado=6.0.4=py36h7b6447c_1
76 | - wheel=0.34.2=py36_0
77 | - xz=5.2.5=h7b6447c_0
78 | - zlib=1.2.11=h7b6447c_3
79 | - zstd=1.4.5=h0b5b093_0
80 | - pytorch=1.3.1=py3.6_cuda10.1.243_cudnn7.6.3_0
81 | - torchvision=0.4.2=py36_cu101
82 | prefix: /home/cbzhang/anaconda3/envs/torch13
83 |
84 |
--------------------------------------------------------------------------------
/images/plop_viz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/images/plop_viz.png
--------------------------------------------------------------------------------
/images/plop_voc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/images/plop_voc.png
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666002308.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666002308.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666007523.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666007523.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666008515.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666008515.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666009336.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666009336.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666010448.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666010448.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666011746.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_disjoint/events.out.tfevents.1666011746.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_disjoint/main.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665987569.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665987569.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665987736.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665987736.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665993707.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665993707.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665995034.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665995034.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665995876.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665995876.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665997203.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665997203.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665998541.SSADL3860:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhangchbin/RCIL/b9aad58973c8f0fa63fea2b55d4477dfa05e1202/logs/15-5s-voc/RCIL_overlap/events.out.tfevents.1665998541.SSADL3860
--------------------------------------------------------------------------------
/logs/15-5s-voc/RCIL_overlap/main.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .stream_metrics import StreamSegMetrics, AverageMeter
2 |
3 |
--------------------------------------------------------------------------------
/metrics/stream_metrics.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import torch
7 |
8 |
9 | class _StreamMetrics(object):
10 |
11 | def __init__(self):
12 | """ Overridden by subclasses """
13 | pass
14 |
15 | def update(self, gt, pred):
16 | """ Overridden by subclasses """
17 | raise NotImplementedError()
18 |
19 | def get_results(self):
20 | """ Overridden by subclasses """
21 | raise NotImplementedError()
22 |
23 | def to_str(self, metrics):
24 | """ Overridden by subclasses """
25 | raise NotImplementedError()
26 |
27 | def reset(self):
28 | """ Overridden by subclasses """
29 | raise NotImplementedError()
30 |
31 | def synch(self, device):
32 | """ Overridden by subclasses """
33 | raise NotImplementedError()
34 |
35 |
36 | class StreamSegMetrics(_StreamMetrics):
37 | """
38 | Stream Metrics for Semantic Segmentation Task
39 | """
40 |
41 | def __init__(self, n_classes):
42 | super().__init__()
43 | self.n_classes = n_classes
44 | self.confusion_matrix = np.zeros((n_classes, n_classes))
45 | self.total_samples = 0
46 |
47 | def update(self, label_trues, label_preds):
48 | for lt, lp in zip(label_trues, label_preds):
49 | self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten())
50 | self.total_samples += len(label_trues)
51 |
52 | def to_str(self, results):
53 | string = "\n"
54 | for k, v in results.items():
55 | if k != "Class IoU" and k != "Class Acc" and k != "Confusion Matrix":
56 | string += "%s: %f\n" % (k, v)
57 |
58 | string += 'Class IoU:\n'
59 | for k, v in results['Class IoU'].items():
60 | string += "\tclass %d: %s\n" % (k, str(v))
61 |
62 | string += 'Class Acc:\n'
63 | for k, v in results['Class Acc'].items():
64 | string += "\tclass %d: %s\n" % (k, str(v))
65 |
66 | return string
67 |
68 | def _fast_hist(self, label_true, label_pred):
69 | mask = (label_true >= 0) & (label_true < self.n_classes)
70 | hist = np.bincount(
71 | self.n_classes * label_true[mask].astype(int) + label_pred[mask],
72 | minlength=self.n_classes**2,
73 | ).reshape(self.n_classes, self.n_classes)
74 | return hist
75 |
76 | def get_results(self):
77 | """Returns accuracy score evaluation result.
78 | - overall accuracy
79 | - mean accuracy
80 | - mean IU
81 | - fwavacc
82 | """
83 | EPS = 1e-6
84 | hist = self.confusion_matrix
85 |
86 | gt_sum = hist.sum(axis=1)
87 | mask = (gt_sum != 0)
88 | diag = np.diag(hist)
89 |
90 | acc = diag.sum() / hist.sum()
91 | acc_cls_c = diag / (gt_sum + EPS)
92 | acc_cls = np.mean(acc_cls_c[mask])
93 | iu = diag / (gt_sum + hist.sum(axis=0) - diag + EPS)
94 | mean_iu = np.mean(iu[mask])
95 | freq = hist.sum(axis=1) / hist.sum()
96 | fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
97 | cls_iu = dict(zip(range(self.n_classes), [iu[i] if m else "X" for i, m in enumerate(mask)]))
98 | cls_acc = dict(
99 | zip(range(self.n_classes), [acc_cls_c[i] if m else "X" for i, m in enumerate(mask)])
100 | )
101 |
102 | return {
103 | "Total samples": self.total_samples,
104 | "Overall Acc": acc,
105 | "Mean Acc": acc_cls,
106 | "FreqW Acc": fwavacc,
107 | "Mean IoU": mean_iu,
108 | "Class IoU": cls_iu,
109 | "Class Acc": cls_acc,
110 | "Confusion Matrix": self.confusion_matrix_to_fig()
111 | }
112 |
113 | def reset(self):
114 | self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
115 | self.total_samples = 0
116 |
117 | def synch(self, device):
118 | # collect from multi-processes
119 | confusion_matrix = torch.tensor(self.confusion_matrix).to(device)
120 | samples = torch.tensor(self.total_samples).to(device)
121 |
122 | torch.distributed.reduce(confusion_matrix, dst=0)
123 | torch.distributed.reduce(samples, dst=0)
124 |
125 | if torch.distributed.get_rank() == 0:
126 | self.confusion_matrix = confusion_matrix.cpu().numpy()
127 | self.total_samples = samples.cpu().numpy()
128 |
129 | def confusion_matrix_to_fig(self):
130 | cm = self.confusion_matrix.astype('float') / (self.confusion_matrix.sum(axis=1) +
131 | 0.000001)[:, np.newaxis]
132 | fig, ax = plt.subplots()
133 | im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
134 | ax.figure.colorbar(im, ax=ax)
135 |
136 | ax.set(title=f'Confusion Matrix', ylabel='True label', xlabel='Predicted label')
137 |
138 | fig.tight_layout()
139 | return fig
140 |
141 |
142 | class AverageMeter(object):
143 | """Computes average values"""
144 |
145 | def __init__(self):
146 | self.book = dict()
147 |
148 | def reset_all(self):
149 | self.book.clear()
150 |
151 | def reset(self, id):
152 | item = self.book.get(id, None)
153 | if item is not None:
154 | item[0] = 0
155 | item[1] = 0
156 |
157 | def update(self, id, val):
158 | record = self.book.get(id, None)
159 | if record is None:
160 | self.book[id] = [val, 1]
161 | else:
162 | record[0] += val
163 | record[1] += 1
164 |
165 | def get_results(self, id):
166 | record = self.book.get(id, None)
167 | assert record is not None
168 | return record[0] / record[1]
169 |
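For reference, the per-class IoU in `get_results` is diag / (gt + pred - diag) on the confusion matrix; a worked two-class example with hand-checked numbers (not taken from the repo's logs):

    import numpy as np

    hist = np.array([[8., 2.],   # rows = ground truth, columns = prediction
                     [1., 9.]])
    diag = np.diag(hist)
    iu = diag / (hist.sum(axis=1) + hist.sum(axis=0) - diag)
    print(iu)          # [8/11, 9/12] = [0.7272..., 0.75]
    print(iu.mean())   # mean IoU ~= 0.7386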
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .resnet import *
2 |
--------------------------------------------------------------------------------
/models/resnet.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from collections import OrderedDict
3 | from functools import partial
4 |
5 | import torch.nn as nn
6 |
7 | from modules import GlobalAvgPool2d, ResidualBlock
8 |
9 | from .util import try_index
10 |
11 |
12 | class ResNet(nn.Module):
13 | """Standard residual network
14 |
15 | Parameters
16 | ----------
17 | structure : list of int
18 | Number of residual blocks in each of the four modules of the network
19 | bottleneck : bool
20 | If `True` use "bottleneck" residual blocks with 3 convolutions, otherwise use standard blocks
21 | norm_act : callable
22 | Function to create normalization / activation Module
23 | classes : int
24 | If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end
25 | of the network
26 | dilation : int or list of int
27 | List of dilation factors for the four modules of the network, or `1` to ignore dilation
28 | keep_outputs : bool
29 | If `True` output a list with the outputs of all modules
30 | """
31 |
32 | def __init__(
33 | self,
34 | structure,
35 | bottleneck,
36 | norm_act=nn.BatchNorm2d,
37 | classes=0,
38 | output_stride=16,
39 | keep_outputs=False
40 | ):
41 | super(ResNet, self).__init__()
42 | self.structure = structure
43 | self.bottleneck = bottleneck
44 | self.keep_outputs = keep_outputs
45 |
46 | if len(structure) != 4:
47 | raise ValueError("Expected a structure with four values")
48 | if output_stride != 8 and output_stride != 16:
49 | raise ValueError("Output stride must be 8 or 16")
50 |
51 | if output_stride == 16:
52 | dilation = [1, 1, 1, 2] # dilated conv for last 3 blocks (9 layers)
53 | elif output_stride == 8:
54 | dilation = [1, 1, 2, 4] # 23+3 blocks (78 layers)
55 | else:
56 | raise NotImplementedError
57 |
58 | self.dilation = dilation
59 |
60 | # Initial layers
61 | layers = [
62 | ("conv1", nn.Conv2d(3, 64, 7, stride=2, padding=3, bias=False)), ("bn1", norm_act(64))
63 | ]
64 | if try_index(dilation, 0) == 1:
65 | layers.append(("pool1", nn.MaxPool2d(3, stride=2, padding=1)))
66 | self.mod1 = nn.Sequential(OrderedDict(layers))
67 |
68 | # Groups of residual blocks
69 | in_channels = 64
70 | if self.bottleneck:
71 | channels = (64, 64, 256)
72 | else:
73 | channels = (64, 64)
74 | for mod_id, num in enumerate(structure):
75 | # Create blocks for module
76 | blocks = []
77 | for block_id in range(num):
78 | stride, dil = self._stride_dilation(dilation, mod_id, block_id)
79 | blocks.append(
80 | (
81 | "block%d" % (block_id + 1),
82 | ResidualBlock(
83 | in_channels,
84 | channels,
85 | norm_act=norm_act,
86 | stride=stride,
87 | dilation=dil,
88 | last=block_id == num - 1
89 | )
90 | )
91 | )
92 |
93 |             # Update the input channel count for the next module
94 | in_channels = channels[-1]
95 |
96 | # Create module
97 | self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
98 |
99 | # Double the number of channels for the next module
100 | channels = [c * 2 for c in channels]
101 |
102 | self.out_channels = in_channels
103 |
104 | # Pooling and predictor
105 | if classes != 0:
106 | self.classifier = nn.Sequential(
107 | OrderedDict(
108 | [("avg_pool", GlobalAvgPool2d()), ("fc", nn.Linear(in_channels, classes))]
109 | )
110 | )
111 |
112 | @staticmethod
113 | def _stride_dilation(dilation, mod_id, block_id):
114 | d = try_index(dilation, mod_id)
115 | s = 2 if d == 1 and block_id == 0 and mod_id > 0 else 1
116 | return s, d
117 |
118 | def forward(self, x):
119 | outs = []
120 | attentions = []
121 |
122 | branch1_x, branch2_x = [], []
123 |
124 | x = self.mod1(x)
125 | #attentions.append(x)
126 | outs.append(x)
127 |
128 | x, xb1, xb2, att = self.mod2(x)
129 | attentions.append(att)
130 | outs.append(x)
131 | branch1_x.append(xb1)
132 | branch2_x.append(xb2)
133 |
134 | x, xb1, xb2, att = self.mod3(x)
135 | attentions.append(att)
136 | outs.append(x)
137 | branch1_x.append(xb1)
138 | branch2_x.append(xb2)
139 |
140 | x, xb1, xb2, att = self.mod4(x)
141 | attentions.append(att)
142 | outs.append(x)
143 | branch1_x.append(xb1)
144 | branch2_x.append(xb2)
145 |
146 | x, xb1, xb2, att = self.mod5(x)
147 | attentions.append(att)
148 | outs.append(x)
149 | branch1_x.append(xb1)
150 | branch2_x.append(xb2)
151 |
152 | if hasattr(self, "classifier"):
153 | outs.append(self.classifier(outs[-1]))
154 |
155 | if self.keep_outputs:
156 | return outs, attentions, branch1_x, branch2_x
157 | else:
158 | return outs[-1], attentions, branch1_x, branch2_x
159 |
160 |
161 | _NETS = {
162 | "18": {
163 | "structure": [2, 2, 2, 2],
164 | "bottleneck": False
165 | },
166 | "34": {
167 | "structure": [3, 4, 6, 3],
168 | "bottleneck": False
169 | },
170 | "50": {
171 | "structure": [3, 4, 6, 3],
172 | "bottleneck": True
173 | },
174 | "101": {
175 | "structure": [3, 4, 23, 3],
176 | "bottleneck": True
177 | },
178 | "152": {
179 | "structure": [3, 8, 36, 3],
180 | "bottleneck": True
181 | },
182 | }
183 |
184 | __all__ = []
185 | for name, params in _NETS.items():
186 | net_name = "net_resnet" + name
187 | setattr(sys.modules[__name__], net_name, partial(ResNet, **params))
188 | __all__.append(net_name)
189 |
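The loop above registers net_resnet18 ... net_resnet152 as partials of ResNet. A minimal construction sketch (assuming the repo root is on sys.path so the `modules` package resolves; the forward pass returns the last feature map plus the per-stage attentions and the two branch outputs):

    import torch
    from models import net_resnet101   # generated by the loop above

    model = net_resnet101(classes=0, output_stride=16).eval()
    with torch.no_grad():
        feats, attentions, branch1, branch2 = model(torch.randn(1, 3, 512, 512))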
--------------------------------------------------------------------------------
/models/util.py:
--------------------------------------------------------------------------------
1 | def try_index(scalar_or_list, i):
2 | try:
3 | return scalar_or_list[i]
4 | except TypeError:
5 | return scalar_or_list
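
`try_index` lets a parameter be either a scalar or a per-module list, e.g.:

    print(try_index([1, 1, 2, 4], 2))   # list -> indexed: 2
    print(try_index(1, 2))              # scalar -> returned unchanged: 1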
6 |
--------------------------------------------------------------------------------
/modules/__init__.py:
--------------------------------------------------------------------------------
1 | from .deeplab import DeeplabV3
2 | from .residual import IdentityResidualBlock, ResidualBlock
3 | from .misc import GlobalAvgPool2d
4 |
--------------------------------------------------------------------------------
/modules/deeplab.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as functional
4 |
5 | from models.util import try_index
6 |
7 |
8 | class DeeplabV3(nn.Module):
9 | def __init__(self,
10 | in_channels,
11 | out_channels,
12 | hidden_channels=256,
13 | out_stride=16,
14 | norm_act=nn.BatchNorm2d,
15 | pooling_size=None):
16 | super(DeeplabV3, self).__init__()
17 | self.pooling_size = pooling_size
18 |
19 | if out_stride == 16:
20 | dilations = [6, 12, 18]
21 | elif out_stride == 8:
22 | dilations = [12, 24, 32]
23 |
24 | self.map_convs = nn.ModuleList([
25 | nn.Conv2d(in_channels, hidden_channels, 1, bias=False),
26 | nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[0], padding=dilations[0]),
27 | nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[1], padding=dilations[1]),
28 | nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[2], padding=dilations[2])
29 | ])
30 | self.map_bn = norm_act(hidden_channels * 4)
31 |
32 | self.map_convs_new = nn.ModuleList([
33 | nn.Conv2d(in_channels, hidden_channels, 1, bias=False),
34 | nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[0], padding=dilations[0]),
35 | nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[1], padding=dilations[1]),
36 | nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[2], padding=dilations[2])
37 | ])
38 | self.map_bn_new = norm_act(hidden_channels * 4)
39 |
40 | self.global_pooling_conv = nn.Conv2d(in_channels, hidden_channels, 1, bias=False)
41 | self.global_pooling_bn = norm_act(hidden_channels)
42 |
43 | self.red_conv = nn.Conv2d(hidden_channels * 4, out_channels, 1, bias=False)
44 | self.pool_red_conv = nn.Conv2d(hidden_channels, out_channels, 1, bias=False)
45 | self.red_bn = norm_act(out_channels)
46 |
47 | self.reset_parameters(self.map_bn.activation, self.map_bn.activation_param)
48 |
49 | def reset_parameters(self, activation, slope):
50 | gain = nn.init.calculate_gain(activation, slope)
51 | for m in self.modules():
52 | if isinstance(m, nn.Conv2d):
53 | nn.init.xavier_normal_(m.weight.data, gain)
54 | if hasattr(m, "bias") and m.bias is not None:
55 | nn.init.constant_(m.bias, 0)
56 | elif isinstance(m, nn.BatchNorm2d):
57 | if hasattr(m, "weight") and m.weight is not None:
58 | nn.init.constant_(m.weight, 1)
59 | if hasattr(m, "bias") and m.bias is not None:
60 | nn.init.constant_(m.bias, 0)
61 |
62 | def forward(self, x):
63 | # Map convolutions
64 | out = torch.cat([m(x) for m in self.map_convs], dim=1)
65 | out = self.map_bn(out)
66 |
67 | out_new = torch.cat([m(x) for m in self.map_convs_new], dim=1)
68 | out_new = self.map_bn_new(out_new)
69 |
70 | r = torch.rand(1, out.shape[1], 1, 1, dtype=torch.float32)
71 |         if not self.training:
72 |             r[:, :, :, :] = 1.0
73 |         weight_out_branch = torch.zeros_like(r)
74 |         weight_out_new_branch = torch.zeros_like(r)
75 |         weight_out_branch[r < 0.33] = 2.
76 |         weight_out_new_branch[r < 0.33] = 0.
77 |         weight_out_branch[(r >= 0.33) * (r < 0.66)] = 0.
78 |         weight_out_new_branch[(r >= 0.33) * (r < 0.66)] = 2.
79 |         weight_out_branch[r >= 0.66] = 1.
80 |         weight_out_new_branch[r >= 0.66] = 1.
81 |
82 | out = out * weight_out_branch.to(out.device) * 0.5 + out_new * weight_out_new_branch.to(out.device) * 0.5
83 |
84 | out = functional.leaky_relu(out, 0.01)
85 |
86 | out = self.red_conv(out)
87 |
88 | # Global pooling
89 |         pool = self._global_pooling(x)  # training: global 1x1 average pooling; eval: fixed window if pooling_size is set
90 | pool = self.global_pooling_conv(pool)
91 | pool = self.global_pooling_bn(pool)
92 |
93 | pool = functional.leaky_relu(pool, 0.01)
94 |
95 | pool = self.pool_red_conv(pool)
96 |
97 | if self.training or self.pooling_size is None:
98 | pool = pool.repeat(1, 1, x.size(2), x.size(3))
99 |
100 | out += pool
101 | out = self.red_bn(out)
102 |
103 | out = functional.leaky_relu(out, 0.01)
104 |
105 | return out
106 |
107 | def _global_pooling(self, x):
108 | if self.training or self.pooling_size is None:
109 | # this is like Adaptive Average Pooling (1,1)
110 | pool = x.view(x.size(0), x.size(1), -1).mean(dim=-1)
111 | pool = pool.view(x.size(0), x.size(1), 1, 1)
112 | else:
113 | pooling_size = (min(try_index(self.pooling_size, 0), x.shape[2]),
114 | min(try_index(self.pooling_size, 1), x.shape[3]))
115 | padding = (
116 | (pooling_size[1] - 1) // 2,
117 | (pooling_size[1] - 1) // 2 if pooling_size[1] % 2 == 1 else (pooling_size[1] - 1) // 2 + 1,
118 | (pooling_size[0] - 1) // 2,
119 | (pooling_size[0] - 1) // 2 if pooling_size[0] % 2 == 1 else (pooling_size[0] - 1) // 2 + 1
120 | )
121 |
122 | pool = functional.avg_pool2d(x, pooling_size, stride=1)
123 | pool = functional.pad(pool, pad=padding, mode="replicate")
124 | return pool
125 |
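The branch-weighting block in `forward` appears to implement a channel-wise drop-path between the old and the new ASPP branch: per channel, with probability ~1/3 only the old branch is kept (scaled by 2), ~1/3 only the new one, and ~1/3 both are averaged; at evaluation time r is forced to 1 so both are always averaged. Restated as a standalone function (a sketch; the in-place code above is authoritative):

    import torch

    def merge_branches(out, out_new, training):
        # channel-wise drop-path: ~1/3 keep only `out` (x2), ~1/3 only `out_new` (x2),
        # ~1/3 average both; evaluation always averages (r forced to 1)
        r = torch.rand(1, out.shape[1], 1, 1)
        if not training:
            r.fill_(1.0)
        w_old, w_new = torch.zeros_like(r), torch.zeros_like(r)
        w_old[r < 0.33] = 2.0
        w_new[(r >= 0.33) & (r < 0.66)] = 2.0
        w_old[r >= 0.66] = 1.0
        w_new[r >= 0.66] = 1.0
        return out * w_old.to(out.device) * 0.5 + out_new * w_new.to(out.device) * 0.5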
--------------------------------------------------------------------------------
/modules/misc.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class GlobalAvgPool2d(nn.Module):
5 | def __init__(self):
6 | """Global average pooling over the input's spatial dimensions"""
7 | super(GlobalAvgPool2d, self).__init__()
8 |
9 | def forward(self, inputs):
10 | in_size = inputs.size()
11 | return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
12 |
13 |
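Usage is straightforward -- the spatial dimensions are averaged away:

    import torch

    pool = GlobalAvgPool2d()
    print(pool(torch.randn(2, 512, 7, 7)).shape)   # torch.Size([2, 512])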
--------------------------------------------------------------------------------
/modules/residual.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as functional
5 |
6 |
7 | class ResidualBlock(nn.Module):
8 | """Configurable residual block
9 |
10 | Parameters
11 | ----------
12 | in_channels : int
13 | Number of input channels.
14 | channels : list of int
15 | Number of channels in the internal feature maps. Can either have two or three elements: if three construct
16 | a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then
17 | `3 x 3` then `1 x 1` convolutions.
18 | stride : int
19 | Stride of the first `3 x 3` convolution
20 | dilation : int
21 | Dilation to apply to the `3 x 3` convolutions.
22 | groups : int
23 | Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with
24 | bottleneck blocks.
25 | norm_act : callable
26 | Function to create normalization / activation Module.
27 | dropout: callable
28 | Function to create Dropout Module.
29 | """
30 |
31 | def __init__(
32 | self,
33 | in_channels,
34 | channels,
35 | stride=1,
36 | dilation=1,
37 | groups=1,
38 | norm_act=nn.BatchNorm2d,
39 | dropout=None,
40 | last=False
41 | ):
42 | super(ResidualBlock, self).__init__()
43 |
44 | # Check parameters for inconsistencies
45 | if len(channels) != 2 and len(channels) != 3:
46 | raise ValueError("channels must contain either two or three values")
47 | if len(channels) == 2 and groups != 1:
48 | raise ValueError("groups > 1 are only valid if len(channels) == 3")
49 |
50 | is_bottleneck = len(channels) == 3
51 | need_proj_conv = stride != 1 or in_channels != channels[-1]
52 |
53 | if not is_bottleneck:
54 | bn2 = norm_act(channels[1])
55 | bn2.activation = "identity"
56 | layers = [
57 | (
58 | "conv1",
59 | nn.Conv2d(
60 | in_channels,
61 | channels[0],
62 | 3,
63 | stride=stride,
64 | padding=dilation,
65 | bias=False,
66 | dilation=dilation
67 | )
68 | ), ("bn1", norm_act(channels[0])),
69 | (
70 | "conv2",
71 | nn.Conv2d(
72 | channels[0],
73 | channels[1],
74 | 3,
75 | stride=1,
76 | padding=dilation,
77 | bias=False,
78 | dilation=dilation
79 | )
80 | ), ("bn2", bn2)
81 | ]
82 | if dropout is not None:
83 | layers = layers[0:2] + [("dropout", dropout())] + layers[2:]
84 | else:
85 | bn3 = norm_act(channels[2])
86 | bn3.activation = "identity"
87 | layers = [
88 | ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=1, padding=0, bias=False)),
89 | ("bn1", norm_act(channels[0])),
90 | ("extra1", nn.LeakyReLU(0.01)),
91 | (
92 | "conv2",
93 | nn.Conv2d(
94 | channels[0],
95 | channels[1],
96 | 3,
97 | stride=stride,
98 | padding=dilation,
99 | bias=False,
100 | groups=groups,
101 | dilation=dilation
102 | )
103 | ), ("bn2", norm_act(channels[1])),
104 | ("dropout", nn.Dropout()),
105 | ("extra2", nn.LeakyReLU(0.01)),
106 | ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)),
107 | ("bn3", bn3),
108 | ("conv2_new", nn.Conv2d(channels[0], channels[1], 3, stride=stride, padding=dilation, bias=False,
109 | groups=groups, dilation=dilation)),
110 | ("bn2_new", norm_act(channels[1])),
111 | ("dropout_new", nn.Dropout())
112 | ]
113 | # if dropout is not None:
114 | # layers = layers[0:4] + [("dropout", dropout())] + layers[4:]
115 | self.dropout = dropout
116 | self.convs = nn.Sequential(OrderedDict(layers))
117 |
118 | if need_proj_conv:
119 | self.proj_conv = nn.Conv2d(
120 | in_channels, channels[-1], 1, stride=stride, padding=0, bias=False
121 | )
122 | self.proj_bn = norm_act(channels[-1])
123 | self.proj_bn.activation = "identity"
124 |
125 | self._last = last
126 |
127 | def forward(self, x):
128 | if hasattr(self, "proj_conv"):
129 | residual = self.proj_conv(x)
130 | residual = self.proj_bn(residual)
131 | else:
132 | residual = x
133 | # x = self.convs(x) + residual
134 | x = self.convs[0:3](x)
135 |
136 | x_branch1 = self.convs[3:5](x)
137 | if self.dropout is not None:
138 | x_branch1 = self.convs[5](x_branch1)
139 | x_branch2 = self.convs[9:11](x)
140 | if self.dropout is not None:
141 | x_branch2 = self.convs[11](x_branch2)
142 |
143 | r = torch.rand(1, x_branch1.shape[1], 1, 1, dtype=torch.float32)
144 |         if not self.training:
145 |             r[:, :, :, :] = 1.0
146 |         weight_out_branch = torch.zeros_like(r)
147 |         weight_out_new_branch = torch.zeros_like(r)
148 |         weight_out_branch[r < 0.33] = 2.
149 |         weight_out_new_branch[r < 0.33] = 0.
150 |         weight_out_branch[(r >= 0.33) * (r < 0.66)] = 0.
151 |         weight_out_new_branch[(r >= 0.33) * (r < 0.66)] = 2.
152 |         weight_out_branch[r >= 0.66] = 1.
153 |         weight_out_new_branch[r >= 0.66] = 1.
154 |
155 | x = x_branch1 * weight_out_branch.to(x_branch1.device) * 0.5 + x_branch2 * weight_out_new_branch.to(x_branch1.device) * 0.5
156 | ######## random drop-path
157 |
158 | x = self.convs[6:9](x) + residual
159 |
160 |
161 | # if self.convs.bn1.activation == "leaky_relu":
162 | # act = functional.leaky_relu(
163 | # x, negative_slope=self.convs.bn1.activation_param, inplace=not self._last
164 | # )
165 | # elif self.convs.bn1.activation == "elu":
166 | # act = functional.elu(x, alpha=self.convs.bn1.activation_param, inplace=not self._last)
167 | # elif self.convs.bn1.activation == "identity":
168 | # act = x
169 |
170 | if self._last:
171 | return functional.leaky_relu_(x, negative_slope=0.01), x_branch1, x_branch2, x
172 | return functional.leaky_relu_(x, negative_slope=0.01)
173 |
174 |
175 | class IdentityResidualBlock(nn.Module):
176 |
177 | def __init__(
178 | self,
179 | in_channels,
180 | channels,
181 | stride=1,
182 | dilation=1,
183 | groups=1,
184 | norm_act=nn.BatchNorm2d,
185 | dropout=None
186 | ):
187 | """Configurable identity-mapping residual block
188 |
189 | Parameters
190 | ----------
191 | in_channels : int
192 | Number of input channels.
193 | channels : list of int
194 | Number of channels in the internal feature maps. Can either have two or three elements: if three construct
195 | a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then
196 | `3 x 3` then `1 x 1` convolutions.
197 | stride : int
198 | Stride of the first `3 x 3` convolution
199 | dilation : int
200 | Dilation to apply to the `3 x 3` convolutions.
201 | groups : int
202 | Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with
203 | bottleneck blocks.
204 | norm_act : callable
205 | Function to create normalization / activation Module.
206 | dropout: callable
207 | Function to create Dropout Module.
208 | """
209 | super(IdentityResidualBlock, self).__init__()
210 |
211 | # Check parameters for inconsistencies
212 | if len(channels) != 2 and len(channels) != 3:
213 | raise ValueError("channels must contain either two or three values")
214 | if len(channels) == 2 and groups != 1:
215 | raise ValueError("groups > 1 are only valid if len(channels) == 3")
216 |
217 | is_bottleneck = len(channels) == 3
218 | need_proj_conv = stride != 1 or in_channels != channels[-1]
219 |
220 | self.bn1 = norm_act(in_channels)
221 | if not is_bottleneck:
222 | layers = [
223 | (
224 | "conv1",
225 | nn.Conv2d(
226 | in_channels,
227 | channels[0],
228 | 3,
229 | stride=stride,
230 | padding=dilation,
231 | bias=False,
232 | dilation=dilation
233 | )
234 | ), ("bn2", norm_act(channels[0])),
235 | (
236 | "conv2",
237 | nn.Conv2d(
238 | channels[0],
239 | channels[1],
240 | 3,
241 | stride=1,
242 | padding=dilation,
243 | bias=False,
244 | dilation=dilation
245 | )
246 | )
247 | ]
248 | if dropout is not None:
249 | layers = layers[0:2] + [("dropout", dropout())] + layers[2:]
250 | else:
251 | layers = [
252 | (
253 | "conv1",
254 | nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)
255 | ), ("bn2", norm_act(channels[0])),
256 | (
257 | "conv2",
258 | nn.Conv2d(
259 | channels[0],
260 | channels[1],
261 | 3,
262 | stride=1,
263 | padding=dilation,
264 | bias=False,
265 | groups=groups,
266 | dilation=dilation
267 | )
268 | ), ("bn3", norm_act(channels[1])),
269 | ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False))
270 | ]
271 | if dropout is not None:
272 | layers = layers[0:4] + [("dropout", dropout())] + layers[4:]
273 | self.convs = nn.Sequential(OrderedDict(layers))
274 |
275 | if need_proj_conv:
276 | self.proj_conv = nn.Conv2d(
277 | in_channels, channels[-1], 1, stride=stride, padding=0, bias=False
278 | )
279 |
280 | def forward(self, x):
281 | if hasattr(self, "proj_conv"):
282 | bn1 = self.bn1(x)
283 | shortcut = self.proj_conv(bn1)
284 | else:
285 | shortcut = x.clone()
286 | bn1 = self.bn1(x)
287 |
288 | out = self.convs(bn1)
289 | out.add_(shortcut)
290 |
291 | return out
292 |
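Two illustrative sketches follow (neither is part of residual.py; names and shapes are assumptions for illustration). First, the channel-wise random branch selection from the forward pass above, in isolation: per channel, with roughly equal probability, keep only the first branch, only the second, or their average. The (2, 0) / (0, 2) / (1, 1) weight pairs combined with the 0.5 scale make the expected output equal the plain average of the two branches.

import torch

b1 = torch.randn(2, 8, 4, 4)  # stand-ins for x_branch1 / x_branch2
b2 = torch.randn(2, 8, 4, 4)
r = torch.rand(1, b1.shape[1], 1, 1)    # one random gate per channel
w1, w2 = torch.zeros_like(r), torch.zeros_like(r)
w1[r < 0.33] = 2.0                      # first branch only
w2[(r >= 0.33) & (r < 0.66)] = 2.0      # second branch only
w1[r >= 0.66] = 1.0                     # average of both branches
w2[r >= 0.66] = 1.0
out = b1 * w1 * 0.5 + b2 * w2 * 0.5     # E[out] = (b1 + b2) / 2

Second, a minimal usage sketch for IdentityResidualBlock, assuming the default nn.BatchNorm2d normalization: a two-element channel list builds the plain variant with two 3x3 convolutions, a three-element list builds the 1x1 / 3x3 / 1x1 bottleneck, and a stride or channel change triggers the projection shortcut.

import torch
import torch.nn as nn
from modules.residual import IdentityResidualBlock

block = IdentityResidualBlock(64, [64, 64])                      # two 3x3 convolutions
bottleneck = IdentityResidualBlock(64, [32, 32, 128], stride=2)  # 1x1 / 3x3 / 1x1

x = torch.randn(2, 64, 56, 56)
print(block(x).shape)       # torch.Size([2, 64, 56, 56])
print(bottleneck(x).shape)  # torch.Size([2, 128, 28, 28])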
--------------------------------------------------------------------------------
/results/2022-08-26_voc_10-1_RCIL_overlap.csv:
--------------------------------------------------------------------------------
1 | 0,0.95089155,0.89876163,0.4151985,0.88122326,0.70634246,0.8473183,0.9514839,0.87938136,0.920397,0.48052424,0.91368866,x,x,x,x,x,x,x,x,x,x,0.8041101098060608
2 | 1,0.90379035,0.90699446,0.4090379,0.8555996,0.6969214,0.8272267,0.935766,0.8780787,0.9161994,0.49986368,0.9176442,0.25727844,x,x,x,x,x,x,x,x,x,0.7503666877746582
3 | 2,0.89751005,0.9137675,0.4136626,0.8449985,0.6961307,0.81965595,0.9152721,0.87618923,0.8416552,0.46621302,0.88139516,0.24576756,0.66064745,x,x,x,x,x,x,x,x,0.7286819219589233
4 | 3,0.9093318,0.9029603,0.40329203,0.8183127,0.67543966,0.8052487,0.9083219,0.87852603,0.85191333,0.46681142,0.73642087,0.30729133,0.44432294,0.0027365456,x,x,x,x,x,x,x,0.6507806181907654
5 | 4,0.89968026,0.9011259,0.34064978,0.81658214,0.6489716,0.80777216,0.8883094,0.86390215,0.8365746,0.45933193,0.503004,0.27600047,0.3685692,0.01090799,0.32988927,x,x,x,x,x,x,0.5967513918876648
6 | 5,0.8792848,0.8977728,0.3367081,0.7941235,0.63828105,0.80857056,0.8895229,0.86556274,0.8492231,0.42895487,0.5302957,0.2318576,0.35776937,0.0103400955,0.3057474,0.6887053,x,x,x,x,x,0.5945450067520142
7 | 6,0.8425333,0.8791887,0.33996668,0.7565994,0.625907,0.7926743,0.8743645,0.8655236,0.8470372,0.4078049,0.41702384,0.1608563,0.34181458,0.008686906,0.30137503,0.6831973,0.061288737,x,x,x,x,0.5415201783180237
8 | 7,0.8389669,0.8579514,0.32842726,0.72502434,0.61489487,0.76747936,0.841649,0.8553333,0.77642787,0.39212394,0.4050792,0.16104361,0.28954697,0.0048611816,0.32937884,0.6649143,0.04121818,0.018326951,x,x,x,0.49514707922935486
9 | 8,0.7422038,0.8473317,0.32555038,0.5836337,0.565963,0.7554807,0.8622531,0.86400795,0.79980946,0.3548617,0.07291534,0.12836608,0.2652683,0.0032105464,0.33591396,0.65190583,0.06318373,0.006446207,0.10420244,x,x,0.43855300545692444
10 | 9,0.71541417,0.83263123,0.34039518,0.4886248,0.55899143,0.7303137,0.7958543,0.8567157,0.52299905,0.097785436,0.01782123,0.18011905,0.23694792,0.0008592654,0.30489823,0.66641164,0.056657832,0.027670635,0.09233559,0.17588043,x,0.3849663734436035
11 | 10,0.61597806,0.75047874,0.32280964,0.42837402,0.4490231,0.70404476,0.7201332,0.8531448,0.56784815,0.11284213,0.0028484133,0.1677077,0.23398092,0.00066456915,0.2740474,0.6516764,0.0642559,0.019483257,0.07554913,0.11766258,0.03531266,0.34132692217826843
12 |
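Each row above records one incremental step: column 0 is the step index, the following columns give per-class IoU with `x` for classes not yet learned, and the last column is the mean IoU over the classes learned so far. A hypothetical reader for such files (illustrative only; the repository's own aggregation is done by average_csv.py):

import csv

def read_results(path):
    # Yield (step, per-class IoUs, mean IoU) for each non-empty row.
    with open(path) as f:
        for row in csv.reader(f):
            if not row:
                continue
            step = int(row[0])
            ious = [float(v) for v in row[1:-1] if v != "x"]
            mean_iou = float(row[-1])
            # The stored mean matches the average over learned classes.
            assert abs(mean_iou - sum(ious) / len(ious)) < 1e-4
            yield step, ious, mean_iou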
--------------------------------------------------------------------------------
/results/2022-08-26_voc_15-5s_RCIL_overlap.csv:
--------------------------------------------------------------------------------
1 | 0,0.9435382,0.89631164,0.41205335,0.89128953,0.70238,0.84427077,0.9262657,0.91839623,0.92153823,0.44740173,0.85710883,0.6216881,0.8834467,0.85085106,0.8850706,0.86730176,x,x,x,x,x,0.8043069839477539
2 | 1,0.9125084,0.85484755,0.4051165,0.8535315,0.72940975,0.8095226,0.9328268,0.91644233,0.9057732,0.43827245,0.8312098,0.6090195,0.8454651,0.8289459,0.8747401,0.85988843,0.14393356,x,x,x,x,0.7500854730606079
3 | 2,0.9185967,0.8627041,0.41143695,0.8614238,0.6935155,0.7824836,0.9270865,0.9080432,0.90237033,0.3505627,0.7214436,0.51502085,0.8299621,0.8190334,0.87419224,0.8438665,0.14210498,0.44700462,x,x,x,0.711713969707489
4 | 3,0.86295706,0.8759095,0.39476308,0.81177276,0.67782474,0.75876784,0.9231016,0.90173006,0.88245404,0.359833,0.74314374,0.54820365,0.7744275,0.8232274,0.8649266,0.82139856,0.18886802,0.43942255,0.17034052,x,x,0.6748985648155212
5 | 4,0.85293126,0.865286,0.39272776,0.79612714,0.67968273,0.7461738,0.88593245,0.88590413,0.8605231,0.20438921,0.708988,0.50997144,0.8125218,0.7988098,0.83568674,0.77981,0.20917015,0.43408087,0.16834442,0.37267223,x,0.6399866342544556
6 | 5,0.8268275,0.8222172,0.39027506,0.764712,0.6063432,0.7079811,0.8416076,0.85620755,0.8629939,0.21518445,0.709648,0.5171112,0.7772585,0.7695282,0.8413933,0.7591464,0.21374348,0.45605338,0.16618516,0.26518303,0.092057966,0.5934123396873474
7 |
--------------------------------------------------------------------------------
/results/2022-08-28_voc_10-1_RCIL_disjoint.csv:
--------------------------------------------------------------------------------
1 | 0,0.9304783,0.89444244,0.33915022,0.88900983,0.73639286,0.78166854,0.92210114,0.8309136,0.8609211,0.3288784,0.866555,x,x,x,x,x,x,x,x,x,x,0.7618646621704102
2 | 1,0.78829646,0.84922767,0.30461437,0.7727829,0.69999444,0.75030863,0.9098074,0.81364757,0.82427967,0.34103474,0.79305434,0.115079775,x,x,x,x,x,x,x,x,x,0.6635107398033142
3 | 2,0.76936483,0.86197764,0.31124792,0.77725405,0.69794863,0.7715149,0.90545523,0.8075278,0.573904,0.32432562,0.5146239,0.07990825,0.0150719825,x,x,x,x,x,x,x,x,0.5700095891952515
4 | 3,0.79971635,0.86524725,0.31302357,0.77762073,0.6932139,0.7603409,0.8925084,0.8049613,0.69535357,0.31822452,0.24231832,0.08332427,0.12271786,8.0001475e-05,x,x,x,x,x,x,x,0.5263321995735168
5 | 4,0.7380819,0.83321446,0.20278634,0.67229265,0.669829,0.7408594,0.8625662,0.6037119,0.6679072,0.28195956,0.32396424,0.053372614,0.24614373,0.036943372,0.046008714,x,x,x,x,x,x,0.46530941128730774
6 | 5,0.75564975,0.81136423,0.20111619,0.691778,0.69175404,0.7305902,0.84475523,0.59191185,0.686963,0.2686428,0.31689397,0.044366818,0.23954922,0.03700813,0.034761913,0.09405573,x,x,x,x,x,0.44007256627082825
7 | 6,0.7047454,0.8123623,0.20491843,0.6534701,0.6501103,0.6936578,0.8134007,0.5904827,0.6784723,0.2622482,0.35585934,0.04036571,0.2367443,0.019026859,0.032112055,0.099028155,0.022936061,x,x,x,x,0.40411415696144104
8 | 7,0.6696136,0.8014942,0.19948699,0.65473914,0.6378002,0.71452874,0.8189692,0.5814909,0.6860349,0.23605363,0.2704357,0.0362047,0.24451292,0.0077381,0.045911506,0.114856176,0.011901991,0.09466273,x,x,x,0.37924638390541077
9 | 8,0.549721,0.77369285,0.18698326,0.7734879,0.530265,0.67338556,0.7837399,0.60541004,0.6624484,0.20682454,0.22627056,0.028063728,0.21490966,0.0145022115,0.06328861,0.113310754,0.018206112,0.058288794,0.04792252,x,x,0.3437221646308899
10 | 9,0.5328524,0.7390536,0.1842102,0.71114284,0.5217879,0.5973948,0.6763708,0.557391,0.36603102,0.05675302,0.13522694,0.025624229,0.22525957,0.002559292,0.08003627,0.075853385,0.0049990523,0.10952196,0.043520868,0.2434724,x,0.29445308446884155
11 | 10,0.4297546,0.5990116,0.16636327,0.63828856,0.26645088,0.6182678,0.27558625,0.560001,0.3960604,0.05556691,0.14834777,0.022865983,0.22829732,0.0026008731,0.07775164,0.060220823,0.013225545,0.12763286,0.04194809,0.18676037,0.06086524,0.23694613575935364
12 |
--------------------------------------------------------------------------------
/results/2022-08-28_voc_15-5s_RCIL_disjoint.csv:
--------------------------------------------------------------------------------
1 | 0,0.93803084,0.90134865,0.42456806,0.9047449,0.69415635,0.79963773,0.8875202,0.9182805,0.9140329,0.43597305,0.83601314,0.5925765,0.87521946,0.8392464,0.87186414,0.8565822,x,x,x,x,x,0.7931121587753296
2 | 1,0.90387577,0.84906304,0.4125576,0.8952073,0.6937233,0.7581501,0.8854338,0.91761476,0.90541434,0.39367327,0.83811516,0.553197,0.8384663,0.83373064,0.8483014,0.84748113,0.121833995,x,x,x,x,0.7350493669509888
3 | 2,0.9114411,0.8413359,0.4034174,0.8545339,0.65250206,0.72222525,0.875782,0.91038585,0.8670623,0.35377872,0.61930114,0.5263114,0.822304,0.8261719,0.8453847,0.83354867,0.1106283,0.07527872,x,x,x,0.6695218086242676
4 | 3,0.85768205,0.8528622,0.37320384,0.8193788,0.64057016,0.73632693,0.8962346,0.89728343,0.83639926,0.30582818,0.712404,0.5442223,0.7396935,0.8160268,0.83915335,0.8118619,0.15944138,0.2917932,0.112183064,x,x,0.6443446278572083
5 | 4,0.8584185,0.8141067,0.37512392,0.80884844,0.62392044,0.7291361,0.76982236,0.8952847,0.85153776,0.050536636,0.6240096,0.47219145,0.783686,0.7914762,0.7768021,0.7957912,0.18010029,0.35679594,0.16896744,0.30696738,x,0.6016762256622314
6 | 5,0.8233437,0.779386,0.3623762,0.77678823,0.5848956,0.7081866,0.45927906,0.86528957,0.8381437,0.04643704,0.48597243,0.49434838,0.76664174,0.79110086,0.78044075,0.76474565,0.16638654,0.31007782,0.14486076,0.24611962,0.093435556,0.5375359654426575
7 |
--------------------------------------------------------------------------------
/results/2022-10-04_ade_100-50_RCIL.csv:
--------------------------------------------------------------------------------
1 | 0,0.3636114,0.67831814,0.74550724,0.920761,0.7263673,0.6970905,0.74835986,0.77812356,0.7348483,0.56751186,0.60455513,0.5064667,0.54191625,0.71389055,0.2598584,0.33229688,0.45012075,0.5427138,0.42231813,0.6561331,0.49056685,0.7704959,0.48096976,0.5739578,0.564064,0.36353794,0.34717566,0.6834148,0.500861,0.5101507,0.30287042,0.32600245,0.48669282,0.29287735,0.38928092,0.35700107,0.41400498,0.4464307,0.6430896,0.25009108,0.4593251,0.1248783,0.12687916,0.34884745,0.24038924,0.4451352,0.18360381,0.3864861,0.56695884,0.49260828,0.57754236,0.5579286,0.30723614,0.13735299,0.27775824,0.557404,0.45588315,0.8683364,0.45546347,0.5531636,0.36629593,0.22893068,0.40531716,0.32908416,0.30814722,0.44468546,0.7131308,0.33702862,0.3860547,0.13188116,0.3602346,0.45801893,0.5754272,0.50728524,0.27281222,0.47918355,0.3910715,0.27376366,0.21478488,0.4235692,0.358341,0.7619978,0.43584257,0.3512941,0.05074502,0.29771313,0.54516876,0.15928294,0.15616775,0.15795183,0.5469554,0.42752576,0.15579931,0.24614426,0.10136532,0.0003305916,0.038397495,0.31140503,0.40317053,0.15155277,0.36579913,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,0.4251202940940857
2 | 1,0.27573407,0.6711656,0.73558813,0.91933525,0.72390366,0.69792575,0.7448941,0.773026,0.7277569,0.5592059,0.5989315,0.51313096,0.53824455,0.7063096,0.2471758,0.3320389,0.45204037,0.52615273,0.41283417,0.6472333,0.49050686,0.7740593,0.4685841,0.5604835,0.5517567,0.35224965,0.30030966,0.66080433,0.5018804,0.50657785,0.32568768,0.30531216,0.4733088,0.29890573,0.37685362,0.3215577,0.40120828,0.44560483,0.6415554,0.23150474,0.44863135,0.1638713,0.15445222,0.34975958,0.2617169,0.45434308,0.19115065,0.40138957,0.56432706,0.46553108,0.54750603,0.58047354,0.29402003,0.12135583,0.27690038,0.5687915,0.4294594,0.86737984,0.4573373,0.5373887,0.34856477,0.26849112,0.36223948,0.3134626,0.28649455,0.45085457,0.6779934,0.3702569,0.39365056,0.15600792,0.34583303,0.4518692,0.56047446,0.48390552,0.2712609,0.4769455,0.39092425,0.2986132,0.23687649,0.5875661,0.40068159,0.765544,0.4394135,0.3558548,0.08913967,0.2826759,0.53973746,0.17151268,0.16379449,0.18395202,0.580663,0.33649972,0.15656601,0.2521425,0.07568143,0.0034030306,0.027964296,0.34484994,0.43879023,0.1541946,0.35480413,0.11506087,0.026074601,0.36108258,0.034634024,0.20240906,0.043525036,0.15409492,0.5895165,0.046370454,0.30982503,0.15875782,0.0,0.054614056,0.48175576,0.7812479,0.0,0.31478125,0.67000365,0.24088565,0.4627834,0.258749,0.044542983,0.09729086,0.20828389,0.32808113,0.17077346,0.48496103,0.30007222,0.38479945,0.52514124,0.54633325,0.0037358105,0.010584123,0.22175004,0.07901752,0.0420206,0.0,0.0,0.0,0.23071654,0.19057001,0.00061835133,0.04124022,0.010726497,0.009930433,0.0,0.0,0.0,0.0,0.0,0.34446778893470764
3 |
--------------------------------------------------------------------------------
/results/2022-10-17_voc_15-5s_RCIL_disjoint.csv:
--------------------------------------------------------------------------------
1 | 0,0.9392957,0.87639076,0.4199407,0.8891285,0.68562204,0.81803256,0.8913655,0.9125613,0.91806406,0.4447751,0.84244055,0.6194558,0.88817406,0.8608917,0.87325364,0.8640695,x,x,x,x,x,0.7964663505554199
2 | 1,0.9066761,0.8522871,0.4095023,0.8753517,0.6924991,0.7879578,0.8853484,0.91031474,0.90546995,0.40099892,0.8353409,0.5651091,0.870013,0.8233101,0.8540086,0.85197073,0.14343625,x,x,x,x,0.7393879890441895
3 | 2,0.9129685,0.8269965,0.40859532,0.8581973,0.6614217,0.7748348,0.8898815,0.90073806,0.8760883,0.36443496,0.62392324,0.487623,0.8258621,0.81686455,0.85921526,0.84700775,0.12512185,0.11188957,x,x,x,0.6762035489082336
4 | 3,0.86193204,0.86468405,0.39184877,0.84404594,0.6761761,0.76660573,0.88926643,0.89717007,0.8531008,0.29168016,0.6462933,0.5161888,0.76709026,0.8218213,0.8319283,0.8329574,0.14504454,0.2503627,0.12662546,x,x,0.6460432410240173
5 | 4,0.85671693,0.8392462,0.3940379,0.8072071,0.6444481,0.7586846,0.74724495,0.8915063,0.8668237,0.026497187,0.59369534,0.47771716,0.807097,0.7777526,0.7880006,0.8131544,0.17393346,0.3402702,0.18715267,0.23352365,x,0.6012355089187622
6 | 5,0.8236754,0.78579944,0.38181427,0.77614033,0.6072145,0.72347987,0.5672922,0.86342233,0.8594102,0.020834593,0.51676387,0.47107968,0.7750919,0.7801805,0.7661633,0.78658813,0.16082549,0.32172462,0.16901252,0.2055818,0.07142766,0.544453501701355
7 |
--------------------------------------------------------------------------------
/results/2022-10-17_voc_15-5s_RCIL_overlap.csv:
--------------------------------------------------------------------------------
1 | 0,0.9457726,0.89108616,0.4253083,0.9029974,0.7249548,0.8373216,0.9376237,0.90068984,0.9308707,0.48663175,0.8437603,0.634081,0.8996713,0.8481684,0.85218537,0.86540544,x,x,x,x,x,0.8079080581665039
2 | 1,0.9126766,0.84303427,0.41687128,0.8702857,0.71507275,0.81110364,0.944687,0.8989807,0.9137932,0.45467374,0.8467138,0.62786007,0.8664778,0.84817725,0.8432979,0.86053854,0.13175264,x,x,x,x,0.7532939314842224
3 | 2,0.90958136,0.8745674,0.41949633,0.8705489,0.67759836,0.7901692,0.93002075,0.89502543,0.9115262,0.3073103,0.7382754,0.56543356,0.8440492,0.84022087,0.8415629,0.85133016,0.09951019,0.46051782,x,x,x,0.7125968933105469
4 | 3,0.84889257,0.8817981,0.41422194,0.8072579,0.6795392,0.7721502,0.9330152,0.89182615,0.8843357,0.32014602,0.7808588,0.5841078,0.8090889,0.84365475,0.8456089,0.83239067,0.14389306,0.40250978,0.15768984,x,x,0.6754202842712402
5 | 4,0.8475094,0.85131013,0.41100353,0.8028882,0.6594982,0.74496645,0.9161088,0.8655726,0.86665887,0.119581915,0.74381983,0.57299894,0.8260389,0.8183584,0.83791,0.79990005,0.17593998,0.4164559,0.16204503,0.3933265,x,0.6415945887565613
6 | 5,0.8189106,0.8257633,0.4193321,0.75071704,0.64601445,0.726597,0.9022461,0.8337075,0.86862785,0.13039204,0.7347005,0.5735201,0.81154186,0.79398966,0.82943684,0.78087705,0.17952763,0.4113089,0.15023288,0.27930617,0.106065966,0.598705530166626
7 |
--------------------------------------------------------------------------------
/scripts/ade/RCIL_ade_100-10.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1
11 | NB_GPU=2
12 |
13 | DATA_ROOT=/path/to/my/ade
14 |
15 | DATASET=ade
16 | TASK=100-10
17 | NAME=RCIL
18 | METHOD=FT
19 |
20 | OPTIONS="--checkpoint checkpoints/step/ --pod local --pod_factor 0.001 --pod_logits --unce --unkd --loss_kd 100 --classif_adaptive_factor --init_balanced"
21 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
22 |
23 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
24 | rm -f ${RESULTSFILE}
25 |
26 | echo -ne "\ek${SCREENNAME}\e\\"
27 |
28 | echo "Writing in ${RESULTSFILE}"
29 |
30 | # If you already trained the model for the first step, you can re-use those weights
31 | # in order to skip this initial step --> faster iteration on your model
32 | # Set this variable with the weights path
33 | # FIRSTMODEL=/path/to/my/first/weights
34 | # Then, for the first step, append those options:
35 | # --ckpt ${FIRSTMODEL} --test
36 | # And for the second step, this option:
37 | # --step_ckpt ${FIRSTMODEL}
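# Concretely (hypothetical weights path, for illustration only), the step-0 and
# step-1 launches below would then carry these extra options:
#   FIRSTMODEL=/path/to/my/first/weights.pth
#   step 0: ... ${OPTIONS} --ckpt ${FIRSTMODEL} --test
#   step 1: ... ${OPTIONS} --step_ckpt ${FIRSTMODEL}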
38 |
39 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.01 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS}
40 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
41 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
42 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
43 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
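# Note: the escaped --pod_options argument repeated above decodes to the JSON
#   {"switch": {"after": {"extra_channels": "sum", "factor": 0.00001, "type": "local"}}}
# i.e. the same switch configuration is applied at every incremental step.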
45 | python3 average_csv.py ${RESULTSFILE}
46 |
47 | echo ${SCREENNAME}
48 |
49 |
50 | end=`date +%s`
51 | runtime=$((end-start))
52 | echo "Run in ${runtime}s"
53 |
--------------------------------------------------------------------------------
/scripts/ade/RCIL_ade_100-5.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1
11 | NB_GPU=2
12 |
13 | DATA_ROOT=/path/to/my/ade
14 |
15 | DATASET=ade
16 | TASK=100-5
17 | NAME=RCIL
18 | METHOD=FT
19 | OPTIONS="--checkpoint checkpoints/step/ --pod local --pod_factor 0.001 --pod_logits --unce --unkd --loss_kd 100 --classif_adaptive_factor --init_balanced"
20 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
21 |
22 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
23 | rm -f ${RESULTSFILE}
24 |
25 | echo -ne "\ek${SCREENNAME}\e\\"
26 |
27 | echo "Writing in ${RESULTSFILE}"
28 |
29 | # If you already trained the model for the first step, you can re-use those weights
30 | # in order to skip this initial step --> faster iteration on your model
31 | # Set this variable with the weights path
32 | # FIRSTMODEL=/path/to/my/first/weights
33 | # Then, for the first step, append those options:
34 | # --ckpt ${FIRSTMODEL} --test
35 | # And for the second step, this option:
36 | # --step_ckpt ${FIRSTMODEL}
37 |
38 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.01 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS}
39 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
40 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
41 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
42 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
43 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 6 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 7 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 8 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
47 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 9 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
48 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 10 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
49 | python3 average_csv.py ${RESULTSFILE}
50 |
51 | echo ${SCREENNAME}
52 |
53 |
54 | end=`date +%s`
55 | runtime=$((end-start))
56 | echo "Run in ${runtime}s"
57 |
--------------------------------------------------------------------------------
/scripts/ade/RCIL_ade_100-50.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 | DATA_ROOT=/path/to/my/ade
14 |
15 | DATASET=ade
16 | TASK=100-50
17 | NAME=RCIL
18 | METHOD=FT
19 | OPTIONS="--checkpoint checkpoints/step/ --pod local --pod_factor 0.001 --pod_logits --unce --unkd --loss_kd 100 --classif_adaptive_factor --init_balanced"
20 |
21 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
22 |
23 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
24 | rm -f ${RESULTSFILE}
25 |
26 | echo -ne "\ek${SCREENNAME}\e\\"
27 |
28 | echo "Writing in ${RESULTSFILE}"
29 |
30 | # If you already trained the model for the first step, you can re-use those weights
31 | # in order to skip this initial step --> faster iteration on your model
32 | # Set this variable with the weights path
33 | # FIRSTMODEL=/path/to/my/first/weights
34 | # Then, for the first step, append those options:
35 | # --ckpt ${FIRSTMODEL} --test
36 | # And for the second step, this option:
37 | # --step_ckpt ${FIRSTMODEL}
38 |
39 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 6 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.01 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS}
40 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 6 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
41 | python3 average_csv.py ${RESULTSFILE}
42 |
43 | echo ${SCREENNAME}
44 |
45 |
46 | end=`date +%s`
47 | runtime=$((end-start))
48 | echo "Run in ${runtime}s"
--------------------------------------------------------------------------------
/scripts/cityscapeClL/dev.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=4,5,6,7
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/PATH/TO/cityscapes
15 |
16 | DATASET=cityscape
17 | TASK=10-9
18 | NAME=RCIL_overlap
19 | METHOD=RCIL
20 | OPTIONS="--checkpoint checkpoints/step/"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you already trained the model for the first step, you can re-use those weights
32 | # in order to skip this initial step --> faster iteration on your model
33 | # Set this variable with the weights path
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append those options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=50
42 | EPOCHS=50
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
46 |
47 | python3 average_csv.py ${RESULTSFILE}
48 |
49 | echo ${SCREENNAME}
50 |
51 |
52 | end=`date +%s`
53 | runtime=$((end-start))
54 | echo "Run in ${runtime}s"
55 |
--------------------------------------------------------------------------------
/scripts/cityscapesDomain/RCIL_cityscapes_1-1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/media/ssd/plop/data/cityscapes_domain/
15 |
16 | DATASET=cityscapes_domain
17 | TASK=1-1
18 | NAME=RCIL
19 | METHOD=FT
20 | OPTIONS="--checkpoint checkpoints/cityscapes-ours/ --pod local --pod_factor 0.0001 --pod_logits"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/ablation/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you already trained the model for the first step, you can re-use those weights
32 | # in order to skip this initial step --> faster iteration on your model
33 | # Set this variable with the weights path
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append those options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=50
42 | EPOCHS=50
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
47 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
48 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
49 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
50 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 6 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
51 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 7 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
52 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 8 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
53 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 9 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
54 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 10 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
55 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 11 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
56 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 12 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
57 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 13 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
58 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 14 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
59 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 15 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
60 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 16 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
61 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 17 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
62 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 18 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
63 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 19 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
64 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 20 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
65 | python3 average_csv.py ${RESULTSFILE}
66 |
67 | echo ${SCREENNAME}
68 |
69 |
70 | end=`date +%s`
71 | runtime=$((end-start))
72 | echo "Run in ${runtime}s"
73 |
--------------------------------------------------------------------------------
/scripts/cityscapesDomain/RCIL_cityscapes_11-1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/media/ssd/plop/data/cityscapes_domain/
15 |
16 | DATASET=cityscapes_domain
17 | TASK=11-1
18 | NAME=RCIL
19 | METHOD=FT
20 | OPTIONS="--checkpoint checkpoints/cityscapes-ours/ --pod local --pod_factor 0.0001 --pod_logits"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you already trained the model for the first step, you can re-use those weights
32 | # in order to skip this initial step --> faster iteration on your model
33 | # Set this variable with the weights path
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append those options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=50
42 | EPOCHS=50
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
47 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
48 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
49 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
50 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 6 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
51 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 7 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
52 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 8 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
53 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 9 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
54 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 10 --lr 0.004 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
55 | python3 average_csv.py ${RESULTSFILE}
56 |
57 | echo ${SCREENNAME}
58 |
59 |
60 | end=`date +%s`
61 | runtime=$((end-start))
62 | echo "Run in ${runtime}s"
63 |
--------------------------------------------------------------------------------
/scripts/cityscapesDomain/RCIL_cityscapes_11-5.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/media/ssd/plop/data/cityscapes_domain/
15 |
16 | DATASET=cityscapes_domain
17 | TASK=11-5
18 | NAME=RCIL
19 | METHOD=FT
20 | OPTIONS="--checkpoint checkpoints/cityscapes-ours/ --pod local --pod_factor 0.0001 --pod_logits"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you already trained the model for the first step, you can re-use those weights
32 | # in order to skip this initial step --> faster iteration on your model
33 | # Set this variable with the weights path
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append those options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=50
42 | EPOCHS=50
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.002 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.001, \"type\": \"local\"}}}"
47 | python3 average_csv.py ${RESULTSFILE}
48 |
49 | echo ${SCREENNAME}
50 |
51 |
52 | end=`date +%s`
53 | runtime=$((end-start))
54 | echo "Run in ${runtime}s"
55 |
--------------------------------------------------------------------------------
/scripts/voc/RCIL_10-1-disjoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/Datapath/to/PascalVOC12
15 |
16 | DATASET=voc
17 | TASK=10-1
18 | NAME=RCIL_disjoint
19 | METHOD=RCIL
20 | OPTIONS="--checkpoint checkpoints/step/"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you already trained the model for the first step, you can re-use those weights
32 | # in order to skip this initial step --> faster iteration on your model
33 | # Set this variable with the weights path
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append those options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=30
42 | EPOCHS=30
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
47 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
48 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
49 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
50 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 6 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
51 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 7 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
52 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 8 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
53 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 9 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
54 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 10 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
55 | python3 average_csv.py ${RESULTSFILE}
56 |
57 | echo ${SCREENNAME}
58 |
59 |
60 | end=`date +%s`
61 | runtime=$((end-start))
62 | echo "Run in ${runtime}s"
63 |
--------------------------------------------------------------------------------
/scripts/voc/RCIL_10-1-overlap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/Datapath/to/PascalVOC12
15 |
16 | DATASET=voc
17 | TASK=10-1
18 | NAME=RCIL_overlap
19 | METHOD=RCIL
20 | OPTIONS="--checkpoint checkpoints/step/"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you have already trained the model for the first step, you can re-use those weights
32 | # to skip this initial step and iterate faster on your model.
33 | # Set this variable to the weights path:
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append these options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=30
42 | EPOCHS=30
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
47 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
48 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
49 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
50 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 6 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
51 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 7 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
52 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 8 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
53 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 9 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
54 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 10 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
55 | python3 average_csv.py ${RESULTSFILE}
56 |
57 | echo ${SCREENNAME}
58 |
59 |
60 | end=`date +%s`
61 | runtime=$((end-start))
62 | echo "Run in ${runtime}s"
63 |
--------------------------------------------------------------------------------
/scripts/voc/RCIL_15-1-disjoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/media/ssd/plop/data/PascalVOC12
15 |
16 | DATASET=voc
17 | TASK=15-5s
18 | NAME=RCIL_disjoint
19 | METHOD=RCIL
20 | OPTIONS="--checkpoint checkpoints/step/"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you have already trained the model for the first step, you can re-use those weights
32 | # to skip this initial step and iterate faster on your model.
33 | # Set this variable to the weights path:
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append these options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=30
42 | EPOCHS=30
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
47 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
48 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
49 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
50 | python3 average_csv.py ${RESULTSFILE}
51 |
52 | echo ${SCREENNAME}
53 |
54 |
55 | end=`date +%s`
56 | runtime=$((end-start))
57 | echo "Run in ${runtime}s"
58 |
--------------------------------------------------------------------------------
/scripts/voc/RCIL_15-1-overlap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | start=`date +%s`
6 |
7 | START_DATE=$(date '+%Y-%m-%d')
8 |
9 | PORT=$((9000 + RANDOM % 1000))
10 | GPU=0,1,2,3
11 | NB_GPU=4
12 |
13 |
14 | DATA_ROOT=/media/ssd/plop/data/PascalVOC12
15 |
16 | DATASET=voc
17 | TASK=15-5s
18 | NAME=RCIL_overlap
19 | METHOD=RCIL
20 | OPTIONS="--checkpoint checkpoints/step/"
21 |
22 | SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"
23 |
24 | RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
25 | rm -f ${RESULTSFILE}
26 |
27 | echo -ne "\ek${SCREENNAME}\e\\"
28 |
29 | echo "Writing in ${RESULTSFILE}"
30 |
31 | # If you have already trained the model for the first step, you can re-use those weights
32 | # to skip this initial step and iterate faster on your model.
33 | # Set this variable to the weights path:
34 | # FIRSTMODEL=/path/to/my/first/weights
35 | # Then, for the first step, append these options:
36 | # --ckpt ${FIRSTMODEL} --test
37 | # And for the second step, this option:
38 | # --step_ckpt ${FIRSTMODEL}
39 |
40 | BATCH_SIZE=6
41 | INITIAL_EPOCHS=30
42 | EPOCHS=30
43 |
44 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.02 --epochs ${INITIAL_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
45 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
46 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
47 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
48 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
49 | CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.001 --epochs ${EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} --overlap
50 | python3 average_csv.py ${RESULTSFILE}
51 |
52 | echo ${SCREENNAME}
53 |
54 |
55 | end=`date +%s`
56 | runtime=$((end-start))
57 | echo "Run in ${runtime}s"
58 |
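
# Note: the VOC scripts above differ only in the task, the --overlap flag, and the
# number of incremental steps: step 0 trains the base classes at lr 0.02 for
# ${INITIAL_EPOCHS} epochs, every later step fine-tunes at lr 0.001, and
# average_csv.py aggregates the per-step CSVs at the end. Below is a minimal
# Python sketch (a hypothetical helper, not part of the repository) that
# regenerates the same per-step command lines:
#
#     NB_STEPS = {"15-5s": 5, "10-1": 10}  # incremental steps after step 0
#
#     def step_commands(task="15-5s", overlap=True, lr_first=0.02, lr_incr=0.001):
#         """Return the run.py invocations used by the scripts above, one per step."""
#         base = ("CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch "
#                 "--master_port ${PORT} --nproc_per_node=${NB_GPU} run.py "
#                 "--date ${START_DATE} --data_root ${DATA_ROOT} --batch_size ${BATCH_SIZE} "
#                 "--dataset ${DATASET} --name ${NAME} --task ${TASK} "
#                 "--step %d --lr %g --epochs %s --method ${METHOD} --opt_level O1 ${OPTIONS}")
#         suffix = " --overlap" if overlap else ""
#         cmds = []
#         for step in range(NB_STEPS[task] + 1):
#             lr = lr_first if step == 0 else lr_incr
#             epochs = "${INITIAL_EPOCHS}" if step == 0 else "${EPOCHS}"
#             cmds.append(base % (step, lr, epochs) + suffix)
#         return cmds
#
#     print("\n".join(step_commands("10-1", overlap=True)))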
--------------------------------------------------------------------------------
/segmentation_module.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import math
3 | import os
4 | from functools import partial, reduce
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 | from torch import distributed
10 | from torch.nn import init
11 |
12 | import inplace_abn
13 | import models
14 | from inplace_abn import ABN, InPlaceABN, InPlaceABNSync
15 | from modules import DeeplabV3
16 |
17 |
18 | def make_model(opts, classes=None):
19 | if opts.norm_act == 'iabn_sync':
20 | ### xjw fixed
21 | norm = partial(InPlaceABNSync, activation="leaky_relu", activation_param=1., group=distributed.group.WORLD)
22 | elif opts.norm_act == 'iabn':
23 | norm = partial(InPlaceABN, activation="leaky_relu", activation_param=.01)
24 | elif opts.norm_act == 'abn':
25 | norm = partial(ABN, activation="leaky_relu", activation_param=.01)
26 | else:
27 | norm = nn.BatchNorm2d # not synchronized, can be enabled with apex
28 |
29 | if opts.norm_act == "iabn_sync_test":
30 | opts.norm_act = "iabn_sync"
31 |
32 | body = models.__dict__[f'net_{opts.backbone}'](norm_act=norm, output_stride=opts.output_stride)
33 | if not opts.no_pretrained:
34 | pretrained_path = os.path.join(opts.code_directory, f'pretrained/{opts.backbone}_{opts.norm_act}.pth.tar')
35 | pre_dict = torch.load(pretrained_path, map_location='cpu')
36 | for key in copy.deepcopy(list(pre_dict['state_dict'].keys())):
37 | pre_dict['state_dict'][key[7:]] = pre_dict['state_dict'].pop(key)  # strip the 'module.' prefix left by DataParallel
38 | del pre_dict['state_dict']['classifier.fc.weight']
39 | del pre_dict['state_dict']['classifier.fc.bias']
40 |
41 | #### xjw fixed: copy the pretrained weights into the new parallel-branch modules (conv2_new, bn2_new, map_convs_new, map_bn_new)
42 | load_dict = pre_dict['state_dict']
43 | new_load_dict = {}
44 | for key in load_dict.keys():
45 | if 'conv2' in key:
46 | name = key
47 | new_name = name.replace('conv2', 'conv2_new')
48 | new_load_dict[new_name] = load_dict[key]
49 | if 'bn2' in key:
50 | name = key
51 | new_name = name.replace('bn2', 'bn2_new')
53 | new_load_dict[new_name] = load_dict[key]
54 |
55 | if 'map_convs' in key:
56 | name = key
57 | new_name = name.replace('map_convs', 'map_convs_new')
58 | new_load_dict[new_name] = load_dict[key]
59 | if 'map_bn' in key:
60 | name = key
61 | new_name = name.replace('map_bn', 'map_bn_new')
62 | new_load_dict[new_name] = load_dict[key]
63 |
64 | for k, v in new_load_dict.items():
65 | load_dict[k] = v
66 |
67 | body.load_state_dict(load_dict)
68 | del pre_dict # free memory
69 |
70 | head_channels = 256
71 |
72 | head = DeeplabV3(
73 | body.out_channels,
74 | head_channels,
75 | 256,
76 | norm_act=norm,
77 | out_stride=opts.output_stride,
78 | pooling_size=opts.pooling
79 | )
80 |
81 | if classes is not None:
82 | model = IncrementalSegmentationModule(
83 | body,
84 | head,
85 | head_channels,
86 | classes=classes,
87 | fusion_mode=opts.fusion_mode,
88 | nb_background_modes=opts.nb_background_modes,
89 | multimodal_fusion=opts.multimodal_fusion,
90 | use_cosine=opts.cosine,
91 | disable_background=opts.disable_background,
92 | only_base_weights=opts.base_weights,
93 | opts=opts
94 | )
95 | else:
96 | model = SegmentationModule(body, head, head_channels, opts.num_classes, opts.fusion_mode)
97 |
98 | return model
99 |
100 |
101 | def flip(x, dim):
102 | indices = [slice(None)] * x.dim()
103 | indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
104 | return x[tuple(indices)]
105 |
106 |
107 | class IncrementalSegmentationModule(nn.Module):
108 |
109 | def __init__(
110 | self,
111 | body,
112 | head,
113 | head_channels,
114 | classes,
115 | ncm=False,
116 | fusion_mode="mean",
117 | nb_background_modes=1,
118 | multimodal_fusion="sum",
119 | use_cosine=False,
120 | disable_background=False,
121 | only_base_weights=False,
122 | opts=None
123 | ):
124 | super(IncrementalSegmentationModule, self).__init__()
125 | self.body = body
126 | self.head = head
127 | # classes must be a list like [n_classes_of_task_i for i in tasks]
128 | assert isinstance(classes, list), \
129 | "Classes must be a list where each index holds the number of classes for that task"
130 |
131 | use_bias = not use_cosine
132 |
133 | if nb_background_modes > 1:
134 | classes[0] -= 1
135 | classes = [nb_background_modes] + classes
136 |
137 | if only_base_weights:
138 | classes = [classes[0]]
139 |
140 | if opts.dataset == "cityscapes_domain":
141 | classes = [opts.num_classes]
142 | self.opts = opts
143 | self.cls = nn.ModuleList([nn.Conv2d(head_channels, c, 1, bias=use_bias) for c in classes])
144 | self.classes = classes
145 | self.head_channels = head_channels
146 | self.tot_classes = reduce(lambda a, b: a + b, self.classes)
147 | self.means = None
148 |
149 | self.multi_modal_background = nb_background_modes > 1
150 | self.disable_background = disable_background
151 |
152 | self.nb_background_modes = nb_background_modes
153 | self.multimodal_fusion = multimodal_fusion
154 |
155 | self.use_cosine = use_cosine
156 | if use_cosine:
157 | self.scalar = nn.Parameter(torch.tensor(1.)).float()  # NB: .float() returns a plain tensor, so this is not a registered nn.Parameter
158 | assert not self.multi_modal_background
159 | else:
160 | self.scalar = None
161 |
162 | self.in_eval = False
163 |
164 | def align_weight(self, align_type):
165 | old_weight_norm = self._compute_weights_norm(self.cls[:-1], only=align_type)
166 |
167 | new_weight_norm = self._compute_weights_norm(self.cls[-1:])
168 |
169 | gamma = old_weight_norm / new_weight_norm
170 |
171 | self.cls[-1].weight.data = gamma * self.cls[-1].weight.data
172 |
173 | def _compute_weights_norm(self, convs, only="all"):
174 | c = 0
175 | s = 0.
176 |
177 | for i, conv in enumerate(convs):
178 | w = conv.weight.data[..., 0, 0]
179 |
180 | if only == "old" and i == 0:
181 | w = w[1:]
182 | elif only == "background" and i == 0:
183 | w = w[:1]
184 |
185 | s += w.norm(dim=1).sum()
186 | c += w.shape[0]
187 |
188 | return s / c
189 |
190 | def _network(self, x, ret_intermediate=False, only_bg=False):
191 | x_b, attentions, branch1_x, branch2_x = self.body(x)
192 | x_pl = self.head(x_b)
193 | out = []
194 |
195 | if self.use_cosine:
196 | x_clf = x_pl.permute(0, 2, 3, 1)
197 | x_clf = x_clf.reshape(x_pl.shape[0] * x_pl.shape[2] * x_pl.shape[3], x_pl.shape[1])
198 | x_clf = F.normalize(x_clf, dim=1, p=2)
199 | x_clf = x_clf.view(x_pl.shape[0], x_pl.shape[2], x_pl.shape[3], x_pl.shape[1])
200 | x_clf = x_clf.permute(0, 3, 1, 2)
201 | else:
202 | x_clf = x_pl
203 |
204 | if only_bg:
205 | return self.cls[0](x_pl)
206 | else:
207 | for i, mod in enumerate(self.cls):
208 | if i == 0 and self.multi_modal_background:
209 | out.append(self.fusion(mod(x_pl)))
210 | elif self.use_cosine:
211 | w = F.normalize(mod.weight, dim=1, p=2)
212 | out.append(F.conv2d(x_pl, w))
213 | else:
214 | out.append(mod(x_pl))
215 |
216 | x_o = torch.cat(out, dim=1)
217 |
218 | if self.disable_background and self.in_eval:
219 | x_o[:, 0] = 0.
220 |
221 | if ret_intermediate:
222 | return x_o, x_b, x_pl, attentions
223 | return x_o
224 |
225 | def fusion(self, tensors):
226 | if self.multimodal_fusion == "sum":
227 | return tensors.sum(dim=1, keepdim=True)
228 | elif self.multimodal_fusion == "mean":
229 | return tensors.mean(dim=1, keepdim=True)
230 | elif self.multimodal_fusion == "max":
231 | return tensors.max(dim=1, keepdim=True)[0]
232 | elif self.multimodal_fusion == "softmax":
233 | return (F.softmax(tensors, dim=1) * tensors).sum(dim=1, keepdim=True)
234 | else:
235 | raise NotImplementedError(
236 | f"Unknown fusion mode for multi-modality: {self.multimodal_fusion}."
237 | )
238 |
239 | def init_new_classifier(self, device):
240 | cls = self.cls[-1]
241 |
242 | if self.multi_modal_background:
243 | imprinting_w = self.cls[0].weight.sum(dim=0)
244 | bkg_bias = self.cls[0].bias.sum(dim=0)
245 | else:
246 | imprinting_w = self.cls[0].weight[0]
247 | if self.opts.dataset == "cityscapes_domain":
248 | if not self.use_cosine:
249 | bkg_bias = self.cls[0].bias[0]
250 | if self.opts.dataset == "cityscapes_domain":
251 | if not self.use_cosine:
252 | bias_diff = torch.log(torch.FloatTensor([self.classes[-1] + 1])).to(device)
253 | new_bias = (bkg_bias - bias_diff)
254 |
255 | cls.weight.data.copy_(imprinting_w)
256 | if self.opts.dataset == "cityscapes_domain":
257 | if not self.use_cosine:
258 | cls.bias.data.copy_(new_bias)
259 |
260 | if self.multi_modal_background:
261 | self.cls[0].bias.data.copy_(new_bias.squeeze(0))
262 | else:
263 | if not self.use_cosine:
264 | self.cls[0].bias[0].data.copy_(new_bias.squeeze(0))
265 |
266 | def init_new_classifier_multimodal(self, device, train_loader, init_type):
267 | print("Init new multimodal classifier")
268 | winners = torch.zeros(self.nb_background_modes,
269 | self.classes[-1]).to(device, dtype=torch.long)
270 |
271 | nb_old_classes = sum(self.classes[1:-1]) + 1
272 |
273 | for images, labels in train_loader:
274 | images = images.to(device, dtype=torch.float32)
275 | labels = labels.to(device, dtype=torch.long)
276 |
277 | modalities = self.forward(images, only_bg=True)[0].argmax(dim=1)
278 | mask = (0 < labels) & (labels < 255)
279 |
280 | modalities = modalities[mask].view(-1)
281 | labels = labels[mask].view(-1)
282 |
283 | winners.index_put_(
284 | (modalities, labels - nb_old_classes),
285 | torch.LongTensor([1]).expand_as(modalities).to(device),
286 | accumulate=True
287 | )
288 |
289 | bias_diff = torch.log(torch.FloatTensor([self.classes[-1] + 1])).to(device)
290 |
291 | if "_" in init_type:
292 | init_type, to_reinit = init_type.split("_")
293 | else:
294 | to_reinit = None
295 |
296 | for c in range(self.classes[-1]):
297 | if init_type == "max":
298 | modality = winners[:, c].argmax()
299 | new_weight = self.cls[0].weight.data[modality]
300 | new_bias = (self.cls[0].bias.data[modality] - bias_diff)[0]
301 | elif init_type == "softmax":
302 | modality = winners[:, c].argmax()
303 | weighting = F.softmax(winners[:, c].float(), dim=0)
304 | new_weight = (weighting[:, None, None, None] * self.cls[0].weight.data).sum(dim=0)
305 | new_bias = (weighting * self.cls[0].bias.data).sum(dim=0)
306 | else:
307 | raise ValueError(f"Unknown multimodal init type: {init_type}.")
308 |
309 | self.cls[-1].weight.data[c].copy_(new_weight)
310 | self.cls[-1].bias.data[c].copy_(new_bias)
311 |
312 | self.cls[0].bias.data[modality].copy_(new_bias)
313 |
314 | if to_reinit is not None:
315 | if to_reinit == "init":
316 | init.kaiming_uniform_(self.cls[0].weight.data[modality], a=math.sqrt(5))
317 | fan_in, _ = init._calculate_fan_in_and_fan_out(self.cls[0].weight)
318 | bound = 1 / math.sqrt(fan_in)
319 | init.uniform_(self.cls[0].bias.data[modality], -bound, bound)
320 | elif to_reinit == "remove":
321 | self.cls[0].bias.data = torch.cat(
322 | (self.cls[0].bias.data[:modality], self.cls[0].bias.data[modality + 1:])
323 | )
324 |
325 | def forward(self, x, scales=None, do_flip=False, ret_intermediate=False, only_bg=False):
326 | out_size = x.shape[-2:]
327 |
328 | out = self._network(x, ret_intermediate, only_bg=only_bg)
329 |
330 | sem_logits_small = out[0] if ret_intermediate else out
331 |
332 | sem_logits = F.interpolate(
333 | sem_logits_small, size=out_size, mode="bilinear", align_corners=False
334 | )
335 |
336 | if ret_intermediate:
337 | return sem_logits, {
338 | "body": out[1],
339 | "pre_logits": out[2],
340 | "attentions": out[3] + [out[2]],
341 | "sem_logits_small": sem_logits_small
342 | }
343 |
344 | return sem_logits, {}
345 |
346 | def fix_bn(self):
347 | for m in self.modules():
348 | if isinstance(m, nn.BatchNorm2d) or isinstance(m, inplace_abn.ABN):
349 | m.eval()
350 | m.weight.requires_grad = False
351 | m.bias.requires_grad = False
352 |
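
# Note: init_new_classifier and init_new_classifier_multimodal above implement the
# MiB-style balanced initialization: the new classifier's weights are imprinted from
# the background classifier, and biases are shifted by log(|C_new| + 1), so the
# probability mass the old model assigned to background is split evenly between
# background and the new classes. A self-contained toy sketch of that rule (shapes
# chosen for a 15-5 setting; this is not the repository's code path):
#
#     import torch
#     import torch.nn as nn
#
#     head_channels, n_new = 256, 5
#     old_cls = nn.Conv2d(head_channels, 16, 1)  # step-0 classifier; channel 0 = background
#     new_cls = nn.Conv2d(head_channels, n_new, 1)
#
#     bias_diff = torch.log(torch.tensor(n_new + 1.0))
#     new_cls.weight.data.copy_(old_cls.weight.data[0])          # imprint from background
#     new_cls.bias.data.copy_(old_cls.bias.data[0] - bias_diff)  # shifted background bias
#     old_cls.bias.data[0] -= bias_diff                          # background keeps one share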
--------------------------------------------------------------------------------
/tasks.py:
--------------------------------------------------------------------------------
1 | tasks_voc = {
2 | "offline": {
3 | 0: list(range(21)),
4 | },
5 | "19-1": {
6 | 0: list(range(20)),
7 | 1: [20],
8 | },
9 | "15-5": {
10 | 0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
11 | 1: [16, 17, 18, 19, 20]
12 | },
13 | "15-5s":
14 | {
15 | 0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
16 | 1: [16],
17 | 2: [17],
18 | 3: [18],
19 | 4: [19],
20 | 5: [20]
21 | },
22 | "15-5s_b":
23 | {
24 | 0: [0, 12, 9, 20, 7, 15, 8, 14, 16, 5, 19, 4, 1, 13, 2, 11],
25 | 1: [17], 2: [3], 3: [6], 4: [18], 5: [10]
26 | },
27 | "15-5s_c":
28 | {
29 | 0: [0, 13, 19, 15, 17, 9, 8, 5, 20, 4, 3, 10, 11, 18, 16, 7],
30 | 1: [12], 2: [14], 3: [6], 4: [1], 5: [2]
31 | },
32 | "15-5s_d":
33 | {
34 | 0: [0, 15, 3, 2, 12, 14, 18, 20, 16, 11, 1, 19, 8, 10, 7, 17],
35 | 1: [6], 2: [5], 3: [13], 4: [9], 5: [4]
36 | },
37 | "15-5s_e":
38 | {
39 | 0: [0, 7, 5, 3, 9, 13, 12, 14, 19, 10, 2, 1, 4, 16, 8, 17],
40 | 1: [15], 2: [18], 3: [6], 4: [11], 5: [20]
41 | },
42 | "15-5s_f":
43 | {
44 | 0: [0, 7, 13, 5, 11, 9, 2, 15, 12, 14, 3, 20, 1, 16, 4, 18],
45 | 1: [8], 2: [6], 3: [10], 4: [19], 5: [17]
46 | },
47 | "15-5s_g":
48 | {
49 | 0: [0, 7, 5, 9, 1, 15, 18, 14, 3, 20, 10, 4, 19, 11, 17, 16],
50 | 1: [12], 2: [8], 3: [6], 4: [2], 5: [13]
51 | },
52 | "15-5s_h":
53 | {
54 | 0: [0, 12, 9, 19, 6, 4, 10, 5, 18, 14, 15, 16, 3, 8, 7, 11],
55 | 1: [13], 2: [2], 3: [20], 4: [17], 5: [1]
56 | },
57 | "15-5s_i":
58 | {
59 | 0: [0, 13, 10, 15, 8, 7, 19, 4, 3, 16, 12, 14, 11, 5, 20, 6],
60 | 1: [2], 2: [18], 3: [9], 4: [17], 5: [1]
61 | },
62 | "15-5s_j":
63 | {
64 | 0: [0, 1, 14, 9, 5, 2, 15, 8, 20, 6, 16, 18, 7, 11, 10, 19],
65 | 1: [3], 2: [4], 3: [17], 4: [12], 5: [13]
66 | },
67 | "15-5s_k":
68 | {
69 | 0: [0, 16, 13, 1, 11, 12, 18, 6, 14, 5, 3, 7, 9, 20, 19, 15],
70 | 1: [4], 2: [2], 3: [10], 4: [8], 5: [17]
71 | },
72 | "15-5s_l":
73 | {
74 | 0: [0, 10, 7, 6, 19, 16, 8, 17, 1, 14, 4, 9, 3, 15, 11, 12],
75 | 1: [2], 2: [18], 3: [20], 4: [13], 5: [5]
76 | },
77 | "15-5s_m":
78 | {
79 | 0: [0, 18, 4, 14, 17, 12, 10, 7, 3, 9, 1, 8, 15, 6, 13, 2],
80 | 1: [5], 2: [11], 3: [20], 4: [16], 5: [19]
81 | },
82 | "15-5s_n":
83 | {
84 | 0: [0, 5, 4, 13, 18, 14, 10, 19, 15, 7, 9, 3, 2, 8, 16, 20],
85 | 1: [1], 2: [12], 3: [11], 4: [6], 5: [17]
86 | },
87 | "15-5s_o":
88 | {
89 | 0: [0, 9, 12, 13, 18, 7, 1, 15, 17, 10, 8, 4, 5, 20, 16, 6],
90 | 1: [14], 2: [19], 3: [11], 4: [2], 5: [3]
91 | },
92 |
93 | "15-5s_p":
94 | {
95 | 0: [0, 9, 12, 13, 18, 2, 11, 15, 17, 10, 8, 4, 5, 20, 16, 6],
96 | 1: [14], 2: [19], 3: [1], 4: [7], 5: [3]
97 | },
98 | "15-5s_q":
99 | {
100 | 0: [0, 3, 14, 13, 18, 2, 11, 15, 17, 10, 8, 4, 5, 20, 16, 6],
101 | 1: [12], 2: [19], 3: [1], 4: [7], 5: [9]
102 | },
103 | "15-5s_r":
104 | {
105 | 0: [0, 3, 14, 13, 1, 2, 11, 15, 17, 7, 8, 4, 5, 9, 16, 19],
106 | 1: [12], 2: [6], 3: [18], 4: [10], 5: [20]
107 | },
108 | "15-5s_s":
109 | {
110 | 0: [0, 3, 14, 6, 1, 2, 11, 12, 17, 7, 20, 4, 5, 9, 16, 19],
111 | 1: [15], 2: [13], 3: [18], 4: [10], 5: [8]
112 | },
113 | "15-5s_t":
114 | {
115 | 0: [0, 3, 15, 13, 1, 2, 11, 18, 17, 7, 20, 8, 5, 9, 16, 19],
116 | 1: [14], 2: [6], 3: [12], 4: [10], 5: [4]
117 | },
118 | "15-5s_u":
119 | {
120 | 0: [0, 3, 15, 13, 14, 6, 11, 18, 17, 7, 20, 8, 4, 9, 16, 10],
121 | 1: [1], 2: [2], 3: [12], 4: [19], 5: [5]
122 | },
123 | "15-5s_v":
124 | {
125 | 0: [0, 1, 2, 12, 14, 6, 19, 18, 17, 5, 20, 8, 4, 9, 16, 10],
126 | 1: [3], 2: [15], 3: [13], 4: [11], 5: [7]
127 | },
128 | "15-5s_w":
129 | {
130 | 0: [0, 1, 2, 12, 14, 13, 19, 18, 7, 11, 20, 8, 4, 9, 16, 10],
131 | 1: [3], 2: [15], 3: [6], 4: [5], 5: [17]
132 | },
133 | "10-1":
134 | {
135 | 0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
136 | 1: [11],
137 | 2: [12],
138 | 3: [13],
139 | 4: [14],
140 | 5: [15],
141 | 6: [16],
142 | 7: [17],
143 | 8: [18],
144 | 9: [19],
145 | 10: [20]
146 | },
147 | }
148 |
149 |
150 | tasks_cityscapes_domain = {
151 | "offline": {0: list(range(21))},
152 | "11-5":
153 | {
154 | 0: list(range(11)),
155 | 1: [11, 12, 13, 14, 15],
156 | 2: [16, 17, 18, 19, 20],
157 | },
158 | "11-5":
159 | {
160 | 0: list(range(11)),
161 | 1: [11, 12, 13, 14, 15],
162 | 2: [16, 17, 18, 19, 20],
163 | },
164 | "11-1":
165 | {
166 | 0: list(range(11)),
167 | 1: [11], 2: [12], 3: [13], 4: [14], 5: [15],
168 | 6: [16], 7: [17], 8: [18], 9: [19], 10: [20]
169 | },
170 | "1-1":
171 | {
172 | i: [i] for i in range(21)
173 | }
174 | }
175 |
176 | tasks_ade = {
177 | "offline": {
178 | 0: [x for x in range(151)]
179 | },
180 | "100-50": {
181 | 0: [x for x in range(0, 101)],
182 | 1: [x for x in range(101, 151)]
183 | },
184 | "100-10":
185 | {
186 | 0: [x for x in range(0, 101)],
187 | 1: [x for x in range(101, 111)],
188 | 2: [x for x in range(111, 121)],
189 | 3: [x for x in range(121, 131)],
190 | 4: [x for x in range(131, 141)],
191 | 5: [x for x in range(141, 151)]
192 | },
193 | "100-5":
194 | {
195 | 0: [x for x in range(0, 101)],
196 | 1: [x for x in range(101, 106)],
197 | 2: [x for x in range(106, 111)],
198 | 3: [x for x in range(111, 116)],
199 | 4: [x for x in range(116, 121)],
200 | 5: [x for x in range(121, 126)],
201 | 6: [x for x in range(126, 131)],
202 | 7: [x for x in range(131, 136)],
203 | 8: [x for x in range(136, 141)],
204 | 9: [x for x in range(141, 146)],
205 | 10: [x for x in range(146, 151)]
206 | },
207 | "50":
208 | {
209 | 0: [x for x in range(0, 51)],
210 | 1: [x for x in range(51, 101)],
211 | 2: [x for x in range(101, 151)]
212 | },
213 | }
214 |
215 |
216 | tasks_cityscape = {
217 | "10-9":
218 | {
219 | 0: [x for x in range(0, 11)],
220 | 1: [x for x in range(11, 20)]
221 | }
222 | }
223 |
224 | def get_task_list():
225 | return list(tasks_voc.keys()) + list(tasks_ade.keys()) + list(tasks_cityscapes_domain.keys()) + list(tasks_cityscape.keys())
226 |
227 |
228 | def get_task_labels(dataset, name, step):
229 | if dataset == 'voc':
230 | task_dict = tasks_voc[name]
231 | elif dataset == 'ade':
232 | task_dict = tasks_ade[name]
233 | elif dataset == "cityscapes_domain":
234 | task_dict = tasks_cityscapes_domain[name]
235 | elif dataset == 'cityscape':
236 | task_dict = tasks_cityscape[name]
237 | else:
238 | raise NotImplementedError
239 | assert step in task_dict.keys(), f"You should provide a valid step! [{step} is out of range]"
240 |
241 | labels = list(task_dict[step])
242 | labels_old = [label for s in range(step) for label in task_dict[s]]
243 | return labels, labels_old, f'data/{dataset}/{name}'
244 |
245 |
246 | def get_per_task_classes(dataset, name, step):
247 | if dataset == 'voc':
248 | task_dict = tasks_voc[name]
249 | elif dataset == 'ade':
250 | task_dict = tasks_ade[name]
251 | elif dataset == "cityscapes_domain":
252 | task_dict = tasks_cityscapes_domain[name]
253 | elif dataset == "cityscape":
254 | task_dict = tasks_cityscape[name]
255 | else:
256 | raise NotImplementedError
257 | assert step in task_dict.keys(), f"You should provide a valid step! [{step} is out of range]"
258 |
259 | classes = [len(task_dict[s]) for s in range(step + 1)]
260 | return classes
261 |
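# Example (assuming the repository root is on PYTHONPATH): resolving the labels of
# the third step of VOC 15-5s with the helpers above:
#
#     from tasks import get_task_labels, get_per_task_classes
#
#     labels, labels_old, path = get_task_labels("voc", "15-5s", step=2)
#     print(labels)        # [17]
#     print(labels_old)    # [0, 1, ..., 15, 16]
#     print(path)          # data/voc/15-5s
#     print(get_per_task_classes("voc", "15-5s", step=2))  # [16, 1, 1]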
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import *
2 | from .scheduler import PolyLR
3 | from .loss import get_loss
4 | from .regularizer import get_regularizer
--------------------------------------------------------------------------------
/utils/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
4 | class Logger:
5 |
6 | def __init__(self, logdir, rank, type='tensorboardX', debug=False, filename=None, summary=True, step=None):
7 | self.logger = None
8 | self.type = type
9 | self.rank = rank
10 | self.step = step
11 |
12 | self.summary = summary
13 | if summary:
14 | if type == 'tensorboardX':
15 | import tensorboardX
16 | self.logger = tensorboardX.SummaryWriter(logdir)
17 | else:
18 | raise NotImplementedError
19 | else:
20 | self.type = 'None'
21 |
22 | self.debug_flag = debug
23 | logging.basicConfig(filename=filename, level=logging.INFO, format=f'%(levelname)s:rank{rank}: %(message)s')
24 |
25 | if rank == 0:
26 | logging.info(f"[!] starting logging at directory {logdir}")
27 | if self.debug_flag:
28 | logging.info(f"[!] Entering DEBUG mode")
29 |
30 | def close(self):
31 | if self.logger is not None:
32 | self.logger.close()
33 | self.info("Closing the Logger.")
34 |
35 | def add_scalar(self, tag, scalar_value, step=None):
36 | if self.type == 'tensorboardX':
37 | tag = self._transform_tag(tag)
38 | self.logger.add_scalar(tag, scalar_value, step)
39 |
40 | def add_image(self, tag, image, step=None):
41 | if self.type == 'tensorboardX':
42 | tag = self._transform_tag(tag)
43 | self.logger.add_image(tag, image, step)
44 |
45 | def add_figure(self, tag, image, step=None):
46 | if self.type == 'tensorboardX':
47 | tag = self._transform_tag(tag)
48 | self.logger.add_figure(tag, image, step)
49 |
50 | def add_table(self, tag, tbl, step=None):
51 | if self.type == 'tensorboardX':
52 | tag = self._transform_tag(tag)
53 | tbl_str = "
"
54 | tbl_str += " \
55 | Term | \
56 | Value | \
57 |
"
58 | for k, v in tbl.items():
59 | tbl_str += " \
60 | %s | \
61 | %s | \
62 |
" % (k, v)
63 |
64 | tbl_str += "
"
65 | self.logger.add_text(tag, tbl_str, step)
66 |
67 | def print(self, msg):
68 | logging.info(msg)
69 |
70 | def info(self, msg):
71 | if self.rank == 0:
72 | logging.info(msg)
73 |
74 | def debug(self, msg):
75 | if self.rank == 0 and self.debug_flag:
76 | logging.info(msg)
77 |
78 | def error(self, msg):
79 | logging.error(msg)
80 |
81 | def _transform_tag(self, tag):
82 | tag = tag + f"/{self.step}" if self.step is not None else tag
83 | return tag
84 |
85 | def add_results(self, results):
86 | if self.type == 'tensorboardX':
87 | tag = self._transform_tag("Results")
88 | text = ""
89 | for k, res in results.items():
90 | text += f"{k} | " + " ".join([str(f'{x} | ') for x in res.values()]) + "
"
91 | text += "
"
92 | self.logger.add_text(tag, text)
93 |
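# A minimal usage sketch for the class above (summary=False so that tensorboardX is
# not required; pass summary=True and a logdir to also write TensorBoard events):
#
#     from utils.logger import Logger
#
#     logger = Logger(logdir="logs/demo", rank=0, summary=False, step=1)
#     logger.info("starting step 1")
#     logger.add_table("hparams", {"lr": 0.001, "epochs": 30})  # no-op without tensorboardX
#     logger.close()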
--------------------------------------------------------------------------------
/utils/scheduler.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler
2 |
3 | class PolyLR(_LRScheduler):
4 | def __init__(self, optimizer, max_iters, power=0.9, last_epoch=-1):
5 | self.power = power
6 | self.max_iters = max_iters
7 | super(PolyLR, self).__init__(optimizer, last_epoch)
8 |
9 | def get_lr(self):
10 | return [ base_lr * ( 1 - self.last_epoch/self.max_iters )**self.power
11 | for base_lr in self.base_lrs]
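
# The schedule decays each base learning rate as lr_t = base_lr * (1 - t / max_iters)**power.
# A quick check (assuming the repository root is on PYTHONPATH, since utils/__init__.py
# re-exports PolyLR):
#
#     import torch
#     from utils.scheduler import PolyLR
#
#     opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.02)
#     sched = PolyLR(opt, max_iters=100, power=0.9)
#     for _ in range(3):
#         opt.step()
#         sched.step()
#     print(opt.param_groups[0]["lr"])  # 0.02 * (1 - 3/100) ** 0.9 ≈ 0.01946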
--------------------------------------------------------------------------------
/utils/utils.py:
--------------------------------------------------------------------------------
1 | from torchvision.transforms.functional import normalize
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | def denormalize(tensor, mean, std):
7 | mean = np.array(mean)
8 | std = np.array(std)
9 |
10 | _mean = -mean/std
11 | _std = 1/std
12 | return normalize(tensor, _mean, _std)
13 |
14 |
15 | class Denormalize(object):
16 | def __init__(self, mean, std):
17 | mean = np.array(mean)
18 | std = np.array(std)
19 | self._mean = -mean/std
20 | self._std = 1/std
21 |
22 | def __call__(self, tensor):
23 | if isinstance(tensor, np.ndarray):
24 | return (tensor - self._mean.reshape(-1,1,1)) / self._std.reshape(-1,1,1)
25 | return normalize(tensor, self._mean, self._std)
26 |
27 |
28 | def fix_bn(model):
29 | for m in model.modules():
30 | if isinstance(m, nn.BatchNorm2d):
31 | m.eval()
32 | m.weight.requires_grad = False
33 | m.bias.requires_grad = False
34 |
35 |
36 | def color_map(dataset):
37 | if dataset=='voc':
38 | return voc_cmap()
39 | elif dataset=='cityscapes':
40 | return cityscapes_cmap()
41 | elif dataset=='ade':
42 | return ade_cmap()
43 |
44 |
45 | def cityscapes_cmap():
46 | return np.array([(128, 64,128), (244, 35,232), ( 70, 70, 70), (102,102,156), (190,153,153), (153,153,153), (250,170, 30),
47 | (220,220, 0), (107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255, 0, 0), ( 0, 0,142),
48 | ( 0, 0, 70), ( 0, 60,100), ( 0, 80,100), ( 0, 0,230), (119, 11, 32), ( 0, 0, 0)],
49 | dtype=np.uint8)
50 |
51 |
52 | def ade_cmap():
53 | cmap = np.zeros((256, 3), dtype=np.uint8)
54 | colors = [
55 | [0, 0, 0],
56 | [120, 120, 120],
57 | [180, 120, 120],
58 | [6, 230, 230],
59 | [80, 50, 50],
60 | [4, 200, 3],
61 | [120, 120, 80],
62 | [140, 140, 140],
63 | [204, 5, 255],
64 | [230, 230, 230],
65 | [4, 250, 7],
66 | [224, 5, 255],
67 | [235, 255, 7],
68 | [150, 5, 61],
69 | [120, 120, 70],
70 | [8, 255, 51],
71 | [255, 6, 82],
72 | [143, 255, 140],
73 | [204, 255, 4],
74 | [255, 51, 7],
75 | [204, 70, 3],
76 | [0, 102, 200],
77 | [61, 230, 250],
78 | [255, 6, 51],
79 | [11, 102, 255],
80 | [255, 7, 71],
81 | [255, 9, 224],
82 | [9, 7, 230],
83 | [220, 220, 220],
84 | [255, 9, 92],
85 | [112, 9, 255],
86 | [8, 255, 214],
87 | [7, 255, 224],
88 | [255, 184, 6],
89 | [10, 255, 71],
90 | [255, 41, 10],
91 | [7, 255, 255],
92 | [224, 255, 8],
93 | [102, 8, 255],
94 | [255, 61, 6],
95 | [255, 194, 7],
96 | [255, 122, 8],
97 | [0, 255, 20],
98 | [255, 8, 41],
99 | [255, 5, 153],
100 | [6, 51, 255],
101 | [235, 12, 255],
102 | [160, 150, 20],
103 | [0, 163, 255],
104 | [140, 140, 140],
105 | [250, 10, 15],
106 | [20, 255, 0],
107 | [31, 255, 0],
108 | [255, 31, 0],
109 | [255, 224, 0],
110 | [153, 255, 0],
111 | [0, 0, 255],
112 | [255, 71, 0],
113 | [0, 235, 255],
114 | [0, 173, 255],
115 | [31, 0, 255],
116 | [11, 200, 200],
117 | [255, 82, 0],
118 | [0, 255, 245],
119 | [0, 61, 255],
120 | [0, 255, 112],
121 | [0, 255, 133],
122 | [255, 0, 0],
123 | [255, 163, 0],
124 | [255, 102, 0],
125 | [194, 255, 0],
126 | [0, 143, 255],
127 | [51, 255, 0],
128 | [0, 82, 255],
129 | [0, 255, 41],
130 | [0, 255, 173],
131 | [10, 0, 255],
132 | [173, 255, 0],
133 | [0, 255, 153],
134 | [255, 92, 0],
135 | [255, 0, 255],
136 | [255, 0, 245],
137 | [255, 0, 102],
138 | [255, 173, 0],
139 | [255, 0, 20],
140 | [255, 184, 184],
141 | [0, 31, 255],
142 | [0, 255, 61],
143 | [0, 71, 255],
144 | [255, 0, 204],
145 | [0, 255, 194],
146 | [0, 255, 82],
147 | [0, 10, 255],
148 | [0, 112, 255],
149 | [51, 0, 255],
150 | [0, 194, 255],
151 | [0, 122, 255],
152 | [0, 255, 163],
153 | [255, 153, 0],
154 | [0, 255, 10],
155 | [255, 112, 0],
156 | [143, 255, 0],
157 | [82, 0, 255],
158 | [163, 255, 0],
159 | [255, 235, 0],
160 | [8, 184, 170],
161 | [133, 0, 255],
162 | [0, 255, 92],
163 | [184, 0, 255],
164 | [255, 0, 31],
165 | [0, 184, 255],
166 | [0, 214, 255],
167 | [255, 0, 112],
168 | [92, 255, 0],
169 | [0, 224, 255],
170 | [112, 224, 255],
171 | [70, 184, 160],
172 | [163, 0, 255],
173 | [153, 0, 255],
174 | [71, 255, 0],
175 | [255, 0, 163],
176 | [255, 204, 0],
177 | [255, 0, 143],
178 | [0, 255, 235],
179 | [133, 255, 0],
180 | [255, 0, 235],
181 | [245, 0, 255],
182 | [255, 0, 122],
183 | [255, 245, 0],
184 | [10, 190, 212],
185 | [214, 255, 0],
186 | [0, 204, 255],
187 | [20, 0, 255],
188 | [255, 255, 0],
189 | [0, 153, 255],
190 | [0, 41, 255],
191 | [0, 255, 204],
192 | [41, 0, 255],
193 | [41, 255, 0],
194 | [173, 0, 255],
195 | [0, 245, 255],
196 | [71, 0, 255],
197 | [122, 0, 255],
198 | [0, 255, 184],
199 | [0, 92, 255],
200 | [184, 255, 0],
201 | [0, 133, 255],
202 | [255, 214, 0],
203 | [25, 194, 194],
204 | [102, 255, 0],
205 | [92, 0, 255]
206 | ]
207 |
208 | for i in range(len(colors)):
209 | cmap[i] = colors[i]
210 |
211 | return cmap.astype(np.uint8)
212 |
213 |
214 | def voc_cmap(N=256, normalized=False):
215 | def bitget(byteval, idx):
216 | return ((byteval & (1 << idx)) != 0)
217 |
218 | dtype = 'float32' if normalized else 'uint8'
219 | cmap = np.zeros((N, 3), dtype=dtype)
220 | for i in range(N):
221 | r = g = b = 0
222 | c = i
223 | for j in range(8):
224 | r = r | (bitget(c, 0) << 7-j)
225 | g = g | (bitget(c, 1) << 7-j)
226 | b = b | (bitget(c, 2) << 7-j)
227 | c = c >> 3
228 |
229 | cmap[i] = np.array([r, g, b])
230 |
231 | cmap = cmap/255 if normalized else cmap
232 | return cmap
233 |
234 |
235 | class Label2Color(object):
236 | def __init__(self, cmap):
237 | self.cmap = cmap
238 |
239 | def __call__(self, lbls):
240 | return self.cmap[lbls]
241 |
242 |
243 | def convert_bn2gn(module):
244 | mod = module
245 | if isinstance(module, nn.modules.batchnorm._BatchNorm):
246 | num_features = module.num_features
247 | num_groups = num_features//16
248 | mod = nn.GroupNorm(num_groups=num_groups, num_channels=num_features)
249 | for name, child in module.named_children():
250 | mod.add_module(name, convert_bn2gn(child))
251 | del module
252 | return mod
253 |
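# A short usage sketch for the helpers above: mapping VOC label ids to colors, and
# undoing the usual ImageNet normalization for visualization (standalone example,
# assuming the repository root is on PYTHONPATH):
#
#     import numpy as np
#     import torch
#     from torchvision.transforms.functional import normalize
#     from utils.utils import Denormalize, Label2Color, voc_cmap
#
#     lbl = np.array([[0, 1], [2, 3]])
#     colors = Label2Color(voc_cmap())(lbl)   # (2, 2, 3) uint8; class 1 -> (128, 0, 0)
#
#     img = torch.rand(3, 4, 4)
#     mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
#     restored = Denormalize(mean, std)(normalize(img.clone(), mean, std))
#     print(torch.allclose(img, restored, atol=1e-5))  # True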
--------------------------------------------------------------------------------