├── RAML_paper_and_appendix.pdf
├── README.md
└── incremental
    ├── .ipynb_checkpoints
    │   ├── main-checkpoint.py
    │   ├── main_metric-checkpoint.py
    │   └── readme-checkpoint.txt
    ├── datasets
    │   ├── .DS_Store
    │   ├── .ipynb_checkpoints
    │   │   ├── cityscapes-checkpoint.py
    │   │   └── cityscapes_novel-checkpoint.py
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-36.pyc
    │   │   ├── __init__.cpython-37.pyc
    │   │   ├── __init__.cpython-38.pyc
    │   │   ├── cityscapes.cpython-36.pyc
    │   │   ├── cityscapes.cpython-37.pyc
    │   │   ├── cityscapes.cpython-38.pyc
    │   │   ├── cityscapes_novel.cpython-37.pyc
    │   │   ├── cityscapes_novel.cpython-38.pyc
    │   │   ├── voc.cpython-36.pyc
    │   │   ├── voc.cpython-37.pyc
    │   │   └── voc.cpython-38.pyc
    │   ├── cityscapes.py
    │   ├── cityscapes_novel.py
    │   ├── data
    │   │   └── train_aug.txt
    │   ├── utils.py
    │   └── voc.py
    ├── main.py
    ├── main_metric.py
    ├── metrics
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-36.pyc
    │   │   ├── __init__.cpython-37.pyc
    │   │   ├── __init__.cpython-38.pyc
    │   │   ├── stream_metrics.cpython-36.pyc
    │   │   ├── stream_metrics.cpython-37.pyc
    │   │   └── stream_metrics.cpython-38.pyc
    │   └── stream_metrics.py
    ├── network
    │   ├── .DS_Store
    │   ├── .ipynb_checkpoints
    │   │   ├── _deeplab-checkpoint.py
    │   │   ├── modeling-checkpoint.py
    │   │   └── utils-checkpoint.py
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-36.pyc
    │   │   ├── __init__.cpython-37.pyc
    │   │   ├── __init__.cpython-38.pyc
    │   │   ├── _deeplab.cpython-36.pyc
    │   │   ├── _deeplab.cpython-37.pyc
    │   │   ├── _deeplab.cpython-38.pyc
    │   │   ├── modeling.cpython-36.pyc
    │   │   ├── modeling.cpython-37.pyc
    │   │   ├── modeling.cpython-38.pyc
    │   │   ├── utils.cpython-36.pyc
    │   │   ├── utils.cpython-37.pyc
    │   │   └── utils.cpython-38.pyc
    │   ├── _deeplab.py
    │   ├── backbone
    │   │   ├── __init__.py
    │   │   ├── __pycache__
    │   │   │   ├── __init__.cpython-36.pyc
    │   │   │   ├── __init__.cpython-37.pyc
    │   │   │   ├── __init__.cpython-38.pyc
    │   │   │   ├── mobilenetv2.cpython-36.pyc
    │   │   │   ├── mobilenetv2.cpython-37.pyc
    │   │   │   ├── mobilenetv2.cpython-38.pyc
    │   │   │   ├── resnet.cpython-36.pyc
    │   │   │   ├── resnet.cpython-37.pyc
    │   │   │   └── resnet.cpython-38.pyc
    │   │   ├── mobilenetv2.py
    │   │   └── resnet.py
    │   ├── modeling.py
    │   └── utils.py
    ├── novel
    │   ├── 10
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 11
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── download.png
    │   │   ├── download1.png
    │   │   ├── download2.png
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 12
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 13
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 14
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 15
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 16
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 17
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   └── 18
    │       ├── .ipynb_checkpoints
    │       │   └── novel-checkpoint.txt
    │       ├── novel.pth
    │       └── novel.txt
    ├── novel_1
    │   ├── 10
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 11
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── download.png
    │   │   ├── download1.png
    │   │   ├── download2.png
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 12
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 13
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 14
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 15
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── novel-checkpoint.txt
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 16
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   ├── 17
    │   │   ├── novel.pth
    │   │   └── novel.txt
    │   └── 18
    │       ├── .ipynb_checkpoints
    │       │   └── novel-checkpoint.txt
    │       ├── novel.pth
    │       └── novel.txt
    ├── readme.txt
    ├── test_metric.py
    └── utils
        ├── __init__.py
        ├── __pycache__
        │   ├── __init__.cpython-36.pyc
        │   ├── __init__.cpython-37.pyc
        │   ├── __init__.cpython-38.pyc
        │   ├── ext_transforms.cpython-36.pyc
        │   ├── ext_transforms.cpython-37.pyc
        │   ├── ext_transforms.cpython-38.pyc
        │   ├── loss.cpython-36.pyc
        │   ├── loss.cpython-37.pyc
        │   ├── loss.cpython-38.pyc
        │   ├── scheduler.cpython-36.pyc
        │   ├── scheduler.cpython-37.pyc
        │   ├── scheduler.cpython-38.pyc
        │   ├── utils.cpython-36.pyc
        │   ├── utils.cpython-37.pyc
        │   ├── utils.cpython-38.pyc
        │   ├── visualizer.cpython-36.pyc
        │   ├── visualizer.cpython-37.pyc
        │   └── visualizer.cpython-38.pyc
        ├── ext_transforms.py
        ├── loss.py
        ├── scheduler.py
        ├── utils.py
        └── visualizer.py
/RAML_paper_and_appendix.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/RAML_paper_and_appendix.pdf
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Region-Aware Metric Learning for Open World Semantic Segmentation via Meta-Channel Aggregation
2 |
3 | ## Introduction
4 | This is the official PyTorch implementation of *Region-Aware Metric Learning for Open World Semantic Segmentation via Meta-Channel Aggregation*, IJCAI 2022. Region-aware metric learning (RAML) first separates the regions of an image and generates region-aware features, then performs metric learning on these features for open world semantic segmentation. The paper is available [here](https://arxiv.org/abs/2205.08083).
5 |
6 | ## Quick starts
7 |
8 | ### Dataset
9 | We follow [DMLNet](https://github.com/Jun-CEN/Open-World-Semantic-Segmentation) to prepare the datasets.
10 |
11 | Note: For different class splits, you need to manually modify lines 71 through 82 in datasets/cityscapes.py to select the corresponding `unknown_target` list (see the snippet after this file).
12 |
13 | ### Pretrained model
14 | The pretrained models can be downloaded from [Google Drive](https://drive.google.com/file/d/1GYKxToN3YzKSmx9RsDCW8A0QWFU9liZ8/view?usp=sharing) or [Baidu Drive](https://pan.baidu.com/s/1dza_9Fr75wEKX_mmncvofA) (code: 63z1). Put the four folders into RAML/incremental/.
15 |
16 | ### Training
17 | First, go to the "incremental" directory:
18 | ```
19 | cd incremental
20 | ```
21 | Then, there are three sub-stages for training (5-shot 16+3 setting):
22 | - Sub-Stage 1: train the close-set module
23 | ```
24 | python -u main.py --output_dir ./output_stage1_16 --gpu_id 0,1
25 | ```
26 | - Sub-Stage 2: train the meta-channel module
27 | ```
28 | python -u main.py --finetune --ckpt ./output_stage1_16/final.pth --output_dir ./output_stage2_16/ --total_itrs 10000 --gpu_id 0,1
29 | ```
30 | - Sub-Stage 3: train the region-aware metric learning module
31 | ```
32 | python -u main_metric.py --ckpt ./output_stage2_16/final.pth --output_dir ./output_stage3_16/ --novel_dir ./novel/
33 | ```
34 |
35 | ### Inference
36 | For the 16+3 5-shot setting:
37 | ```
38 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_3 --novel_dir ./novel
39 | ```
40 | For the 16+3 1-shot setting:
41 | ```
42 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_3 --novel_dir ./novel_1
43 | ```
44 | For the 16+1 5-shot setting:
45 | ```
46 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_1 --novel_dir ./novel
47 | ```
48 | For the 12+7 5-shot setting:
49 | ```
50 | python main_metric.py --ckpt ./output_stage3_12/final.pth --test_only --test_mode 12 --novel_dir ./novel
51 | ```
52 |
53 | ## Citation
54 | ```
55 | @inproceedings{raml2022,
56 | author = {Dong, Hexin and Chen, Zifan and Yuan, Mingze and Xie, Yutong and Zhao, Jie and Yu, Fei and Dong, Bin and Zhang, Li},
57 | title = {Region-Aware Metric Learning for Open World Semantic Segmentation via Meta-Channel Aggregation},
58 | booktitle = {31st International Joint Conference on Artificial Intelligence (IJCAI-22)},
59 | year = {2022},
60 | }
61 | ```
62 |
--------------------------------------------------------------------------------
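The note under "Dataset" above refers to the `unknown_target` block at lines 71-82 of `datasets/cityscapes.py`, which is reproduced in full later in this dump. A sketch of that block is shown below; the setting is selected by keeping exactly one assignment uncommented. The class-name comments are added here for orientation and follow the standard Cityscapes train ids (verifiable against the `classes` table in `datasets/cityscapes.py`):

```python
# datasets/cityscapes.py, lines 71-82 (sketch): keep exactly one assignment
# uncommented. Cityscapes train ids: 10 = sky, 13 = car, 14 = truck, 15 = bus,
# 16 = train, 17 = motorcycle, 18 = bicycle.

unknown_target = [10, 13, 14, 15, 16, 17, 18]   # 12+7 split (as shipped)
# unknown_target = [10, 13, 14, 15, 16]         # 14+5 split
# unknown_target = [13]                         # 18+1 split
# unknown_target = [13, 14, 15]                 # 16+3 / 16+1 splits
```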
/incremental/.ipynb_checkpoints/readme-checkpoint.txt:
--------------------------------------------------------------------------------
1 | Three sub-stages for training (5-shot 16+3 setting):
2 |
3 | sub-stage1: training close set module
4 | python -u main.py --output_dir ./output_stage1_16 --gpu_id 0,1
5 |
6 | sub-stage2: training meta channel module
7 | python -u main.py --finetune --ckpt ./output_stage1_16/final.pth --output_dir ./output_stage2_16/ --total_itrs 10000 --gpu_id 0,1
8 |
9 | sub-stage3: training region-aware metric learning module
10 | python -u main_metric.py --ckpt ./output_stage2_16/final.pth --output_dir ./output_stage3_16/ --novel_dir ./novel/
11 |
12 | inference:
13 |
14 | 16+3 5-shot:
15 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_3 --novel_dir ./novel
16 |
17 | 16+3 1-shot:
18 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_3 --novel_dir ./novel_1
19 |
20 | 16+1 5-shot:
21 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_1 --novel_dir ./novel
22 |
23 | 12+7 5-shot:
24 | python main_metric.py --ckpt ./output_stage3_12/final.pth --test_only --test_mode 12 --novel_dir ./novel
25 |
26 |
--------------------------------------------------------------------------------
/incremental/datasets/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/.DS_Store
--------------------------------------------------------------------------------
/incremental/datasets/.ipynb_checkpoints/cityscapes-checkpoint.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from collections import namedtuple
4 |
5 | from matplotlib import set_loglevel
6 |
7 | import torch
8 | import torch.utils.data as data
9 | from PIL import Image
10 | import numpy as np
11 | import matplotlib.pyplot as plt
12 | from torchvision import transforms
13 | import cv2
14 |
15 | class Cityscapes(data.Dataset):
16 | """Cityscapes Dataset.
17 |
18 | **Parameters:**
19 | - **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
20 | - **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
21 | - **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
22 | - **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g., ``transforms.RandomCrop``
23 | - **target_type** (string, optional): Type of target to use: 'instance', 'semantic', 'polygon', 'color' or 'depth'.
24 | """
25 |
26 | # Based on https://github.com/mcordts/cityscapesScripts
27 | CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
28 | 'has_instances', 'ignore_in_eval', 'color'])
29 | classes = [
30 | CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
31 | CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
32 | CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
33 | CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
34 | CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
35 | CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
36 | CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
37 | CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
38 | CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
39 | CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
40 | CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
41 | CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
42 | CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
43 | CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
44 | CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
45 | CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
46 | CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
47 | CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
48 | CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
49 | CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
50 | CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
51 | CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
52 | CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
53 | CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
54 | CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
55 | CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
56 | CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
57 | CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
58 | CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
59 | CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
60 | CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
61 | CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
62 | CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
63 | CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
64 | CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
65 | ]
66 |
67 | train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
68 | train_id_to_color.append([0, 0, 0])
69 | train_id_to_color = np.array(train_id_to_color)
70 | id_to_train_id = np.array([c.train_id for c in classes])
71 | unknown_target = None
72 | # unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
73 | # 12+7
74 | unknown_target = [10,13,14,15,16,17,18]
75 | # 14+5
76 | # unknown_target = [10,13,14,15,16]
77 | # 18+1
78 | #unknown_target = [13]
79 | # 16+3 / 16+1
80 | #unknown_target = [13,14,15]
81 | # unknown_target = [i for i in range(19)]
82 | # unknown_target.pop(13)
83 | print('unknown_target is : ', unknown_target)
84 | # unknown_target = [18]
85 | #train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
86 | # (70, 130, 180), (220, 20, 60), (0, 0, 142)]
87 | #train_id_to_color = np.array(train_id_to_color)
88 | #id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
89 |
90 | def __init__(self, root, split='train', mode='fine', target_type='semantic', transform=None):
91 | self.root = os.path.expanduser(root)
92 | self.mode = 'gtFine'
93 | self.target_type = target_type
94 | self.images_dir = os.path.join(self.root, 'leftImg8bit', split)
95 |
96 | self.targets_dir = os.path.join(self.root, self.mode, split)
97 | # self.targets_dir = self.images_dir
98 |
99 | self.transform = transform
100 |
101 | self.split = split
102 | self.images = []
103 | self.targets = []
104 |
105 |
106 | if split not in ['train', 'test_car', 'val','test_truck', 'test_bus', 'test_car_1_shot',
107 | 'test_truck_1_shot', 'test_bus_1_shot', 'car_vis', 'bus_vis','demo_video',
108 | 'car_100','car_1000']:
109 | raise ValueError('Invalid split for mode! Please use split="train", split="test"'
110 | ' or split="val"')
111 |
112 | if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
113 | raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
114 | ' specified "split" and "mode" are inside the "root" directory')
115 |
116 | for city in os.listdir(self.images_dir):
117 | img_dir = os.path.join(self.images_dir, city)
118 | target_dir = os.path.join(self.targets_dir, city)
119 | files_name = os.listdir(img_dir)
120 | files_name = sorted(files_name)
121 | for file_name in files_name:
122 | self.images.append(os.path.join(img_dir, file_name))
123 | target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
124 | self._get_target_suffix(self.mode, self.target_type))
125 | self.targets.append(os.path.join(target_dir, target_name))
126 |
127 | @classmethod
128 | def encode_target(cls, target):
129 |
130 | target = cls.id_to_train_id[np.array(target)]
131 | target_true = target.copy()
132 | # instance, counts = np.unique(target, False, False, True)
133 | # print('target', instance, counts)
134 | if cls.unknown_target is not None:
135 | cont = 0
136 | for h_c in cls.unknown_target:
137 |
138 | target[target == h_c - cont] = 100
139 | for c in range(h_c - cont + 1, 19):
140 | target[target == c] = c - 1
141 | # target_true[target_true == c] = c - 1
142 | cont = cont + 1
143 | # target_true[target == 100] = 19 - len(cls.unknown_target)
144 | target[target == 100] = 255
145 |
146 | return target, target_true
147 |
148 | @classmethod
149 | def decode_target(cls, target):
150 | target[target == 255] = 19
151 | #target = target.astype('uint8') + 1
152 | return cls.train_id_to_color[target]
153 |
154 | def __getitem__(self, index):
155 | """
156 | Args:
157 | index (int): Index
158 | Returns:
159 | tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
160 | than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
161 | """
162 | image = Image.open(self.images[index]).convert('RGB')
163 | # image = Image.open(self.images[index])
164 | target = Image.open(self.targets[index])
165 | if self.transform:
166 | image, target = self.transform(image, target)
167 | target, target_true = self.encode_target(target)
168 | target_lst, class_lst = self.encode_target_czifan(target)
169 |
170 | return image, target, target_true, target_lst, class_lst
171 |
172 | def __len__(self):
173 | return len(self.images)
174 |
175 | def _load_json(self, path):
176 | with open(path, 'r') as file:
177 | data = json.load(file)
178 | return data
179 |
180 | def _get_target_suffix(self, mode, target_type):
181 | if target_type == 'instance':
182 | return '{}_instanceIds.png'.format(mode)
183 | elif target_type == 'semantic':
184 | return '{}_labelIds.png'.format(mode)
185 | elif target_type == 'color':
186 | return '{}_color.png'.format(mode)
187 | elif target_type == 'polygon':
188 | return '{}_polygons.json'.format(mode)
189 | elif target_type == 'depth':
190 | return '{}_disparity.png'.format(mode)
191 |
192 | def encode_target_czifan(self, target, output_size=16):
193 | known_class = 19 - len(Cityscapes.unknown_target)
194 | target_lst = np.zeros((known_class + 1, *target.shape))
195 | class_lst = np.ones(known_class + 1) * 255
196 | for c in range(known_class):
197 | target_lst[c] = (target == c)
198 | class_lst[c] = c
199 | return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
200 |
201 | # target_lst = np.zeros((output_size**2, *target.shape))
202 | # class_lst = np.ones(output_size**2) * 255
203 | # for t in np.unique(target):
204 | # tmp = np.where(target == t)
205 | # gy, gx = int(np.mean(tmp[0])/32), int(np.mean(tmp[1])/32)
206 | # target_lst[gy*output_size+gx,...] = (target == t)
207 | # class_lst[gy*output_size+gx] = t
208 | # return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
209 |
210 | # temp = cv2.resize(target.astype(np.uint8), (output_size, output_size), interpolation=cv2.INTER_LINEAR).reshape(-1)
211 | # #temp = torch.nn.functional.interpolate(target.clone().unsqueeze(dim=1).float(), size=[output_size, output_size], mode="nearest").view(-1)
212 | # target_lst, class_lst = [], []
213 | # for t in temp:
214 | # if t == 255:
215 | # target_lst.append(np.zeros_like(target))
216 | # else:
217 | # target_lst.append(target == t)
218 | # class_lst.append(t.item())
219 | # target_lst = np.stack(target_lst, axis=0).astype(np.uint8) # (256, 512, 512)
220 | # class_lst = np.asarray(class_lst).astype(np.uint8) # (256,)
221 | # return target_lst, class_lst
222 |
--------------------------------------------------------------------------------
/incremental/datasets/.ipynb_checkpoints/cityscapes_novel-checkpoint.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from collections import namedtuple
4 |
5 | from matplotlib import set_loglevel
6 |
7 | import torch
8 | import torch.utils.data as data
9 | from PIL import Image
10 | import numpy as np
11 | import matplotlib.pyplot as plt
12 | from torchvision import transforms
13 |
14 |
15 | class Cityscapes_Novel(data.Dataset):
16 | """Cityscapes Dataset.
17 |
18 | **Parameters:**
19 | - **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
20 | - **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
21 | - **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
22 | - **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
23 | - **target_transform** (callable, optional): A function/transform that takes in the target and transforms it.
24 | """
25 |
26 | # Based on https://github.com/mcordts/cityscapesScripts
27 | CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
28 | 'has_instances', 'ignore_in_eval', 'color'])
29 | classes = [
30 | CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
31 | CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
32 | CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
33 | CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
34 | CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
35 | CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
36 | CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
37 | CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
38 | CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
39 | CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
40 | CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
41 | CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
42 | CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
43 | CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
44 | CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
45 | CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
46 | CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
47 | CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
48 | CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
49 | CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
50 | CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
51 | CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
52 | CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
53 | CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
54 | CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
55 | CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
56 | CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
57 | CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
58 | CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
59 | CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
60 | CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
61 | CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
62 | CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
63 | CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
64 | CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
65 | ]
66 |
67 | train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
68 | train_id_to_color.append([0, 0, 0])
69 | train_id_to_color = np.array(train_id_to_color)
70 | id_to_train_id = np.array([c.train_id for c in classes])
71 | unknown_target = None
72 | # unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
73 | unknown_target = [13,14,15]
74 | # unknown_target = [i for i in range(19)]
75 | # unknown_target.pop(13)
76 | print('unknown_target is : ', unknown_target)
77 | # unknown_target = [18]
78 | #train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
79 | # (70, 130, 180), (220, 20, 60), (0, 0, 142)]
80 | #train_id_to_color = np.array(train_id_to_color)
81 | #id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
82 |
83 | def __init__(self, novel_path, novel_no, novel_name='novel.txt', transform=None):
84 | self.root=os.path.join(novel_path,str(novel_no))
85 | self.root=os.path.join(self.root,novel_name)
86 | self.transform=transform
87 | file = open(self.root,'r').readlines()
88 | self.images=[]
89 | self.targets=[]
90 | for line in file:
91 | lines=line.strip('\n').split('\t')
92 | self.images.append(lines[0])
93 | self.targets.append(lines[1])
94 |
95 |
96 | # self.targets = self.images
97 | # print(self.images)
98 |
99 |
100 | # print(self.images[10])
101 | # print(self.images[102])
102 | # print(self.images[107])
103 | # print(self.images[197])
104 | # print(self.images[200])
105 | # print(self.images[207])
106 | # print(self.images[474])
107 | # print(self.images[486])
108 |
109 |
110 | @classmethod
111 | def encode_target(cls, target):
112 | target = cls.id_to_train_id[np.array(target)]
113 | return target
114 |
115 | @classmethod
116 | def decode_target(cls, target):
117 | target[target == 255] = 19
118 | #target = target.astype('uint8') + 1
119 | return cls.train_id_to_color[target]
120 |
121 | def __getitem__(self, index):
122 | """
123 | Args:
124 | index (int): Index
125 | Returns:
126 | tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
127 | than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
128 | """
129 | image = Image.open(self.images[index]).convert('RGB')
130 | # image = Image.open(self.images[index])
131 | target = Image.open(self.targets[index])
132 | if self.transform:
133 | image, target = self.transform(image, target)
134 | target = self.encode_target(target)
135 |
136 |
137 | # unloader = transforms.ToPILImage()
138 | #
139 | # plt.figure()
140 | # plt.imshow(unloader(image.cpu().clone()))
141 | # plt.show()
142 | #
143 | # plt.figure()
144 | # plt.imshow(target)
145 | # plt.show()
146 | #
147 | # plt.figure()
148 | # plt.imshow(target_true)
149 | # plt.show()
150 | #
151 | # instance, counts = np.unique(target, False, False, True)
152 | # print('target', instance, counts)
153 | # instance, counts = np.unique(target_true, False, False, True)
154 | # print('true', instance, counts)
155 | # return image
156 |
157 | return image, target
158 |
159 | def __len__(self):
160 | return len(self.images)
161 |
162 | def _load_json(self, path):
163 | with open(path, 'r') as file:
164 | data = json.load(file)
165 | return data
166 |
167 | def _get_target_suffix(self, mode, target_type):
168 | if target_type == 'instance':
169 | return '{}_instanceIds.png'.format(mode)
170 | elif target_type == 'semantic':
171 | return '{}_labelIds.png'.format(mode)
172 | elif target_type == 'color':
173 | return '{}_color.png'.format(mode)
174 | elif target_type == 'polygon':
175 | return '{}_polygons.json'.format(mode)
176 | elif target_type == 'depth':
177 | return '{}_disparity.png'.format(mode)
--------------------------------------------------------------------------------
/incremental/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .voc import VOCSegmentation
2 | from .cityscapes import Cityscapes
3 | from .cityscapes_novel import Cityscapes_Novel
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/cityscapes.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/cityscapes.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/cityscapes.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/cityscapes.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/cityscapes.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/cityscapes.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/cityscapes_novel.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/cityscapes_novel.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/cityscapes_novel.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/cityscapes_novel.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/voc.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/voc.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/voc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/voc.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/datasets/__pycache__/voc.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/datasets/__pycache__/voc.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/datasets/cityscapes.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from collections import namedtuple
4 |
5 | from matplotlib import set_loglevel
6 |
7 | import torch
8 | import torch.utils.data as data
9 | from PIL import Image
10 | import numpy as np
11 | import matplotlib.pyplot as plt
12 | from torchvision import transforms
13 | import cv2
14 |
15 | class Cityscapes(data.Dataset):
16 | """Cityscapes Dataset.
17 |
18 | **Parameters:**
19 | - **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
20 | - **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
21 | - **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
22 | - **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g., ``transforms.RandomCrop``
23 | - **target_type** (string, optional): Type of target to use: 'instance', 'semantic', 'polygon', 'color' or 'depth'.
24 | """
25 |
26 | # Based on https://github.com/mcordts/cityscapesScripts
27 | CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
28 | 'has_instances', 'ignore_in_eval', 'color'])
29 | classes = [
30 | CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
31 | CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
32 | CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
33 | CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
34 | CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
35 | CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
36 | CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
37 | CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
38 | CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
39 | CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
40 | CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
41 | CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
42 | CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
43 | CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
44 | CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
45 | CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
46 | CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
47 | CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
48 | CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
49 | CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
50 | CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
51 | CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
52 | CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
53 | CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
54 | CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
55 | CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
56 | CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
57 | CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
58 | CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
59 | CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
60 | CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
61 | CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
62 | CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
63 | CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
64 | CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
65 | ]
66 |
67 | train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
68 | train_id_to_color.append([0, 0, 0])
69 | train_id_to_color = np.array(train_id_to_color)
70 | id_to_train_id = np.array([c.train_id for c in classes])
71 | unknown_target = None
72 | # unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
73 | # 12+7
74 | unknown_target = [10,13,14,15,16,17,18]
75 | # 14+5
76 | # unknown_target = [10,13,14,15,16]
77 | # 18+1
78 | #unknown_target = [13]
79 | # 16+3 / 16+1
80 | #unknown_target = [13,14,15]
81 | # unknown_target = [i for i in range(19)]
82 | # unknown_target.pop(13)
83 | print('unknown_target is : ', unknown_target)
84 | # unknown_target = [18]
85 | #train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
86 | # (70, 130, 180), (220, 20, 60), (0, 0, 142)]
87 | #train_id_to_color = np.array(train_id_to_color)
88 | #id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
89 |
90 | def __init__(self, root, split='train', mode='fine', target_type='semantic', transform=None):
91 | self.root = os.path.expanduser(root)
92 | self.mode = 'gtFine'
93 | self.target_type = target_type
94 | self.images_dir = os.path.join(self.root, 'leftImg8bit', split)
95 |
96 | self.targets_dir = os.path.join(self.root, self.mode, split)
97 | # self.targets_dir = self.images_dir
98 |
99 | self.transform = transform
100 |
101 | self.split = split
102 | self.images = []
103 | self.targets = []
104 |
105 |
106 | if split not in ['train', 'test_car', 'val','test_truck', 'test_bus', 'test_car_1_shot',
107 | 'test_truck_1_shot', 'test_bus_1_shot', 'car_vis', 'bus_vis','demo_video',
108 | 'car_100','car_1000']:
109 | raise ValueError('Invalid split for mode! Please use split="train", split="test"'
110 | ' or split="val"')
111 |
112 | if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
113 | raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
114 | ' specified "split" and "mode" are inside the "root" directory')
115 |
116 | for city in os.listdir(self.images_dir):
117 | img_dir = os.path.join(self.images_dir, city)
118 | target_dir = os.path.join(self.targets_dir, city)
119 | files_name = os.listdir(img_dir)
120 | files_name = sorted(files_name)
121 | for file_name in files_name:
122 | self.images.append(os.path.join(img_dir, file_name))
123 | target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
124 | self._get_target_suffix(self.mode, self.target_type))
125 | self.targets.append(os.path.join(target_dir, target_name))
126 |
127 | @classmethod
128 | def encode_target(cls, target):
129 |
130 | target = cls.id_to_train_id[np.array(target)]
131 | target_true = target.copy()
132 | # instance, counts = np.unique(target, False, False, True)
133 | # print('target', instance, counts)
134 | if cls.unknown_target is not None:
135 | cont = 0
136 | for h_c in cls.unknown_target:
137 |
138 | target[target == h_c - cont] = 100
139 | for c in range(h_c - cont + 1, 19):
140 | target[target == c] = c - 1
141 | # target_true[target_true == c] = c - 1
142 | cont = cont + 1
143 | # target_true[target == 100] = 19 - len(cls.unknown_target)
144 | target[target == 100] = 255
145 |
146 | return target, target_true
147 |
148 | @classmethod
149 | def decode_target(cls, target):
150 | target[target == 255] = 19
151 | #target = target.astype('uint8') + 1
152 | return cls.train_id_to_color[target]
153 |
154 | def __getitem__(self, index):
155 | """
156 | Args:
157 | index (int): Index
158 | Returns:
159 | tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
160 | than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
161 | """
162 | image = Image.open(self.images[index]).convert('RGB')
163 | # image = Image.open(self.images[index])
164 | target = Image.open(self.targets[index])
165 | if self.transform:
166 | image, target = self.transform(image, target)
167 | target, target_true = self.encode_target(target)
168 | target_lst, class_lst = self.encode_target_czifan(target)
169 |
170 | return image, target, target_true, target_lst, class_lst
171 |
172 | def __len__(self):
173 | return len(self.images)
174 |
175 | def _load_json(self, path):
176 | with open(path, 'r') as file:
177 | data = json.load(file)
178 | return data
179 |
180 | def _get_target_suffix(self, mode, target_type):
181 | if target_type == 'instance':
182 | return '{}_instanceIds.png'.format(mode)
183 | elif target_type == 'semantic':
184 | return '{}_labelIds.png'.format(mode)
185 | elif target_type == 'color':
186 | return '{}_color.png'.format(mode)
187 | elif target_type == 'polygon':
188 | return '{}_polygons.json'.format(mode)
189 | elif target_type == 'depth':
190 | return '{}_disparity.png'.format(mode)
191 |
192 | def encode_target_czifan(self, target, output_size=16):
193 | known_class = 19 - len(Cityscapes.unknown_target)
194 | target_lst = np.zeros((known_class + 1, *target.shape))
195 | class_lst = np.ones(known_class + 1) * 255
196 | for c in range(known_class):
197 | target_lst[c] = (target == c)
198 | class_lst[c] = c
199 | return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
200 |
201 | # target_lst = np.zeros((output_size**2, *target.shape))
202 | # class_lst = np.ones(output_size**2) * 255
203 | # for t in np.unique(target):
204 | # tmp = np.where(target == t)
205 | # gy, gx = int(np.mean(tmp[0])/32), int(np.mean(tmp[1])/32)
206 | # target_lst[gy*output_size+gx,...] = (target == t)
207 | # class_lst[gy*output_size+gx] = t
208 | # return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
209 |
210 | # temp = cv2.resize(target.astype(np.uint8), (output_size, output_size), interpolation=cv2.INTER_LINEAR).reshape(-1)
211 | # #temp = torch.nn.functional.interpolate(target.clone().unsqueeze(dim=1).float(), size=[output_size, output_size], mode="nearest").view(-1)
212 | # target_lst, class_lst = [], []
213 | # for t in temp:
214 | # if t == 255:
215 | # target_lst.append(np.zeros_like(target))
216 | # else:
217 | # target_lst.append(target == t)
218 | # class_lst.append(t.item())
219 | # target_lst = np.stack(target_lst, axis=0).astype(np.uint8) # (256, 512, 512)
220 | # class_lst = np.asarray(class_lst).astype(np.uint8) # (256,)
221 | # return target_lst, class_lst
222 |
--------------------------------------------------------------------------------
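The remapping loop in `Cityscapes.encode_target` above is compact but subtle: each unknown train id is marked with a sentinel value, every id above it is shifted down by one, and the marked pixels finally become the ignore index 255. Below is a minimal, runnable sketch of that loop on a toy label array, assuming the 16+3 setting (`unknown_target = [13, 14, 15]`, i.e. car, truck and bus held out); the toy array is illustrative only:

```python
import numpy as np

# Toy "image": one pixel per Cityscapes train id 0..18.
target = np.arange(19)
unknown_target = [13, 14, 15]  # 16+3 setting: car, truck, bus are the novel classes

cont = 0
for h_c in unknown_target:
    target[target == h_c - cont] = 100      # mark the unknown class with a sentinel
    for c in range(h_c - cont + 1, 19):     # shift every id above it down by one
        target[target == c] = c - 1
    cont += 1
target[target == 100] = 255                 # sentinel pixels -> ignore index

print(target)
# -> [0 1 2 3 4 5 6 7 8 9 10 11 12 255 255 255 13 14 15]
# The 16 known classes end up compacted to ids 0..15; train, motorcycle and
# bicycle (originally 16, 17, 18) become 13, 14, 15.
```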
/incremental/datasets/cityscapes_novel.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from collections import namedtuple
4 |
5 | from matplotlib import set_loglevel
6 |
7 | import torch
8 | import torch.utils.data as data
9 | from PIL import Image
10 | import numpy as np
11 | import matplotlib.pyplot as plt
12 | from torchvision import transforms
13 |
14 |
15 | class Cityscapes_Novel(data.Dataset):
16 | """Cityscapes Dataset.
17 |
18 | **Parameters:**
19 | - **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
20 | - **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
21 | - **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
22 | - **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
23 | - **target_transform** (callable, optional): A function/transform that takes in the target and transforms it.
24 | """
25 |
26 | # Based on https://github.com/mcordts/cityscapesScripts
27 | CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
28 | 'has_instances', 'ignore_in_eval', 'color'])
29 | classes = [
30 | CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
31 | CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
32 | CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
33 | CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
34 | CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
35 | CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
36 | CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
37 | CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
38 | CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
39 | CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
40 | CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
41 | CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
42 | CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
43 | CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
44 | CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
45 | CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
46 | CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
47 | CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
48 | CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
49 | CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
50 | CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
51 | CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
52 | CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
53 | CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
54 | CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
55 | CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
56 | CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
57 | CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
58 | CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
59 | CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
60 | CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
61 | CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
62 | CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
63 | CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
64 | CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
65 | ]
66 |
67 | train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
68 | train_id_to_color.append([0, 0, 0])
69 | train_id_to_color = np.array(train_id_to_color)
70 | id_to_train_id = np.array([c.train_id for c in classes])
71 | unknown_target = None
72 | # unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
73 | unknown_target = [13,14,15]
74 | # unknown_target = [i for i in range(19)]
75 | # unknown_target.pop(13)
76 | print('unknown_target is : ', unknown_target)
77 | # unknown_target = [18]
78 | #train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
79 | # (70, 130, 180), (220, 20, 60), (0, 0, 142)]
80 | #train_id_to_color = np.array(train_id_to_color)
81 | #id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
82 |
83 | def __init__(self, novel_path, novel_no, novel_name='novel.txt', transform=None):
84 | self.root=os.path.join(novel_path,str(novel_no))
85 | self.root=os.path.join(self.root,novel_name)
86 | self.transform=transform
87 | file = open(self.root,'r').readlines()
88 | self.images=[]
89 | self.targets=[]
90 | for line in file:
91 | lines=line.strip('\n').split('\t')
92 | self.images.append(lines[0])
93 | self.targets.append(lines[1])
94 |
95 |
96 | # self.targets = self.images
97 | # print(self.images)
98 |
99 |
100 | # print(self.images[10])
101 | # print(self.images[102])
102 | # print(self.images[107])
103 | # print(self.images[197])
104 | # print(self.images[200])
105 | # print(self.images[207])
106 | # print(self.images[474])
107 | # print(self.images[486])
108 |
109 |
110 | @classmethod
111 | def encode_target(cls, target):
112 | target = cls.id_to_train_id[np.array(target)]
113 | return target
114 |
115 | @classmethod
116 | def decode_target(cls, target):
117 | target[target == 255] = 19
118 | #target = target.astype('uint8') + 1
119 | return cls.train_id_to_color[target]
120 |
121 | def __getitem__(self, index):
122 | """
123 | Args:
124 | index (int): Index
125 | Returns:
126 | tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
127 | than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
128 | """
129 | image = Image.open(self.images[index]).convert('RGB')
130 | # image = Image.open(self.images[index])
131 | target = Image.open(self.targets[index])
132 | if self.transform:
133 | image, target = self.transform(image, target)
134 | target = self.encode_target(target)
135 |
136 |
137 | # unloader = transforms.ToPILImage()
138 | #
139 | # plt.figure()
140 | # plt.imshow(unloader(image.cpu().clone()))
141 | # plt.show()
142 | #
143 | # plt.figure()
144 | # plt.imshow(target)
145 | # plt.show()
146 | #
147 | # plt.figure()
148 | # plt.imshow(target_true)
149 | # plt.show()
150 | #
151 | # instance, counts = np.unique(target, False, False, True)
152 | # print('target', instance, counts)
153 | # instance, counts = np.unique(target_true, False, False, True)
154 | # print('true', instance, counts)
155 | # return image
156 |
157 | return image, target
158 |
159 | def __len__(self):
160 | return len(self.images)
161 |
162 | def _load_json(self, path):
163 | with open(path, 'r') as file:
164 | data = json.load(file)
165 | return data
166 |
167 | def _get_target_suffix(self, mode, target_type):
168 | if target_type == 'instance':
169 | return '{}_instanceIds.png'.format(mode)
170 | elif target_type == 'semantic':
171 | return '{}_labelIds.png'.format(mode)
172 | elif target_type == 'color':
173 | return '{}_color.png'.format(mode)
174 | elif target_type == 'polygon':
175 | return '{}_polygons.json'.format(mode)
176 | elif target_type == 'depth':
177 | return '{}_disparity.png'.format(mode)
--------------------------------------------------------------------------------
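`Cityscapes_Novel.__init__` reads `novel/<novel_no>/novel.txt` line by line and splits each line on a tab, pairing an image path with a label path. A minimal usage sketch follows; the file paths inside novel.txt shown in the comment are hypothetical placeholders, not files shipped with this repo:

```python
from datasets import Cityscapes_Novel

# Each line of novel/<novel_no>/novel.txt pairs an image path and a label path,
# separated by a tab, e.g. (hypothetical paths):
#   .../leftImg8bit/train/aachen/..._leftImg8bit.png<TAB>.../gtFine/train/aachen/..._gtFine_labelIds.png

novel_set = Cityscapes_Novel(novel_path='./novel', novel_no=13,
                             novel_name='novel.txt', transform=None)
image, target = novel_set[0]  # PIL image and train-id-encoded label array
print(len(novel_set), image.size, target.shape)
```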
/incremental/datasets/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import os.path
3 | import hashlib
4 | import errno
5 | from tqdm import tqdm
6 |
7 |
8 | def gen_bar_updater(pbar):
9 | def bar_update(count, block_size, total_size):
10 | if pbar.total is None and total_size:
11 | pbar.total = total_size
12 | progress_bytes = count * block_size
13 | pbar.update(progress_bytes - pbar.n)
14 |
15 | return bar_update
16 |
17 |
18 | def check_integrity(fpath, md5=None):
19 | if md5 is None:
20 | return True
21 | if not os.path.isfile(fpath):
22 | return False
23 | md5o = hashlib.md5()
24 | with open(fpath, 'rb') as f:
25 | # read in 1MB chunks
26 | for chunk in iter(lambda: f.read(1024 * 1024), b''):
27 | md5o.update(chunk)
28 | md5c = md5o.hexdigest()
29 | if md5c != md5:
30 | return False
31 | return True
32 |
33 |
34 | def makedir_exist_ok(dirpath):
35 | """
36 | Python2 support for os.makedirs(.., exist_ok=True)
37 | """
38 | try:
39 | os.makedirs(dirpath)
40 | except OSError as e:
41 | if e.errno == errno.EEXIST:
42 | pass
43 | else:
44 | raise
45 |
46 |
47 | def download_url(url, root, filename=None, md5=None):
48 | """Download a file from a url and place it in root.
49 | Args:
50 | url (str): URL to download file from
51 | root (str): Directory to place downloaded file in
52 | filename (str): Name to save the file under. If None, use the basename of the URL
53 | md5 (str): MD5 checksum of the download. If None, do not check
54 | """
55 | from six.moves import urllib
56 |
57 | root = os.path.expanduser(root)
58 | if not filename:
59 | filename = os.path.basename(url)
60 | fpath = os.path.join(root, filename)
61 |
62 | makedir_exist_ok(root)
63 |
64 | # downloads file
65 | if os.path.isfile(fpath) and check_integrity(fpath, md5):
66 | print('Using downloaded and verified file: ' + fpath)
67 | else:
68 | try:
69 | print('Downloading ' + url + ' to ' + fpath)
70 | urllib.request.urlretrieve(
71 | url, fpath,
72 | reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
73 | )
74 | except OSError:
75 | if url[:5] == 'https':
76 | url = url.replace('https:', 'http:')
77 | print('Failed download. Trying https -> http instead.'
78 | ' Downloading ' + url + ' to ' + fpath)
79 | urllib.request.urlretrieve(
80 | url, fpath,
81 | reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
82 | )
83 |
84 |
85 | def list_dir(root, prefix=False):
86 | """List all directories at a given root
87 | Args:
88 | root (str): Path to directory whose folders need to be listed
89 | prefix (bool, optional): If true, prepends the path to each result, otherwise
90 | only returns the name of the directories found
91 | """
92 | root = os.path.expanduser(root)
93 | directories = list(
94 | filter(
95 | lambda p: os.path.isdir(os.path.join(root, p)),
96 | os.listdir(root)
97 | )
98 | )
99 |
100 | if prefix is True:
101 | directories = [os.path.join(root, d) for d in directories]
102 |
103 | return directories
104 |
105 |
106 | def list_files(root, suffix, prefix=False):
107 | """List all files ending with a suffix at a given root
108 | Args:
109 | root (str): Path to directory whose folders need to be listed
110 | suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
111 | It uses the Python "str.endswith" method and is passed directly
112 | prefix (bool, optional): If true, prepends the path to each result, otherwise
113 | only returns the name of the files found
114 | """
115 | root = os.path.expanduser(root)
116 | files = list(
117 | filter(
118 | lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
119 | os.listdir(root)
120 | )
121 | )
122 |
123 | if prefix is True:
124 | files = [os.path.join(root, d) for d in files]
125 |
126 | return files
--------------------------------------------------------------------------------
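A short usage sketch for the helpers above. The URL, filename and MD5 are taken from the `DATASET_YEAR_DICT` in `datasets/voc.py` below; `./data` is an arbitrary destination directory chosen for illustration:

```python
from datasets.utils import download_url, list_files

# Fetch the VOC2012 trainval archive; the download is skipped if the file
# already exists and its MD5 checksum verifies.
download_url('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
             root='./data',
             filename='VOCtrainval_11-May-2012.tar',
             md5='6cd6e144f989b92b3379bac3b3de84fd')

# List the downloaded archives, prepending the root to each result.
print(list_files('./data', suffix='.tar', prefix=True))
```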
/incremental/datasets/voc.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import tarfile
4 | import collections
5 | import torch.utils.data as data
6 | import shutil
7 | import numpy as np
8 |
9 | from PIL import Image
10 | from torchvision.datasets.utils import download_url, check_integrity
11 |
12 | DATASET_YEAR_DICT = {
13 | '2012': {
14 | 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
15 | 'filename': 'VOCtrainval_11-May-2012.tar',
16 | 'md5': '6cd6e144f989b92b3379bac3b3de84fd',
17 | 'base_dir': 'VOCdevkit/VOC2012'
18 | },
19 | '2011': {
20 | 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',
21 | 'filename': 'VOCtrainval_25-May-2011.tar',
22 | 'md5': '6c3384ef61512963050cb5d687e5bf1e',
23 | 'base_dir': 'TrainVal/VOCdevkit/VOC2011'
24 | },
25 | '2010': {
26 | 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
27 | 'filename': 'VOCtrainval_03-May-2010.tar',
28 | 'md5': 'da459979d0c395079b5c75ee67908abb',
29 | 'base_dir': 'VOCdevkit/VOC2010'
30 | },
31 | '2009': {
32 | 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',
33 | 'filename': 'VOCtrainval_11-May-2009.tar',
34 | 'md5': '59065e4b188729180974ef6572f6a212',
35 | 'base_dir': 'VOCdevkit/VOC2009'
36 | },
37 | '2008': {
38 | 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',
39 |         'filename': 'VOCtrainval_14-Jul-2008.tar',
40 | 'md5': '2629fa636546599198acfcfbfcf1904a',
41 | 'base_dir': 'VOCdevkit/VOC2008'
42 | },
43 | '2007': {
44 | 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
45 | 'filename': 'VOCtrainval_06-Nov-2007.tar',
46 | 'md5': 'c52e279531787c972589f7e41ab4ae64',
47 | 'base_dir': 'VOCdevkit/VOC2007'
48 | }
49 | }
50 |
51 |
52 | def voc_cmap(N=256, normalized=False):
53 | def bitget(byteval, idx):
54 | return ((byteval & (1 << idx)) != 0)
55 |
56 | dtype = 'float32' if normalized else 'uint8'
57 | cmap = np.zeros((N, 3), dtype=dtype)
58 | for i in range(N):
59 | r = g = b = 0
60 | c = i
61 | for j in range(8):
62 | r = r | (bitget(c, 0) << 7-j)
63 | g = g | (bitget(c, 1) << 7-j)
64 | b = b | (bitget(c, 2) << 7-j)
65 | c = c >> 3
66 |
67 | cmap[i] = np.array([r, g, b])
68 |
69 | cmap = cmap/255 if normalized else cmap
70 | return cmap
71 |
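# Worked example (sketch): the bit-interleaving above reproduces the standard
# VOC palette, e.g. voc_cmap()[0] == [0, 0, 0] (background),
# voc_cmap()[1] == [128, 0, 0] (aeroplane), voc_cmap()[15] == [192, 128, 128]
# (person); with normalized=True the same values are scaled into [0, 1].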
72 | class VOCSegmentation(data.Dataset):
73 |     """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
74 | Args:
75 | root (string): Root directory of the VOC Dataset.
76 |         year (string, optional): The dataset year, supports years 2007 to 2012
77 |             (use ``2012_aug`` for the augmented 2012 training split).
77 | image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
78 | download (bool, optional): If true, downloads the dataset from the internet and
79 | puts it in root directory. If dataset is already downloaded, it is not
80 | downloaded again.
81 | transform (callable, optional): A function/transform that takes in an PIL image
82 | and returns a transformed version. E.g, ``transforms.RandomCrop``
83 | """
84 | cmap = voc_cmap()
85 | def __init__(self,
86 | root,
87 | year='2012',
88 | image_set='train',
89 | download=False,
90 | transform=None):
91 |
92 | is_aug=False
93 | if year=='2012_aug':
94 | is_aug = True
95 | year = '2012'
96 |
97 | self.root = os.path.expanduser(root)
98 | self.year = year
99 | self.url = DATASET_YEAR_DICT[year]['url']
100 | self.filename = DATASET_YEAR_DICT[year]['filename']
101 | self.md5 = DATASET_YEAR_DICT[year]['md5']
102 | self.transform = transform
103 |
104 | self.image_set = image_set
105 | base_dir = DATASET_YEAR_DICT[year]['base_dir']
106 | voc_root = os.path.join(self.root, base_dir)
107 | image_dir = os.path.join(voc_root, 'JPEGImages')
108 |
109 | if download:
110 | download_extract(self.url, self.root, self.filename, self.md5)
111 |
112 | if not os.path.isdir(voc_root):
113 | raise RuntimeError('Dataset not found or corrupted.' +
114 | ' You can use download=True to download it')
115 |
116 | if is_aug and image_set=='train':
117 | mask_dir = os.path.join(voc_root, 'SegmentationClassAug')
118 | assert os.path.exists(mask_dir), "SegmentationClassAug not found, please refer to README.md and prepare it manually"
119 |             split_f = os.path.join(self.root, 'train_aug.txt')  # './datasets/data/train_aug.txt'
120 | else:
121 | mask_dir = os.path.join(voc_root, 'SegmentationClass')
122 | splits_dir = os.path.join(voc_root, 'ImageSets/Segmentation')
123 | split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')
124 |
125 | if not os.path.exists(split_f):
126 | raise ValueError(
127 | 'Wrong image_set entered! Please use image_set="train" '
128 | 'or image_set="trainval" or image_set="val"')
129 |
130 |         with open(split_f, "r") as f:
131 | file_names = [x.strip() for x in f.readlines()]
132 |
133 | self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
134 | self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names]
135 | assert (len(self.images) == len(self.masks))
136 |
137 | def __getitem__(self, index):
138 | """
139 | Args:
140 | index (int): Index
141 | Returns:
142 | tuple: (image, target) where target is the image segmentation.
143 | """
144 | img = Image.open(self.images[index]).convert('RGB')
145 | target = Image.open(self.masks[index])
146 | if self.transform is not None:
147 | img, target = self.transform(img, target)
148 |
149 | return img, target
150 |
151 |
152 | def __len__(self):
153 | return len(self.images)
154 |
155 | @classmethod
156 | def decode_target(cls, mask):
157 | """decode semantic mask to RGB image"""
158 | return cls.cmap[mask]
159 |
160 | def download_extract(url, root, filename, md5):
161 | download_url(url, root, filename, md5)
162 | with tarfile.open(os.path.join(root, filename), "r") as tar:
163 | tar.extractall(path=root)
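
A minimal usage sketch for the dataset above; `./data` is a placeholder root, `download=True` fetches the tar on first use, and the training code normally passes a paired image/mask transform instead of `None`:

import numpy as np
from datasets.voc import VOCSegmentation  # assumed package path

dataset = VOCSegmentation(root='./data', year='2012', image_set='val',
                          download=True, transform=None)
img, target = dataset[0]                        # PIL images when transform is None
rgb_mask = VOCSegmentation.decode_target(np.array(target))  # (H, W, 3) uint8 palette
print(len(dataset), rgb_mask.shape)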
--------------------------------------------------------------------------------
/incremental/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .stream_metrics import StreamSegMetrics, AverageMeter
2 |
3 |
--------------------------------------------------------------------------------
/incremental/metrics/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/metrics/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/metrics/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/metrics/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/metrics/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/metrics/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/metrics/__pycache__/stream_metrics.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/metrics/__pycache__/stream_metrics.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/metrics/__pycache__/stream_metrics.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/metrics/__pycache__/stream_metrics.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/metrics/__pycache__/stream_metrics.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/metrics/__pycache__/stream_metrics.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/metrics/stream_metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from sklearn.metrics import confusion_matrix
3 |
4 | class _StreamMetrics(object):
5 | def __init__(self):
6 | """ Overridden by subclasses """
7 | raise NotImplementedError()
8 |
9 | def update(self, gt, pred):
10 | """ Overridden by subclasses """
11 | raise NotImplementedError()
12 |
13 | def get_results(self):
14 | """ Overridden by subclasses """
15 | raise NotImplementedError()
16 |
17 | def to_str(self, metrics):
18 | """ Overridden by subclasses """
19 | raise NotImplementedError()
20 |
21 | def reset(self):
22 | """ Overridden by subclasses """
23 | raise NotImplementedError()
24 |
25 | class StreamSegMetrics(_StreamMetrics):
26 | """
27 | Stream Metrics for Semantic Segmentation Task
28 | """
29 | def __init__(self, n_classes, known_class=None):
30 |         self.n_classes = n_classes
31 | self.confusion_matrix = np.zeros((n_classes, n_classes))
32 | self.known_class = known_class
33 | def update(self, label_trues, label_preds):
34 | for lt, lp in zip(label_trues, label_preds):
35 | self.confusion_matrix += self._fast_hist( lt.flatten(), lp.flatten() )
36 |
37 | @staticmethod
38 | def to_str(results):
39 | string = "\n"
40 | for k, v in results.items():
41 | if k!="Class IoU":
42 | string += "%s: %f\n"%(k, v)
43 |
44 | #string+='Class IoU:\n'
45 | #for k, v in results['Class IoU'].items():
46 | # string += "\tclass %d: %f\n"%(k, v)
47 | return string
48 |
49 | def _fast_hist(self, label_true, label_pred):
50 | mask = (label_true >= 0) & (label_true < self.n_classes)
51 | hist = np.bincount(
52 | self.n_classes * label_true[mask].astype(int) + label_pred[mask],
53 | minlength=self.n_classes ** 2,
54 | ).reshape(self.n_classes, self.n_classes)
55 | return hist
56 |
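# Worked example (sketch): with n_classes = 3, label_true = [0, 1, 2] and
# label_pred = [0, 2, 2] hash to bins [3*0+0, 3*1+2, 3*2+2] = [0, 5, 8];
# reshaping the bincount to 3x3 yields hist[0, 0] = hist[1, 2] = hist[2, 2] = 1,
# i.e. rows index ground truth and columns index predictions.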
57 | def get_results(self):
58 | """Returns accuracy score evaluation result.
59 | - overall accuracy
60 | - mean accuracy
61 | - mean IU
62 | - fwavacc
63 | """
64 | hist = self.confusion_matrix
65 | acc = np.diag(hist).sum() / hist.sum()
66 | acc_cls = np.diag(hist) / hist.sum(axis=1)
67 | acc_cls = np.nanmean(acc_cls)
68 | iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
69 | mean_iu = np.nanmean(iu)
70 |         # print(iu)  # debug output of per-class IoU
71 | freq = hist.sum(axis=1) / hist.sum()
72 | fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
73 | cls_iu = dict(zip(range(self.n_classes), iu))
74 |         if self.known_class is None:
75 | return {
76 | "Overall Acc": acc,
77 | "Mean Acc": acc_cls,
78 | "FreqW Acc": fwavacc,
79 | "Mean IoU": mean_iu,
80 | "Class IoU": cls_iu,
81 | }
82 | else:
83 | known_iu = iu[0:self.known_class]
84 | unknown_iu = iu[self.known_class:]
85 | known_mean_iu = np.nanmean(known_iu)
86 |             unknown_mean_iu = np.nanmean(unknown_iu)
87 | return {
88 | "Overall Acc": acc,
89 | "Mean Acc": acc_cls,
90 | "FreqW Acc": fwavacc,
91 | "Mean IoU": mean_iu,
92 | "Class IoU": cls_iu,
93 | "Known IoU": known_mean_iu,
94 | "Unknown IoU": unknown_mean_iu,
95 | }
96 |
97 | def reset(self):
98 | self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
99 |
100 | class AverageMeter(object):
101 | """Computes average values"""
102 | def __init__(self):
103 | self.book = dict()
104 |
105 | def reset_all(self):
106 | self.book.clear()
107 |
108 | def reset(self, id):
109 | item = self.book.get(id, None)
110 | if item is not None:
111 | item[0] = 0
112 | item[1] = 0
113 |
114 | def update(self, id, val):
115 | record = self.book.get(id, None)
116 | if record is None:
117 | self.book[id] = [val, 1]
118 | else:
119 | record[0]+=val
120 | record[1]+=1
121 |
122 | def get_results(self, id):
123 | record = self.book.get(id, None)
124 | assert record is not None
125 | return record[0] / record[1]
126 |
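
A small sketch of the API above, assuming `from metrics import StreamSegMetrics, AverageMeter` (re-exported by this package's `__init__.py`); the random arrays and the `known_class=16` split are illustrative only:

import numpy as np

metrics = StreamSegMetrics(n_classes=19, known_class=16)
labels = np.random.randint(0, 19, size=(2, 64, 64))   # fake ground truth
preds  = np.random.randint(0, 19, size=(2, 64, 64))   # fake predictions
metrics.update(labels, preds)
results = metrics.get_results()          # includes "Known IoU" / "Unknown IoU" here
print(StreamSegMetrics.to_str(results))

meter = AverageMeter()
meter.update('loss', 0.7)
meter.update('loss', 0.5)
print(meter.get_results('loss'))         # 0.6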
--------------------------------------------------------------------------------
/incremental/network/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/.DS_Store
--------------------------------------------------------------------------------
/incremental/network/.ipynb_checkpoints/_deeplab-checkpoint.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from torch.nn import functional as F
4 |
5 | from .utils import _SimpleSegmentationModel, _SimpleSegmentationModel_embedding, _SimpleSegmentationModel_embedding_self_distillation,_SimpleSegmentationModel_Metric
6 |
7 |
8 | __all__ = ["DeepLabV3"]
9 |
10 |
11 | class DeepLabV3(_SimpleSegmentationModel):
12 | """
13 | Implements DeepLabV3 model from
14 | `"Rethinking Atrous Convolution for Semantic Image Segmentation"
15 |     <https://arxiv.org/abs/1706.05587>`_.
16 |
17 | Arguments:
18 | backbone (nn.Module): the network used to compute the features for the model.
19 | The backbone should return an OrderedDict[Tensor], with the key being
20 | "out" for the last feature map used, and "aux" if an auxiliary classifier
21 | is used.
22 | classifier (nn.Module): module that takes the "out" element returned from
23 | the backbone and returns a dense prediction.
24 | aux_classifier (nn.Module, optional): auxiliary classifier used during training
25 | """
26 | pass
27 |
28 | class DeepLabV3_metric(_SimpleSegmentationModel_Metric):
29 | pass
30 |
31 | class DeepLabV3_embedding(_SimpleSegmentationModel_embedding):
32 | """
33 | Implements DeepLabV3 model from
34 | `"Rethinking Atrous Convolution for Semantic Image Segmentation"
35 |     <https://arxiv.org/abs/1706.05587>`_.
36 |
37 | Arguments:
38 | backbone (nn.Module): the network used to compute the features for the model.
39 | The backbone should return an OrderedDict[Tensor], with the key being
40 | "out" for the last feature map used, and "aux" if an auxiliary classifier
41 | is used.
42 | classifier (nn.Module): module that takes the "out" element returned from
43 | the backbone and returns a dense prediction.
44 | aux_classifier (nn.Module, optional): auxiliary classifier used during training
45 | """
46 | pass
47 |
48 | class DeepLabV3_embedding_self_distillation(_SimpleSegmentationModel_embedding_self_distillation):
49 | """
50 | Implements DeepLabV3 model from
51 | `"Rethinking Atrous Convolution for Semantic Image Segmentation"
52 |     <https://arxiv.org/abs/1706.05587>`_.
53 |
54 | Arguments:
55 | backbone (nn.Module): the network used to compute the features for the model.
56 | The backbone should return an OrderedDict[Tensor], with the key being
57 | "out" for the last feature map used, and "aux" if an auxiliary classifier
58 | is used.
59 | classifier (nn.Module): module that takes the "out" element returned from
60 | the backbone and returns a dense prediction.
61 | aux_classifier (nn.Module, optional): auxiliary classifier used during training
62 | """
63 | pass
64 |
65 |
66 | # class DeepLabHeadV3Plus(nn.Module):
67 | # def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
68 | # super(DeepLabHeadV3Plus, self).__init__()
69 | # self.project = nn.Sequential(
70 | # nn.Conv2d(low_level_channels, 48, 1, bias=False),
71 | # nn.BatchNorm2d(48),
72 | # nn.ReLU(inplace=True),
73 | # )
74 | #
75 | # self.aspp = ASPP(in_channels, aspp_dilate)
76 | #
77 | # self.classifier = nn.Sequential(
78 | # nn.Conv2d(304, 256, 3, padding=1, bias=False),
79 | # nn.BatchNorm2d(256),
80 | # nn.ReLU(inplace=True),
81 | # nn.Conv2d(256, num_classes, 1)
82 | # )
83 | # self._init_weight()
84 | #
85 | # def forward(self, feature):
86 | # low_level_feature = self.project(feature['low_level'])
87 | # output_feature = self.aspp(feature['out'])
88 | # output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
89 | # align_corners=False)
90 | # return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
91 | #
92 | # def _init_weight(self):
93 | # for m in self.modules():
94 | # if isinstance(m, nn.Conv2d):
95 | # nn.init.kaiming_normal_(m.weight)
96 | # elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
97 | # nn.init.constant_(m.weight, 1)
98 | # nn.init.constant_(m.bias, 0)
99 |
100 | # class DeepLabHead(nn.Module):
101 | # def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
102 | # super(DeepLabHead, self).__init__()
103 | #
104 | # self.classifier = nn.Sequential(
105 | # ASPP(in_channels, aspp_dilate),
106 | # nn.Conv2d(256, 256, 3, padding=1, bias=False),
107 | # nn.BatchNorm2d(256),
108 | # nn.ReLU(inplace=True),
109 | # nn.Conv2d(256, num_classes, 1)
110 | # )
111 | # self._init_weight()
112 | #
113 | # def forward(self, feature):
114 | # return self.classifier( feature['out'] )
115 | #
116 | # def _init_weight(self):
117 | # for m in self.modules():
118 | # if isinstance(m, nn.Conv2d):
119 | # nn.init.kaiming_normal_(m.weight)
120 | # elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
121 | # nn.init.constant_(m.weight, 1)
122 | # nn.init.constant_(m.bias, 0)
123 | #
124 | # class AtrousSeparableConvolution(nn.Module):
125 | # """ Atrous Separable Convolution
126 | # """
127 | # def __init__(self, in_channels, out_channels, kernel_size,
128 | # stride=1, padding=0, dilation=1, bias=True):
129 | # super(AtrousSeparableConvolution, self).__init__()
130 | # self.body = nn.Sequential(
131 | # # Separable Conv
132 | # nn.Conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ),
133 | # # PointWise Conv
134 | # nn.Conv2d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
135 | # )
136 | #
137 | # self._init_weight()
138 | #
139 | # def forward(self, x):
140 | # return self.body(x)
141 | #
142 | # def _init_weight(self):
143 | # for m in self.modules():
144 | # if isinstance(m, nn.Conv2d):
145 | # nn.init.kaiming_normal_(m.weight)
146 | # elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
147 | # nn.init.constant_(m.weight, 1)
148 | # nn.init.constant_(m.bias, 0)
149 | #
150 | # class ASPPConv(nn.Sequential):
151 | # def __init__(self, in_channels, out_channels, dilation):
152 | # modules = [
153 | # nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
154 | # nn.BatchNorm2d(out_channels),
155 | # nn.ReLU(inplace=True)
156 | # ]
157 | # super(ASPPConv, self).__init__(*modules)
158 | #
159 | # class ASPPPooling(nn.Sequential):
160 | # def __init__(self, in_channels, out_channels):
161 | # super(ASPPPooling, self).__init__(
162 | # nn.AdaptiveAvgPool2d(1),
163 | # nn.Conv2d(in_channels, out_channels, 1, bias=False),
164 | # nn.BatchNorm2d(out_channels),
165 | # nn.ReLU(inplace=True))
166 | #
167 | # def forward(self, x):
168 | # size = x.shape[-2:]
169 | # x = super(ASPPPooling, self).forward(x)
170 | # return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
171 | #
172 | # class ASPP(nn.Module):
173 | # def __init__(self, in_channels, atrous_rates):
174 | # super(ASPP, self).__init__()
175 | # out_channels = 256
176 | # modules = []
177 | # modules.append(nn.Sequential(
178 | # nn.Conv2d(in_channels, out_channels, 1, bias=False),
179 | # nn.BatchNorm2d(out_channels),
180 | # nn.ReLU(inplace=True)))
181 | #
182 | # rate1, rate2, rate3 = tuple(atrous_rates)
183 | # modules.append(ASPPConv(in_channels, out_channels, rate1))
184 | # modules.append(ASPPConv(in_channels, out_channels, rate2))
185 | # modules.append(ASPPConv(in_channels, out_channels, rate3))
186 | # modules.append(ASPPPooling(in_channels, out_channels))
187 | #
188 | # self.convs = nn.ModuleList(modules)
189 | #
190 | # self.project = nn.Sequential(
191 | # nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
192 | # nn.BatchNorm2d(out_channels),
193 | # nn.ReLU(inplace=True),
194 | # nn.Dropout(0.1),)
195 | #
196 | # def forward(self, x):
197 | # res = []
198 | # for conv in self.convs:
199 | # res.append(conv(x))
200 | # res = torch.cat(res, dim=1)
201 | # return self.project(res)
202 | #
203 | #
204 | #
205 | # def convert_to_separable_conv(module):
206 | # new_module = module
207 | # if isinstance(module, nn.Conv2d) and module.kernel_size[0]>1:
208 | # new_module = AtrousSeparableConvolution(module.in_channels,
209 | # module.out_channels,
210 | # module.kernel_size,
211 | # module.stride,
212 | # module.padding,
213 | # module.dilation,
214 | # module.bias)
215 | # for name, child in module.named_children():
216 | # new_module.add_module(name, convert_to_separable_conv(child))
217 | # return new_module
--------------------------------------------------------------------------------
/incremental/network/.ipynb_checkpoints/modeling-checkpoint.py:
--------------------------------------------------------------------------------
1 | from PIL.Image import NONE  # unused import
2 | from .utils import IntermediateLayerGetter, DeepLabHeadV3Plus, DeepLabHead, DeepLabHeadV3Plus_Metric
3 | from ._deeplab import DeepLabV3, DeepLabV3_embedding, DeepLabV3_embedding_self_distillation, DeepLabV3_metric
4 | from .backbone import resnet
5 | from .backbone import mobilenetv2
6 |
7 | def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
8 |
9 | if output_stride==8:
10 | replace_stride_with_dilation=[False, True, True]
11 | aspp_dilate = [12, 24, 36]
12 | else:
13 | replace_stride_with_dilation=[False, False, True]
14 | aspp_dilate = [6, 12, 18]
15 |
16 | backbone = resnet.__dict__[backbone_name](
17 | pretrained=pretrained_backbone,
18 | replace_stride_with_dilation=replace_stride_with_dilation)
19 |
20 | inplanes = 2048
21 | low_level_planes = 256
22 |
23 | if name=='deeplabv3plus':
24 | return_layers = {'layer4': 'out', 'layer1': 'low_level'}
25 | classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
26 | elif name=='deeplabv3':
27 | return_layers = {'layer4': 'out'}
28 | classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
29 | elif name=='deeplabv3plus_embedding':
30 | return_layers = {'layer4': 'out', 'layer1': 'low_level'}
31 | classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
32 | elif name=='deeplabv3plus_embedding_self_distillation':
33 | return_layers = {'layer4': 'out', 'layer1': 'low_level'}
34 | elif name=='deeplabv3plus_metirc_resnet101':
35 | return_layers= {'layer4': 'out', 'layer1': 'low_level'}
36 | classifier = DeepLabHeadV3Plus_Metric(inplanes, low_level_planes, num_classes, aspp_dilate,finetune)
37 |
38 | backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
39 |
40 |
41 | if name=='deeplabv3plus_embedding':
42 | model = DeepLabV3_embedding(backbone, classifier)
43 | elif name== 'deeplabv3plus_embedding_self_distillation':
44 | model = DeepLabV3_embedding_self_distillation(backbone)
45 | elif name== 'deeplabv3plus_metirc_resnet101':
46 | model = DeepLabV3_metric(backbone,classifier,finetune)
47 | else:
48 | model = DeepLabV3(backbone, classifier)
49 | return model
50 |
51 | def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
52 | if output_stride==8:
53 | aspp_dilate = [12, 24, 36]
54 | else:
55 | aspp_dilate = [6, 12, 18]
56 |
57 | backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
58 |
59 | # rename layers
60 | backbone.low_level_features = backbone.features[0:4]
61 | backbone.high_level_features = backbone.features[4:-1]
62 | backbone.features = None
63 | backbone.classifier = None
64 |
65 | inplanes = 320
66 | low_level_planes = 24
67 |
68 | if name=='deeplabv3plus':
69 | return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
70 | classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
71 | elif name=='deeplabv3':
72 | return_layers = {'high_level_features': 'out'}
73 | classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
74 | backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
75 |
76 | model = DeepLabV3(backbone, classifier)
77 | return model
78 |
79 | def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
80 |
81 | if backbone=='mobilenetv2':
82 | model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
83 | elif backbone.startswith('resnet'):
84 | model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune)
85 | else:
86 | raise NotImplementedError
87 | return model
88 |
89 |
90 | # Deeplab v3
91 |
92 | def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
93 | """Constructs a DeepLabV3 model with a ResNet-50 backbone.
94 |
95 | Args:
96 | num_classes (int): number of classes.
97 | output_stride (int): output stride for deeplab.
98 | pretrained_backbone (bool): If True, use the pretrained backbone.
99 | """
100 | return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
101 |
102 | def deeplabv3_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
103 | """Constructs a DeepLabV3 model with a ResNet-101 backbone.
104 |
105 | Args:
106 | num_classes (int): number of classes.
107 | output_stride (int): output stride for deeplab.
108 | pretrained_backbone (bool): If True, use the pretrained backbone.
109 | """
110 | return _load_model('deeplabv3', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
111 |
112 | def deeplabv3_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs):
113 | """Constructs a DeepLabV3 model with a MobileNetv2 backbone.
114 |
115 | Args:
116 | num_classes (int): number of classes.
117 | output_stride (int): output stride for deeplab.
118 | pretrained_backbone (bool): If True, use the pretrained backbone.
119 | """
120 | return _load_model('deeplabv3', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
121 |
122 |
123 | # Deeplab v3+
124 |
125 | def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
126 |     """Constructs a DeepLabV3+ model with a ResNet-50 backbone.
127 |
128 | Args:
129 | num_classes (int): number of classes.
130 | output_stride (int): output stride for deeplab.
131 | pretrained_backbone (bool): If True, use the pretrained backbone.
132 | """
133 | return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
134 |
135 |
136 | def deeplabv3plus_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
137 | """Constructs a DeepLabV3+ model with a ResNet-101 backbone.
138 |
139 | Args:
140 | num_classes (int): number of classes.
141 | output_stride (int): output stride for deeplab.
142 | pretrained_backbone (bool): If True, use the pretrained backbone.
143 | """
144 | return _load_model('deeplabv3plus', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
145 |
146 | def deeplabv3plus_embedding_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
147 |     """Constructs a DeepLabV3+ model with a ResNet-101 backbone and an embedding head.
148 |
149 | Args:
150 | num_classes (int): number of classes.
151 | output_stride (int): output stride for deeplab.
152 | pretrained_backbone (bool): If True, use the pretrained backbone.
153 | """
154 | return _load_model('deeplabv3plus_embedding', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
155 |
156 | def deeplabv3plus_embedding_self_distillation_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
157 |     """Constructs a DeepLabV3+ model with a ResNet-101 backbone and self-distillation embedding heads.
158 |
159 | Args:
160 | num_classes (int): number of classes.
161 | output_stride (int): output stride for deeplab.
162 | pretrained_backbone (bool): If True, use the pretrained backbone.
163 | """
164 | return _load_model('deeplabv3plus_embedding_self_distillation', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
165 |
166 |
167 |
168 | def deeplabv3plus_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True):
169 | """Constructs a DeepLabV3+ model with a MobileNetv2 backbone.
170 |
171 | Args:
172 | num_classes (int): number of classes.
173 | output_stride (int): output stride for deeplab.
174 | pretrained_backbone (bool): If True, use the pretrained backbone.
175 | """
176 | return _load_model('deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
177 |
178 | def deeplabv3plus_metirc_resnet101(num_classes=21, metric_dim=64, output_stride=8, pretrained_backbone=True, finetune=False):
179 |     """Constructs a DeepLabV3+ model with a ResNet-101 backbone and a metric-learning head."""
180 | return _load_model('deeplabv3plus_metirc_resnet101', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune)
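
A hedged sketch of driving the factory functions above (mirrored in network/modeling.py); the `network.modeling` import path is assumed, and the output shape assumes the wrapper upsamples logits to the input resolution, as torchvision-style `_SimpleSegmentationModel` classes do:

import torch
from network.modeling import deeplabv3plus_resnet101  # assumed package path

model = deeplabv3plus_resnet101(num_classes=21, output_stride=16,
                                pretrained_backbone=False).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 513, 513))
print(logits.shape)   # expected: torch.Size([1, 21, 513, 513])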
--------------------------------------------------------------------------------
/incremental/network/__init__.py:
--------------------------------------------------------------------------------
1 | from .modeling import *
2 | from .utils import convert_to_separable_conv
--------------------------------------------------------------------------------
/incremental/network/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/_deeplab.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/_deeplab.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/_deeplab.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/_deeplab.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/_deeplab.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/_deeplab.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/modeling.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/modeling.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/modeling.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/modeling.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/modeling.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/modeling.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/network/__pycache__/utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/__pycache__/utils.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/network/_deeplab.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from torch.nn import functional as F
4 |
5 | from .utils import _SimpleSegmentationModel, _SimpleSegmentationModel_embedding, _SimpleSegmentationModel_embedding_self_distillation,_SimpleSegmentationModel_Metric
6 |
7 |
8 | __all__ = ["DeepLabV3"]
9 |
10 |
11 | class DeepLabV3(_SimpleSegmentationModel):
12 | """
13 | Implements DeepLabV3 model from
14 | `"Rethinking Atrous Convolution for Semantic Image Segmentation"
15 |     <https://arxiv.org/abs/1706.05587>`_.
16 |
17 | Arguments:
18 | backbone (nn.Module): the network used to compute the features for the model.
19 | The backbone should return an OrderedDict[Tensor], with the key being
20 | "out" for the last feature map used, and "aux" if an auxiliary classifier
21 | is used.
22 | classifier (nn.Module): module that takes the "out" element returned from
23 | the backbone and returns a dense prediction.
24 | aux_classifier (nn.Module, optional): auxiliary classifier used during training
25 | """
26 | pass
27 |
28 | class DeepLabV3_metric(_SimpleSegmentationModel_Metric):
29 | pass
30 |
31 | class DeepLabV3_embedding(_SimpleSegmentationModel_embedding):
32 | """
33 | Implements DeepLabV3 model from
34 | `"Rethinking Atrous Convolution for Semantic Image Segmentation"
35 |     <https://arxiv.org/abs/1706.05587>`_.
36 |
37 | Arguments:
38 | backbone (nn.Module): the network used to compute the features for the model.
39 | The backbone should return an OrderedDict[Tensor], with the key being
40 | "out" for the last feature map used, and "aux" if an auxiliary classifier
41 | is used.
42 | classifier (nn.Module): module that takes the "out" element returned from
43 | the backbone and returns a dense prediction.
44 | aux_classifier (nn.Module, optional): auxiliary classifier used during training
45 | """
46 | pass
47 |
48 | class DeepLabV3_embedding_self_distillation(_SimpleSegmentationModel_embedding_self_distillation):
49 | """
50 | Implements DeepLabV3 model from
51 | `"Rethinking Atrous Convolution for Semantic Image Segmentation"
52 |     <https://arxiv.org/abs/1706.05587>`_.
53 |
54 | Arguments:
55 | backbone (nn.Module): the network used to compute the features for the model.
56 | The backbone should return an OrderedDict[Tensor], with the key being
57 | "out" for the last feature map used, and "aux" if an auxiliary classifier
58 | is used.
59 | classifier (nn.Module): module that takes the "out" element returned from
60 | the backbone and returns a dense prediction.
61 | aux_classifier (nn.Module, optional): auxiliary classifier used during training
62 | """
63 | pass
64 |
65 |
66 | # class DeepLabHeadV3Plus(nn.Module):
67 | # def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
68 | # super(DeepLabHeadV3Plus, self).__init__()
69 | # self.project = nn.Sequential(
70 | # nn.Conv2d(low_level_channels, 48, 1, bias=False),
71 | # nn.BatchNorm2d(48),
72 | # nn.ReLU(inplace=True),
73 | # )
74 | #
75 | # self.aspp = ASPP(in_channels, aspp_dilate)
76 | #
77 | # self.classifier = nn.Sequential(
78 | # nn.Conv2d(304, 256, 3, padding=1, bias=False),
79 | # nn.BatchNorm2d(256),
80 | # nn.ReLU(inplace=True),
81 | # nn.Conv2d(256, num_classes, 1)
82 | # )
83 | # self._init_weight()
84 | #
85 | # def forward(self, feature):
86 | # low_level_feature = self.project(feature['low_level'])
87 | # output_feature = self.aspp(feature['out'])
88 | # output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
89 | # align_corners=False)
90 | # return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
91 | #
92 | # def _init_weight(self):
93 | # for m in self.modules():
94 | # if isinstance(m, nn.Conv2d):
95 | # nn.init.kaiming_normal_(m.weight)
96 | # elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
97 | # nn.init.constant_(m.weight, 1)
98 | # nn.init.constant_(m.bias, 0)
99 |
100 | # class DeepLabHead(nn.Module):
101 | # def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
102 | # super(DeepLabHead, self).__init__()
103 | #
104 | # self.classifier = nn.Sequential(
105 | # ASPP(in_channels, aspp_dilate),
106 | # nn.Conv2d(256, 256, 3, padding=1, bias=False),
107 | # nn.BatchNorm2d(256),
108 | # nn.ReLU(inplace=True),
109 | # nn.Conv2d(256, num_classes, 1)
110 | # )
111 | # self._init_weight()
112 | #
113 | # def forward(self, feature):
114 | # return self.classifier( feature['out'] )
115 | #
116 | # def _init_weight(self):
117 | # for m in self.modules():
118 | # if isinstance(m, nn.Conv2d):
119 | # nn.init.kaiming_normal_(m.weight)
120 | # elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
121 | # nn.init.constant_(m.weight, 1)
122 | # nn.init.constant_(m.bias, 0)
123 | #
124 | # class AtrousSeparableConvolution(nn.Module):
125 | # """ Atrous Separable Convolution
126 | # """
127 | # def __init__(self, in_channels, out_channels, kernel_size,
128 | # stride=1, padding=0, dilation=1, bias=True):
129 | # super(AtrousSeparableConvolution, self).__init__()
130 | # self.body = nn.Sequential(
131 | # # Separable Conv
132 | # nn.Conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ),
133 | # # PointWise Conv
134 | # nn.Conv2d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
135 | # )
136 | #
137 | # self._init_weight()
138 | #
139 | # def forward(self, x):
140 | # return self.body(x)
141 | #
142 | # def _init_weight(self):
143 | # for m in self.modules():
144 | # if isinstance(m, nn.Conv2d):
145 | # nn.init.kaiming_normal_(m.weight)
146 | # elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
147 | # nn.init.constant_(m.weight, 1)
148 | # nn.init.constant_(m.bias, 0)
149 | #
150 | # class ASPPConv(nn.Sequential):
151 | # def __init__(self, in_channels, out_channels, dilation):
152 | # modules = [
153 | # nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
154 | # nn.BatchNorm2d(out_channels),
155 | # nn.ReLU(inplace=True)
156 | # ]
157 | # super(ASPPConv, self).__init__(*modules)
158 | #
159 | # class ASPPPooling(nn.Sequential):
160 | # def __init__(self, in_channels, out_channels):
161 | # super(ASPPPooling, self).__init__(
162 | # nn.AdaptiveAvgPool2d(1),
163 | # nn.Conv2d(in_channels, out_channels, 1, bias=False),
164 | # nn.BatchNorm2d(out_channels),
165 | # nn.ReLU(inplace=True))
166 | #
167 | # def forward(self, x):
168 | # size = x.shape[-2:]
169 | # x = super(ASPPPooling, self).forward(x)
170 | # return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
171 | #
172 | # class ASPP(nn.Module):
173 | # def __init__(self, in_channels, atrous_rates):
174 | # super(ASPP, self).__init__()
175 | # out_channels = 256
176 | # modules = []
177 | # modules.append(nn.Sequential(
178 | # nn.Conv2d(in_channels, out_channels, 1, bias=False),
179 | # nn.BatchNorm2d(out_channels),
180 | # nn.ReLU(inplace=True)))
181 | #
182 | # rate1, rate2, rate3 = tuple(atrous_rates)
183 | # modules.append(ASPPConv(in_channels, out_channels, rate1))
184 | # modules.append(ASPPConv(in_channels, out_channels, rate2))
185 | # modules.append(ASPPConv(in_channels, out_channels, rate3))
186 | # modules.append(ASPPPooling(in_channels, out_channels))
187 | #
188 | # self.convs = nn.ModuleList(modules)
189 | #
190 | # self.project = nn.Sequential(
191 | # nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
192 | # nn.BatchNorm2d(out_channels),
193 | # nn.ReLU(inplace=True),
194 | # nn.Dropout(0.1),)
195 | #
196 | # def forward(self, x):
197 | # res = []
198 | # for conv in self.convs:
199 | # res.append(conv(x))
200 | # res = torch.cat(res, dim=1)
201 | # return self.project(res)
202 | #
203 | #
204 | #
205 | # def convert_to_separable_conv(module):
206 | # new_module = module
207 | # if isinstance(module, nn.Conv2d) and module.kernel_size[0]>1:
208 | # new_module = AtrousSeparableConvolution(module.in_channels,
209 | # module.out_channels,
210 | # module.kernel_size,
211 | # module.stride,
212 | # module.padding,
213 | # module.dilation,
214 | # module.bias)
215 | # for name, child in module.named_children():
216 | # new_module.add_module(name, convert_to_separable_conv(child))
217 | # return new_module
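
The classes above are thin name-only wrappers around the base models in `.utils`; a minimal sketch of the same pattern, using stand-in names (not the repo's actual `_SimpleSegmentationModel`):

from torch import nn
from torch.nn import functional as F

class _SimpleSegSketch(nn.Module):            # stand-in base class
    def __init__(self, backbone, classifier):
        super().__init__()
        self.backbone = backbone              # returns an OrderedDict with an 'out' entry
        self.classifier = classifier
    def forward(self, x):
        out = self.classifier(self.backbone(x))
        return F.interpolate(out, size=x.shape[-2:],
                             mode='bilinear', align_corners=False)

class DeepLabV3Sketch(_SimpleSegSketch):      # the subclass adds only the name/docstring
    pass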
--------------------------------------------------------------------------------
/incremental/network/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from . import resnet
2 | from . import mobilenetv2
3 |
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/mobilenetv2.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/mobilenetv2.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/mobilenetv2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/mobilenetv2.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/mobilenetv2.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/mobilenetv2.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/resnet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/resnet.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/resnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/resnet.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/__pycache__/resnet.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/network/backbone/__pycache__/resnet.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/network/backbone/mobilenetv2.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | #from torchvision.models.utils import load_state_dict_from_url
3 | from torch.hub import load_state_dict_from_url
4 | import torch.nn.functional as F
5 |
6 | __all__ = ['MobileNetV2', 'mobilenet_v2']
7 |
8 |
9 | model_urls = {
10 | 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
11 | }
12 |
13 |
14 | def _make_divisible(v, divisor, min_value=None):
15 | """
16 | This function is taken from the original tf repo.
17 | It ensures that all layers have a channel number that is divisible by 8
18 | It can be seen here:
19 | https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
20 | :param v:
21 | :param divisor:
22 | :param min_value:
23 | :return:
24 | """
25 | if min_value is None:
26 | min_value = divisor
27 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
28 | # Make sure that round down does not go down by more than 10%.
29 | if new_v < 0.9 * v:
30 | new_v += divisor
31 | return new_v
32 |
33 |
34 | class ConvBNReLU(nn.Sequential):
35 | def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, dilation=1, groups=1):
36 | #padding = (kernel_size - 1) // 2
37 | super(ConvBNReLU, self).__init__(
38 | nn.Conv2d(in_planes, out_planes, kernel_size, stride, 0, dilation=dilation, groups=groups, bias=False),
39 | nn.BatchNorm2d(out_planes),
40 | nn.ReLU6(inplace=True)
41 | )
42 |
43 | def fixed_padding(kernel_size, dilation):
44 | kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
45 | pad_total = kernel_size_effective - 1
46 | pad_beg = pad_total // 2
47 | pad_end = pad_total - pad_beg
48 | return (pad_beg, pad_end, pad_beg, pad_end)
49 |
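# Worked example (sketch): fixed_padding(3, dilation=2) gives an effective
# kernel of 3 + 2*(2-1) = 5, pad_total = 4, hence (2, 2, 2, 2) -- the
# TensorFlow-style 'SAME' padding that InvertedResidual applies via F.pad.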
50 | class InvertedResidual(nn.Module):
51 | def __init__(self, inp, oup, stride, dilation, expand_ratio):
52 | super(InvertedResidual, self).__init__()
53 | self.stride = stride
54 | assert stride in [1, 2]
55 |
56 | hidden_dim = int(round(inp * expand_ratio))
57 | self.use_res_connect = self.stride == 1 and inp == oup
58 |
59 | layers = []
60 | if expand_ratio != 1:
61 | # pw
62 | layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
63 |
64 | layers.extend([
65 | # dw
66 | ConvBNReLU(hidden_dim, hidden_dim, stride=stride, dilation=dilation, groups=hidden_dim),
67 | # pw-linear
68 | nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
69 | nn.BatchNorm2d(oup),
70 | ])
71 | self.conv = nn.Sequential(*layers)
72 |
73 | self.input_padding = fixed_padding( 3, dilation )
74 |
75 | def forward(self, x):
76 | x_pad = F.pad(x, self.input_padding)
77 | if self.use_res_connect:
78 | return x + self.conv(x_pad)
79 | else:
80 | return self.conv(x_pad)
81 |
82 | class MobileNetV2(nn.Module):
83 | def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
84 | """
85 | MobileNet V2 main class
86 |
87 | Args:
88 | num_classes (int): Number of classes
89 | width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
90 | inverted_residual_setting: Network structure
91 | round_nearest (int): Round the number of channels in each layer to be a multiple of this number
92 | Set to 1 to turn off rounding
93 | """
94 | super(MobileNetV2, self).__init__()
95 | block = InvertedResidual
96 | input_channel = 32
97 | last_channel = 1280
98 | self.output_stride = output_stride
99 | current_stride = 1
100 | if inverted_residual_setting is None:
101 | inverted_residual_setting = [
102 | # t, c, n, s
103 | [1, 16, 1, 1],
104 | [6, 24, 2, 2],
105 | [6, 32, 3, 2],
106 | [6, 64, 4, 2],
107 | [6, 96, 3, 1],
108 | [6, 160, 3, 2],
109 | [6, 320, 1, 1],
110 | ]
111 |
112 | # only check the first element, assuming user knows t,c,n,s are required
113 | if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
114 |             raise ValueError("inverted_residual_setting should be a non-empty "
115 |                              "list of 4-element (t, c, n, s) entries, got {}".format(inverted_residual_setting))
116 |
117 | # building first layer
118 | input_channel = _make_divisible(input_channel * width_mult, round_nearest)
119 | self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
120 | features = [ConvBNReLU(3, input_channel, stride=2)]
121 | current_stride *= 2
122 | dilation=1
123 | previous_dilation = 1
124 |
125 | # building inverted residual blocks
126 | for t, c, n, s in inverted_residual_setting:
127 | output_channel = _make_divisible(c * width_mult, round_nearest)
128 | previous_dilation = dilation
129 | if current_stride == output_stride:
130 | stride = 1
131 | dilation *= s
132 | else:
133 | stride = s
134 | current_stride *= s
135 |             output_channel = int(c * width_mult)  # note: overrides the _make_divisible result computed above
136 |
137 | for i in range(n):
138 | if i==0:
139 | features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t))
140 | else:
141 | features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t))
142 | input_channel = output_channel
143 | # building last several layers
144 | features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
145 | # make it nn.Sequential
146 | self.features = nn.Sequential(*features)
147 |
148 | # building classifier
149 | self.classifier = nn.Sequential(
150 | nn.Dropout(0.2),
151 | nn.Linear(self.last_channel, num_classes),
152 | )
153 |
154 | # weight initialization
155 | for m in self.modules():
156 | if isinstance(m, nn.Conv2d):
157 | nn.init.kaiming_normal_(m.weight, mode='fan_out')
158 | if m.bias is not None:
159 | nn.init.zeros_(m.bias)
160 | elif isinstance(m, nn.BatchNorm2d):
161 | nn.init.ones_(m.weight)
162 | nn.init.zeros_(m.bias)
163 | elif isinstance(m, nn.Linear):
164 | nn.init.normal_(m.weight, 0, 0.01)
165 | nn.init.zeros_(m.bias)
166 |
167 | def forward(self, x):
168 | x = self.features(x)
169 | x = x.mean([2, 3])
170 | x = self.classifier(x)
171 | return x
172 |
173 |
174 | def mobilenet_v2(pretrained=False, progress=True, **kwargs):
175 | """
176 | Constructs a MobileNetV2 architecture from
177 |     `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
178 |
179 | Args:
180 | pretrained (bool): If True, returns a model pre-trained on ImageNet
181 | progress (bool): If True, displays a progress bar of the download to stderr
182 | """
183 | model = MobileNetV2(**kwargs)
184 | if pretrained:
185 | state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
186 | progress=progress)
187 | model.load_state_dict(state_dict)
188 | return model
189 |
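
A quick sketch exercising the constructor above (no weights downloaded; shapes assume a 224x224 input and are expectations, not guarantees):

import torch

model = mobilenet_v2(pretrained=False, output_stride=16).eval()
with torch.no_grad():
    feats = model.features(torch.randn(1, 3, 224, 224))
    logits = model(torch.randn(1, 3, 224, 224))
print(feats.shape, logits.shape)   # expected: (1, 1280, 14, 14) and (1, 1000)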
--------------------------------------------------------------------------------
/incremental/network/backbone/resnet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | #from torchvision.models.utils import load_state_dict_from_url
4 | from torch.hub import load_state_dict_from_url
5 |
6 | __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
7 | 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
8 | 'wide_resnet50_2', 'wide_resnet101_2']
9 |
10 |
11 | model_urls = {
12 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
13 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
14 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
15 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
16 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
17 | 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
18 | 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
19 | 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
20 | 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
21 | }
22 |
23 |
24 | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
25 | """3x3 convolution with padding"""
26 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
27 | padding=dilation, groups=groups, bias=False, dilation=dilation)
28 |
29 |
30 | def conv1x1(in_planes, out_planes, stride=1):
31 | """1x1 convolution"""
32 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
33 |
34 |
35 | class BasicBlock(nn.Module):
36 | expansion = 1
37 |
38 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
39 | base_width=64, dilation=1, norm_layer=None):
40 | super(BasicBlock, self).__init__()
41 | if norm_layer is None:
42 | norm_layer = nn.BatchNorm2d
43 | if groups != 1 or base_width != 64:
44 | raise ValueError('BasicBlock only supports groups=1 and base_width=64')
45 | if dilation > 1:
46 | raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
47 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1
48 | self.conv1 = conv3x3(inplanes, planes, stride)
49 | self.bn1 = norm_layer(planes)
50 | self.relu = nn.ReLU(inplace=True)
51 | self.conv2 = conv3x3(planes, planes)
52 | self.bn2 = norm_layer(planes)
53 | self.downsample = downsample
54 | self.stride = stride
55 |
56 | def forward(self, x):
57 | identity = x
58 |
59 | out = self.conv1(x)
60 | out = self.bn1(out)
61 | out = self.relu(out)
62 |
63 | out = self.conv2(out)
64 | out = self.bn2(out)
65 |
66 | if self.downsample is not None:
67 | identity = self.downsample(x)
68 |
69 | out += identity
70 | out = self.relu(out)
71 |
72 | return out
73 |
74 |
75 | class Bottleneck(nn.Module):
76 | expansion = 4
77 |
78 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
79 | base_width=64, dilation=1, norm_layer=None):
80 | super(Bottleneck, self).__init__()
81 | if norm_layer is None:
82 | norm_layer = nn.BatchNorm2d
83 | width = int(planes * (base_width / 64.)) * groups
84 | # Both self.conv2 and self.downsample layers downsample the input when stride != 1
85 | self.conv1 = conv1x1(inplanes, width)
86 | self.bn1 = norm_layer(width)
87 | self.conv2 = conv3x3(width, width, stride, groups, dilation)
88 | self.bn2 = norm_layer(width)
89 | self.conv3 = conv1x1(width, planes * self.expansion)
90 | self.bn3 = norm_layer(planes * self.expansion)
91 | self.relu = nn.ReLU(inplace=True)
92 | self.downsample = downsample
93 | self.stride = stride
94 |
95 | def forward(self, x):
96 | identity = x
97 |
98 | out = self.conv1(x)
99 | out = self.bn1(out)
100 | out = self.relu(out)
101 |
102 | out = self.conv2(out)
103 | out = self.bn2(out)
104 | out = self.relu(out)
105 |
106 | out = self.conv3(out)
107 | out = self.bn3(out)
108 |
109 | if self.downsample is not None:
110 | identity = self.downsample(x)
111 |
112 | out += identity
113 | out = self.relu(out)
114 |
115 | return out
116 |
117 |
118 | class ResNet(nn.Module):
119 |
120 | def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
121 | groups=1, width_per_group=64, replace_stride_with_dilation=None,
122 | norm_layer=None):
123 | super(ResNet, self).__init__()
124 | if norm_layer is None:
125 | norm_layer = nn.BatchNorm2d
126 | self._norm_layer = norm_layer
127 |
128 | self.inplanes = 64
129 | self.dilation = 1
130 | if replace_stride_with_dilation is None:
131 | # each element in the tuple indicates if we should replace
132 | # the 2x2 stride with a dilated convolution instead
133 | replace_stride_with_dilation = [False, False, False]
134 | if len(replace_stride_with_dilation) != 3:
135 | raise ValueError("replace_stride_with_dilation should be None "
136 | "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
137 | self.groups = groups
138 | self.base_width = width_per_group
139 | self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
140 | bias=False)
141 | self.bn1 = norm_layer(self.inplanes)
142 | self.relu = nn.ReLU(inplace=True)
143 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
144 | self.layer1 = self._make_layer(block, 64, layers[0])
145 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
146 | dilate=replace_stride_with_dilation[0])
147 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
148 | dilate=replace_stride_with_dilation[1])
149 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
150 | dilate=replace_stride_with_dilation[2])
151 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
152 | self.fc = nn.Linear(512 * block.expansion, num_classes)
153 |
154 | for m in self.modules():
155 | if isinstance(m, nn.Conv2d):
156 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
157 | elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
158 | nn.init.constant_(m.weight, 1)
159 | nn.init.constant_(m.bias, 0)
160 |
161 | # Zero-initialize the last BN in each residual branch,
162 | # so that the residual branch starts with zeros, and each residual block behaves like an identity.
163 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
164 | if zero_init_residual:
165 | for m in self.modules():
166 | if isinstance(m, Bottleneck):
167 | nn.init.constant_(m.bn3.weight, 0)
168 | elif isinstance(m, BasicBlock):
169 | nn.init.constant_(m.bn2.weight, 0)
170 |
171 | def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
172 | norm_layer = self._norm_layer
173 | downsample = None
174 | previous_dilation = self.dilation
175 | if dilate:
176 | self.dilation *= stride
177 | stride = 1
178 | if stride != 1 or self.inplanes != planes * block.expansion:
179 | downsample = nn.Sequential(
180 | conv1x1(self.inplanes, planes * block.expansion, stride),
181 | norm_layer(planes * block.expansion),
182 | )
183 |
184 | layers = []
185 | layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
186 | self.base_width, previous_dilation, norm_layer))
187 | self.inplanes = planes * block.expansion
188 | for _ in range(1, blocks):
189 | layers.append(block(self.inplanes, planes, groups=self.groups,
190 | base_width=self.base_width, dilation=self.dilation,
191 | norm_layer=norm_layer))
192 |
193 | return nn.Sequential(*layers)
194 |
195 | def forward(self, x):
196 | x = self.conv1(x)
197 | x = self.bn1(x)
198 | x = self.relu(x)
199 | x = self.maxpool(x)
200 |
201 | x = self.layer1(x)
202 | x = self.layer2(x)
203 | x = self.layer3(x)
204 | x = self.layer4(x)
205 |
206 | x = self.avgpool(x)
207 | x = torch.flatten(x, 1)
208 | x = self.fc(x)
209 |
210 | return x
211 |
212 |
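# Sketch: when _make_layer is called with dilate=True, the stride that would
# normally halve the feature map is folded into the dilation instead, so the
# spatial resolution is preserved while the receptive field keeps growing.
# Assuming the standard stem above (stride-2 conv1 plus stride-2 max pool),
# the overall output stride works out as in this hypothetical helper:
#
#     def output_stride(replace_stride_with_dilation):
#         os = 4                                       # stem: conv1 + maxpool
#         for dilate in replace_stride_with_dilation:  # layer2..layer4
#             if not dilate:
#                 os *= 2                              # layer keeps its stride-2 downsampling
#         return os
#
#     output_stride([False, False, True])   # -> 16 (DeepLab output_stride=16)
#     output_stride([False, True, True])    # -> 8  (DeepLab output_stride=8)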
213 | def _resnet(arch, block, layers, pretrained, progress, **kwargs):
214 | model = ResNet(block, layers, **kwargs)
215 | if pretrained:
216 | state_dict = load_state_dict_from_url(model_urls[arch],
217 | progress=progress)
218 | model.load_state_dict(state_dict)
219 | return model
220 |
221 |
222 | def resnet18(pretrained=False, progress=True, **kwargs):
223 | r"""ResNet-18 model from
224 | `"Deep Residual Learning for Image Recognition" `_
225 |
226 | Args:
227 | pretrained (bool): If True, returns a model pre-trained on ImageNet
228 | progress (bool): If True, displays a progress bar of the download to stderr
229 | """
230 | return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
231 | **kwargs)
232 |
233 |
234 | def resnet34(pretrained=False, progress=True, **kwargs):
235 | r"""ResNet-34 model from
236 | `"Deep Residual Learning for Image Recognition" `_
237 |
238 | Args:
239 | pretrained (bool): If True, returns a model pre-trained on ImageNet
240 | progress (bool): If True, displays a progress bar of the download to stderr
241 | """
242 | return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
243 | **kwargs)
244 |
245 |
246 | def resnet50(pretrained=False, progress=True, **kwargs):
247 | r"""ResNet-50 model from
248 | `"Deep Residual Learning for Image Recognition" `_
249 |
250 | Args:
251 | pretrained (bool): If True, returns a model pre-trained on ImageNet
252 | progress (bool): If True, displays a progress bar of the download to stderr
253 | """
254 | return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
255 | **kwargs)
256 |
257 |
258 | def resnet101(pretrained=False, progress=True, **kwargs):
259 | r"""ResNet-101 model from
260 | `"Deep Residual Learning for Image Recognition" `_
261 |
262 | Args:
263 | pretrained (bool): If True, returns a model pre-trained on ImageNet
264 | progress (bool): If True, displays a progress bar of the download to stderr
265 | """
266 | return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
267 | **kwargs)
268 |
269 |
270 | def resnet152(pretrained=False, progress=True, **kwargs):
271 | r"""ResNet-152 model from
272 | `"Deep Residual Learning for Image Recognition" `_
273 |
274 | Args:
275 | pretrained (bool): If True, returns a model pre-trained on ImageNet
276 | progress (bool): If True, displays a progress bar of the download to stderr
277 | """
278 | return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
279 | **kwargs)
280 |
281 |
282 | def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
283 | r"""ResNeXt-50 32x4d model from
284 | `"Aggregated Residual Transformation for Deep Neural Networks" `_
285 |
286 | Args:
287 | pretrained (bool): If True, returns a model pre-trained on ImageNet
288 | progress (bool): If True, displays a progress bar of the download to stderr
289 | """
290 | kwargs['groups'] = 32
291 | kwargs['width_per_group'] = 4
292 | return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
293 | pretrained, progress, **kwargs)
294 |
295 |
296 | def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
297 | r"""ResNeXt-101 32x8d model from
298 | `"Aggregated Residual Transformation for Deep Neural Networks" `_
299 |
300 | Args:
301 | pretrained (bool): If True, returns a model pre-trained on ImageNet
302 | progress (bool): If True, displays a progress bar of the download to stderr
303 | """
304 | kwargs['groups'] = 32
305 | kwargs['width_per_group'] = 8
306 | return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
307 | pretrained, progress, **kwargs)
308 |
309 |
310 | def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
311 | r"""Wide ResNet-50-2 model from
312 | `"Wide Residual Networks" `_
313 |
314 |     The model is the same as ResNet except that the number of bottleneck channels
315 |     is twice as large in every block. The number of channels in the outer 1x1
316 |     convolutions is unchanged, e.g. the last block in ResNet-50 has 2048-512-2048
317 |     channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
318 |
319 | Args:
320 | pretrained (bool): If True, returns a model pre-trained on ImageNet
321 | progress (bool): If True, displays a progress bar of the download to stderr
322 | """
323 | kwargs['width_per_group'] = 64 * 2
324 | return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
325 | pretrained, progress, **kwargs)
326 |
327 |
328 | def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
329 | r"""Wide ResNet-101-2 model from
330 | `"Wide Residual Networks" `_
331 |
332 |     The model is the same as ResNet except that the number of bottleneck channels
333 |     is twice as large in every block. The number of channels in the outer 1x1
334 |     convolutions is unchanged, e.g. the last block in ResNet-50 has 2048-512-2048
335 |     channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
336 |
337 | Args:
338 | pretrained (bool): If True, returns a model pre-trained on ImageNet
339 | progress (bool): If True, displays a progress bar of the download to stderr
340 | """
341 | kwargs['width_per_group'] = 64 * 2
342 | return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
343 | pretrained, progress, **kwargs)
344 |
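# Minimal usage sketch (assumes torch is installed; shapes are illustrative):
#
#     import torch
#     model = resnet18(pretrained=False, num_classes=10)
#     logits = model(torch.randn(2, 3, 224, 224))   # -> shape (2, 10)
#
# modeling.py below instead builds dilated variants for segmentation, e.g.
# resnet101(replace_stride_with_dilation=[False, False, True]).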
--------------------------------------------------------------------------------
/incremental/network/modeling.py:
--------------------------------------------------------------------------------
1 |
2 | from .utils import IntermediateLayerGetter, DeepLabHeadV3Plus, DeepLabHead, DeepLabHeadV3Plus_Metric
3 | from ._deeplab import DeepLabV3, DeepLabV3_embedding, DeepLabV3_embedding_self_distillation, DeepLabV3_metric
4 | from .backbone import resnet
5 | from .backbone import mobilenetv2
6 |
7 | def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
8 |
9 | if output_stride==8:
10 | replace_stride_with_dilation=[False, True, True]
11 | aspp_dilate = [12, 24, 36]
12 | else:
13 | replace_stride_with_dilation=[False, False, True]
14 | aspp_dilate = [6, 12, 18]
15 |
16 | backbone = resnet.__dict__[backbone_name](
17 | pretrained=pretrained_backbone,
18 | replace_stride_with_dilation=replace_stride_with_dilation)
19 |
20 | inplanes = 2048
21 | low_level_planes = 256
22 |
23 | if name=='deeplabv3plus':
24 | return_layers = {'layer4': 'out', 'layer1': 'low_level'}
25 | classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
26 | elif name=='deeplabv3':
27 | return_layers = {'layer4': 'out'}
28 | classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
29 | elif name=='deeplabv3plus_embedding':
30 | return_layers = {'layer4': 'out', 'layer1': 'low_level'}
31 | classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
32 | elif name=='deeplabv3plus_embedding_self_distillation':
33 | return_layers = {'layer4': 'out', 'layer1': 'low_level'}
34 | elif name=='deeplabv3plus_metirc_resnet101':  # note: "metirc" spelling is used consistently across the codebase
35 | return_layers = {'layer4': 'out', 'layer1': 'low_level'}
36 | classifier = DeepLabHeadV3Plus_Metric(inplanes, low_level_planes, num_classes, aspp_dilate, finetune)
37 |
38 | backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
39 |
40 |
41 | if name=='deeplabv3plus_embedding':
42 | model = DeepLabV3_embedding(backbone, classifier)
43 | elif name== 'deeplabv3plus_embedding_self_distillation':
44 | model = DeepLabV3_embedding_self_distillation(backbone)
45 | elif name== 'deeplabv3plus_metirc_resnet101':
46 | model = DeepLabV3_metric(backbone, classifier, finetune)
47 | else:
48 | model = DeepLabV3(backbone, classifier)
49 | return model
50 |
51 | def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
52 | if output_stride==8:
53 | aspp_dilate = [12, 24, 36]
54 | else:
55 | aspp_dilate = [6, 12, 18]
56 |
57 | backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
58 |
59 | # rename layers
60 | backbone.low_level_features = backbone.features[0:4]
61 | backbone.high_level_features = backbone.features[4:-1]
62 | backbone.features = None
63 | backbone.classifier = None
64 |
65 | inplanes = 320
66 | low_level_planes = 24
67 |
68 | if name=='deeplabv3plus':
69 | return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
70 | classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
71 | elif name=='deeplabv3':
72 | return_layers = {'high_level_features': 'out'}
73 | classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
74 | backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
75 |
76 | model = DeepLabV3(backbone, classifier)
77 | return model
78 |
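# Sketch of what the wrapped backbone returns after the renaming above
# (shapes are approximate and assume output_stride=16 with a 512x512 input):
#
#     feats = backbone(torch.randn(1, 3, 512, 512))
#     feats['low_level'].shape   # ~ (1, 24, 128, 128), from features[0:4]
#     feats['out'].shape         # ~ (1, 320, 32, 32),  from features[4:-1]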
79 | def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
80 |
81 | if backbone=='mobilenetv2':
82 | model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
83 | elif backbone.startswith('resnet'):
84 | model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune)
85 | else:
86 | raise NotImplementedError
87 | return model
88 |
89 |
90 | # Deeplab v3
91 |
92 | def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
93 | """Constructs a DeepLabV3 model with a ResNet-50 backbone.
94 |
95 | Args:
96 | num_classes (int): number of classes.
97 | output_stride (int): output stride for deeplab.
98 | pretrained_backbone (bool): If True, use the pretrained backbone.
99 | """
100 | return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
101 |
102 | def deeplabv3_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
103 | """Constructs a DeepLabV3 model with a ResNet-101 backbone.
104 |
105 | Args:
106 | num_classes (int): number of classes.
107 | output_stride (int): output stride for deeplab.
108 | pretrained_backbone (bool): If True, use the pretrained backbone.
109 | """
110 | return _load_model('deeplabv3', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
111 |
112 | def deeplabv3_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs):
113 | """Constructs a DeepLabV3 model with a MobileNetv2 backbone.
114 |
115 | Args:
116 | num_classes (int): number of classes.
117 | output_stride (int): output stride for deeplab.
118 | pretrained_backbone (bool): If True, use the pretrained backbone.
119 | """
120 | return _load_model('deeplabv3', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
121 |
122 |
123 | # Deeplab v3+
124 |
125 | def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
126 | """Constructs a DeepLabV3 model with a ResNet-50 backbone.
127 |
128 | Args:
129 | num_classes (int): number of classes.
130 | output_stride (int): output stride for deeplab.
131 | pretrained_backbone (bool): If True, use the pretrained backbone.
132 | """
133 | return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
134 |
135 |
136 | def deeplabv3plus_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
137 | """Constructs a DeepLabV3+ model with a ResNet-101 backbone.
138 |
139 | Args:
140 | num_classes (int): number of classes.
141 | output_stride (int): output stride for deeplab.
142 | pretrained_backbone (bool): If True, use the pretrained backbone.
143 | """
144 | return _load_model('deeplabv3plus', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
145 |
146 | def deeplabv3plus_embedding_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
147 | """Constructs a DeepLabV3+ model with a ResNet-101 backbone.
148 |
149 | Args:
150 | num_classes (int): number of classes.
151 | output_stride (int): output stride for deeplab.
152 | pretrained_backbone (bool): If True, use the pretrained backbone.
153 | """
154 | return _load_model('deeplabv3plus_embedding', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
155 |
156 | def deeplabv3plus_embedding_self_distillation_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
157 | """Constructs a DeepLabV3+ model with a ResNet-101 backbone.
158 |
159 | Args:
160 | num_classes (int): number of classes.
161 | output_stride (int): output stride for deeplab.
162 | pretrained_backbone (bool): If True, use the pretrained backbone.
163 | """
164 | return _load_model('deeplabv3plus_embedding_self_distillation', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
165 |
166 |
167 |
168 | def deeplabv3plus_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True):
169 | """Constructs a DeepLabV3+ model with a MobileNetv2 backbone.
170 |
171 | Args:
172 | num_classes (int): number of classes.
173 | output_stride (int): output stride for deeplab.
174 | pretrained_backbone (bool): If True, use the pretrained backbone.
175 | """
176 | return _load_model('deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
177 |
178 | def deeplabv3plus_metirc_resnet101(num_classes=21, metric_dim=64, output_stride=8, pretrained_backbone=True, finetune=False):
179 | """Constructs a DeepLabV3+ model with a ResNet-101 backbone and a metric-learning head.
180 |
181 | Args:
182 | num_classes (int): number of classes.
183 | metric_dim (int): dimension of the metric embedding (accepted but not consumed by _segm_resnet above).
184 | output_stride (int): output stride for deeplab.
185 | pretrained_backbone (bool): If True, use the pretrained backbone.
186 | finetune (bool): If True, build the head in fine-tuning mode.
187 | """
188 | return _load_model('deeplabv3plus_metirc_resnet101', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune)
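# Minimal usage sketch (assumes torch is installed; 19 classes as in Cityscapes):
#
#     import torch
#     model = deeplabv3plus_resnet101(num_classes=19, output_stride=16,
#                                     pretrained_backbone=False)
#     out = model(torch.randn(1, 3, 512, 1024))
#     # DeepLabV3-style wrappers typically upsample the logits back to the
#     # input resolution, i.e. out.shape ~ (1, 19, 512, 1024).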
--------------------------------------------------------------------------------
/incremental/novel/10/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/10/.ipynb_checkpoints/novel-checkpoint.txt
--------------------------------------------------------------------------------
/incremental/novel/10/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/10/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/10/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000158_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000158_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000255_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000255_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000001_043748_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000001_043748_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000195_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000195_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000047_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000047_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/11/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000135_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000135_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000186_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000186_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_055414_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_055414_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000027_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000027_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000028_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000028_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/11/download.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/11/download.png
--------------------------------------------------------------------------------
/incremental/novel/11/download1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/11/download1.png
--------------------------------------------------------------------------------
/incremental/novel/11/download2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/11/download2.png
--------------------------------------------------------------------------------
/incremental/novel/11/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/11/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/11/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000135_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000135_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000186_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000186_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_055414_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_055414_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000027_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000027_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000028_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000028_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/12/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/12/.ipynb_checkpoints/novel-checkpoint.txt
--------------------------------------------------------------------------------
/incremental/novel/12/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/12/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/12/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000134_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000134_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000019_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000019_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000056_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000056_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000012_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000012_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000069_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000069_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/13/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000225_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000225_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000090_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000090_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000090_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000090_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000025_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000025_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000206_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/13/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/13/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/13/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000225_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000225_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000090_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000090_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000090_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000090_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000025_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000025_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000206_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/14/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000048_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000048_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000001_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000001_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/erfurt/erfurt_000057_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/erfurt/erfurt_000057_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000066_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000066_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/hanover/hanover_000000_020089_leftImg8bit.png ../data/cityscapes/gtFine/train/hanover/hanover_000000_020089_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/14/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/14/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/14/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000048_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000048_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000001_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000001_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/erfurt/erfurt_000057_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/erfurt/erfurt_000057_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000066_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000066_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/hanover/hanover_000000_020089_leftImg8bit.png ../data/cityscapes/gtFine/train/hanover/hanover_000000_020089_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/15/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_071150_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_071150_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074139_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_074139_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_045437_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_045437_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074545_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_074545_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000011_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000011_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/15/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/15/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/15/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_071150_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_071150_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074139_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_074139_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_045437_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_045437_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074545_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_074545_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000011_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000011_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/16/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/16/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/16/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000015_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000015_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000044_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000044_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000059_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000059_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000041_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000041_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/erfurt/erfurt_000066_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/erfurt/erfurt_000066_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/17/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/17/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/17/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000182_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000182_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000000_029915_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000000_029915_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000059_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000059_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000001_052430_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000001_052430_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000010_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000010_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/18/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_070444_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_070444_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000220_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000220_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png ../data/cityscapes/gtFine/train/hanover/hanover_000000_046200_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000000_034387_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000000_034387_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000050_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000050_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel/18/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel/18/novel.pth
--------------------------------------------------------------------------------
/incremental/novel/18/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_070444_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_070444_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000220_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000220_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png ../data/cityscapes/gtFine/train/hanover/hanover_000000_046200_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000000_034387_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000000_034387_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000050_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000050_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/10/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/10/.ipynb_checkpoints/novel-checkpoint.txt
--------------------------------------------------------------------------------
/incremental/novel_1/10/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/10/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/10/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000158_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000158_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000255_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000255_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000001_043748_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000001_043748_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000195_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000195_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000047_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000047_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/11/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000135_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000135_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000186_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000186_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_055414_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_055414_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000027_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000027_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000028_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000028_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/11/download.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/11/download.png
--------------------------------------------------------------------------------
/incremental/novel_1/11/download1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/11/download1.png
--------------------------------------------------------------------------------
/incremental/novel_1/11/download2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/11/download2.png
--------------------------------------------------------------------------------
/incremental/novel_1/11/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/11/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/11/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000135_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000135_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000186_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000186_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_055414_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_055414_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000027_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000027_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/darmstadt/darmstadt_000028_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/darmstadt/darmstadt_000028_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/12/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/12/.ipynb_checkpoints/novel-checkpoint.txt
--------------------------------------------------------------------------------
/incremental/novel_1/12/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/12/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/12/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000134_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000134_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000019_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000019_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000056_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000056_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000012_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000012_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000069_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000069_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/13/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000225_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000225_000019_gtFine_labelIds.png
--------------------------------------------------------------------------------
/incremental/novel_1/13/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/13/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/13/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000225_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000225_000019_gtFine_labelIds.png
--------------------------------------------------------------------------------
/incremental/novel_1/14/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000001_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000001_000019_gtFine_labelIds.png
--------------------------------------------------------------------------------
/incremental/novel_1/14/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/14/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/14/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000001_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000001_000019_gtFine_labelIds.png
--------------------------------------------------------------------------------
/incremental/novel_1/15/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_071150_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_071150_gtFine_labelIds.png
--------------------------------------------------------------------------------
/incremental/novel_1/15/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/15/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/15/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_071150_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_071150_gtFine_labelIds.png
--------------------------------------------------------------------------------
/incremental/novel_1/16/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/16/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/16/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000015_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000015_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000044_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000044_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000059_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000059_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000041_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000041_000019_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/erfurt/erfurt_000066_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/erfurt/erfurt_000066_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/17/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/17/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/17/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000182_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/stuttgart/stuttgart_000182_000019_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000000_029915_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000000_029915_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/zurich/zurich_000059_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/zurich/zurich_000059_000019_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000001_052430_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000001_052430_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/ulm/ulm_000010_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/ulm/ulm_000010_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/18/.ipynb_checkpoints/novel-checkpoint.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_070444_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_070444_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000220_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000220_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png ../data/cityscapes/gtFine/train/hanover/hanover_000000_046200_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000000_034387_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000000_034387_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000050_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000050_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/novel_1/18/novel.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/novel_1/18/novel.pth
--------------------------------------------------------------------------------
/incremental/novel_1/18/novel.txt:
--------------------------------------------------------------------------------
1 | ../data/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_070444_leftImg8bit.png ../data/cityscapes/gtFine/train/hamburg/hamburg_000000_070444_gtFine_labelIds.png
2 | ../data/cityscapes/leftImg8bit/train/bremen/bremen_000220_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/bremen/bremen_000220_000019_gtFine_labelIds.png
3 | ../data/cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png ../data/cityscapes/gtFine/train/hanover/hanover_000000_046200_gtFine_labelIds.png
4 | ../data/cityscapes/leftImg8bit/train/strasbourg/strasbourg_000000_034387_leftImg8bit.png ../data/cityscapes/gtFine/train/strasbourg/strasbourg_000000_034387_gtFine_labelIds.png
5 | ../data/cityscapes/leftImg8bit/train/cologne/cologne_000050_000019_leftImg8bit.png ../data/cityscapes/gtFine/train/cologne/cologne_000050_000019_gtFine_labelIds.png
6 |
--------------------------------------------------------------------------------
/incremental/readme.txt:
--------------------------------------------------------------------------------
1 | three sub-stages (5-shot 16+3 setting):
2 |
3 | sub-stage 1: train the close-set module
4 | python -u main.py --output_dir ./output_stage1_16 --gpu_id 0,1
5 |
6 | sub-stage 2: train the meta-channel module
7 | python -u main.py --finetune --ckpt ./output_stage1_16/final.pth --output_dir ./output_stage2_16/ --total_itrs 10000 --gpu_id 0,1
8 |
9 | sub-stage 3: train the region-aware metric-learning module
10 | python -u main_metric.py --ckpt ./output_stage2_16/final.pth --output_dir ./output_stage3_16/ --novel_dir ./novel/
11 |
12 | inference:
13 |
14 | 16+3 5shots:
15 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_3 --novel_dir ./novel
16 |
17 | 16+3 1shot (./novel_1 holds the 1-shot support lists):
18 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_3 --novel_dir ./novel_1
19 |
20 | 16+1 5shots:
21 | python main_metric.py --ckpt ./output_stage3_16/final.pth --test_only --test_mode 16_1 --novel_dir ./novel
22 |
23 | 12+7 5shots:
24 | python main_metric.py --ckpt ./output_stage3_12/final.pth --test_only --test_mode 12 --novel_dir ./novel
25 |
26 |
--------------------------------------------------------------------------------
/incremental/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import *
2 | # from .visualizer import Visualizer
3 | from .scheduler import PolyLR
4 | from .loss import FocalLoss, CrossEntropyLoss, CrossEntropyLoss_dis, CenterLoss
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/ext_transforms.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/ext_transforms.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/ext_transforms.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/ext_transforms.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/ext_transforms.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/ext_transforms.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/loss.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/loss.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/loss.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/loss.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/loss.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/scheduler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/scheduler.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/scheduler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/scheduler.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/scheduler.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/scheduler.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/utils.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/visualizer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/visualizer.cpython-36.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/visualizer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/visualizer.cpython-37.pyc
--------------------------------------------------------------------------------
/incremental/utils/__pycache__/visualizer.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/czifan/RAML/670be907b5266cb11fa0137e49d302f6c568339a/incremental/utils/__pycache__/visualizer.cpython-38.pyc
--------------------------------------------------------------------------------
/incremental/utils/ext_transforms.py:
--------------------------------------------------------------------------------
1 | import torchvision
2 | import torch
3 | import torchvision.transforms.functional as F
4 | import random
5 | import numbers
6 | import collections.abc
7 | import numpy as np
8 | from PIL import Image
9 |
10 | # Extended Transforms for Semantic Segmentation
11 | # Readable names for PIL interpolation modes (used by the __repr__ methods below).
12 | _pil_interpolation_to_str = {Image.NEAREST: 'PIL.Image.NEAREST', Image.BILINEAR: 'PIL.Image.BILINEAR', Image.BICUBIC: 'PIL.Image.BICUBIC', Image.LANCZOS: 'PIL.Image.LANCZOS'}
13 | class ExtRandomHorizontalFlip(object):
14 | """Horizontally flip the given PIL Image randomly with a given probability.
15 |
16 | Args:
17 | p (float): probability of the image being flipped. Default value is 0.5
18 | """
19 |
20 | def __init__(self, p=0.5):
21 | self.p = p
22 |
23 | def __call__(self, img, lbl):
24 | """
25 | Args:
26 | img (PIL Image): Image to be flipped.
27 |
28 | Returns:
29 | PIL Image: Randomly flipped image.
30 | """
31 | if random.random() < self.p:
32 | return F.hflip(img), F.hflip(lbl)
33 | return img, lbl
34 |
35 | def __repr__(self):
36 | return self.__class__.__name__ + '(p={})'.format(self.p)
37 |
38 |
39 |
40 | class ExtCompose(object):
41 | """Composes several transforms together.
42 | Args:
43 | transforms (list of ``Transform`` objects): list of transforms to compose.
44 | Example:
45 | >>> transforms.Compose([
46 | >>> transforms.CenterCrop(10),
47 | >>> transforms.ToTensor(),
48 | >>> ])
49 | """
50 |
51 | def __init__(self, transforms):
52 | self.transforms = transforms
53 |
54 | def __call__(self, img, lbl):
55 | for t in self.transforms:
56 | img, lbl = t(img, lbl)
57 | return img, lbl
58 |
59 | def __repr__(self):
60 | format_string = self.__class__.__name__ + '('
61 | for t in self.transforms:
62 | format_string += '\n'
63 | format_string += ' {0}'.format(t)
64 | format_string += '\n)'
65 | return format_string
66 |
67 |
68 | class ExtCenterCrop(object):
69 | """Crops the given PIL Image at the center.
70 | Args:
71 | size (sequence or int): Desired output size of the crop. If size is an
72 | int instead of sequence like (h, w), a square crop (size, size) is
73 | made.
74 | """
75 |
76 | def __init__(self, size):
77 | if isinstance(size, numbers.Number):
78 | self.size = (int(size), int(size))
79 | else:
80 | self.size = size
81 |
82 | def __call__(self, img, lbl):
83 | """
84 | Args:
85 | img (PIL Image): Image to be cropped.
86 | Returns:
87 | PIL Image: Cropped image.
88 | """
89 | return F.center_crop(img, self.size), F.center_crop(lbl, self.size)
90 |
91 | def __repr__(self):
92 | return self.__class__.__name__ + '(size={0})'.format(self.size)
93 |
94 |
95 | class ExtRandomScale(object):
96 | def __init__(self, scale_range, interpolation=Image.BILINEAR):
97 | self.scale_range = scale_range
98 | self.interpolation = interpolation
99 |
100 | def __call__(self, img, lbl):
101 | """
102 | Args:
103 | img (PIL Image): Image to be scaled.
104 | lbl (PIL Image): Label to be scaled.
105 | Returns:
106 | PIL Image: Rescaled image.
107 | PIL Image: Rescaled label.
108 | """
109 | assert img.size == lbl.size
110 | scale = random.uniform(self.scale_range[0], self.scale_range[1])
111 | target_size = ( int(img.size[1]*scale), int(img.size[0]*scale) )
112 | return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
113 |
114 | def __repr__(self):
115 | interpolate_str = _pil_interpolation_to_str[self.interpolation]
116 | return self.__class__.__name__ + '(scale_range={0}, interpolation={1})'.format(self.scale_range, interpolate_str)
117 |
118 | class ExtScale(object):
119 | """Resize the input PIL Image to the given scale.
120 | Args:
121 | Scale (sequence or int): scale factors
122 | interpolation (int, optional): Desired interpolation. Default is
123 | ``PIL.Image.BILINEAR``
124 | """
125 |
126 | def __init__(self, scale, interpolation=Image.BILINEAR):
127 | self.scale = scale
128 | self.interpolation = interpolation
129 |
130 | def __call__(self, img, lbl):
131 | """
132 | Args:
133 | img (PIL Image): Image to be scaled.
134 | lbl (PIL Image): Label to be scaled.
135 | Returns:
136 | PIL Image: Rescaled image.
137 | PIL Image: Rescaled label.
138 | """
139 | assert img.size == lbl.size
140 | target_size = ( int(img.size[1]*self.scale), int(img.size[0]*self.scale) ) # (H, W)
141 | return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
142 |
143 | def __repr__(self):
144 | interpolate_str = _pil_interpolation_to_str[self.interpolation]
145 | return self.__class__.__name__ + '(scale={0}, interpolation={1})'.format(self.scale, interpolate_str)
146 |
147 |
148 | class ExtRandomRotation(object):
149 | """Rotate the image by angle.
150 | Args:
151 | degrees (sequence or float or int): Range of degrees to select from.
152 | If degrees is a number instead of sequence like (min, max), the range of degrees
153 | will be (-degrees, +degrees).
154 | resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
155 | An optional resampling filter.
156 | See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
157 | If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
158 | expand (bool, optional): Optional expansion flag.
159 | If true, expands the output to make it large enough to hold the entire rotated image.
160 | If false or omitted, make the output image the same size as the input image.
161 | Note that the expand flag assumes rotation around the center and no translation.
162 | center (2-tuple, optional): Optional center of rotation.
163 | Origin is the upper left corner.
164 | Default is the center of the image.
165 | """
166 |
167 | def __init__(self, degrees, resample=False, expand=False, center=None):
168 | if isinstance(degrees, numbers.Number):
169 | if degrees < 0:
170 | raise ValueError("If degrees is a single number, it must be positive.")
171 | self.degrees = (-degrees, degrees)
172 | else:
173 | if len(degrees) != 2:
174 | raise ValueError("If degrees is a sequence, it must be of len 2.")
175 | self.degrees = degrees
176 |
177 | self.resample = resample
178 | self.expand = expand
179 | self.center = center
180 |
181 | @staticmethod
182 | def get_params(degrees):
183 | """Get parameters for ``rotate`` for a random rotation.
184 | Returns:
185 | sequence: params to be passed to ``rotate`` for random rotation.
186 | """
187 | angle = random.uniform(degrees[0], degrees[1])
188 |
189 | return angle
190 |
191 | def __call__(self, img, lbl):
192 | """
193 | img (PIL Image): Image to be rotated.
194 | lbl (PIL Image): Label to be rotated.
195 | Returns:
196 | PIL Image: Rotated image.
197 | PIL Image: Rotated label.
198 | """
199 |
200 | angle = self.get_params(self.degrees)
201 |
202 | return F.rotate(img, angle, self.resample, self.expand, self.center), F.rotate(lbl, angle, self.resample, self.expand, self.center)
203 |
204 | def __repr__(self):
205 | format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
206 | format_string += ', resample={0}'.format(self.resample)
207 | format_string += ', expand={0}'.format(self.expand)
208 | if self.center is not None:
209 | format_string += ', center={0}'.format(self.center)
210 | format_string += ')'
211 | return format_string
212 |
213 | class ExtRandomHorizontalFlip(object):
214 | """Horizontally flip the given PIL Image randomly with a given probability.
215 | Args:
216 | p (float): probability of the image being flipped. Default value is 0.5
217 | """
218 |
219 | def __init__(self, p=0.5):
220 | self.p = p
221 |
222 | def __call__(self, img, lbl):
223 | """
224 | Args:
225 | img (PIL Image): Image to be flipped.
226 | Returns:
227 | PIL Image: Randomly flipped image.
228 | """
229 | if random.random() < self.p:
230 | return F.hflip(img), F.hflip(lbl)
231 | return img, lbl
232 |
233 | def __repr__(self):
234 | return self.__class__.__name__ + '(p={})'.format(self.p)
235 |
236 |
237 | class ExtRandomVerticalFlip(object):
238 | """Vertically flip the given PIL Image randomly with a given probability.
239 | Args:
240 | p (float): probability of the image being flipped. Default value is 0.5
241 | """
242 |
243 | def __init__(self, p=0.5):
244 | self.p = p
245 |
246 | def __call__(self, img, lbl):
247 | """
248 | Args:
249 | img (PIL Image): Image to be flipped.
250 | lbl (PIL Image): Label to be flipped.
251 | Returns:
252 | PIL Image: Randomly flipped image.
253 | PIL Image: Randomly flipped label.
254 | """
255 | if random.random() < self.p:
256 | return F.vflip(img), F.vflip(lbl)
257 | return img, lbl
258 |
259 | def __repr__(self):
260 | return self.__class__.__name__ + '(p={})'.format(self.p)
261 |
262 | class ExtPad(object):
263 | def __init__(self, diviser=32):
264 | self.diviser = diviser
265 |
266 | def __call__(self, img, lbl):
267 | w, h = img.size  # PIL size is (width, height)
268 | ph = (h//self.diviser+1)*self.diviser - h if h%self.diviser!=0 else 0
269 | pw = (w//self.diviser+1)*self.diviser - w if w%self.diviser!=0 else 0
270 | im = F.pad(img, ( pw//2, ph//2, pw-pw//2, ph-ph//2 ))  # (left, top, right, bottom)
271 | lbl = F.pad(lbl, ( pw//2, ph//2, pw-pw//2, ph-ph//2 ))
272 | return im, lbl
273 |
274 | class ExtToTensor(object):
275 | """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
276 | Converts a PIL Image or numpy.ndarray (H x W x C) in the range
277 | [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
278 | """
279 | def __init__(self, normalize=True, target_type='uint8'):
280 | self.normalize = normalize
281 | self.target_type = target_type
282 | def __call__(self, pic, lbl):
283 | """
284 | Note that labels will not be normalized to [0, 1].
285 | Args:
286 | pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
287 | lbl (PIL Image or numpy.ndarray): Label to be converted to tensor.
288 | Returns:
289 | Tensor: Converted image and label
290 | """
291 | if self.normalize:
292 | return F.to_tensor(pic), torch.from_numpy( np.array( lbl, dtype=self.target_type) )
293 | else:
294 | return torch.from_numpy( np.array( pic, dtype=np.float32).transpose(2, 0, 1) ), torch.from_numpy( np.array( lbl, dtype=self.target_type) )
295 |
296 | def __repr__(self):
297 | return self.__class__.__name__ + '()'
298 |
299 | class ExtNormalize(object):
300 | """Normalize a tensor image with mean and standard deviation.
301 | Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
302 | will normalize each channel of the input ``torch.*Tensor`` i.e.
303 | ``input[channel] = (input[channel] - mean[channel]) / std[channel]``
304 | Args:
305 | mean (sequence): Sequence of means for each channel.
306 | std (sequence): Sequence of standard deviations for each channel.
307 | """
308 |
309 | def __init__(self, mean, std):
310 | self.mean = mean
311 | self.std = std
312 |
313 | def __call__(self, tensor, lbl):
314 | """
315 | Args:
316 | tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
317 | tensor (Tensor): Tensor of label. A dummy input for ExtCompose
318 | Returns:
319 | Tensor: Normalized Tensor image.
320 | Tensor: Unchanged Tensor label
321 | """
322 | return F.normalize(tensor, self.mean, self.std), lbl
323 |
324 | def __repr__(self):
325 | return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
326 |
327 |
328 | class ExtRandomCrop(object):
329 | """Crop the given PIL Image at a random location.
330 | Args:
331 | size (sequence or int): Desired output size of the crop. If size is an
332 | int instead of sequence like (h, w), a square crop (size, size) is
333 | made.
334 | padding (int or sequence, optional): Optional padding on each border
335 | of the image. Default is 0, i.e no padding. If a sequence of length
336 | 4 is provided, it is used to pad left, top, right, bottom borders
337 | respectively.
338 | pad_if_needed (boolean): It will pad the image if smaller than the
339 | desired size to avoid raising an exception.
340 | """
341 |
342 | def __init__(self, size, padding=0, pad_if_needed=False):
343 | if isinstance(size, numbers.Number):
344 | self.size = (int(size), int(size))
345 | else:
346 | self.size = size
347 | self.padding = padding
348 | self.pad_if_needed = pad_if_needed
349 |
350 | @staticmethod
351 | def get_params(img, output_size):
352 | """Get parameters for ``crop`` for a random crop.
353 | Args:
354 | img (PIL Image): Image to be cropped.
355 | output_size (tuple): Expected output size of the crop.
356 | Returns:
357 | tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
358 | """
359 | w, h = img.size
360 | th, tw = output_size
361 | if w == tw and h == th:
362 | return 0, 0, h, w
363 |
364 | i = random.randint(0, h - th)
365 | j = random.randint(0, w - tw)
366 | return i, j, th, tw
367 |
368 | def __call__(self, img, lbl):
369 | """
370 | Args:
371 | img (PIL Image): Image to be cropped.
372 | lbl (PIL Image): Label to be cropped.
373 | Returns:
374 | PIL Image: Cropped image.
375 | PIL Image: Cropped label.
376 | """
377 | assert img.size == lbl.size, 'size of img and lbl should be the same. %s, %s'%(img.size, lbl.size)
378 | if self.padding > 0:
379 | img = F.pad(img, self.padding)
380 | lbl = F.pad(lbl, self.padding)
381 |
382 | # pad the width if needed
383 | if self.pad_if_needed and img.size[0] < self.size[1]:
384 | img = F.pad(img, padding=int((1 + self.size[1] - img.size[0]) / 2))
385 | lbl = F.pad(lbl, padding=int((1 + self.size[1] - lbl.size[0]) / 2))
386 |
387 | # pad the height if needed
388 | if self.pad_if_needed and img.size[1] < self.size[0]:
389 | img = F.pad(img, padding=int((1 + self.size[0] - img.size[1]) / 2))
390 | lbl = F.pad(lbl, padding=int((1 + self.size[0] - lbl.size[1]) / 2))
391 |
392 | i, j, h, w = self.get_params(img, self.size)
393 |
394 | return F.crop(img, i, j, h, w), F.crop(lbl, i, j, h, w)
395 |
396 | def __repr__(self):
397 | return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
398 |
399 |
400 | class ExtResize(object):
401 | """Resize the input PIL Image to the given size.
402 | Args:
403 | size (sequence or int): Desired output size. If size is a sequence like
404 | (h, w), output size will be matched to this. If size is an int,
405 | smaller edge of the image will be matched to this number.
406 | i.e, if height > width, then image will be rescaled to
407 | (size * height / width, size)
408 | interpolation (int, optional): Desired interpolation. Default is
409 | ``PIL.Image.BILINEAR``
410 | """
411 |
412 | def __init__(self, size, interpolation=Image.BILINEAR):
413 | assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)
414 | self.size = size
415 | self.interpolation = interpolation
416 |
417 | def __call__(self, img, lbl):
418 | """
419 | Args:
420 | img (PIL Image): Image to be scaled.
421 | Returns:
422 | PIL Image: Rescaled image.
423 | """
424 | return F.resize(img, self.size, self.interpolation), F.resize(lbl, self.size, Image.NEAREST)
425 |
426 | def __repr__(self):
427 | interpolate_str = _pil_interpolation_to_str[self.interpolation]
428 | return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
429 |
430 | class ExtColorJitter(object):
431 | """Randomly change the brightness, contrast and saturation of an image.
432 |
433 | Args:
434 | brightness (float or tuple of float (min, max)): How much to jitter brightness.
435 | brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
436 | or the given [min, max]. Should be non negative numbers.
437 | contrast (float or tuple of float (min, max)): How much to jitter contrast.
438 | contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
439 | or the given [min, max]. Should be non negative numbers.
440 | saturation (float or tuple of float (min, max)): How much to jitter saturation.
441 | saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
442 | or the given [min, max]. Should be non negative numbers.
443 | hue (float or tuple of float (min, max)): How much to jitter hue.
444 | hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
445 | Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
446 | """
447 | def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
448 | self.brightness = self._check_input(brightness, 'brightness')
449 | self.contrast = self._check_input(contrast, 'contrast')
450 | self.saturation = self._check_input(saturation, 'saturation')
451 | self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
452 | clip_first_on_zero=False)
453 |
454 | def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
455 | if isinstance(value, numbers.Number):
456 | if value < 0:
457 | raise ValueError("If {} is a single number, it must be non negative.".format(name))
458 | value = [center - value, center + value]
459 | if clip_first_on_zero:
460 | value[0] = max(value[0], 0)
461 | elif isinstance(value, (tuple, list)) and len(value) == 2:
462 | if not bound[0] <= value[0] <= value[1] <= bound[1]:
463 | raise ValueError("{} values should be between {}".format(name, bound))
464 | else:
465 | raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))
466 |
467 | # if value is 0 or (1., 1.) for brightness/contrast/saturation
468 | # or (0., 0.) for hue, do nothing
469 | if value[0] == value[1] == center:
470 | value = None
471 | return value
472 |
473 | @staticmethod
474 | def get_params(brightness, contrast, saturation, hue):
475 | """Get a randomized transform to be applied on image.
476 |
477 | Arguments are same as that of __init__.
478 |
479 | Returns:
480 | Transform which randomly adjusts brightness, contrast and
481 | saturation in a random order.
482 | """
483 | transforms = []
484 |
485 | if brightness is not None:
486 | brightness_factor = random.uniform(brightness[0], brightness[1])
487 | transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
488 |
489 | if contrast is not None:
490 | contrast_factor = random.uniform(contrast[0], contrast[1])
491 | transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
492 |
493 | if saturation is not None:
494 | saturation_factor = random.uniform(saturation[0], saturation[1])
495 | transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
496 |
497 | if hue is not None:
498 | hue_factor = random.uniform(hue[0], hue[1])
499 | transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
500 |
501 | random.shuffle(transforms)
502 | transform = Compose(transforms)
503 |
504 | return transform
505 |
506 | def __call__(self, img, lbl):
507 | """
508 | Args:
509 | img (PIL Image): Input image.
510 |
511 | Returns:
512 | PIL Image: Color jittered image.
513 | """
514 | transform = self.get_params(self.brightness, self.contrast,
515 | self.saturation, self.hue)
516 | return transform(img), lbl
517 |
518 | def __repr__(self):
519 | format_string = self.__class__.__name__ + '('
520 | format_string += 'brightness={0}'.format(self.brightness)
521 | format_string += ', contrast={0}'.format(self.contrast)
522 | format_string += ', saturation={0}'.format(self.saturation)
523 | format_string += ', hue={0})'.format(self.hue)
524 | return format_string
525 |
526 | class Lambda(object):
527 | """Apply a user-defined lambda as a transform.
528 |
529 | Args:
530 | lambd (function): Lambda/function to be used for transform.
531 | """
532 |
533 | def __init__(self, lambd):
534 | assert callable(lambd), repr(type(lambd).__name__) + " object is not callable"
535 | self.lambd = lambd
536 |
537 | def __call__(self, img):
538 | return self.lambd(img)
539 |
540 | def __repr__(self):
541 | return self.__class__.__name__ + '()'
542 |
543 |
544 | class Compose(object):
545 | """Composes several transforms together.
546 |
547 | Args:
548 | transforms (list of ``Transform`` objects): list of transforms to compose.
549 |
550 | Example:
551 | >>> transforms.Compose([
552 | >>> transforms.CenterCrop(10),
553 | >>> transforms.ToTensor(),
554 | >>> ])
555 | """
556 |
557 | def __init__(self, transforms):
558 | self.transforms = transforms
559 |
560 | def __call__(self, img):
561 | for t in self.transforms:
562 | img = t(img)
563 | return img
564 |
565 | def __repr__(self):
566 | format_string = self.__class__.__name__ + '('
567 | for t in self.transforms:
568 | format_string += '\n'
569 | format_string += ' {0}'.format(t)
570 | format_string += '\n)'
571 | return format_string
--------------------------------------------------------------------------------
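The transforms above mirror torchvision's, except that every __call__ takes an (image, label) pair and applies the same random parameters to both. A minimal usage sketch, assuming the file is importable as utils.ext_transforms; the file names and crop/normalization values are illustrative, not taken from this repo's training scripts:

from PIL import Image
import utils.ext_transforms as et  # assumed import path

train_transform = et.ExtCompose([
    et.ExtRandomScale((0.5, 2.0)),                  # same random scale for image and label
    et.ExtRandomCrop(size=512, pad_if_needed=True),
    et.ExtRandomHorizontalFlip(p=0.5),
    et.ExtToTensor(),                               # the label stays an integer tensor
    et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

img = Image.open('example_image.png').convert('RGB')  # placeholder paths
lbl = Image.open('example_label.png')
img_t, lbl_t = train_transform(img, lbl)              # one call transforms both jointly
--------------------------------------------------------------------------------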
/incremental/utils/loss.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 | import torch
4 | import numpy as np
5 | from torch.autograd import Variable
6 |
7 | class FocalLoss(nn.Module):
8 | def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255):
9 | super(FocalLoss, self).__init__()
10 | self.alpha = alpha
11 | self.gamma = gamma
12 | self.ignore_index = ignore_index
13 | self.size_average = size_average
14 |
15 | def forward(self, inputs, targets):
16 | ce_loss = F.cross_entropy(
17 | inputs, targets, reduction='none', ignore_index=self.ignore_index)
18 | pt = torch.exp(-ce_loss)
19 | focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
20 | if self.size_average:
21 | return focal_loss.mean()
22 | else:
23 | return focal_loss.sum()
24 |
25 | class CrossEntropyLoss(nn.Module):
26 | def __init__(self, alpha=0, beta=0, gamma=0, size_average=True, ignore_index=255):
27 | super(CrossEntropyLoss, self).__init__()
28 | self.alpha = alpha
29 | self.beta = beta
30 | self.gamma = gamma
31 | self.ignore_index = ignore_index
32 | self.size_average = size_average
33 | self.criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index, reduction='mean' if self.size_average else 'sum')
34 | if torch.cuda.is_available():  # `if self.cuda:` is always truthy (bound nn.Module method), so test availability instead
35 | self.criterion = self.criterion.cuda()
36 |
37 | def forward(self, logit, target, features_in):
38 | n, c, h, w = logit.size()
39 |
40 | CE_loss = self.criterion(logit, target.long())
41 | return CE_loss / n  # NOTE: early return; the VAR/Inter/Center terms below are unreachable as written
42 | VAR_loss = Variable(torch.Tensor([0])).cuda()
43 | Inter_loss = Variable(torch.Tensor([0])).cuda()
44 | Center_loss = Variable(torch.Tensor([0])).cuda()
45 | for i in range(n):
46 | label = target[i]
47 | label = label.flatten().cpu().numpy()
48 | features = logit[i]
49 | features = features.permute(1, 2, 0).contiguous()
50 | shape = features.size()
51 | features = features.view(shape[0]*shape[1], shape[2])
52 | features_in_temp = features_in[i]
53 |
54 | instances, counts = np.unique(label, return_counts=True)
55 | # print('counts', counts)
56 | total_size = int(np.sum(counts))
57 | for instance in instances:
58 |
59 | if instance == self.ignore_index: # Ignore background
60 | continue
61 |
62 | locations = torch.LongTensor(np.where(label == instance)[0]).cuda()
63 | vectors = torch.index_select(features, dim=0, index=locations)
64 | features_temp = torch.index_select(features_in_temp, dim=0, index=locations)
65 | centers_temp = torch.mean(features_temp, dim=0)
66 | features_temp = features_temp - centers_temp
67 | Center_loss += torch.sum(features_temp ** 2) / total_size
68 | # print(size)
69 | # print(-vectors[:,int(instance)])
70 | # get instance mean and distances to mean of all points in an instance
71 | VAR_loss += torch.sum((-vectors[:,int(instance)]))/total_size
72 | Inter_loss += (torch.sum(vectors) - torch.sum((vectors[:,int(instance)]))) / total_size
73 |
74 | # total_size += size
75 |
76 | # VAR_loss += var_loss/total_size
77 |
78 | loss = (CE_loss + self.alpha * VAR_loss + self.beta * Inter_loss +self.gamma * Center_loss) / n
79 | # print(CE_loss/n, self.alpha * VAR_loss/n, self.beta * Inter_loss/n, self.gamma * Center_loss/n)
80 |
81 | return loss
82 |
83 | class CrossEntropyLoss_dis(nn.Module):
84 | def __init__(self, alpha=0, beta=0, gamma=0, size_average=True, ignore_index=255):
85 | super(CrossEntropyLoss_dis, self).__init__()
86 | self.alpha = alpha
87 | self.beta = beta
88 | self.gamma = gamma
89 | self.ignore_index = ignore_index
90 | self.size_average = size_average
91 |
92 | def forward(self, logit, target, features_1, features_2):
93 | n, c, h, w = logit.size()
94 | criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index, reduction='mean' if self.size_average else 'sum')
95 |
96 | if torch.cuda.is_available():  # `if self.cuda:` is always truthy (bound nn.Module method)
97 | criterion = criterion.cuda()
98 |
99 | CE_loss = criterion(logit, target.long())
100 |
101 | return CE_loss / n  # NOTE: early return; the DIS term below is unreachable as written
102 |
103 | DIS_loss = Variable(torch.Tensor([0])).cuda()
104 |
105 | appendix_lay = torch.zeros(n,w,h,1).cuda()
106 | features_1 = torch.cat((features_1, appendix_lay), dim=3)
107 | # print('features_1.shape: ', features_1.shape)
108 | # print('features_2.shape: ', features_2.shape)
109 |
110 | for i in range(n):
111 | features_origin = features_1[i][target[i] != 16]
112 | features_new = features_2[i][target[i] != 16]
113 | features_diff = features_new - features_origin
114 | DIS_loss += torch.sum(features_diff ** 2) / (features_diff.shape[0])
115 |
116 | loss = CE_loss / n + 0.01 * DIS_loss / n
117 | # print(CE_loss, DIS_loss)
118 |
119 |
120 |
121 | return loss
122 |
174 | class CenterLoss(nn.Module):
175 | """Center loss.
176 |
177 | Reference:
178 | Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
179 |
180 | Args:
181 | num_classes (int): number of classes.
182 | feat_dim (int): feature dimension.
183 | """
184 | def __init__(self, num_classes=10, feat_dim=256, use_gpu=True):
185 | super(CenterLoss, self).__init__()
186 | self.num_classes = num_classes
187 | self.feat_dim = feat_dim
188 | self.use_gpu = use_gpu
189 |
190 | if self.use_gpu:
191 | self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda()) # (C, M)
192 | self.criterion = nn.CrossEntropyLoss().cuda()
193 | else:
194 | self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
195 | self.criterion = nn.CrossEntropyLoss()
196 |
197 | def _dis_criterion(self, x, labels):
198 | # x: (B, M, H, W) | labels: (B, H, W)
199 | _, _, H, W = x.shape
200 | assert H == W
201 | x = torch.nn.functional.interpolate(x, size=[H//2, W//2])
202 | labels = torch.nn.functional.interpolate(labels.unsqueeze(dim=1).float(), size=[H//2, W//2], mode="nearest")
203 | logit = [-torch.sum((x.unsqueeze(dim=1) - self.centers.clone()[c:c+1, :].detach().view(1, 1, self.centers.shape[1], 1, 1)) ** 2, dim=2) for c in range(self.num_classes)]
204 | logit = torch.cat(logit, dim=1)
205 | logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, self.num_classes)
206 | label = labels.contiguous().view(-1)
207 | #logit = -torch.sum((x.unsqueeze(dim=1) - self.centers.clone().detach().view(1, *self.centers.shape, 1, 1)) ** 2, dim=2)
208 | loss = self.criterion(logit[label != 255], label[label != 255].long())
209 | return loss
210 |
211 | def forward(self, x, labels):
212 | """
213 | Args:
214 | x: feature matrix with shape (batch_size, feat_dim, h, w).
215 | labels: ground truth labels with shape (batch_size, h, w).
216 | """
217 | # feature = x.clone()
218 | # feature_label = labels.clone()
219 |
220 | batch_size = x.size(0)
221 | x = x.permute(0, 2, 3, 1) # (B, H, W, M)
222 |
223 | x = x.reshape((-1,self.feat_dim)) # (N, M)
224 | sample_size= x.size(0) # N
225 | labels = labels.flatten() # (N,)
226 | assert sample_size == labels.size(0)
227 | # (N, M) --> (N, 1) --> (N, C) | (C, M) --> (C, 1) --> (C, N) --> (N, C)
228 | # (N, C)
229 | distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(sample_size, self.num_classes) + \
230 | torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, sample_size).t()
231 | # distmat - 2 (x * center.T)
232 | distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)  # distmat -= 2 * x @ centers.T (old positional addmm_ signature is removed in recent torch)
233 |
234 | classes = torch.arange(self.num_classes).long()
235 | if self.use_gpu: classes = classes.cuda()
236 | labels = labels.unsqueeze(1).expand(sample_size, self.num_classes)
237 | mask = labels.eq(classes.expand(sample_size, self.num_classes))
238 |
239 | dist = distmat * mask.float()
240 | loss = dist.clamp(min=1e-12, max=1e+12).sum() / sample_size
241 |
242 | #norm_loss = torch.exp(-torch.norm(self.centers.unsqueeze(dim=0)-self.centers.unsqueeze(dim=1), p=2, dim=-1))
243 |
244 | #dis_loss = self._dis_criterion(feature, feature_label)
245 |
246 | return loss / batch_size #+ norm_loss / batch_size
247 |
248 | if __name__ == '__main__':
249 | center_loss = CenterLoss()
250 | print(center_loss.centers.data.shape)
251 | center = center_loss.centers.data
252 | torch.save(center, 'center.pth')
253 | # torch.save(center_loss.state_dict(), './center.pth')
--------------------------------------------------------------------------------
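A quick shape-level sanity check for the losses above; this is a sketch assuming the file is importable as utils.loss, run on CPU with illustrative shapes (19 classes, as in Cityscapes):

import torch
from utils.loss import FocalLoss, CenterLoss  # assumed import path

logits = torch.randn(2, 19, 64, 64)            # (B, C, H, W) segmentation logits
target = torch.randint(0, 19, (2, 64, 64))     # (B, H, W) integer labels

focal = FocalLoss(alpha=1, gamma=2)            # gamma=0 reduces to plain cross-entropy
print(focal(logits, target).item())

center = CenterLoss(num_classes=19, feat_dim=256, use_gpu=False)
feats = torch.randn(2, 256, 64, 64)            # (B, feat_dim, H, W) embedding map
print(center(feats, target).item())
--------------------------------------------------------------------------------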
/incremental/utils/scheduler.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler
2 |
3 | class PolyLR(_LRScheduler):
4 | def __init__(self, optimizer, max_iters, power=0.9, last_epoch=-1, min_lr=1e-6):
5 | self.power = power
6 | self.max_iters = max_iters
7 | self.min_lr = min_lr  # floor so the lr never decays to exactly zero at max_iters
8 | super(PolyLR, self).__init__(optimizer, last_epoch)
9 |
10 | def get_lr(self):
11 | return [ max( base_lr * ( 1 - self.last_epoch/self.max_iters )**self.power, self.min_lr)
12 | for base_lr in self.base_lrs]
--------------------------------------------------------------------------------
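PolyLR applies the standard polynomial decay lr = base_lr * (1 - iter / max_iters)^power, floored at min_lr. A usage sketch with illustrative values; the model, optimizer, and iteration count are placeholders:

import torch
from utils.scheduler import PolyLR  # assumed import path

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scheduler = PolyLR(optimizer, max_iters=30000, power=0.9)

for it in range(30000):
    optimizer.step()   # stand-in for a real forward/backward/update
    scheduler.step()   # decay once per training iteration
--------------------------------------------------------------------------------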
/incremental/utils/utils.py:
--------------------------------------------------------------------------------
1 | from torchvision.transforms.functional import normalize
2 | import torch.nn as nn
3 | import numpy as np
4 | import os
5 |
6 | def denormalize(tensor, mean, std):
7 | mean = np.array(mean)
8 | std = np.array(std)
9 |
10 | _mean = -mean/std
11 | _std = 1/std
12 | return normalize(tensor, _mean, _std)
13 |
14 | class Denormalize(object):
15 | def __init__(self, mean, std):
16 | mean = np.array(mean)
17 | std = np.array(std)
18 | self._mean = -mean/std
19 | self._std = 1/std
20 |
21 | def __call__(self, tensor):
22 | if isinstance(tensor, np.ndarray):
23 | return (tensor - self._mean.reshape(-1,1,1)) / self._std.reshape(-1,1,1)
24 | return normalize(tensor, self._mean, self._std)
25 |
26 | def set_bn_momentum(model, momentum=0.1):
27 | for m in model.modules():
28 | if isinstance(m, nn.BatchNorm2d):
29 | m.momentum = momentum
30 |
31 | def fix_bn(model):
32 | for m in model.modules():
33 | if isinstance(m, nn.BatchNorm2d):
34 | m.eval()
35 |
36 | def mkdir(path):
37 | if not os.path.exists(path):
38 | os.mkdir(path)
39 |
40 | def colorEncode(labelmap, colors, mode='RGB'):
41 | labelmap = labelmap.astype('int')
42 | labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),
43 | dtype=np.uint8)
44 | for label in unique(labelmap):
45 | if label < 0:
46 | continue
47 | labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \
48 | np.tile(colors[label],
49 | (labelmap.shape[0], labelmap.shape[1], 1))
50 |
51 | if mode == 'BGR':
52 | return labelmap_rgb[:, :, ::-1]
53 | else:
54 | return labelmap_rgb
55 |
56 | def unique(ar, return_index=False, return_inverse=False, return_counts=False):
57 | ar = np.asanyarray(ar).flatten()
58 |
59 | optional_indices = return_index or return_inverse
60 | optional_returns = optional_indices or return_counts
61 |
62 | if ar.size == 0:
63 | if not optional_returns:
64 | ret = ar
65 | else:
66 | ret = (ar,)
67 | if return_index:
68 | ret += (np.empty(0, bool),)
69 | if return_inverse:
70 | ret += (np.empty(0, bool),)
71 | if return_counts:
72 | ret += (np.empty(0, np.intp),)
73 | return ret
74 | if optional_indices:
75 | perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
76 | aux = ar[perm]
77 | else:
78 | ar.sort()
79 | aux = ar
80 | flag = np.concatenate(([True], aux[1:] != aux[:-1]))
81 |
82 | if not optional_returns:
83 | ret = aux[flag]
84 | else:
85 | ret = (aux[flag],)
86 | if return_index:
87 | ret += (perm[flag],)
88 | if return_inverse:
89 | iflag = np.cumsum(flag) - 1
90 | inv_idx = np.empty(ar.shape, dtype=np.intp)
91 | inv_idx[perm] = iflag
92 | ret += (inv_idx,)
93 | if return_counts:
94 | idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
95 | ret += (np.diff(idx),)
96 | return ret
97 |
--------------------------------------------------------------------------------
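A short sketch of the helpers above, assuming the file is importable as utils.utils; the 19-entry palette is random and purely illustrative:

import numpy as np
import torch
from utils.utils import Denormalize, colorEncode  # assumed import path

denorm = Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
img = denorm(torch.randn(3, 64, 64))       # undoes ImageNet-style normalization

palette = np.random.randint(0, 255, (19, 3), dtype=np.uint8)  # one RGB color per class
labelmap = np.random.randint(0, 19, (64, 64))
rgb = colorEncode(labelmap, palette)       # (64, 64, 3) uint8 color-coded label map
--------------------------------------------------------------------------------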
/incremental/utils/visualizer.py:
--------------------------------------------------------------------------------
1 | from visdom import Visdom
2 | import json
3 |
4 | class Visualizer(object):
5 | """ Visualizer
6 | """
7 | def __init__(self, port='13579', env='main', id=None):
8 | self.cur_win = {}
9 | self.vis = Visdom(port=port, env=env)
10 | self.id = id
11 | self.env = env
12 | # Restore
13 | ori_win = self.vis.get_window_data()
14 | ori_win = json.loads(ori_win)
15 | #print(ori_win)
16 | self.cur_win = { v['title']: k for k, v in ori_win.items() }
17 |
18 | def vis_scalar(self, name, x, y, opts=None):
19 | if not isinstance(x, list):
20 | x = [x]
21 | if not isinstance(y, list):
22 | y = [y]
23 |
24 | if self.id is not None:
25 | name = "[%s]"%self.id + name
26 | default_opts = { 'title': name }
27 | if opts is not None:
28 | default_opts.update(opts)
29 |
30 | win = self.cur_win.get(name, None)
31 | if win is not None:
32 | self.vis.line( X=x, Y=y, opts=default_opts, update='append',win=win )
33 | else:
34 | self.cur_win[name] = self.vis.line( X=x, Y=y, opts=default_opts)
35 |
36 | def vis_image(self, name, img, env=None, opts=None):
37 | """ vis image in visdom
38 | """
39 | if env is None:
40 | env = self.env
41 | if self.id is not None:
42 | name = "[%s]"%self.id + name
43 | win = self.cur_win.get(name, None)
44 | default_opts = { 'title': name }
45 | if opts is not None:
46 | default_opts.update(opts)
47 | if win is not None:
48 | self.vis.image( img=img, win=win, opts=default_opts, env=env )
49 | else:
50 | self.cur_win[name] = self.vis.image( img=img, opts=default_opts, env=env )
51 |
52 | def vis_table(self, name, tbl, opts=None):
53 | win = self.cur_win.get(name, None)
54 |
55 | tbl_str = "<table width=\"100%\"> "
56 | tbl_str += "<tr> \
57 | <th>Term</th> \
58 | <th>Value</th> \
59 | </tr>"
60 | for k, v in tbl.items():
61 | tbl_str += "<tr> \
62 | <td>%s</td> \
63 | <td>%s</td> \
64 | </tr>" % (k, v)
65 |
66 | tbl_str += "</table>"
67 |
68 | default_opts = { 'title': name }
69 | if opts is not None:
70 | default_opts.update(opts)
71 | if win is not None:
72 | self.vis.text(tbl_str, win=win, opts=default_opts)
73 | else:
74 | self.cur_win[name] = self.vis.text(tbl_str, opts=default_opts)
75 |
76 |
77 | if __name__=='__main__':
78 | import numpy as np
79 | vis = Visualizer(port=13500, env='main')
80 | tbl = {"lr": 214, "momentum": 0.9}
81 | vis.vis_table("test_table", tbl)
82 | tbl = {"lr": 244444, "momentum": 0.9, "haha": "hoho"}
83 | vis.vis_table("test_table", tbl)
84 |
--------------------------------------------------------------------------------
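The __main__ block in visualizer.py only exercises vis_table; logging a training curve with vis_scalar looks like the sketch below. A visdom server must already be listening on the chosen port, and the port, env, and values are illustrative:

from utils.visualizer import Visualizer  # assumed import path

vis = Visualizer(port=13579, env='main', id='run0')
for it in range(100):
    loss = 1.0 / (it + 1)                 # placeholder scalar
    vis.vis_scalar('Loss', x=it, y=loss)  # appends to the window titled '[run0]Loss'
--------------------------------------------------------------------------------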