├── .gitignore ├── LICENSE ├── README.md ├── crowd_dataset.py ├── models.py ├── requirements.txt ├── resources ├── cssccnn_architecture.png ├── cssccnn_main_prediction_results.png ├── parta_validation_files.npy ├── parta_xy_positions.log ├── ucfqnrf_validation_files.npy └── ucfqnrf_xy_positions.log ├── sinkhorn.py ├── stage1_main.py ├── stage2_main++.py ├── stage2_main.py └── test_model.py /.gitignore: -------------------------------------------------------------------------------- 1 | models_stage_1 2 | models_stage_2 3 | models 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | cover/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | # *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | .pybuilder/ 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # IPython 86 | profile_default/ 87 | ipython_config.py 88 | 89 | # pyenv 90 | # For a library or package, you might want to ignore these files since the code is 91 | # intended to run in multiple environments; otherwise, check them in: 92 | # .python-version 93 | 94 | # pipenv 95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 98 | # install all needed dependencies. 99 | #Pipfile.lock 100 | 101 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 102 | __pypackages__/ 103 | 104 | # Celery stuff 105 | celerybeat-schedule 106 | celerybeat.pid 107 | 108 | # SageMath parsed files 109 | *.sage.py 110 | 111 | # Environments 112 | .env 113 | .venv 114 | env/ 115 | venv/ 116 | ENV/ 117 | env.bak/ 118 | venv.bak/ 119 | 120 | # Spyder project settings 121 | .spyderproject 122 | .spyproject 123 | 124 | # Rope project settings 125 | .ropeproject 126 | 127 | # mkdocs documentation 128 | /site 129 | 130 | # mypy 131 | .mypy_cache/ 132 | .dmypy.json 133 | dmypy.json 134 | 135 | # Pyre type checker 136 | .pyre/ 137 | 138 | # pytype static type analyzer 139 | .pytype/ 140 | 141 | # Cython debug symbols 142 | cython_debug/ 143 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Video Analytics Lab -- IISc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Completely Self-Supervised Crowd Counting via Distribution Matching 2 | 3 | This repository provides a [PyTorch](http://pytorch.org/) implementation and pretrained models for CSS-CCNN, as described in the paper [Completely Self-Supervised Crowd Counting 4 | via Distribution Matching](http://arxiv.org/abs/2009.06420). 5 | 6 | ![CSS-CCNN Architecture](/resources/cssccnn_architecture.png) 7 | Existing self-supervised approaches can learn good representations, but require some labeled data to map these features to the end task of crowd density estimation. We mitigate this issue with the proposed paradigm of complete self-supervision, which does not need even a single labeled image. Our method dwells on the idea that natural crowds follow a power law distribution, which could be leveraged to yield error signals for backpropagation. A density regressor is first pretrained with self-supervision and then the distribution of predictions is matched to the prior by optimizing Sinkhorn distance between the two. 8 | 9 | # Dataset Requirements 10 | Download Shanghaitech dataset from [here](https://github.com/desenzhou/ShanghaiTechDataset). 11 | Download UCF-QNRF dataset from [here](http://crcv.ucf.edu/data/ucf-qnrf/). 12 | 13 | Place the dataset in `../dataset/` folder. 
(`dataset` and `css-cnn` folders should have the same parent directory.) The directory structure should look like the following:
14 | ```
15 | -- css-cnn
16 |    -- network.py
17 |    -- stage1_main.py
18 |    -- ....
19 | -- dataset
20 |    -- ST_partA
21 |       -- test_data
22 |          -- ground-truth
23 |          -- images
24 |       -- train_data
25 |          -- ground-truth
26 |          -- images
27 |    -- UCF-QNRF
28 |       -- Train
29 |          -- ...
30 |       -- Test
31 |          -- ...
32 | ```
33 | 
34 | # Dependencies and Installation
35 | We strongly recommend running the code inside Nvidia-Docker. Install both `docker` and `nvidia-docker` (see their respective installation pages for instructions).
36 | After the Docker installation, pull the PyTorch Docker image with the following command:
37 | `docker pull nvcr.io/nvidia/pytorch:18.04-py3`
38 | and run the image using the command:
39 | `nvidia-docker run --rm -ti --ipc=host nvcr.io/nvidia/pytorch:18.04-py3`
40 | 
41 | Further software requirements are listed in `requirements.txt`.
42 | 
43 | To install them, run `pip install -r requirements.txt`.
44 | 
45 | The code has been run and tested with `Python 3.6.4`, `CUDA 9.0 (V9.0.176)` and `PyTorch 0.4.1`.
46 | 
47 | # Usage
48 | 
49 | ## Pretrained Models
50 | 
51 | The pretrained models can be downloaded from [here](https://drive.google.com/drive/folders/1KhAzNrOvyN5oiFUePfnzibjY3w_6DML6?usp=sharing). The directory structure is as follows:
52 | 
53 | ```
54 | -- parta
55 |    -- models_stage_1
56 |       -- unsup_vgg_best_model_meta.pkl
57 |       -- stage1_epoch_parta.pth
58 |    -- models_stage_2
59 |       -- stage2_epoch_parta_cssccnn.pth
60 |       -- stage2_epoch_parta_cssccnnv2.pth
61 | -- ucfqnrf
62 |    -- models_stage_1
63 |       -- ...
64 |    -- models_stage_2
65 |       -- ...
66 | ```
67 | 
68 | * For testing the Stage-2 pretrained models, save the pretrained weights files from `{dataset}/models_stage_2` in `models_stage_2/train2/snapshots/` and follow the steps outlined in the Testing section.
69 | 
70 | * For training only Stage-2 using the Stage-1 pretrained model, save the pretrained weights files from `{dataset}/models_stage_1` in `models_stage_1/train2/snapshots/` and follow the steps for Stage-2 CSS-CCNN or CSS-CCNN++ training.
71 | 
72 | ## Testing
73 | 
74 | After either finishing the training or downloading the pretrained models, the model can be tested using the script below.
75 | The model must be present in `models_stage_2/train2/snapshots`.
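For example, if the Part-A pretrained weights were downloaded to a local `parta/` folder (the path here is only illustrative), the checkpoint can be placed as follows:
```
mkdir -p models_stage_2/train2/snapshots
cp parta/models_stage_2/stage2_epoch_parta_cssccnn.pth models_stage_2/train2/snapshots/
```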
76 | 
77 | * `python test_model.py --best_model_name parta_cssccnnv2 --dataset parta`
78 | ```
79 | --dataset = parta / ucfqnrf
80 | --best_model_name = Name of the model checkpoint to be tested (e.g. parta_cssccnn / parta_cssccnnv2)
81 | ```
82 | 
83 | ## Training
84 | After downloading the datasets and installing all dependencies, proceed with training as follows:
85 | 
86 | ### Stage-1 Training:
87 | * `python stage1_main.py --dataset parta --gpu 0`
88 | ```
89 | -b = Batch size [For ucfqnrf, set 16]
90 | --dataset = parta / ucfqnrf
91 | --gpu = GPU Number
92 | --epochs = Number of epochs to train
93 | ```
94 | 
95 | ### Stage-2 CSS-CCNN Training:
96 | * `python stage2_main.py --dataset parta --gpu 0 --cmax 3000 --num_samples 482`
97 | ```
98 | --dataset = parta / ucfqnrf
99 | --cmax = Max count value [For ucfqnrf, set 12000]
100 | --num_samples = Number of samples [For ucfqnrf, set 1500]
101 | --epochs = Epochs [For ucfqnrf, set 200]
102 | ```
103 | 
104 | ### Stage-2 CSS-CCNN++ Training:
105 | * `python stage2_main++.py --dataset parta --gpu 0 --cmax 3500 --num_samples 482`
106 | ```
107 | --dataset = parta / ucfqnrf
108 | --cmax = Max count value [For ucfqnrf, set 12000]
109 | --num_samples = Number of samples [For ucfqnrf, set 1500]
110 | --epochs = Epochs [For ucfqnrf, set 200]
111 | ```
112 | 
113 | # Results
114 | 
115 | ![Visualisations](/resources/cssccnn_main_prediction_results.png)
116 | 
117 | ## License
118 | 
119 | See the [LICENSE](https://github.com/val-iisc/css-ccnn/blob/master/LICENSE) file for more details.
120 | 
121 | ## Citation
122 | 
123 | If you find this work useful in your research, please consider citing the paper:
124 | 
125 | ```
126 | @article{CSSCNN20,
127 |   title   = {Completely Self-Supervised Crowd Counting via Distribution Matching},
128 |   author  = {Babu Sam, Deepak and Agarwalla, Abhinav and Joseph, Jimmy and Sindagi, Vishwanath A. and Babu, R. Venkatesh and Patel, Vishal M.},
129 |   journal = {arXiv preprint arXiv:2009.06420},
130 |   year    = {2020}
131 | }
132 | ```
--------------------------------------------------------------------------------
/crowd_dataset.py:
--------------------------------------------------------------------------------
1 | """
2 | crowd_dataset.py: Code to use crowd counting datasets for training and testing.
3 | """
4 | import math  # needed by CrowdDatasetLabelled for fractional num_labels
5 | import os
6 | import pickle
7 | import random
8 | from pdb import set_trace as bp
9 | 
10 | import cv2
11 | import numpy as np
12 | import scipy.io
13 | 
14 | 
15 | class CrowdDataset:
16 | """
17 | Class to use crowd counting datasets for training and testing.
18 | Version: 4.1
19 | DataReader supports the following:
20 | ground truths: can create density maps.
21 | testing: full image testing.
22 | training: extract random crops with flip augmentation.
23 | validating: extract random crops without augmentation.
24 | """
25 | 
26 | def __init__(self, data_path, name='parta', valid_set_size=0,
27 | gt_downscale_factor=2,
28 | image_size_min=224, image_size_max=1024, image_crop_size=224,
29 | density_map_sigma=1.0, stage_1=False):
30 | """
31 | Initialize dataset class.
32 | 
33 | Parameters
34 | ----------
35 | data_path: string
36 | Path to the dataset data; the provided directory MUST have the following structure:
37 | |-train_data
38 | |-images
39 | |-ground_truth
40 | |-test_data
41 | |-images
42 | |-ground_truth
43 | name: string
44 | Dataset name; MUST be one of ['parta', 'partb',
45 | 'ucfqnrf', 'ucf50']
46 | valid_set_size: int
47 | Number of images from train set to be randomly taken for validation. Value MUST BE < number of training
48 | images.
Default is 0 and no validation set is created. 49 | gt_downscale_factor: int 50 | Scale factor specifying the spatial size of square GT maps in relation to the input. For instance, 51 | `gt_downscale_factor` = 4 means that the spatial size of returned `gt_head_maps` (gtH, gtW) is exactly 52 | one-fourth that of input `images` (H, W) [see details of train_get_data() and test_get_data() functions for 53 | more information]. The value MUST BE one of [1, 2, 4, 8, 16, 32]. 54 | """ 55 | self.image_size_min = image_size_min 56 | self.image_size_max = image_size_max 57 | self.image_crop_size = image_crop_size 58 | assert(self.image_crop_size >= self.image_size_min >= 224) 59 | assert(self.image_size_max > self.image_size_min) 60 | self.data_path = data_path 61 | self.name = name 62 | self.gt_downscale_factor = gt_downscale_factor 63 | 64 | self.density_map_kernel = self._gaussian_kernel(density_map_sigma) 65 | self._image_size_multiple = gt_downscale_factor 66 | assert(self.image_size_min % self._image_size_multiple == 0) 67 | assert(self.image_size_max % self._image_size_multiple == 0) 68 | assert(self.image_crop_size % self._image_size_multiple == 0) 69 | self.train_iterator = None 70 | self.val_iterator = None 71 | self.data_paths = { 72 | 'train': { 73 | 'images': os.path.join(self.data_path, 'train_data', 'images'), 74 | 'gt': os.path.join(self.data_path, 'train_data', 'ground_truth') 75 | }, 76 | 'test': { 77 | 'images': os.path.join(self.data_path, 'test_data', 'images'), 78 | 'gt': os.path.join(self.data_path, 'test_data', 'ground_truth') 79 | } 80 | } 81 | 82 | if "ucfqnrf" in self.name: 83 | self.data_paths['train']['images'] = self.data_paths['train']['images'].replace('train_data', 'Train') 84 | self.data_paths['train']['gt'] = self.data_paths['train']['gt'].replace('train_data', 'Train') 85 | self.data_paths['test']['images'] = self.data_paths['test']['images'].replace('test_data', 'Test') 86 | self.data_paths['test']['gt'] = self.data_paths['test']['gt'].replace('test_data', 'Test') 87 | 88 | self.data_files = { 89 | 'train': [f for f in sorted(os.listdir(self.data_paths['train']['images'])) 90 | if os.path.isfile(os.path.join(self.data_paths['train']['images'], f))], 91 | 'test': [f for f in sorted(os.listdir(self.data_paths['test']['images'])) 92 | if os.path.isfile(os.path.join(self.data_paths['test']['images'], f))] 93 | } 94 | 95 | self.num_train_images = len(self.data_files['train']) 96 | self.num_test_images = len(self.data_files['test']) 97 | assert(valid_set_size < self.num_train_images) 98 | self.num_val_images = valid_set_size 99 | assert(self.num_train_images > 0 and self.num_test_images > 0) 100 | print('In CrowdDataset.__init__(): {} train and {} test images.'.format(self.num_train_images, 101 | self.num_test_images)) 102 | if valid_set_size > 0: 103 | files = self.data_files['train'] 104 | if stage_1: 105 | files_selected = random.sample(range(0, len(files)), valid_set_size) 106 | np.save('resources/{}_validation_files.npy'.format(name), files_selected) 107 | files_selected = np.load('resources/{}_validation_files.npy'.format(name)) 108 | validation_files = [f for i, f in enumerate(files) 109 | if i in files_selected] 110 | train_files = [f for i, f in enumerate(files) 111 | if i not in files_selected] 112 | self.data_paths['test_valid'] = self.data_paths['train'] 113 | self.data_files['test_valid'] = validation_files 114 | self.data_files['train'] = train_files 115 | self.num_train_images = len(self.data_files['train']) 116 | print('In CrowdDataset.__init__(): {} 
valid images selected and train set reduces to {}.' 117 | .format(len(self.data_files['test_valid']), len(self.data_files['train']))) 118 | 119 | self.val_pos = open('resources/{}_xy_positions.log'.format(name), 'r').readlines() 120 | self.val_pos_counter = 0 121 | print('In CrowdDataset.__init__(): {} dataset initialized.'.format(self.name)) 122 | 123 | 124 | def train_get_data(self, batch_size=4): 125 | """ 126 | Returns a batch of randomly cropped images from train set (with flip augmentation). 127 | 128 | Parameters 129 | ---------- 130 | batch_size: int 131 | Required batch size. 132 | 133 | Returns 134 | ---------- 135 | List of [images: ndarray((B, C, H, W)), 136 | gt_density_maps: ndarray((B, 1, gtH, gtW)), 137 | where (gtH, gtW) = (H, W) // self.gt_downscale_factor. 138 | """ 139 | assert(batch_size > 0) 140 | 141 | # randomly sample train dataset 142 | files = self.data_files['train'] 143 | if self.train_iterator is None or (self.train_iterator + batch_size) > self.num_files_rounded: 144 | self.train_iterator = 0 145 | self.num_files_rounded = len(files) - (len(files) % batch_size) 146 | self.file_ids = random.sample(range(0, len(files)), self.num_files_rounded) 147 | 148 | file_ids = self.file_ids[self.train_iterator: self.train_iterator + batch_size] 149 | assert(len(file_ids) == batch_size) 150 | file_batch = [files[i] for i in file_ids] 151 | self.train_iterator += batch_size 152 | 153 | # initialize train batch 154 | num_channels = 3 155 | images = np.empty((batch_size, num_channels, self.image_crop_size, self.image_crop_size), dtype=np.float32) 156 | gt_crop_size = self.image_crop_size // self.gt_downscale_factor 157 | gt_density_maps = np.empty((batch_size, 1, gt_crop_size, gt_crop_size), dtype=np.float32) 158 | flip_flags = np.random.randint(2, size=batch_size) 159 | 160 | # create batch 161 | for i, (file_name, flip_flag) in enumerate(zip(file_batch, flip_flags)): 162 | #print(file_name) 163 | image, gt_head_map = self._read_image_and_gt_map(file_name, self.data_paths['train']['images'], 164 | self.data_paths['train']['gt']) 165 | 166 | h, w = image.shape[1] // self.gt_downscale_factor, image.shape[2] // self.gt_downscale_factor 167 | gt_density_map = self._create_heatmap((image.shape[1], image.shape[2]), (h, w), 168 | gt_head_map, self.density_map_kernel) 169 | gt_density_map = gt_density_map[np.newaxis, ...] 170 | 171 | if flip_flag == 1: 172 | image = image[:, :, :: -1] 173 | gt_density_map = gt_density_map[:, :, :: -1] 174 | 175 | y, x = 0, 0 176 | # random draw (y, x) and make multiple of self._image_size_multiple 177 | if image.shape[1] != self.image_crop_size: 178 | y = (np.random.randint(image.shape[1] - self.image_crop_size) // self._image_size_multiple) \ 179 | * self._image_size_multiple 180 | if image.shape[2] != self.image_crop_size: 181 | x = (np.random.randint(image.shape[2] - self.image_crop_size) // self._image_size_multiple) \ 182 | * self._image_size_multiple 183 | images[i, :, :, :] = image[:, y: y + self.image_crop_size, x: x + self.image_crop_size] 184 | y //= self.gt_downscale_factor 185 | x //= self.gt_downscale_factor 186 | 187 | gt_density_maps[i, 0, :, :] = gt_density_map[:, y: y + gt_crop_size, x: x + gt_crop_size] 188 | 189 | assert(np.all(np.logical_and(0.0 <= images, images <= 255.0))) 190 | 191 | return images, gt_density_maps 192 | 193 | def val_get_data(self, batch_size=4): 194 | """ 195 | Returns a batch of randomly cropped images from val set (without augmentation). 
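Note: crop positions are not drawn at random here; they are read sequentially from resources/{name}_xy_positions.log (loaded in __init__ as self.val_pos), so validation batches are reproducible across runs.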
196 | 197 | Parameters 198 | ---------- 199 | batch_size: int 200 | Required batch size. 201 | 202 | Returns 203 | ---------- 204 | List of [images: ndarray((B, C, H, W)), 205 | gt_density_maps: ndarray((B, 1, gtH, gtW)), 206 | where (gtH, gtW) = (H, W) // self.gt_downscale_factor. 207 | """ 208 | assert(batch_size > 0) 209 | 210 | # sequentially select val dataset 211 | files = self.data_files['test_valid'] 212 | 213 | if self.val_iterator is None or (self.val_iterator + batch_size) > self.num_val_files_rounded: 214 | self.val_iterator = 0 215 | self.num_val_files_rounded = len(files) - (len(files) % batch_size) 216 | self.file_ids = range(0, self.num_val_files_rounded) 217 | 218 | file_ids = self.file_ids[self.val_iterator: self.val_iterator + batch_size] 219 | assert(len(file_ids) == batch_size) 220 | file_batch = [files[i] for i in file_ids] 221 | self.val_iterator += batch_size 222 | 223 | # initialize val batch 224 | num_channels = 3 225 | images = np.empty((batch_size, num_channels, self.image_crop_size, self.image_crop_size), dtype=np.float32) 226 | gt_crop_size = self.image_crop_size // self.gt_downscale_factor 227 | gt_density_maps = np.empty((batch_size, 1, gt_crop_size, gt_crop_size), dtype=np.float32) 228 | 229 | # create batch 230 | for i, file_name in enumerate(file_batch): 231 | image, gt_head_map = self._read_image_and_gt_map(file_name, self.data_paths['train']['images'], 232 | self.data_paths['train']['gt']) 233 | 234 | h, w = image.shape[1] // self.gt_downscale_factor, image.shape[2] // self.gt_downscale_factor 235 | gt_density_map = self._create_heatmap((image.shape[1], image.shape[2]), (h, w), 236 | gt_head_map, self.density_map_kernel) 237 | gt_density_map = gt_density_map[np.newaxis, ...] 238 | 239 | if False: 240 | y, x = 0, 0 241 | # random draw (y, x) and make multiple of self._image_size_multiple 242 | if image.shape[1] != self.image_crop_size: 243 | y = (np.random.randint(image.shape[1] - self.image_crop_size) // self._image_size_multiple) \ 244 | * self._image_size_multiple 245 | if image.shape[2] != self.image_crop_size: 246 | x = (np.random.randint(image.shape[2] - self.image_crop_size) // self._image_size_multiple) \ 247 | * self._image_size_multiple 248 | self.f.write("sampling positions: {}, {}, {}, {}\n".format(i, file_name, y, x)) 249 | images[i, :, :, :] = image[:, y: y + self.image_crop_size, x: x + self.image_crop_size] 250 | y //= self.gt_downscale_factor 251 | x //= self.gt_downscale_factor 252 | else: 253 | y, x = int(self.val_pos[self.val_pos_counter].split(',')[-2].strip()), int(self.val_pos[self.val_pos_counter].split(',')[-1].strip()) 254 | self.val_pos_counter = (self.val_pos_counter + 1) % len(self.val_pos) 255 | images[i, :, :, :] = image[:, y: y + self.image_crop_size, x: x + self.image_crop_size] 256 | y //= self.gt_downscale_factor 257 | x //= self.gt_downscale_factor 258 | gt_density_maps[i, 0, :, :] = gt_density_map[:, y: y + gt_crop_size, x: x + gt_crop_size] 259 | 260 | assert(np.all(np.logical_and(0.0 <= images, images <= 255.0))) 261 | return images, gt_density_maps 262 | 263 | def test_get_data(self, set_name='test'): 264 | """ 265 | An iterator to run over images of test/valid set. 266 | 267 | Parameters 268 | ---------- 269 | set_name: string 270 | Name of the set ('test' or 'test_valid') for evaluation. 
271 | 272 | Returns 273 | ---------- 274 | An iterator which outputs a tuple of 4 items: 275 | image_name: string 276 | file name 277 | image: ndarray((1, 3, H, W)) 278 | gt_density_map: ndarray((1, 1, gtH, gtW)) 279 | where (gtH, gtW) = (H, W) // self.gt_downscale_factor. 280 | 281 | Example Usage 282 | ---------- 283 | for name, image, gt_head_map, gt_box_maps_list in _.test_get_data() 284 | # process 285 | """ 286 | assert(set_name in ['test', 'test_valid']) 287 | for image_name in self.data_files[set_name]: 288 | image, gt_head_map = self._read_image_and_gt_map(image_name, self.data_paths[set_name]['images'], 289 | self.data_paths[set_name]['gt']) 290 | 291 | h, w = image.shape[1] // self.gt_downscale_factor, image.shape[2] // self.gt_downscale_factor 292 | gt_density_map = self._create_heatmap((image.shape[1], image.shape[2]), (h, w), 293 | gt_head_map, self.density_map_kernel) 294 | gt_density_map = gt_density_map[np.newaxis, ...] 295 | 296 | yield image_name, image[np.newaxis, :, :, :], gt_density_map[:,np.newaxis,:,:] 297 | 298 | # ### ### ### Internal functions ### ### ### # 299 | 300 | def _read_image_and_gt_map(self, image_name, image_path, gt_path=None): 301 | """ 302 | Reads image and corresponding ground truth. 303 | 304 | Parameters 305 | ---------- 306 | image_name: string 307 | file name 308 | image_path: string 309 | directory path to image file 310 | gt_path: string 311 | directory path to corresponding gt file 312 | 313 | Returns 314 | ---------- 315 | image: ndarray(3, H, W) 316 | gt_head_map: ndarray(1, gtH, gtW) 317 | """ 318 | image = cv2.imread(os.path.join(image_path, image_name)) 319 | if len(image.shape) < 3: 320 | image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) 321 | else: 322 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 323 | assert(len(image.shape) == 3) 324 | height_orig, width_orig, _ = image.shape 325 | height, width, _ = image.shape 326 | 327 | # setting minimum size 328 | if height < self.image_size_min or width < self.image_size_min: 329 | if height <= width: 330 | width = int((float(self.image_size_min * width) / height) + 0.5) 331 | height = self.image_size_min 332 | else: 333 | height = int((float(self.image_size_min * height) / width) + 0.5) 334 | width = self.image_size_min 335 | 336 | # setting maximum size 337 | if height > self.image_size_max or width > self.image_size_max: 338 | if height >= width: 339 | width = int((float(self.image_size_max * width) / height) + 0.5) 340 | height = self.image_size_max 341 | else: 342 | height = int((float(self.image_size_max * height) / width) + 0.5) 343 | width = self.image_size_max 344 | 345 | # make sizes multiple 346 | if height % self._image_size_multiple != 0: 347 | height = ((height // self._image_size_multiple) + 1) * self._image_size_multiple 348 | if width % self._image_size_multiple != 0: 349 | width = ((width // self._image_size_multiple) + 1) * self._image_size_multiple 350 | 351 | # resize image 352 | if height != height_orig or width != width_orig: 353 | image = cv2.resize(src = image, dsize = (width, height)) 354 | image = image.transpose((2, 0, 1)).astype(np.float32) # (3, H, W) 355 | assert(np.all(np.logical_and(0.0 <= image, image <= 255.0))) 356 | assert(np.all(np.isfinite(image))) 357 | if gt_path is None: 358 | return image, None 359 | 360 | # read GT 361 | f_name, _ = os.path.splitext(image_name) 362 | if self.name in ['ucfqnrf', 'ucf50']: 363 | gt_path = os.path.join(gt_path, f_name + '_ann.mat') 364 | elif self.name in ['parta', 'partb', 'parta_1', 'parta_5', 'parta_10', 
'parta_30']: 365 | gt_path = os.path.join(gt_path, 'GT_' + f_name + '.mat') 366 | data_mat = scipy.io.loadmat(gt_path) 367 | if self.name in ['ucfqnrf', 'ucf50']: 368 | gt_annotation_points = data_mat['annPoints'] 369 | elif self.name in ['parta', 'partb', 'parta_1', 'parta_5', 'parta_10', 'parta_30']: 370 | gt_annotation_points = data_mat['image_info'][0, 0]['location'][0, 0] 371 | gt_annotation_points -= 1 # MATLAB indices 372 | ''' 373 | annotation_points : ndarray Nx2, 374 | annotation_points[:, 0] -> x coordinate 375 | annotation_points[:, 1] -> y coordinate 376 | ''' 377 | # scale GT points 378 | 379 | gt_map_shape = (height, width) 380 | gt_annotation_points[:, 0] *= (float(gt_map_shape[1]) / width_orig) 381 | gt_annotation_points[:, 1] *= (float(gt_map_shape[0]) / height_orig) 382 | 383 | # remove invalid indices 384 | indices = (gt_annotation_points[:, 0] < gt_map_shape[1]) & \ 385 | (gt_annotation_points[:, 0] >= 0) & \ 386 | (gt_annotation_points[:, 1] < gt_map_shape[0]) & \ 387 | (gt_annotation_points[:, 1] >= 0) 388 | gt_annotation_points = gt_annotation_points[indices, :].astype(int) 389 | 390 | gt_annotation_points = np.floor(gt_annotation_points) 391 | 392 | gt_head_map = gt_annotation_points 393 | return image, gt_head_map 394 | 395 | def _gaussian_kernel(self, sigma=1.0, kernel_size=None): 396 | ''' 397 | Returns gaussian kernel if sigma > 0.0, otherwise dot kernel. 398 | ''' 399 | if sigma <= 0.0: 400 | return np.array([[0.0, 0.0, 0.0], 401 | [0.0, 1.0, 0.0], 402 | [0.0, 0.0, 0.0]], dtype=np.float32) 403 | if kernel_size is None: 404 | kernel_size = int(3.0 * sigma) 405 | if kernel_size % 2 == 0: 406 | kernel_size += 1 407 | print('In data_reader.gaussian_kernel: Kernel size even; ' \ 408 | 'increased by 1.') 409 | if kernel_size < 3: 410 | kernel_size = 3 411 | print('In data_reader.gaussian_kernel: Kernel size less than 3;' \ 412 | 'set as 3.') 413 | tmp = np.arange((-kernel_size // 2) + 1.0, (kernel_size // 2) + 1.0) 414 | xx, yy = np.meshgrid(tmp, tmp) 415 | kernel = np.exp(-((xx ** 2) + (yy ** 2)) / (2.0 * (sigma ** 2))) 416 | kernel_sum = np.sum(kernel) 417 | assert (kernel_sum > 1e-3) 418 | return kernel / kernel_sum 419 | 420 | def _create_heatmap(self, image_shape, heatmap_shape, 421 | annotation_points, kernel): 422 | """ 423 | Creates density map. 424 | annotation_points : ndarray Nx2, 425 | annotation_points[:, 0] -> x coordinate 426 | annotation_points[:, 1] -> y coordinate 427 | """ 428 | assert (kernel.shape[0] == kernel.shape[1] and kernel.shape[0] % 2 429 | and kernel.shape[0] > 1) 430 | indices = (annotation_points[:, 0] < image_shape[1]) & \ 431 | (annotation_points[:, 0] >= 0) & \ 432 | (annotation_points[:, 1] < image_shape[0]) & \ 433 | (annotation_points[:, 1] >= 0) 434 | annot_error_count = len(annotation_points) 435 | annotation_points = annotation_points[indices, :] 436 | 437 | hmap_height, hmap_width = heatmap_shape 438 | annotation_points[:, 0] *= (1. * heatmap_shape[1] / image_shape[1]) 439 | annotation_points[:, 1] *= (1. * heatmap_shape[0] / image_shape[0]) 440 | annotation_points = annotation_points.astype(np.int32) 441 | annot_error_count -= np.sum(indices) 442 | if annot_error_count: 443 | print('In data_reader.create_heatmap: Error in annotations; ' \ 444 | '%d point(s) skipped.' 
% annot_error_count) 445 | indices = (annotation_points[:, 0] >= heatmap_shape[1]) & \ 446 | (annotation_points[:, 0] < 0) & \ 447 | (annotation_points[:, 1] >= heatmap_shape[0]) & \ 448 | (annotation_points[:, 1] < 0) 449 | assert(np.sum(indices) == 0) 450 | 451 | prediction_map = np.zeros(heatmap_shape, dtype = np.float32) 452 | kernel_half_size = kernel.shape[0] // 2 453 | kernel_copy = np.empty_like(kernel) 454 | 455 | for x, y in annotation_points: 456 | y_start = y - kernel_half_size 457 | y_end = y_start + kernel.shape[0] 458 | x_start = x - kernel_half_size 459 | x_end = x_start + kernel.shape[1] 460 | kernel_copy[:] = kernel[:] 461 | kernel_tmp = kernel_copy 462 | if y_start < 0: 463 | i = -y_start 464 | kernel_tmp[i: 2 * i, :] += kernel_tmp[i - 1:: -1, :] 465 | kernel_tmp = kernel_tmp[i:, :] 466 | y_start = 0 467 | if x_start < 0: 468 | i = -x_start 469 | kernel_tmp[:, i: 2 * i] += kernel_tmp[:, i - 1:: -1] 470 | kernel_tmp = kernel_tmp[:, i:] 471 | x_start = 0 472 | if y_end > hmap_height: 473 | i = (hmap_height - y - 1) - kernel_half_size 474 | kernel_tmp[2 * i: i, :] += kernel_tmp[-1: i - 1: -1, :] 475 | kernel_tmp = kernel_tmp[: i, :] 476 | y_end = hmap_height 477 | if x_end > hmap_width: 478 | i = (hmap_width - x - 1) - kernel_half_size 479 | kernel_tmp[:, 2 * i: i] += kernel_tmp[:, -1: i - 1: -1] 480 | kernel_tmp = kernel_tmp[:, : i] 481 | x_end = hmap_width 482 | prediction_map[y_start: y_end, x_start: x_end] += kernel_tmp 483 | return prediction_map 484 | 485 | class CrowdDatasetLabelled(CrowdDataset): 486 | """ 487 | Class to extract a predefine number or percentage of crowd counting samples for training. 488 | Version: 0.1 489 | DataReader supports the following: 490 | ground truths: can create density maps. 491 | testing: full image testing. 492 | training: extract random crops with flip augmentation. 493 | validating: extract random crops without augmentation. 
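num_labels: an integer value is interpreted as an absolute number of labelled training images to keep, while a float in (0, 1) is interpreted as a fraction of the train set.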
494 | """ 495 | def __init__(self, data_path, name='parta', valid_set_size=0, 496 | gt_downscale_factor=2, 497 | image_size_min=224, image_size_max=1024, image_crop_size=224, 498 | density_map_sigma=1.0, num_labels = None): 499 | 500 | self.image_size_min = image_size_min 501 | self.image_size_max = image_size_max 502 | self.image_crop_size = image_crop_size 503 | assert(self.image_crop_size >= self.image_size_min >= 224) 504 | assert(self.image_size_max > self.image_size_min) 505 | self.data_path = data_path 506 | self.name = name 507 | self.gt_downscale_factor = gt_downscale_factor 508 | 509 | self.density_map_kernel = self._gaussian_kernel(density_map_sigma) 510 | self._image_size_multiple = gt_downscale_factor 511 | assert(self.image_size_min % self._image_size_multiple == 0) 512 | assert(self.image_size_max % self._image_size_multiple == 0) 513 | assert(self.image_crop_size % self._image_size_multiple == 0) 514 | self.train_iterator = None 515 | self.data_paths = { 516 | 'train': { 517 | 'images': os.path.join(self.data_path, 'train_data', 'images'), 518 | 'gt': os.path.join(self.data_path, 'train_data', 'ground_truth') 519 | }, 520 | 'test': { 521 | 'images': os.path.join(self.data_path, 'test_data', 'images'), 522 | 'gt': os.path.join(self.data_path, 'test_data', 'ground_truth') 523 | } 524 | } 525 | 526 | if "ucfqnrf" in self.name: 527 | self.data_paths['train']['images'] = self.data_paths['train']['images'].replace('train_data', 'Train') 528 | self.data_paths['train']['gt'] = self.data_paths['train']['gt'].replace('train_data', 'Train') 529 | self.data_paths['test']['images'] = self.data_paths['test']['images'].replace('test_data', 'Test') 530 | self.data_paths['test']['gt'] = self.data_paths['test']['gt'].replace('test_data', 'Test') 531 | 532 | self.data_files = { 533 | 'train': [f for f in sorted(os.listdir(self.data_paths['train']['images'])) 534 | if os.path.isfile(os.path.join(self.data_paths['train']['images'], f))], 535 | 'test': [f for f in sorted(os.listdir(self.data_paths['test']['images'])) 536 | if os.path.isfile(os.path.join(self.data_paths['test']['images'], f))] 537 | } 538 | 539 | 540 | self.num_train_images = len(self.data_files['train']) 541 | self.num_test_images = len(self.data_files['test']) 542 | assert(valid_set_size < self.num_train_images) 543 | self.num_val_images = valid_set_size 544 | assert(self.num_train_images > 0 and self.num_test_images > 0) 545 | print('In CrowdDatasetLabelled.__init__(): {} train and {} test images.'.format(self.num_train_images, 546 | self.num_test_images)) 547 | if valid_set_size > 0: 548 | files = self.data_files['train'] 549 | files_selected = np.load('resources/{}_validation_files.npy'.format(name)) 550 | validation_files = [f for i, f in enumerate(files) 551 | if i in files_selected] 552 | train_files = [f for i, f in enumerate(files) 553 | if i not in files_selected] 554 | self.data_paths['test_valid'] = self.data_paths['train'] 555 | self.data_files['test_valid'] = validation_files 556 | 557 | 558 | if(int(num_labels) == num_labels): 559 | self.data_files['train'] = np.random.choice(train_files, int(num_labels), replace=False) 560 | elif 1e-8 <= num_labels < 1.: 561 | self.data_files['train'] = np.random.choice(train_files, math.ceil(num_labels*len(train_files)), replace=False) 562 | else: 563 | raise Exception("Correct the num_labels specified") 564 | 565 | self.num_train_images = len(self.data_files['train']) 566 | print('In CrowdDatasetLabelled.__init__(): {} valid images selected and train set reduces to {}.' 
567 | .format(len(self.data_files['test_valid']), len(self.data_files['train']))) 568 | 569 | print('In CrowdDatasetLabelled.__init__(): {} dataset initialized.'.format(self.name)) 570 | -------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | """ 2 | models.py: Model definitions and utilities 3 | """ 4 | 5 | import torch 6 | import torch.nn as nn 7 | import os 8 | import pickle 9 | import numpy as np 10 | 11 | num_rot_conv_layers = 7 12 | num_rot_batch_norm_layers = 7 13 | 14 | class Stage2CountingNet(nn.Module): 15 | """ 16 | Counting net to use for stage2 training 17 | """ 18 | def __init__(self, name='stage2'): 19 | """ 20 | Initialise Stage2CountingNet class. 21 | 22 | Parameters 23 | ---------- 24 | name: string 25 | an alias for the network 26 | """ 27 | super(Stage2CountingNet, self).__init__() 28 | self.name = name 29 | if torch.cuda.is_available(): 30 | self.rgb_means = torch.cuda.FloatTensor([104.008, 116.669, 122.675]) 31 | else: 32 | self.rgb_means = torch.FloatTensor([104.008, 116.669, 122.675]) 33 | self.rgb_means = torch.autograd.Variable(self.rgb_means, 34 | requires_grad=False).unsqueeze(0).unsqueeze(2).unsqueeze(3) 35 | layers = [] 36 | in_channels = 3 37 | 38 | self.relu = nn.functional.relu 39 | 40 | self.conv1_1 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1, bias=False) 41 | self.batch_norm_1_1 = nn.BatchNorm2d(64) 42 | self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False) 43 | self.batch_norm_1_2 = nn.BatchNorm2d(64) 44 | self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) 45 | 46 | self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False) 47 | self.batch_norm_2_1 = nn.BatchNorm2d(128) 48 | self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False) 49 | self.batch_norm_2_2 = nn.BatchNorm2d(128) 50 | self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) 51 | 52 | self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=False) 53 | self.batch_norm_3_1 = nn.BatchNorm2d(256) 54 | self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False) 55 | self.batch_norm_3_2 = nn.BatchNorm2d(256) 56 | self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False) 57 | self.batch_norm_3_3 = nn.BatchNorm2d(256) 58 | 59 | self.conv4_1 = nn.Conv2d(384, 128, kernel_size=3, padding=1) 60 | self.conv4_2 = nn.Conv2d(128, 64, kernel_size=3, padding=1) 61 | self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) 62 | self.conv5_1 = nn.Conv2d(64, 1, kernel_size=3, padding=1) 63 | 64 | self._initialize_weights() 65 | 66 | def _initialize_weights(self): 67 | for m in self.modules(): 68 | if isinstance(m, nn.Conv2d): 69 | m.weight.data.normal_(std=0.01, mean=0.0) 70 | try: 71 | m.bias.data.zero_() 72 | except: 73 | continue 74 | elif isinstance(m, nn.Linear): 75 | assert(0) 76 | 77 | def forward(self, x): 78 | mean_sub_input = x 79 | mean_sub_input -= self.rgb_means 80 | 81 | main_out_block1 = self.relu(self.batch_norm_1_2(self.conv1_2(self.relu(self.batch_norm_1_1(self.conv1_1(mean_sub_input)))))) 82 | main_out_pool1 = self.pool1(main_out_block1) 83 | 84 | main_out_block2 = self.relu(self.batch_norm_2_2(self.conv2_2(self.relu(self.batch_norm_2_1(self.conv2_1(main_out_pool1)))))) 85 | main_out_pool2 = self.pool2(main_out_block2) 86 | 87 | main_out_block3 = 
self.relu(self.batch_norm_3_3(self.conv3_3(self.relu(self.batch_norm_3_2(self.conv3_2(self.relu(self.batch_norm_3_1(self.conv3_1(main_out_pool2))))))))) 88 | 89 | hyper_out = torch.cat((main_out_pool2, main_out_block3), dim=1) 90 | 91 | main_out_block4 = self.relu(self.conv4_2(self.relu(self.conv4_1(hyper_out)))) 92 | main_out_pool4 = self.pool4(main_out_block4) 93 | 94 | main_out_block5 = self.relu(self.conv5_1(main_out_pool4)) 95 | return main_out_block5 96 | 97 | class Stage1CountingNet(nn.Module): 98 | """ 99 | Counting net to use for stage1 training 100 | """ 101 | def __init__(self, name='stage1'): 102 | """ 103 | Initialise Stage1CountingNet class. 104 | 105 | Parameters 106 | ---------- 107 | name: string 108 | an alias for the network 109 | """ 110 | super(Stage1CountingNet, self).__init__() 111 | self.name = name 112 | if torch.cuda.is_available(): 113 | self.rgb_means = torch.cuda.FloatTensor([104.008, 116.669, 122.675]) 114 | else: 115 | self.rgb_means = torch.FloatTensor([104.008, 116.669, 122.675]) 116 | self.rgb_means = torch.autograd.Variable(self.rgb_means, 117 | requires_grad=False).unsqueeze(0).unsqueeze(2).unsqueeze(3) 118 | layers = [] 119 | in_channels = 3 120 | 121 | self.relu = nn.functional.relu 122 | 123 | self.conv1_1 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1, bias=False) 124 | self.batch_norm_1_1 = nn.BatchNorm2d(64) 125 | self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False) 126 | self.batch_norm_1_2 = nn.BatchNorm2d(64) 127 | self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) 128 | 129 | self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False) 130 | self.batch_norm_2_1 = nn.BatchNorm2d(128) 131 | self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False) 132 | self.batch_norm_2_2 = nn.BatchNorm2d(128) 133 | self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) 134 | 135 | self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=False) 136 | self.batch_norm_3_1 = nn.BatchNorm2d(256) 137 | self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False) 138 | self.batch_norm_3_2 = nn.BatchNorm2d(256) 139 | self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False) 140 | self.batch_norm_3_3 = nn.BatchNorm2d(256) 141 | self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) 142 | 143 | self.conv4_1 = nn.Conv2d(256, 128, kernel_size=3, padding=1, bias=False) 144 | self.batch_norm_4_1 = nn.BatchNorm2d(128) 145 | self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) 146 | 147 | self.conv5_1 = nn.Conv2d(128, 64, kernel_size=3, padding=1, bias=False) 148 | self.batch_norm_5_1 = nn.BatchNorm2d(64) 149 | self.avg_pool = nn.AdaptiveAvgPool2d(1) # (B,C,1,1) 150 | self.fc = nn.Linear(64*1*1, 4) 151 | 152 | self._initialize_weights() 153 | 154 | def _initialize_weights(self): 155 | for m in self.modules(): 156 | if isinstance(m, nn.Conv2d): 157 | nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu') 158 | elif isinstance(m, nn.Linear): 159 | nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu') 160 | m.bias.data.zero_() 161 | 162 | def forward(self, x): 163 | mean_sub_input = x 164 | mean_sub_input -= self.rgb_means 165 | 166 | main_out_block1 = self.relu(self.batch_norm_1_2(self.conv1_2(self.relu(self.batch_norm_1_1(self.conv1_1(mean_sub_input)))))) 167 | main_out_pool1 = self.pool1(main_out_block1) 168 | 169 | main_out_block2 = self.relu(self.batch_norm_2_2(self.conv2_2(self.relu(self.batch_norm_2_1(self.conv2_1(main_out_pool1)))))) 170 | main_out_pool2 = self.pool2(main_out_block2) 
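# Shape note (for the default 224x224 crop): main_out_pool2 is (B, 128, 56, 56) at this point.
# The blocks below downsample further (pool3 -> 28x28, pool4 -> 14x14) before global average
# pooling and the 4-way fully-connected head used for Stage-1 rotation self-supervision.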
171 | 172 | main_out_block3 = self.relu(self.batch_norm_3_3(self.conv3_3(self.relu(self.batch_norm_3_2(self.conv3_2(self.relu(self.batch_norm_3_1(self.conv3_1(main_out_pool2))))))))) 173 | main_out_pool3 = self.pool3(main_out_block3) 174 | 175 | main_out_block4 = self.relu(self.batch_norm_4_1(self.conv4_1(main_out_pool3))) 176 | main_out_pool4 = self.pool4(main_out_block4) 177 | 178 | main_out_block5 = self.relu(self.batch_norm_5_1(self.conv5_1(main_out_pool4))) 179 | 180 | global_avg_pool_out = self.avg_pool(main_out_block5) 181 | fc_out = self.fc(global_avg_pool_out.view(global_avg_pool_out.size(0), -1)) 182 | return fc_out 183 | 184 | def load_rot_model_blocks(network, snapshot_path, excluded_layers): 185 | """ 186 | Loading Feature Extraction Network (FEN) for stage2 training 187 | 188 | Parameters 189 | ---------- 190 | network: Stage2CountingNet object 191 | uninitialised random Stage2CountingNetwork 192 | snapshot_path: str 193 | directory path to load weights for FEN 194 | excluded_layers: list 195 | ignore loading particular layers 196 | """ 197 | best_epoch_file_name = open(os.path.join(snapshot_path,'unsup_vgg_best_model_meta.pkl'),'rb') 198 | best_epoch_file_name = pickle.load(best_epoch_file_name) 199 | 200 | print('Loading Stage 1 best epoch model :{}'.format(best_epoch_file_name)) 201 | model_checkpoint = torch.load(os.path.join(snapshot_path,best_epoch_file_name)) 202 | count = 0 203 | parameter_count = 0 204 | 205 | for name, module in network.named_children(): 206 | if name.startswith('conv') and name not in excluded_layers: 207 | module.weight.data.copy_(model_checkpoint['state_dict']['{}.weight'.format(name)]) 208 | module.weight.requires_grad = False 209 | parameter_count +=1 210 | if module.bias != None: 211 | module.bias.data.copy_(model_checkpoint['state_dict']['{}.bias'.format(name)]) 212 | module.bias.requires_grad = False 213 | parameter_count+=1 214 | count += 1 215 | elif name.startswith('batch_norm') and name not in excluded_layers: 216 | module.weight.data.copy_(model_checkpoint['state_dict']['{}.weight'.format(name)]) 217 | parameter_count += 1 218 | module.bias.data.copy_(model_checkpoint['state_dict']['{}.bias'.format(name)]) 219 | parameter_count += 1 220 | 221 | module.weight.requires_grad = False 222 | module.bias.requires_grad = False 223 | 224 | module.running_mean.requires_grad = False 225 | module.running_var.requires_grad = False 226 | 227 | module.running_mean.data.copy_(model_checkpoint['state_dict']['{}.running_mean'.format(name)]) 228 | parameter_count += 1 229 | module.running_var.data.copy_(model_checkpoint['state_dict']['{}.running_var'.format(name)]) 230 | parameter_count += 1 231 | 232 | module.eval() # freeze batch norm 233 | count += 1 234 | assert (count == (num_rot_conv_layers + num_rot_batch_norm_layers)) 235 | assert (parameter_count == (num_rot_conv_layers*1 + num_rot_batch_norm_layers*4)) 236 | return network 237 | 238 | def check_BN_no_gradient_change(network, exclude_list=[]): 239 | """ 240 | checking if BN weights are not being updated 241 | 242 | Parameters 243 | ---------- 244 | network: Stage2CountingNet object 245 | excluded_layers: list 246 | ignore checking particular layers 247 | """ 248 | s = [] 249 | count = 0 250 | for name, module in network.named_children(): 251 | if name.startswith('batch_norm') and name not in exclude_list: 252 | count += 1 253 | s.append(module.running_mean.data.cpu().detach().numpy().reshape(-1)) 254 | assert (count == num_rot_batch_norm_layers) 255 | return np.concatenate(s) 256 | 257 | def 
check_conv_no_gradient_change(network, exclude_list=[]): 258 | """ 259 | checking if conv weights are not being updated 260 | 261 | Parameters 262 | ---------- 263 | network: Stage2CountingNet object 264 | excluded_layers: list 265 | ignore checking particular layers 266 | """ 267 | s = [] 268 | count = 0 269 | for name, module in network.named_children(): 270 | if name.startswith('conv') and name not in exclude_list: 271 | assert (module.weight.requires_grad == False) 272 | s.append(module.weight.data.cpu().detach().numpy().reshape(-1)) 273 | count += 1 274 | assert (count == num_rot_conv_layers) 275 | return np.concatenate(s) 276 | 277 | def set_batch_norm_to_eval(network): 278 | """ 279 | setting all batch norm layers to eval mode 280 | 281 | Parameters 282 | ---------- 283 | network: Stage2CountingNet object 284 | """ 285 | count = 0 286 | for name, module in network.named_children(): 287 | if name.startswith('batch_norm'): 288 | module.eval() 289 | count += 1 290 | assert (count == num_rot_batch_norm_layers) 291 | return network 292 | 293 | def load_net(networks, fdir, name, set_epoch=True): 294 | """ 295 | setting all batch norm layers to eval mode 296 | 297 | Parameters 298 | ---------- 299 | networks: Stage2CountingNet object 300 | fdir: str 301 | Directory to load the network from 302 | name: str 303 | Name of the checkpoint to be loaded 304 | set_epoch: bool 305 | to resume training 306 | """ 307 | net = networks 308 | 309 | filepath = os.path.join(fdir, name) 310 | print("Loading file...", filepath) 311 | 312 | if not os.path.isfile(filepath): 313 | print("Checkpoint file" + filepath + " not found!") 314 | raise IOError 315 | 316 | checkpoint_1 = torch.load(filepath) 317 | 318 | if set_epoch: 319 | try: 320 | args.start_epoch = checkpoint_1['epoch'] 321 | except NameError: 322 | pass 323 | net.load_state_dict(checkpoint_1['state_dict']) 324 | print("=> loaded checkpoint '{}' ({} epochs over)".format(filepath, checkpoint_1['epoch'])) 325 | return net 326 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.15.4 2 | opencv-python==3.4.3.18 3 | Pillow==4.3.0 4 | powerlaw==1.4.4 5 | scipy==1.0.0 6 | torch==0.4.1 7 | torchvision==0.2.0 8 | tqdm==4.19.4 9 | mpmath==1.1.0 -------------------------------------------------------------------------------- /resources/cssccnn_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/css-ccnn/0d5691b4d9839f92997469f7dbb0a475c00f4ca8/resources/cssccnn_architecture.png -------------------------------------------------------------------------------- /resources/cssccnn_main_prediction_results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/css-ccnn/0d5691b4d9839f92997469f7dbb0a475c00f4ca8/resources/cssccnn_main_prediction_results.png -------------------------------------------------------------------------------- /resources/parta_validation_files.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/css-ccnn/0d5691b4d9839f92997469f7dbb0a475c00f4ca8/resources/parta_validation_files.npy -------------------------------------------------------------------------------- /resources/parta_xy_positions.log: 
-------------------------------------------------------------------------------- 1 | sampling positions: 0, IMG_105.jpg, 184, 80 2 | sampling positions: 1, IMG_112.jpg, 88, 336 3 | sampling positions: 2, IMG_115.jpg, 264, 328 4 | sampling positions: 3, IMG_118.jpg, 160, 576 5 | sampling positions: 4, IMG_126.jpg, 336, 792 6 | sampling positions: 5, IMG_128.jpg, 104, 600 7 | sampling positions: 6, IMG_140.jpg, 416, 232 8 | sampling positions: 7, IMG_142.jpg, 120, 384 9 | sampling positions: 8, IMG_164.jpg, 32, 136 10 | sampling positions: 9, IMG_171.jpg, 0, 328 11 | sampling positions: 10, IMG_184.jpg, 32, 496 12 | sampling positions: 11, IMG_185.jpg, 224, 80 13 | sampling positions: 12, IMG_187.jpg, 88, 408 14 | sampling positions: 13, IMG_19.jpg, 144, 576 15 | sampling positions: 14, IMG_21.jpg, 320, 680 16 | sampling positions: 15, IMG_239.jpg, 64, 120 17 | sampling positions: 16, IMG_25.jpg, 72, 608 18 | sampling positions: 17, IMG_281.jpg, 16, 88 19 | sampling positions: 18, IMG_31.jpg, 360, 136 20 | sampling positions: 19, IMG_34.jpg, 440, 24 21 | sampling positions: 20, IMG_37.jpg, 296, 240 22 | sampling positions: 21, IMG_42.jpg, 136, 256 23 | sampling positions: 22, IMG_43.jpg, 432, 328 24 | sampling positions: 23, IMG_48.jpg, 192, 168 25 | sampling positions: 24, IMG_63.jpg, 496, 280 26 | sampling positions: 25, IMG_65.jpg, 248, 728 27 | sampling positions: 26, IMG_68.jpg, 128, 688 28 | sampling positions: 27, IMG_72.jpg, 320, 688 29 | sampling positions: 28, IMG_77.jpg, 224, 696 30 | sampling positions: 29, IMG_87.jpg, 0, 128 31 | sampling positions: 0, IMG_105.jpg, 160, 480 32 | sampling positions: 1, IMG_112.jpg, 32, 160 33 | sampling positions: 2, IMG_115.jpg, 344, 416 34 | sampling positions: 3, IMG_118.jpg, 192, 344 35 | sampling positions: 4, IMG_126.jpg, 104, 648 36 | sampling positions: 5, IMG_128.jpg, 256, 112 37 | sampling positions: 6, IMG_140.jpg, 432, 8 38 | sampling positions: 7, IMG_142.jpg, 136, 424 39 | sampling positions: 8, IMG_164.jpg, 72, 152 40 | sampling positions: 9, IMG_171.jpg, 120, 280 41 | sampling positions: 10, IMG_184.jpg, 152, 392 42 | sampling positions: 11, IMG_185.jpg, 480, 16 43 | sampling positions: 12, IMG_187.jpg, 64, 104 44 | sampling positions: 13, IMG_19.jpg, 80, 152 45 | sampling positions: 14, IMG_21.jpg, 224, 552 46 | sampling positions: 15, IMG_239.jpg, 56, 48 47 | sampling positions: 16, IMG_25.jpg, 176, 64 48 | sampling positions: 17, IMG_281.jpg, 104, 536 49 | sampling positions: 18, IMG_31.jpg, 280, 216 50 | sampling positions: 19, IMG_34.jpg, 264, 544 51 | sampling positions: 20, IMG_37.jpg, 368, 232 52 | sampling positions: 21, IMG_42.jpg, 288, 448 53 | sampling positions: 22, IMG_43.jpg, 240, 128 54 | sampling positions: 23, IMG_48.jpg, 184, 248 55 | sampling positions: 24, IMG_63.jpg, 512, 72 56 | sampling positions: 25, IMG_65.jpg, 392, 16 57 | sampling positions: 26, IMG_68.jpg, 240, 104 58 | sampling positions: 27, IMG_72.jpg, 88, 632 59 | sampling positions: 28, IMG_77.jpg, 440, 792 60 | sampling positions: 29, IMG_87.jpg, 40, 240 61 | sampling positions: 0, IMG_105.jpg, 192, 488 62 | sampling positions: 1, IMG_112.jpg, 64, 248 63 | sampling positions: 2, IMG_115.jpg, 288, 440 64 | sampling positions: 3, IMG_118.jpg, 200, 200 65 | sampling positions: 4, IMG_126.jpg, 72, 536 66 | sampling positions: 5, IMG_128.jpg, 152, 64 67 | sampling positions: 6, IMG_140.jpg, 440, 216 68 | sampling positions: 7, IMG_142.jpg, 184, 128 69 | sampling positions: 8, IMG_164.jpg, 208, 56 70 | sampling positions: 9, IMG_171.jpg, 0, 72 71 | 
sampling positions: 10, IMG_184.jpg, 8, 760 72 | sampling positions: 11, IMG_185.jpg, 560, 152 73 | sampling positions: 12, IMG_187.jpg, 160, 512 74 | sampling positions: 13, IMG_19.jpg, 120, 56 75 | sampling positions: 14, IMG_21.jpg, 120, 512 76 | sampling positions: 15, IMG_239.jpg, 72, 56 77 | sampling positions: 16, IMG_25.jpg, 8, 256 78 | sampling positions: 17, IMG_281.jpg, 56, 104 79 | sampling positions: 18, IMG_31.jpg, 80, 136 80 | sampling positions: 19, IMG_34.jpg, 256, 240 81 | sampling positions: 20, IMG_37.jpg, 184, 576 82 | sampling positions: 21, IMG_42.jpg, 176, 136 83 | sampling positions: 22, IMG_43.jpg, 176, 144 84 | sampling positions: 23, IMG_48.jpg, 88, 256 85 | sampling positions: 24, IMG_63.jpg, 456, 712 86 | sampling positions: 25, IMG_65.jpg, 248, 48 87 | sampling positions: 26, IMG_68.jpg, 152, 56 88 | sampling positions: 27, IMG_72.jpg, 112, 200 89 | sampling positions: 28, IMG_77.jpg, 376, 80 90 | sampling positions: 29, IMG_87.jpg, 184, 264 91 | sampling positions: 0, IMG_105.jpg, 312, 80 92 | sampling positions: 1, IMG_112.jpg, 72, 16 93 | sampling positions: 2, IMG_115.jpg, 96, 112 94 | sampling positions: 3, IMG_118.jpg, 384, 584 95 | sampling positions: 4, IMG_126.jpg, 328, 248 96 | sampling positions: 5, IMG_128.jpg, 104, 784 97 | sampling positions: 6, IMG_140.jpg, 424, 672 98 | sampling positions: 7, IMG_142.jpg, 96, 160 99 | sampling positions: 8, IMG_164.jpg, 0, 384 100 | sampling positions: 9, IMG_171.jpg, 32, 336 101 | sampling positions: 10, IMG_184.jpg, 40, 600 102 | sampling positions: 11, IMG_185.jpg, 560, 128 103 | sampling positions: 12, IMG_187.jpg, 104, 536 104 | sampling positions: 13, IMG_19.jpg, 368, 584 105 | sampling positions: 14, IMG_21.jpg, 232, 168 106 | sampling positions: 15, IMG_239.jpg, 8, 224 107 | sampling positions: 16, IMG_25.jpg, 88, 408 108 | sampling positions: 17, IMG_281.jpg, 416, 200 109 | sampling positions: 18, IMG_31.jpg, 400, 240 110 | sampling positions: 19, IMG_34.jpg, 208, 432 111 | sampling positions: 20, IMG_37.jpg, 344, 576 112 | sampling positions: 21, IMG_42.jpg, 216, 288 113 | sampling positions: 22, IMG_43.jpg, 256, 200 114 | sampling positions: 23, IMG_48.jpg, 8, 80 115 | sampling positions: 24, IMG_63.jpg, 312, 592 116 | sampling positions: 25, IMG_65.jpg, 32, 64 117 | sampling positions: 26, IMG_68.jpg, 264, 264 118 | sampling positions: 27, IMG_72.jpg, 304, 32 119 | sampling positions: 28, IMG_77.jpg, 424, 176 120 | sampling positions: 29, IMG_87.jpg, 32, 264 121 | sampling positions: 0, IMG_105.jpg, 152, 8 122 | sampling positions: 1, IMG_112.jpg, 96, 568 123 | sampling positions: 2, IMG_115.jpg, 128, 448 124 | sampling positions: 3, IMG_118.jpg, 232, 176 125 | sampling positions: 4, IMG_126.jpg, 48, 336 126 | sampling positions: 5, IMG_128.jpg, 224, 688 127 | sampling positions: 6, IMG_140.jpg, 288, 544 128 | sampling positions: 7, IMG_142.jpg, 56, 376 129 | sampling positions: 8, IMG_164.jpg, 0, 248 130 | sampling positions: 9, IMG_171.jpg, 32, 360 131 | sampling positions: 10, IMG_184.jpg, 304, 104 132 | sampling positions: 11, IMG_185.jpg, 568, 248 133 | sampling positions: 12, IMG_187.jpg, 48, 184 134 | sampling positions: 13, IMG_19.jpg, 64, 312 135 | sampling positions: 14, IMG_21.jpg, 256, 160 136 | sampling positions: 15, IMG_239.jpg, 0, 288 137 | sampling positions: 16, IMG_25.jpg, 376, 592 138 | sampling positions: 17, IMG_281.jpg, 192, 512 139 | sampling positions: 18, IMG_31.jpg, 448, 368 140 | sampling positions: 19, IMG_34.jpg, 344, 208 141 | sampling positions: 20, IMG_37.jpg, 16, 
432 142 | sampling positions: 21, IMG_42.jpg, 408, 360 143 | sampling positions: 22, IMG_43.jpg, 280, 464 144 | sampling positions: 23, IMG_48.jpg, 192, 328 145 | sampling positions: 24, IMG_63.jpg, 264, 208 146 | sampling positions: 25, IMG_65.jpg, 48, 632 147 | sampling positions: 26, IMG_68.jpg, 328, 488 148 | sampling positions: 27, IMG_72.jpg, 240, 200 149 | sampling positions: 28, IMG_77.jpg, 96, 736 150 | sampling positions: 29, IMG_87.jpg, 72, 16 151 | -------------------------------------------------------------------------------- /resources/ucfqnrf_validation_files.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/css-ccnn/0d5691b4d9839f92997469f7dbb0a475c00f4ca8/resources/ucfqnrf_validation_files.npy -------------------------------------------------------------------------------- /resources/ucfqnrf_xy_positions.log: -------------------------------------------------------------------------------- 1 | sampling positions: 0, img_0001.jpg, 408, 696 2 | sampling positions: 1, img_0002.jpg, 80, 88 3 | sampling positions: 2, img_0010.jpg, 336, 264 4 | sampling positions: 3, img_0020.jpg, 328, 672 5 | sampling positions: 4, img_0023.jpg, 64, 792 6 | sampling positions: 5, img_0024.jpg, 104, 600 7 | sampling positions: 6, img_0026.jpg, 416, 232 8 | sampling positions: 7, img_0030.jpg, 136, 32 9 | sampling positions: 8, img_0031.jpg, 224, 464 10 | sampling positions: 9, img_0032.jpg, 80, 88 11 | sampling positions: 10, img_0034.jpg, 408, 504 12 | sampling positions: 11, img_0035.jpg, 144, 576 13 | sampling positions: 12, img_0037.jpg, 320, 680 14 | sampling positions: 13, img_0041.jpg, 368, 240 15 | sampling positions: 14, img_0044.jpg, 192, 120 16 | sampling positions: 15, img_0046.jpg, 72, 608 17 | sampling positions: 16, img_0061.jpg, 16, 88 18 | sampling positions: 17, img_0062.jpg, 360, 136 19 | sampling positions: 18, img_0069.jpg, 440, 24 20 | sampling positions: 19, img_0072.jpg, 296, 240 21 | sampling positions: 20, img_0074.jpg, 136, 256 22 | sampling positions: 21, img_0080.jpg, 432, 328 23 | sampling positions: 22, img_0086.jpg, 496, 280 24 | sampling positions: 23, img_0087.jpg, 248, 216 25 | sampling positions: 24, img_0092.jpg, 128, 224 26 | sampling positions: 25, img_0096.jpg, 184, 0 27 | sampling positions: 26, img_0098.jpg, 128, 720 28 | sampling positions: 27, img_0108.jpg, 464, 576 29 | sampling positions: 28, img_0122.jpg, 328, 112 30 | sampling positions: 29, img_0123.jpg, 160, 272 31 | sampling positions: 30, img_0124.jpg, 88, 440 32 | sampling positions: 31, img_0130.jpg, 272, 40 33 | sampling positions: 0, img_0137.jpg, 512, 48 34 | sampling positions: 1, img_0138.jpg, 384, 608 35 | sampling positions: 2, img_0141.jpg, 448, 296 36 | sampling positions: 3, img_0142.jpg, 72, 720 37 | sampling positions: 4, img_0144.jpg, 376, 568 38 | sampling positions: 5, img_0148.jpg, 424, 496 39 | sampling positions: 6, img_0149.jpg, 272, 216 40 | sampling positions: 7, img_0150.jpg, 328, 8 41 | sampling positions: 8, img_0151.jpg, 144, 640 42 | sampling positions: 9, img_0152.jpg, 184, 440 43 | sampling positions: 10, img_0153.jpg, 88, 56 44 | sampling positions: 11, img_0154.jpg, 152, 424 45 | sampling positions: 12, img_0155.jpg, 120, 16 46 | sampling positions: 13, img_0160.jpg, 296, 664 47 | sampling positions: 14, img_0161.jpg, 16, 232 48 | sampling positions: 15, img_0165.jpg, 264, 112 49 | sampling positions: 16, img_0171.jpg, 240, 456 50 | sampling positions: 17, img_0172.jpg, 176, 80 51 | 
sampling positions: 18, img_0174.jpg, 384, 160 52 | sampling positions: 19, img_0175.jpg, 264, 560 53 | sampling positions: 20, img_0179.jpg, 216, 224 54 | sampling positions: 21, img_0180.jpg, 176, 720 55 | sampling positions: 22, img_0185.jpg, 144, 760 56 | sampling positions: 23, img_0186.jpg, 16, 480 57 | sampling positions: 24, img_0189.jpg, 264, 136 58 | sampling positions: 25, img_0193.jpg, 40, 472 59 | sampling positions: 26, img_0206.jpg, 168, 224 60 | sampling positions: 27, img_0207.jpg, 400, 320 61 | sampling positions: 28, img_0213.jpg, 448, 88 62 | sampling positions: 29, img_0221.jpg, 416, 464 63 | sampling positions: 30, img_0222.jpg, 64, 352 64 | sampling positions: 31, img_0224.jpg, 320, 72 65 | sampling positions: 0, img_0232.jpg, 8, 448 66 | sampling positions: 1, img_0237.jpg, 744, 240 67 | sampling positions: 2, img_0239.jpg, 312, 512 68 | sampling positions: 3, img_0242.jpg, 192, 488 69 | sampling positions: 4, img_0259.jpg, 184, 688 70 | sampling positions: 5, img_0261.jpg, 488, 336 71 | sampling positions: 6, img_0266.jpg, 352, 280 72 | sampling positions: 7, img_0269.jpg, 152, 400 73 | sampling positions: 8, img_0270.jpg, 432, 240 74 | sampling positions: 9, img_0276.jpg, 272, 272 75 | sampling positions: 10, img_0278.jpg, 296, 464 76 | sampling positions: 11, img_0280.jpg, 96, 48 77 | sampling positions: 12, img_0284.jpg, 0, 328 78 | sampling positions: 13, img_0288.jpg, 136, 368 79 | sampling positions: 14, img_0291.jpg, 384, 744 80 | sampling positions: 15, img_0294.jpg, 320, 448 81 | sampling positions: 16, img_0300.jpg, 128, 736 82 | sampling positions: 17, img_0316.jpg, 0, 8 83 | sampling positions: 18, img_0323.jpg, 128, 504 84 | sampling positions: 19, img_0325.jpg, 424, 688 85 | sampling positions: 20, img_0348.jpg, 0, 592 86 | sampling positions: 21, img_0360.jpg, 216, 392 87 | sampling positions: 22, img_0361.jpg, 8, 776 88 | sampling positions: 23, img_0364.jpg, 272, 136 89 | sampling positions: 24, img_0375.jpg, 152, 184 90 | sampling positions: 25, img_0379.jpg, 104, 760 91 | sampling positions: 26, img_0382.jpg, 544, 64 92 | sampling positions: 27, img_0384.jpg, 152, 584 93 | sampling positions: 28, img_0386.jpg, 48, 464 94 | sampling positions: 29, img_0389.jpg, 144, 80 95 | sampling positions: 30, img_0390.jpg, 192, 240 96 | sampling positions: 31, img_0391.jpg, 128, 88 97 | sampling positions: 0, img_0401.jpg, 296, 536 98 | sampling positions: 1, img_0403.jpg, 8, 688 99 | sampling positions: 2, img_0407.jpg, 488, 656 100 | sampling positions: 3, img_0415.jpg, 216, 440 101 | sampling positions: 4, img_0418.jpg, 192, 496 102 | sampling positions: 5, img_0428.jpg, 128, 72 103 | sampling positions: 6, img_0430.jpg, 232, 224 104 | sampling positions: 7, img_0432.jpg, 152, 192 105 | sampling positions: 8, img_0435.jpg, 328, 264 106 | sampling positions: 9, img_0436.jpg, 448, 88 107 | sampling positions: 10, img_0437.jpg, 440, 504 108 | sampling positions: 11, img_0438.jpg, 0, 760 109 | sampling positions: 12, img_0468.jpg, 200, 16 110 | sampling positions: 13, img_0471.jpg, 40, 424 111 | sampling positions: 14, img_0472.jpg, 64, 160 112 | sampling positions: 15, img_0479.jpg, 368, 792 113 | sampling positions: 16, img_0489.jpg, 208, 408 114 | sampling positions: 17, img_0495.jpg, 360, 0 115 | sampling positions: 18, img_0496.jpg, 32, 672 116 | sampling positions: 19, img_0498.jpg, 264, 336 117 | sampling positions: 20, img_0505.jpg, 392, 584 118 | sampling positions: 21, img_0511.jpg, 280, 512 119 | sampling positions: 22, img_0521.jpg, 344, 32 120 | 
sampling positions: 23, img_0523.jpg, 16, 600 121 | sampling positions: 24, img_0524.jpg, 0, 728 122 | sampling positions: 25, img_0527.jpg, 496, 368 123 | sampling positions: 26, img_0529.jpg, 248, 480 124 | sampling positions: 27, img_0552.jpg, 56, 352 125 | sampling positions: 28, img_0553.jpg, 72, 408 126 | sampling positions: 29, img_0570.jpg, 208, 688 127 | sampling positions: 30, img_0574.jpg, 248, 88 128 | sampling positions: 31, img_0578.jpg, 208, 192 129 | sampling positions: 0, img_0592.jpg, 16, 240 130 | sampling positions: 1, img_0596.jpg, 160, 336 131 | sampling positions: 2, img_0601.jpg, 336, 504 132 | sampling positions: 3, img_0603.jpg, 64, 512 133 | sampling positions: 4, img_0607.jpg, 56, 56 134 | sampling positions: 5, img_0617.jpg, 408, 168 135 | sampling positions: 6, img_0622.jpg, 632, 248 136 | sampling positions: 7, img_0623.jpg, 208, 616 137 | sampling positions: 8, img_0636.jpg, 280, 616 138 | sampling positions: 9, img_0637.jpg, 128, 368 139 | sampling positions: 10, img_0638.jpg, 312, 696 140 | sampling positions: 11, img_0639.jpg, 40, 208 141 | sampling positions: 12, img_0646.jpg, 472, 296 142 | sampling positions: 13, img_0654.jpg, 336, 576 143 | sampling positions: 14, img_0669.jpg, 0, 240 144 | sampling positions: 15, img_0670.jpg, 176, 688 145 | sampling positions: 16, img_0674.jpg, 40, 56 146 | sampling positions: 17, img_0677.jpg, 352, 16 147 | sampling positions: 18, img_0678.jpg, 416, 520 148 | sampling positions: 19, img_0682.jpg, 264, 760 149 | sampling positions: 20, img_0690.jpg, 416, 568 150 | sampling positions: 21, img_0700.jpg, 48, 216 151 | sampling positions: 22, img_0702.jpg, 184, 632 152 | sampling positions: 23, img_0721.jpg, 8, 584 153 | sampling positions: 24, img_0733.jpg, 440, 24 154 | sampling positions: 25, img_0742.jpg, 280, 432 155 | sampling positions: 26, img_0754.jpg, 312, 768 156 | sampling positions: 27, img_0756.jpg, 256, 416 157 | sampling positions: 28, img_0762.jpg, 288, 584 158 | sampling positions: 29, img_0769.jpg, 368, 56 159 | sampling positions: 30, img_0777.jpg, 128, 152 160 | sampling positions: 31, img_0784.jpg, 120, 624 161 | sampling positions: 0, img_0786.jpg, 40, 40 162 | sampling positions: 1, img_0791.jpg, 376, 208 163 | sampling positions: 2, img_0792.jpg, 136, 664 164 | sampling positions: 3, img_0796.jpg, 64, 776 165 | sampling positions: 4, img_0803.jpg, 216, 504 166 | sampling positions: 5, img_0808.jpg, 320, 736 167 | sampling positions: 6, img_0809.jpg, 224, 704 168 | sampling positions: 7, img_0812.jpg, 392, 64 169 | sampling positions: 8, img_0814.jpg, 208, 376 170 | sampling positions: 9, img_0821.jpg, 328, 312 171 | sampling positions: 10, img_0834.jpg, 280, 472 172 | sampling positions: 11, img_0840.jpg, 104, 608 173 | sampling positions: 12, img_0843.jpg, 160, 512 174 | sampling positions: 13, img_0853.jpg, 200, 176 175 | sampling positions: 14, img_0855.jpg, 344, 240 176 | sampling positions: 15, img_0859.jpg, 336, 0 177 | sampling positions: 16, img_0860.jpg, 176, 584 178 | sampling positions: 17, img_0862.jpg, 72, 120 179 | sampling positions: 18, img_0864.jpg, 160, 8 180 | sampling positions: 19, img_0903.jpg, 400, 0 181 | sampling positions: 20, img_0905.jpg, 312, 16 182 | sampling positions: 21, img_0906.jpg, 312, 152 183 | sampling positions: 22, img_0913.jpg, 80, 392 184 | sampling positions: 23, img_0915.jpg, 248, 712 185 | sampling positions: 24, img_0917.jpg, 344, 440 186 | sampling positions: 25, img_0923.jpg, 440, 248 187 | sampling positions: 26, img_0926.jpg, 344, 72 188 | 
sampling positions: 27, img_0927.jpg, 304, 184 189 | sampling positions: 28, img_0928.jpg, 144, 472 190 | sampling positions: 29, img_0929.jpg, 328, 216 191 | sampling positions: 30, img_0937.jpg, 16, 736 192 | sampling positions: 31, img_0938.jpg, 224, 512 193 | sampling positions: 0, img_0948.jpg, 376, 624 194 | sampling positions: 1, img_0951.jpg, 112, 160 195 | sampling positions: 2, img_0953.jpg, 136, 376 196 | sampling positions: 3, img_0954.jpg, 232, 728 197 | sampling positions: 4, img_0963.jpg, 280, 80 198 | sampling positions: 5, img_0970.jpg, 352, 200 199 | sampling positions: 6, img_0973.jpg, 136, 392 200 | sampling positions: 7, img_0975.jpg, 8, 0 201 | sampling positions: 8, img_0982.jpg, 104, 184 202 | sampling positions: 9, img_0985.jpg, 216, 336 203 | sampling positions: 10, img_0988.jpg, 264, 656 204 | sampling positions: 11, img_0989.jpg, 64, 720 205 | sampling positions: 12, img_0991.jpg, 160, 176 206 | sampling positions: 13, img_0996.jpg, 56, 168 207 | sampling positions: 14, img_0998.jpg, 288, 664 208 | sampling positions: 15, img_1006.jpg, 56, 48 209 | sampling positions: 16, img_1019.jpg, 56, 176 210 | sampling positions: 17, img_1021.jpg, 312, 48 211 | sampling positions: 18, img_1024.jpg, 80, 192 212 | sampling positions: 19, img_1041.jpg, 72, 712 213 | sampling positions: 20, img_1043.jpg, 40, 256 214 | sampling positions: 21, img_1049.jpg, 120, 640 215 | sampling positions: 22, img_1050.jpg, 32, 584 216 | sampling positions: 23, img_1051.jpg, 208, 416 217 | sampling positions: 24, img_1054.jpg, 288, 496 218 | sampling positions: 25, img_1062.jpg, 224, 520 219 | sampling positions: 26, img_1063.jpg, 328, 584 220 | sampling positions: 27, img_1064.jpg, 240, 352 221 | sampling positions: 28, img_1066.jpg, 104, 504 222 | sampling positions: 29, img_1069.jpg, 336, 632 223 | sampling positions: 30, img_1083.jpg, 128, 448 224 | sampling positions: 31, img_1088.jpg, 224, 408 225 | sampling positions: 0, img_0001.jpg, 456, 8 226 | sampling positions: 1, img_0002.jpg, 136, 296 227 | sampling positions: 2, img_0010.jpg, 312, 160 228 | sampling positions: 3, img_0020.jpg, 296, 408 229 | sampling positions: 4, img_0023.jpg, 64, 584 230 | sampling positions: 5, img_0024.jpg, 288, 528 231 | sampling positions: 6, img_0026.jpg, 120, 200 232 | sampling positions: 7, img_0030.jpg, 48, 224 233 | sampling positions: 8, img_0031.jpg, 160, 504 234 | sampling positions: 9, img_0032.jpg, 312, 400 235 | sampling positions: 10, img_0034.jpg, 16, 528 236 | sampling positions: 11, img_0035.jpg, 344, 504 237 | sampling positions: 12, img_0037.jpg, 192, 128 238 | sampling positions: 13, img_0041.jpg, 336, 648 239 | sampling positions: 14, img_0044.jpg, 200, 304 240 | sampling positions: 15, img_0046.jpg, 120, 584 241 | sampling positions: 16, img_0061.jpg, 368, 80 242 | sampling positions: 17, img_0062.jpg, 216, 152 243 | sampling positions: 18, img_0069.jpg, 240, 424 244 | sampling positions: 19, img_0072.jpg, 432, 40 245 | sampling positions: 20, img_0074.jpg, 296, 472 246 | sampling positions: 21, img_0080.jpg, 8, 608 247 | sampling positions: 22, img_0086.jpg, 192, 712 248 | sampling positions: 23, img_0087.jpg, 72, 136 249 | sampling positions: 24, img_0092.jpg, 320, 272 250 | sampling positions: 25, img_0096.jpg, 352, 728 251 | sampling positions: 26, img_0098.jpg, 440, 96 252 | sampling positions: 27, img_0108.jpg, 352, 0 253 | sampling positions: 28, img_0122.jpg, 232, 616 254 | sampling positions: 29, img_0123.jpg, 400, 456 255 | sampling positions: 30, img_0124.jpg, 8, 760 256 | 
sampling positions: 31, img_0130.jpg, 136, 392 257 | sampling positions: 0, img_0137.jpg, 352, 72 258 | sampling positions: 1, img_0138.jpg, 376, 352 259 | sampling positions: 2, img_0141.jpg, 304, 648 260 | sampling positions: 3, img_0142.jpg, 304, 64 261 | sampling positions: 4, img_0144.jpg, 176, 400 262 | sampling positions: 5, img_0148.jpg, 272, 632 263 | sampling positions: 6, img_0149.jpg, 88, 176 264 | sampling positions: 7, img_0150.jpg, 240, 80 265 | sampling positions: 8, img_0151.jpg, 232, 168 266 | sampling positions: 9, img_0152.jpg, 344, 152 267 | sampling positions: 10, img_0153.jpg, 440, 600 268 | sampling positions: 11, img_0154.jpg, 272, 128 269 | sampling positions: 12, img_0155.jpg, 424, 672 270 | sampling positions: 13, img_0160.jpg, 16, 648 271 | sampling positions: 14, img_0161.jpg, 400, 504 272 | sampling positions: 15, img_0165.jpg, 248, 328 273 | sampling positions: 16, img_0171.jpg, 304, 720 274 | sampling positions: 17, img_0172.jpg, 112, 304 275 | sampling positions: 18, img_0174.jpg, 368, 376 276 | sampling positions: 19, img_0175.jpg, 144, 184 277 | sampling positions: 20, img_0179.jpg, 40, 720 278 | sampling positions: 21, img_0180.jpg, 192, 728 279 | sampling positions: 22, img_0185.jpg, 272, 720 280 | sampling positions: 23, img_0186.jpg, 240, 360 281 | sampling positions: 24, img_0189.jpg, 112, 184 282 | sampling positions: 25, img_0193.jpg, 392, 760 283 | sampling positions: 26, img_0206.jpg, 304, 488 284 | sampling positions: 27, img_0207.jpg, 8, 728 285 | sampling positions: 28, img_0213.jpg, 240, 704 286 | sampling positions: 29, img_0221.jpg, 296, 200 287 | sampling positions: 30, img_0222.jpg, 80, 176 288 | sampling positions: 31, img_0224.jpg, 248, 728 289 | sampling positions: 0, img_0232.jpg, 200, 232 290 | sampling positions: 1, img_0237.jpg, 544, 736 291 | sampling positions: 2, img_0239.jpg, 40, 312 292 | sampling positions: 3, img_0242.jpg, 296, 640 293 | sampling positions: 4, img_0259.jpg, 232, 592 294 | sampling positions: 5, img_0261.jpg, 424, 80 295 | sampling positions: 6, img_0266.jpg, 72, 16 296 | sampling positions: 7, img_0269.jpg, 280, 384 297 | sampling positions: 8, img_0270.jpg, 240, 376 298 | sampling positions: 9, img_0276.jpg, 416, 456 299 | sampling positions: 10, img_0278.jpg, 88, 504 300 | sampling positions: 11, img_0280.jpg, 336, 200 301 | sampling positions: 12, img_0284.jpg, 48, 648 302 | sampling positions: 13, img_0288.jpg, 384, 544 303 | sampling positions: 14, img_0291.jpg, 40, 208 304 | sampling positions: 15, img_0294.jpg, 272, 584 305 | sampling positions: 16, img_0300.jpg, 176, 56 306 | sampling positions: 17, img_0316.jpg, 24, 616 307 | sampling positions: 18, img_0323.jpg, 288, 144 308 | sampling positions: 19, img_0325.jpg, 200, 672 309 | sampling positions: 20, img_0348.jpg, 440, 424 310 | sampling positions: 21, img_0360.jpg, 232, 784 311 | sampling positions: 22, img_0361.jpg, 152, 616 312 | sampling positions: 23, img_0364.jpg, 144, 272 313 | sampling positions: 24, img_0375.jpg, 352, 776 314 | sampling positions: 25, img_0379.jpg, 32, 784 315 | sampling positions: 26, img_0382.jpg, 152, 336 316 | sampling positions: 27, img_0384.jpg, 248, 64 317 | sampling positions: 28, img_0386.jpg, 96, 176 318 | sampling positions: 29, img_0389.jpg, 248, 240 319 | sampling positions: 30, img_0390.jpg, 312, 728 320 | sampling positions: 31, img_0391.jpg, 104, 320 321 | sampling positions: 0, img_0401.jpg, 440, 80 322 | sampling positions: 1, img_0403.jpg, 456, 80 323 | sampling positions: 2, img_0407.jpg, 528, 96 324 
| sampling positions: 3, img_0415.jpg, 136, 208 325 | sampling positions: 4, img_0418.jpg, 416, 584 326 | sampling positions: 5, img_0428.jpg, 56, 216 327 | sampling positions: 6, img_0430.jpg, 376, 280 328 | sampling positions: 7, img_0432.jpg, 184, 80 329 | sampling positions: 8, img_0435.jpg, 504, 520 330 | sampling positions: 9, img_0436.jpg, 120, 336 331 | sampling positions: 10, img_0437.jpg, 336, 320 332 | sampling positions: 11, img_0438.jpg, 296, 528 333 | sampling positions: 12, img_0468.jpg, 200, 536 334 | sampling positions: 13, img_0471.jpg, 296, 512 335 | sampling positions: 14, img_0472.jpg, 392, 152 336 | sampling positions: 15, img_0479.jpg, 264, 616 337 | sampling positions: 16, img_0489.jpg, 104, 432 338 | sampling positions: 17, img_0495.jpg, 56, 376 339 | sampling positions: 18, img_0496.jpg, 64, 536 340 | sampling positions: 19, img_0498.jpg, 272, 296 341 | sampling positions: 20, img_0505.jpg, 264, 80 342 | sampling positions: 21, img_0511.jpg, 264, 136 343 | sampling positions: 22, img_0521.jpg, 104, 512 344 | sampling positions: 23, img_0523.jpg, 8, 408 345 | sampling positions: 24, img_0524.jpg, 136, 552 346 | sampling positions: 25, img_0527.jpg, 88, 728 347 | sampling positions: 26, img_0529.jpg, 120, 560 348 | sampling positions: 27, img_0552.jpg, 64, 232 349 | sampling positions: 28, img_0553.jpg, 296, 312 350 | sampling positions: 29, img_0570.jpg, 312, 32 351 | sampling positions: 30, img_0574.jpg, 160, 104 352 | sampling positions: 31, img_0578.jpg, 352, 360 353 | sampling positions: 0, img_0592.jpg, 128, 328 354 | sampling positions: 1, img_0596.jpg, 440, 360 355 | sampling positions: 2, img_0601.jpg, 456, 568 356 | sampling positions: 3, img_0603.jpg, 616, 144 357 | sampling positions: 4, img_0607.jpg, 768, 432 358 | sampling positions: 5, img_0617.jpg, 408, 48 359 | sampling positions: 6, img_0622.jpg, 104, 48 360 | sampling positions: 7, img_0623.jpg, 48, 792 361 | sampling positions: 8, img_0636.jpg, 400, 592 362 | sampling positions: 9, img_0637.jpg, 232, 304 363 | sampling positions: 10, img_0638.jpg, 384, 320 364 | sampling positions: 11, img_0639.jpg, 184, 272 365 | sampling positions: 12, img_0646.jpg, 280, 504 366 | sampling positions: 13, img_0654.jpg, 120, 64 367 | sampling positions: 14, img_0669.jpg, 248, 184 368 | sampling positions: 15, img_0670.jpg, 328, 704 369 | sampling positions: 16, img_0674.jpg, 152, 328 370 | sampling positions: 17, img_0677.jpg, 104, 368 371 | sampling positions: 18, img_0678.jpg, 376, 744 372 | sampling positions: 19, img_0682.jpg, 128, 688 373 | sampling positions: 20, img_0690.jpg, 128, 384 374 | sampling positions: 21, img_0700.jpg, 392, 120 375 | sampling positions: 22, img_0702.jpg, 280, 144 376 | sampling positions: 23, img_0721.jpg, 288, 472 377 | sampling positions: 24, img_0733.jpg, 416, 208 378 | sampling positions: 25, img_0742.jpg, 304, 560 379 | sampling positions: 26, img_0754.jpg, 8, 632 380 | sampling positions: 27, img_0756.jpg, 144, 456 381 | sampling positions: 28, img_0762.jpg, 232, 480 382 | sampling positions: 29, img_0769.jpg, 208, 128 383 | sampling positions: 30, img_0777.jpg, 320, 240 384 | sampling positions: 31, img_0784.jpg, 376, 680 385 | sampling positions: 0, img_0786.jpg, 72, 496 386 | sampling positions: 1, img_0791.jpg, 416, 752 387 | sampling positions: 2, img_0792.jpg, 80, 288 388 | sampling positions: 3, img_0796.jpg, 64, 96 389 | sampling positions: 4, img_0803.jpg, 256, 144 390 | sampling positions: 5, img_0808.jpg, 88, 168 391 | sampling positions: 6, img_0809.jpg, 232, 704 
392 | sampling positions: 7, img_0812.jpg, 336, 144 393 | sampling positions: 8, img_0814.jpg, 32, 128 394 | sampling positions: 9, img_0821.jpg, 48, 592 395 | sampling positions: 10, img_0834.jpg, 168, 272 396 | sampling positions: 11, img_0840.jpg, 352, 32 397 | sampling positions: 12, img_0843.jpg, 104, 128 398 | sampling positions: 13, img_0853.jpg, 224, 48 399 | sampling positions: 14, img_0855.jpg, 248, 272 400 | sampling positions: 15, img_0859.jpg, 88, 536 401 | sampling positions: 16, img_0860.jpg, 128, 392 402 | sampling positions: 17, img_0862.jpg, 376, 520 403 | sampling positions: 18, img_0864.jpg, 352, 232 404 | sampling positions: 19, img_0903.jpg, 16, 128 405 | sampling positions: 20, img_0905.jpg, 232, 344 406 | sampling positions: 21, img_0906.jpg, 248, 112 407 | sampling positions: 22, img_0913.jpg, 112, 648 408 | sampling positions: 23, img_0915.jpg, 176, 344 409 | sampling positions: 24, img_0917.jpg, 264, 216 410 | sampling positions: 25, img_0923.jpg, 424, 664 411 | sampling positions: 26, img_0926.jpg, 448, 384 412 | sampling positions: 27, img_0927.jpg, 408, 392 413 | sampling positions: 28, img_0928.jpg, 144, 224 414 | sampling positions: 29, img_0929.jpg, 400, 752 415 | sampling positions: 30, img_0937.jpg, 184, 624 416 | sampling positions: 31, img_0938.jpg, 8, 424 417 | sampling positions: 0, img_0948.jpg, 432, 72 418 | sampling positions: 1, img_0951.jpg, 168, 464 419 | sampling positions: 2, img_0953.jpg, 392, 328 420 | sampling positions: 3, img_0954.jpg, 272, 16 421 | sampling positions: 4, img_0963.jpg, 176, 752 422 | sampling positions: 5, img_0970.jpg, 392, 72 423 | sampling positions: 6, img_0973.jpg, 280, 792 424 | sampling positions: 7, img_0975.jpg, 96, 488 425 | sampling positions: 8, img_0982.jpg, 344, 176 426 | sampling positions: 9, img_0985.jpg, 304, 688 427 | sampling positions: 10, img_0988.jpg, 88, 296 428 | sampling positions: 11, img_0989.jpg, 416, 120 429 | sampling positions: 12, img_0991.jpg, 8, 376 430 | sampling positions: 13, img_0996.jpg, 536, 600 431 | sampling positions: 14, img_0998.jpg, 16, 56 432 | sampling positions: 15, img_1006.jpg, 24, 368 433 | sampling positions: 16, img_1019.jpg, 248, 384 434 | sampling positions: 17, img_1021.jpg, 96, 560 435 | sampling positions: 18, img_1024.jpg, 288, 664 436 | sampling positions: 19, img_1041.jpg, 232, 336 437 | sampling positions: 20, img_1043.jpg, 296, 720 438 | sampling positions: 21, img_1049.jpg, 104, 80 439 | sampling positions: 22, img_1050.jpg, 296, 408 440 | sampling positions: 23, img_1051.jpg, 312, 344 441 | sampling positions: 24, img_1054.jpg, 80, 424 442 | sampling positions: 25, img_1062.jpg, 80, 104 443 | sampling positions: 26, img_1063.jpg, 208, 760 444 | sampling positions: 27, img_1064.jpg, 320, 320 445 | sampling positions: 28, img_1066.jpg, 208, 456 446 | sampling positions: 29, img_1069.jpg, 48, 688 447 | sampling positions: 30, img_1083.jpg, 208, 72 448 | sampling positions: 31, img_1088.jpg, 64, 280 449 | sampling positions: 0, img_0001.jpg, 136, 264 450 | sampling positions: 1, img_0002.jpg, 200, 424 451 | sampling positions: 2, img_0010.jpg, 336, 528 452 | sampling positions: 3, img_0020.jpg, 88, 512 453 | sampling positions: 4, img_0023.jpg, 144, 688 454 | sampling positions: 5, img_0024.jpg, 40, 416 455 | sampling positions: 6, img_0026.jpg, 328, 64 456 | sampling positions: 7, img_0030.jpg, 240, 112 457 | sampling positions: 8, img_0031.jpg, 24, 312 458 | sampling positions: 9, img_0032.jpg, 272, 344 459 | sampling positions: 10, img_0034.jpg, 312, 216 
460 | sampling positions: 11, img_0035.jpg, 224, 272 461 | sampling positions: 12, img_0037.jpg, 488, 104 462 | sampling positions: 13, img_0041.jpg, 408, 280 463 | sampling positions: 14, img_0044.jpg, 16, 504 464 | sampling positions: 15, img_0046.jpg, 104, 368 465 | sampling positions: 16, img_0061.jpg, 376, 144 466 | sampling positions: 17, img_0062.jpg, 24, 328 467 | sampling positions: 18, img_0069.jpg, 440, 696 468 | sampling positions: 19, img_0072.jpg, 40, 512 469 | sampling positions: 20, img_0074.jpg, 128, 232 470 | sampling positions: 21, img_0080.jpg, 408, 752 471 | sampling positions: 22, img_0086.jpg, 456, 576 472 | sampling positions: 23, img_0087.jpg, 64, 408 473 | sampling positions: 24, img_0092.jpg, 32, 368 474 | sampling positions: 25, img_0096.jpg, 32, 472 475 | sampling positions: 26, img_0098.jpg, 256, 24 476 | sampling positions: 27, img_0108.jpg, 472, 384 477 | sampling positions: 28, img_0122.jpg, 104, 56 478 | sampling positions: 29, img_0123.jpg, 432, 672 479 | sampling positions: 30, img_0124.jpg, 56, 152 480 | sampling positions: 31, img_0130.jpg, 536, 672 481 | sampling positions: 0, img_0137.jpg, 552, 40 482 | sampling positions: 1, img_0138.jpg, 64, 488 483 | sampling positions: 2, img_0141.jpg, 24, 568 484 | sampling positions: 3, img_0142.jpg, 48, 600 485 | sampling positions: 4, img_0144.jpg, 248, 368 486 | sampling positions: 5, img_0148.jpg, 136, 448 487 | sampling positions: 6, img_0149.jpg, 272, 488 488 | sampling positions: 7, img_0150.jpg, 216, 488 489 | sampling positions: 8, img_0151.jpg, 440, 296 490 | sampling positions: 9, img_0152.jpg, 312, 528 491 | sampling positions: 10, img_0153.jpg, 240, 576 492 | sampling positions: 11, img_0154.jpg, 264, 752 493 | sampling positions: 12, img_0155.jpg, 0, 768 494 | sampling positions: 13, img_0160.jpg, 144, 616 495 | sampling positions: 14, img_0161.jpg, 424, 528 496 | sampling positions: 15, img_0165.jpg, 424, 104 497 | sampling positions: 16, img_0171.jpg, 320, 328 498 | sampling positions: 17, img_0172.jpg, 408, 448 499 | sampling positions: 18, img_0174.jpg, 240, 200 500 | sampling positions: 19, img_0175.jpg, 192, 224 501 | sampling positions: 20, img_0179.jpg, 144, 704 502 | sampling positions: 21, img_0180.jpg, 168, 224 503 | sampling positions: 22, img_0185.jpg, 120, 776 504 | sampling positions: 23, img_0186.jpg, 80, 456 505 | sampling positions: 24, img_0189.jpg, 448, 592 506 | sampling positions: 25, img_0193.jpg, 80, 176 507 | sampling positions: 26, img_0206.jpg, 240, 520 508 | sampling positions: 27, img_0207.jpg, 112, 432 509 | sampling positions: 28, img_0213.jpg, 248, 232 510 | sampling positions: 29, img_0221.jpg, 288, 752 511 | sampling positions: 30, img_0222.jpg, 80, 144 512 | sampling positions: 31, img_0224.jpg, 256, 176 513 | sampling positions: 0, img_0232.jpg, 72, 24 514 | sampling positions: 1, img_0237.jpg, 104, 416 515 | sampling positions: 2, img_0239.jpg, 328, 208 516 | sampling positions: 3, img_0242.jpg, 120, 400 517 | sampling positions: 4, img_0259.jpg, 288, 704 518 | sampling positions: 5, img_0261.jpg, 304, 168 519 | sampling positions: 6, img_0266.jpg, 16, 360 520 | sampling positions: 7, img_0269.jpg, 152, 360 521 | sampling positions: 8, img_0270.jpg, 24, 192 522 | sampling positions: 9, img_0276.jpg, 112, 520 523 | sampling positions: 10, img_0278.jpg, 336, 456 524 | sampling positions: 11, img_0280.jpg, 304, 208 525 | sampling positions: 12, img_0284.jpg, 520, 504 526 | sampling positions: 13, img_0288.jpg, 136, 424 527 | sampling positions: 14, img_0291.jpg, 
40, 600 528 | sampling positions: 15, img_0294.jpg, 224, 368 529 | sampling positions: 16, img_0300.jpg, 344, 184 530 | sampling positions: 17, img_0316.jpg, 80, 280 531 | sampling positions: 18, img_0323.jpg, 200, 56 532 | sampling positions: 19, img_0325.jpg, 112, 120 533 | sampling positions: 20, img_0348.jpg, 368, 352 534 | sampling positions: 21, img_0360.jpg, 176, 656 535 | sampling positions: 22, img_0361.jpg, 312, 88 536 | sampling positions: 23, img_0364.jpg, 24, 504 537 | sampling positions: 24, img_0375.jpg, 104, 224 538 | sampling positions: 25, img_0379.jpg, 168, 560 539 | sampling positions: 26, img_0382.jpg, 224, 768 540 | sampling positions: 27, img_0384.jpg, 160, 280 541 | sampling positions: 28, img_0386.jpg, 160, 464 542 | sampling positions: 29, img_0389.jpg, 312, 344 543 | sampling positions: 30, img_0390.jpg, 304, 704 544 | sampling positions: 31, img_0391.jpg, 72, 320 545 | sampling positions: 0, img_0401.jpg, 232, 768 546 | sampling positions: 1, img_0403.jpg, 96, 440 547 | sampling positions: 2, img_0407.jpg, 224, 768 548 | sampling positions: 3, img_0415.jpg, 312, 552 549 | sampling positions: 4, img_0418.jpg, 496, 200 550 | sampling positions: 5, img_0428.jpg, 96, 104 551 | sampling positions: 6, img_0430.jpg, 328, 232 552 | sampling positions: 7, img_0432.jpg, 48, 360 553 | sampling positions: 8, img_0435.jpg, 256, 528 554 | sampling positions: 9, img_0436.jpg, 160, 504 555 | sampling positions: 10, img_0437.jpg, 440, 520 556 | sampling positions: 11, img_0438.jpg, 440, 144 557 | sampling positions: 12, img_0468.jpg, 336, 504 558 | sampling positions: 13, img_0471.jpg, 376, 304 559 | sampling positions: 14, img_0472.jpg, 232, 504 560 | sampling positions: 15, img_0479.jpg, 384, 512 561 | sampling positions: 16, img_0489.jpg, 176, 16 562 | sampling positions: 17, img_0495.jpg, 8, 184 563 | sampling positions: 18, img_0496.jpg, 352, 600 564 | sampling positions: 19, img_0498.jpg, 160, 288 565 | sampling positions: 20, img_0505.jpg, 216, 272 566 | sampling positions: 21, img_0511.jpg, 256, 328 567 | sampling positions: 22, img_0521.jpg, 16, 32 568 | sampling positions: 23, img_0523.jpg, 16, 72 569 | sampling positions: 24, img_0524.jpg, 88, 368 570 | sampling positions: 25, img_0527.jpg, 48, 0 571 | sampling positions: 26, img_0529.jpg, 152, 248 572 | sampling positions: 27, img_0552.jpg, 240, 256 573 | sampling positions: 28, img_0553.jpg, 328, 432 574 | sampling positions: 29, img_0570.jpg, 216, 568 575 | sampling positions: 30, img_0574.jpg, 32, 496 576 | sampling positions: 31, img_0578.jpg, 256, 168 577 | sampling positions: 0, img_0592.jpg, 40, 376 578 | sampling positions: 1, img_0596.jpg, 440, 344 579 | sampling positions: 2, img_0601.jpg, 128, 120 580 | sampling positions: 3, img_0603.jpg, 792, 24 581 | sampling positions: 4, img_0607.jpg, 720, 56 582 | sampling positions: 5, img_0617.jpg, 120, 40 583 | sampling positions: 6, img_0622.jpg, 152, 384 584 | sampling positions: 7, img_0623.jpg, 328, 520 585 | sampling positions: 8, img_0636.jpg, 560, 584 586 | sampling positions: 9, img_0637.jpg, 96, 56 587 | sampling positions: 10, img_0638.jpg, 32, 280 588 | sampling positions: 11, img_0639.jpg, 256, 120 589 | sampling positions: 12, img_0646.jpg, 104, 312 590 | sampling positions: 13, img_0654.jpg, 192, 448 591 | sampling positions: 14, img_0669.jpg, 336, 128 592 | sampling positions: 15, img_0670.jpg, 24, 160 593 | sampling positions: 16, img_0674.jpg, 232, 600 594 | sampling positions: 17, img_0677.jpg, 352, 72 595 | sampling positions: 18, img_0678.jpg, 
272, 720 596 | sampling positions: 19, img_0682.jpg, 376, 480 597 | sampling positions: 20, img_0690.jpg, 96, 240 598 | sampling positions: 21, img_0700.jpg, 296, 680 599 | sampling positions: 22, img_0702.jpg, 200, 728 600 | sampling positions: 23, img_0721.jpg, 80, 240 601 | sampling positions: 24, img_0733.jpg, 440, 568 602 | sampling positions: 25, img_0742.jpg, 464, 616 603 | sampling positions: 26, img_0754.jpg, 104, 776 604 | sampling positions: 27, img_0756.jpg, 328, 544 605 | sampling positions: 28, img_0762.jpg, 448, 168 606 | sampling positions: 29, img_0769.jpg, 0, 136 607 | sampling positions: 30, img_0777.jpg, 408, 664 608 | sampling positions: 31, img_0784.jpg, 128, 152 609 | sampling positions: 0, img_0786.jpg, 112, 80 610 | sampling positions: 1, img_0791.jpg, 16, 312 611 | sampling positions: 2, img_0792.jpg, 32, 256 612 | sampling positions: 3, img_0796.jpg, 136, 480 613 | sampling positions: 4, img_0803.jpg, 200, 40 614 | sampling positions: 5, img_0808.jpg, 112, 312 615 | sampling positions: 6, img_0809.jpg, 64, 792 616 | sampling positions: 7, img_0812.jpg, 104, 96 617 | sampling positions: 8, img_0814.jpg, 32, 280 618 | sampling positions: 9, img_0821.jpg, 8, 592 619 | sampling positions: 10, img_0834.jpg, 192, 392 620 | sampling positions: 11, img_0840.jpg, 432, 200 621 | sampling positions: 12, img_0843.jpg, 184, 616 622 | sampling positions: 13, img_0853.jpg, 88, 752 623 | sampling positions: 14, img_0855.jpg, 40, 144 624 | sampling positions: 15, img_0859.jpg, 8, 120 625 | sampling positions: 16, img_0860.jpg, 408, 24 626 | sampling positions: 17, img_0862.jpg, 160, 168 627 | sampling positions: 18, img_0864.jpg, 152, 552 628 | sampling positions: 19, img_0903.jpg, 448, 72 629 | sampling positions: 20, img_0905.jpg, 8, 144 630 | sampling positions: 21, img_0906.jpg, 264, 296 631 | sampling positions: 22, img_0913.jpg, 520, 368 632 | sampling positions: 23, img_0915.jpg, 264, 360 633 | sampling positions: 24, img_0917.jpg, 296, 416 634 | sampling positions: 25, img_0923.jpg, 288, 184 635 | sampling positions: 26, img_0926.jpg, 24, 344 636 | sampling positions: 27, img_0927.jpg, 256, 504 637 | sampling positions: 28, img_0928.jpg, 344, 488 638 | sampling positions: 29, img_0929.jpg, 456, 688 639 | sampling positions: 30, img_0937.jpg, 216, 48 640 | sampling positions: 31, img_0938.jpg, 168, 328 641 | sampling positions: 0, img_0948.jpg, 224, 632 642 | sampling positions: 1, img_0951.jpg, 424, 8 643 | sampling positions: 2, img_0953.jpg, 256, 136 644 | sampling positions: 3, img_0954.jpg, 160, 496 645 | sampling positions: 4, img_0963.jpg, 368, 328 646 | sampling positions: 5, img_0970.jpg, 8, 752 647 | sampling positions: 6, img_0973.jpg, 80, 192 648 | sampling positions: 7, img_0975.jpg, 152, 592 649 | sampling positions: 8, img_0982.jpg, 408, 24 650 | sampling positions: 9, img_0985.jpg, 168, 32 651 | sampling positions: 10, img_0988.jpg, 208, 80 652 | sampling positions: 11, img_0989.jpg, 368, 496 653 | sampling positions: 12, img_0991.jpg, 168, 224 654 | sampling positions: 13, img_0996.jpg, 96, 464 655 | sampling positions: 14, img_0998.jpg, 304, 192 656 | sampling positions: 15, img_1006.jpg, 88, 328 657 | sampling positions: 16, img_1019.jpg, 72, 328 658 | sampling positions: 17, img_1021.jpg, 152, 192 659 | sampling positions: 18, img_1024.jpg, 264, 608 660 | sampling positions: 19, img_1041.jpg, 24, 584 661 | sampling positions: 20, img_1043.jpg, 56, 144 662 | sampling positions: 21, img_1049.jpg, 216, 400 663 | sampling positions: 22, img_1050.jpg, 216, 
576 664 | sampling positions: 23, img_1051.jpg, 336, 296 665 | sampling positions: 24, img_1054.jpg, 120, 592 666 | sampling positions: 25, img_1062.jpg, 232, 176 667 | sampling positions: 26, img_1063.jpg, 40, 232 668 | sampling positions: 27, img_1064.jpg, 200, 456 669 | sampling positions: 28, img_1066.jpg, 48, 192 670 | sampling positions: 29, img_1069.jpg, 240, 384 671 | sampling positions: 30, img_1083.jpg, 72, 784 672 | sampling positions: 31, img_1088.jpg, 56, 568 673 | sampling positions: 0, img_0001.jpg, 440, 696 674 | sampling positions: 1, img_0002.jpg, 288, 656 675 | sampling positions: 2, img_0010.jpg, 400, 288 676 | sampling positions: 3, img_0020.jpg, 208, 320 677 | sampling positions: 4, img_0023.jpg, 112, 368 678 | sampling positions: 5, img_0024.jpg, 120, 560 679 | sampling positions: 6, img_0026.jpg, 248, 240 680 | sampling positions: 7, img_0030.jpg, 72, 792 681 | sampling positions: 8, img_0031.jpg, 80, 376 682 | sampling positions: 9, img_0032.jpg, 72, 576 683 | sampling positions: 10, img_0034.jpg, 64, 72 684 | sampling positions: 11, img_0035.jpg, 360, 256 685 | sampling positions: 12, img_0037.jpg, 200, 48 686 | sampling positions: 13, img_0041.jpg, 304, 0 687 | sampling positions: 14, img_0044.jpg, 48, 232 688 | sampling positions: 15, img_0046.jpg, 528, 344 689 | sampling positions: 16, img_0061.jpg, 440, 264 690 | sampling positions: 17, img_0062.jpg, 280, 16 691 | sampling positions: 18, img_0069.jpg, 144, 336 692 | sampling positions: 19, img_0072.jpg, 360, 360 693 | sampling positions: 20, img_0074.jpg, 72, 504 694 | sampling positions: 21, img_0080.jpg, 152, 112 695 | sampling positions: 22, img_0086.jpg, 400, 520 696 | sampling positions: 23, img_0087.jpg, 640, 64 697 | sampling positions: 24, img_0092.jpg, 320, 256 698 | sampling positions: 25, img_0096.jpg, 72, 120 699 | sampling positions: 26, img_0098.jpg, 336, 680 700 | sampling positions: 27, img_0108.jpg, 216, 344 701 | sampling positions: 28, img_0122.jpg, 304, 696 702 | sampling positions: 29, img_0123.jpg, 312, 584 703 | sampling positions: 30, img_0124.jpg, 192, 64 704 | sampling positions: 31, img_0130.jpg, 376, 104 705 | sampling positions: 0, img_0137.jpg, 360, 48 706 | sampling positions: 1, img_0138.jpg, 72, 104 707 | sampling positions: 2, img_0141.jpg, 232, 576 708 | sampling positions: 3, img_0142.jpg, 328, 720 709 | sampling positions: 4, img_0144.jpg, 160, 320 710 | sampling positions: 5, img_0148.jpg, 312, 88 711 | sampling positions: 6, img_0149.jpg, 344, 552 712 | sampling positions: 7, img_0150.jpg, 0, 160 713 | sampling positions: 8, img_0151.jpg, 48, 344 714 | sampling positions: 9, img_0152.jpg, 264, 8 715 | sampling positions: 10, img_0153.jpg, 112, 312 716 | sampling positions: 11, img_0154.jpg, 128, 280 717 | sampling positions: 12, img_0155.jpg, 8, 112 718 | sampling positions: 13, img_0160.jpg, 176, 584 719 | sampling positions: 14, img_0161.jpg, 176, 448 720 | sampling positions: 15, img_0165.jpg, 432, 40 721 | sampling positions: 16, img_0171.jpg, 280, 528 722 | sampling positions: 17, img_0172.jpg, 24, 512 723 | sampling positions: 18, img_0174.jpg, 200, 304 724 | sampling positions: 19, img_0175.jpg, 32, 568 725 | sampling positions: 20, img_0179.jpg, 448, 32 726 | sampling positions: 21, img_0180.jpg, 344, 136 727 | sampling positions: 22, img_0185.jpg, 272, 560 728 | sampling positions: 23, img_0186.jpg, 72, 176 729 | sampling positions: 24, img_0189.jpg, 176, 424 730 | sampling positions: 25, img_0193.jpg, 208, 144 731 | sampling positions: 26, img_0206.jpg, 96, 744 
732 | sampling positions: 27, img_0207.jpg, 176, 736 733 | sampling positions: 28, img_0213.jpg, 152, 744 734 | sampling positions: 29, img_0221.jpg, 64, 8 735 | sampling positions: 30, img_0222.jpg, 56, 160 736 | sampling positions: 31, img_0224.jpg, 80, 200 737 | sampling positions: 0, img_0232.jpg, 24, 312 738 | sampling positions: 1, img_0237.jpg, 280, 200 739 | sampling positions: 2, img_0239.jpg, 440, 344 740 | sampling positions: 3, img_0242.jpg, 24, 424 741 | sampling positions: 4, img_0259.jpg, 280, 80 742 | sampling positions: 5, img_0261.jpg, 96, 192 743 | sampling positions: 6, img_0266.jpg, 88, 552 744 | sampling positions: 7, img_0269.jpg, 384, 328 745 | sampling positions: 8, img_0270.jpg, 480, 104 746 | sampling positions: 9, img_0276.jpg, 80, 144 747 | sampling positions: 10, img_0278.jpg, 328, 584 748 | sampling positions: 11, img_0280.jpg, 32, 768 749 | sampling positions: 12, img_0284.jpg, 400, 768 750 | sampling positions: 13, img_0288.jpg, 24, 400 751 | sampling positions: 14, img_0291.jpg, 128, 504 752 | sampling positions: 15, img_0294.jpg, 440, 72 753 | sampling positions: 16, img_0300.jpg, 176, 184 754 | sampling positions: 17, img_0316.jpg, 8, 392 755 | sampling positions: 18, img_0323.jpg, 152, 192 756 | sampling positions: 19, img_0325.jpg, 0, 320 757 | sampling positions: 20, img_0348.jpg, 400, 664 758 | sampling positions: 21, img_0360.jpg, 24, 336 759 | sampling positions: 22, img_0361.jpg, 424, 280 760 | sampling positions: 23, img_0364.jpg, 368, 48 761 | sampling positions: 24, img_0375.jpg, 96, 440 762 | sampling positions: 25, img_0379.jpg, 80, 608 763 | sampling positions: 26, img_0382.jpg, 344, 56 764 | sampling positions: 27, img_0384.jpg, 120, 488 765 | sampling positions: 28, img_0386.jpg, 88, 512 766 | sampling positions: 29, img_0389.jpg, 328, 656 767 | sampling positions: 30, img_0390.jpg, 96, 152 768 | sampling positions: 31, img_0391.jpg, 168, 488 769 | sampling positions: 0, img_0401.jpg, 96, 120 770 | sampling positions: 1, img_0403.jpg, 480, 248 771 | sampling positions: 2, img_0407.jpg, 464, 656 772 | sampling positions: 3, img_0415.jpg, 184, 672 773 | sampling positions: 4, img_0418.jpg, 456, 600 774 | sampling positions: 5, img_0428.jpg, 64, 160 775 | sampling positions: 6, img_0430.jpg, 288, 432 776 | sampling positions: 7, img_0432.jpg, 16, 80 777 | sampling positions: 8, img_0435.jpg, 248, 440 778 | sampling positions: 9, img_0436.jpg, 184, 72 779 | sampling positions: 10, img_0437.jpg, 104, 48 780 | sampling positions: 11, img_0438.jpg, 56, 0 781 | sampling positions: 12, img_0468.jpg, 696, 528 782 | sampling positions: 13, img_0471.jpg, 400, 720 783 | sampling positions: 14, img_0472.jpg, 224, 648 784 | sampling positions: 15, img_0479.jpg, 96, 728 785 | sampling positions: 16, img_0489.jpg, 392, 192 786 | sampling positions: 17, img_0495.jpg, 224, 264 787 | sampling positions: 18, img_0496.jpg, 112, 240 788 | sampling positions: 19, img_0498.jpg, 464, 152 789 | sampling positions: 20, img_0505.jpg, 80, 0 790 | sampling positions: 21, img_0511.jpg, 216, 736 791 | sampling positions: 22, img_0521.jpg, 432, 392 792 | sampling positions: 23, img_0523.jpg, 0, 552 793 | sampling positions: 24, img_0524.jpg, 144, 272 794 | sampling positions: 25, img_0527.jpg, 88, 120 795 | sampling positions: 26, img_0529.jpg, 144, 520 796 | sampling positions: 27, img_0552.jpg, 64, 184 797 | sampling positions: 28, img_0553.jpg, 320, 464 798 | sampling positions: 29, img_0570.jpg, 344, 592 799 | sampling positions: 30, img_0574.jpg, 176, 216 800 | 
sampling positions: 31, img_0578.jpg, 416, 616 801 | sampling positions: 0, img_0592.jpg, 48, 224 802 | sampling positions: 1, img_0596.jpg, 168, 712 803 | sampling positions: 2, img_0601.jpg, 136, 296 804 | sampling positions: 3, img_0603.jpg, 176, 176 805 | sampling positions: 4, img_0607.jpg, 216, 368 806 | sampling positions: 5, img_0617.jpg, 240, 64 807 | sampling positions: 6, img_0622.jpg, 208, 304 808 | sampling positions: 7, img_0623.jpg, 320, 648 809 | sampling positions: 8, img_0636.jpg, 128, 664 810 | sampling positions: 9, img_0637.jpg, 80, 392 811 | sampling positions: 10, img_0638.jpg, 464, 272 812 | sampling positions: 11, img_0639.jpg, 120, 264 813 | sampling positions: 12, img_0646.jpg, 184, 360 814 | sampling positions: 13, img_0654.jpg, 256, 456 815 | sampling positions: 14, img_0669.jpg, 304, 136 816 | sampling positions: 15, img_0670.jpg, 208, 176 817 | sampling positions: 16, img_0674.jpg, 432, 448 818 | sampling positions: 17, img_0677.jpg, 272, 760 819 | sampling positions: 18, img_0678.jpg, 264, 400 820 | sampling positions: 19, img_0682.jpg, 16, 288 821 | sampling positions: 20, img_0690.jpg, 328, 136 822 | sampling positions: 21, img_0700.jpg, 8, 568 823 | sampling positions: 22, img_0702.jpg, 24, 760 824 | sampling positions: 23, img_0721.jpg, 104, 600 825 | sampling positions: 24, img_0733.jpg, 168, 344 826 | sampling positions: 25, img_0742.jpg, 536, 664 827 | sampling positions: 26, img_0754.jpg, 120, 8 828 | sampling positions: 27, img_0756.jpg, 448, 56 829 | sampling positions: 28, img_0762.jpg, 304, 792 830 | sampling positions: 29, img_0769.jpg, 344, 696 831 | sampling positions: 30, img_0777.jpg, 400, 368 832 | sampling positions: 31, img_0784.jpg, 384, 248 833 | sampling positions: 0, img_0786.jpg, 408, 248 834 | sampling positions: 1, img_0791.jpg, 184, 16 835 | sampling positions: 2, img_0792.jpg, 16, 280 836 | sampling positions: 3, img_0796.jpg, 384, 672 837 | sampling positions: 4, img_0803.jpg, 264, 16 838 | sampling positions: 5, img_0808.jpg, 376, 672 839 | sampling positions: 6, img_0809.jpg, 352, 536 840 | sampling positions: 7, img_0812.jpg, 192, 432 841 | sampling positions: 8, img_0814.jpg, 408, 504 842 | sampling positions: 9, img_0821.jpg, 200, 472 843 | sampling positions: 10, img_0834.jpg, 408, 304 844 | sampling positions: 11, img_0840.jpg, 184, 176 845 | sampling positions: 12, img_0843.jpg, 80, 544 846 | sampling positions: 13, img_0853.jpg, 168, 664 847 | sampling positions: 14, img_0855.jpg, 32, 728 848 | sampling positions: 15, img_0859.jpg, 64, 704 849 | sampling positions: 16, img_0860.jpg, 328, 680 850 | sampling positions: 17, img_0862.jpg, 432, 768 851 | sampling positions: 18, img_0864.jpg, 112, 528 852 | sampling positions: 19, img_0903.jpg, 128, 328 853 | sampling positions: 20, img_0905.jpg, 32, 112 854 | sampling positions: 21, img_0906.jpg, 120, 80 855 | sampling positions: 22, img_0913.jpg, 264, 56 856 | sampling positions: 23, img_0915.jpg, 352, 528 857 | sampling positions: 24, img_0917.jpg, 312, 376 858 | sampling positions: 25, img_0923.jpg, 64, 392 859 | sampling positions: 26, img_0926.jpg, 128, 216 860 | sampling positions: 27, img_0927.jpg, 456, 144 861 | sampling positions: 28, img_0928.jpg, 264, 552 862 | sampling positions: 29, img_0929.jpg, 176, 280 863 | sampling positions: 30, img_0937.jpg, 256, 192 864 | sampling positions: 31, img_0938.jpg, 352, 520 865 | sampling positions: 0, img_0948.jpg, 56, 720 866 | sampling positions: 1, img_0951.jpg, 448, 408 867 | sampling positions: 2, img_0953.jpg, 176, 0 
868 | sampling positions: 3, img_0954.jpg, 128, 336 869 | sampling positions: 4, img_0963.jpg, 272, 472 870 | sampling positions: 5, img_0970.jpg, 216, 248 871 | sampling positions: 6, img_0973.jpg, 192, 664 872 | sampling positions: 7, img_0975.jpg, 264, 312 873 | sampling positions: 8, img_0982.jpg, 224, 24 874 | sampling positions: 9, img_0985.jpg, 432, 432 875 | sampling positions: 10, img_0988.jpg, 72, 776 876 | sampling positions: 11, img_0989.jpg, 40, 416 877 | sampling positions: 12, img_0991.jpg, 8, 368 878 | sampling positions: 13, img_0996.jpg, 136, 632 879 | sampling positions: 14, img_0998.jpg, 320, 712 880 | sampling positions: 15, img_1006.jpg, 72, 456 881 | sampling positions: 16, img_1019.jpg, 456, 400 882 | sampling positions: 17, img_1021.jpg, 152, 480 883 | sampling positions: 18, img_1024.jpg, 224, 56 884 | sampling positions: 19, img_1041.jpg, 72, 696 885 | sampling positions: 20, img_1043.jpg, 16, 648 886 | sampling positions: 21, img_1049.jpg, 432, 648 887 | sampling positions: 22, img_1050.jpg, 80, 472 888 | sampling positions: 23, img_1051.jpg, 152, 40 889 | sampling positions: 24, img_1054.jpg, 192, 296 890 | sampling positions: 25, img_1062.jpg, 192, 376 891 | sampling positions: 26, img_1063.jpg, 24, 704 892 | sampling positions: 27, img_1064.jpg, 128, 176 893 | sampling positions: 28, img_1066.jpg, 128, 232 894 | sampling positions: 29, img_1069.jpg, 96, 672 895 | sampling positions: 30, img_1083.jpg, 40, 184 896 | sampling positions: 31, img_1088.jpg, 296, 552 897 | sampling positions: 0, img_0001.jpg, 64, 608 898 | sampling positions: 1, img_0002.jpg, 88, 8 899 | sampling positions: 2, img_0010.jpg, 448, 648 900 | sampling positions: 3, img_0020.jpg, 144, 712 901 | sampling positions: 4, img_0023.jpg, 136, 72 902 | sampling positions: 5, img_0024.jpg, 320, 424 903 | sampling positions: 6, img_0026.jpg, 136, 32 904 | sampling positions: 7, img_0030.jpg, 208, 288 905 | sampling positions: 8, img_0031.jpg, 328, 168 906 | sampling positions: 9, img_0032.jpg, 320, 216 907 | sampling positions: 10, img_0034.jpg, 224, 544 908 | sampling positions: 11, img_0035.jpg, 320, 256 909 | sampling positions: 12, img_0037.jpg, 448, 176 910 | sampling positions: 13, img_0041.jpg, 440, 496 911 | sampling positions: 14, img_0044.jpg, 232, 568 912 | sampling positions: 15, img_0046.jpg, 424, 592 913 | sampling positions: 16, img_0061.jpg, 176, 344 914 | sampling positions: 17, img_0062.jpg, 336, 352 915 | sampling positions: 18, img_0069.jpg, 408, 184 916 | sampling positions: 19, img_0072.jpg, 160, 568 917 | sampling positions: 20, img_0074.jpg, 72, 352 918 | sampling positions: 21, img_0080.jpg, 272, 720 919 | sampling positions: 22, img_0086.jpg, 16, 56 920 | sampling positions: 23, img_0087.jpg, 648, 64 921 | sampling positions: 24, img_0092.jpg, 224, 248 922 | sampling positions: 25, img_0096.jpg, 232, 120 923 | sampling positions: 26, img_0098.jpg, 72, 296 924 | sampling positions: 27, img_0108.jpg, 472, 96 925 | sampling positions: 28, img_0122.jpg, 152, 504 926 | sampling positions: 29, img_0123.jpg, 64, 128 927 | sampling positions: 30, img_0124.jpg, 80, 32 928 | sampling positions: 31, img_0130.jpg, 200, 200 929 | sampling positions: 0, img_0137.jpg, 400, 56 930 | sampling positions: 1, img_0138.jpg, 40, 464 931 | sampling positions: 2, img_0141.jpg, 416, 456 932 | sampling positions: 3, img_0142.jpg, 136, 256 933 | sampling positions: 4, img_0144.jpg, 416, 528 934 | sampling positions: 5, img_0148.jpg, 208, 544 935 | sampling positions: 6, img_0149.jpg, 72, 288 936 | 
sampling positions: 7, img_0150.jpg, 192, 576 937 | sampling positions: 8, img_0151.jpg, 240, 136 938 | sampling positions: 9, img_0152.jpg, 296, 640 939 | sampling positions: 10, img_0153.jpg, 0, 640 940 | sampling positions: 11, img_0154.jpg, 216, 64 941 | sampling positions: 12, img_0155.jpg, 200, 432 942 | sampling positions: 13, img_0160.jpg, 224, 424 943 | sampling positions: 14, img_0161.jpg, 272, 576 944 | sampling positions: 15, img_0165.jpg, 104, 320 945 | sampling positions: 16, img_0171.jpg, 200, 592 946 | sampling positions: 17, img_0172.jpg, 344, 664 947 | sampling positions: 18, img_0174.jpg, 256, 664 948 | sampling positions: 19, img_0175.jpg, 400, 104 949 | sampling positions: 20, img_0179.jpg, 80, 192 950 | sampling positions: 21, img_0180.jpg, 400, 8 951 | sampling positions: 22, img_0185.jpg, 0, 48 952 | sampling positions: 23, img_0186.jpg, 64, 352 953 | sampling positions: 24, img_0189.jpg, 56, 592 954 | sampling positions: 25, img_0193.jpg, 416, 24 955 | sampling positions: 26, img_0206.jpg, 320, 248 956 | sampling positions: 27, img_0207.jpg, 304, 48 957 | sampling positions: 28, img_0213.jpg, 224, 352 958 | sampling positions: 29, img_0221.jpg, 432, 64 959 | sampling positions: 30, img_0222.jpg, 472, 312 960 | sampling positions: 31, img_0224.jpg, 432, 480 961 | sampling positions: 0, img_0232.jpg, 144, 696 962 | sampling positions: 1, img_0237.jpg, 328, 792 963 | sampling positions: 2, img_0239.jpg, 472, 368 964 | sampling positions: 3, img_0242.jpg, 224, 176 965 | sampling positions: 4, img_0259.jpg, 40, 40 966 | sampling positions: 5, img_0261.jpg, 480, 448 967 | sampling positions: 6, img_0266.jpg, 136, 760 968 | sampling positions: 7, img_0269.jpg, 120, 264 969 | sampling positions: 8, img_0270.jpg, 624, 32 970 | sampling positions: 9, img_0276.jpg, 40, 48 971 | sampling positions: 10, img_0278.jpg, 416, 744 972 | sampling positions: 11, img_0280.jpg, 312, 8 973 | sampling positions: 12, img_0284.jpg, 208, 288 974 | sampling positions: 13, img_0288.jpg, 8, 176 975 | sampling positions: 14, img_0291.jpg, 392, 696 976 | sampling positions: 15, img_0294.jpg, 264, 248 977 | sampling positions: 16, img_0300.jpg, 448, 752 978 | sampling positions: 17, img_0316.jpg, 64, 184 979 | sampling positions: 18, img_0323.jpg, 368, 104 980 | sampling positions: 19, img_0325.jpg, 0, 328 981 | sampling positions: 20, img_0348.jpg, 464, 360 982 | sampling positions: 21, img_0360.jpg, 288, 384 983 | sampling positions: 22, img_0361.jpg, 192, 128 984 | sampling positions: 23, img_0364.jpg, 384, 704 985 | sampling positions: 24, img_0375.jpg, 24, 88 986 | sampling positions: 25, img_0379.jpg, 416, 512 987 | sampling positions: 26, img_0382.jpg, 296, 232 988 | sampling positions: 27, img_0384.jpg, 456, 88 989 | sampling positions: 28, img_0386.jpg, 72, 8 990 | sampling positions: 29, img_0389.jpg, 96, 696 991 | sampling positions: 30, img_0390.jpg, 168, 296 992 | sampling positions: 31, img_0391.jpg, 80, 664 993 | sampling positions: 0, img_0401.jpg, 432, 48 994 | sampling positions: 1, img_0403.jpg, 184, 272 995 | sampling positions: 2, img_0407.jpg, 48, 752 996 | sampling positions: 3, img_0415.jpg, 384, 40 997 | sampling positions: 4, img_0418.jpg, 216, 256 998 | sampling positions: 5, img_0428.jpg, 72, 0 999 | sampling positions: 6, img_0430.jpg, 272, 696 1000 | sampling positions: 7, img_0432.jpg, 48, 248 1001 | sampling positions: 8, img_0435.jpg, 440, 600 1002 | sampling positions: 9, img_0436.jpg, 296, 536 1003 | sampling positions: 10, img_0437.jpg, 40, 792 1004 | sampling 
positions: 11, img_0438.jpg, 48, 376 1005 | sampling positions: 12, img_0468.jpg, 24, 536 1006 | sampling positions: 13, img_0471.jpg, 256, 184 1007 | sampling positions: 14, img_0472.jpg, 168, 296 1008 | sampling positions: 15, img_0479.jpg, 456, 728 1009 | sampling positions: 16, img_0489.jpg, 528, 480 1010 | sampling positions: 17, img_0495.jpg, 520, 72 1011 | sampling positions: 18, img_0496.jpg, 8, 608 1012 | sampling positions: 19, img_0498.jpg, 320, 544 1013 | sampling positions: 20, img_0505.jpg, 16, 432 1014 | sampling positions: 21, img_0511.jpg, 304, 672 1015 | sampling positions: 22, img_0521.jpg, 360, 792 1016 | sampling positions: 23, img_0523.jpg, 0, 752 1017 | sampling positions: 24, img_0524.jpg, 224, 576 1018 | sampling positions: 25, img_0527.jpg, 496, 400 1019 | sampling positions: 26, img_0529.jpg, 112, 544 1020 | sampling positions: 27, img_0552.jpg, 384, 352 1021 | sampling positions: 28, img_0553.jpg, 144, 240 1022 | sampling positions: 29, img_0570.jpg, 520, 232 1023 | sampling positions: 30, img_0574.jpg, 384, 16 1024 | sampling positions: 31, img_0578.jpg, 32, 112 1025 | sampling positions: 0, img_0592.jpg, 200, 64 1026 | sampling positions: 1, img_0596.jpg, 176, 488 1027 | sampling positions: 2, img_0601.jpg, 272, 136 1028 | sampling positions: 3, img_0603.jpg, 688, 48 1029 | sampling positions: 4, img_0607.jpg, 448, 392 1030 | sampling positions: 5, img_0617.jpg, 216, 680 1031 | sampling positions: 6, img_0622.jpg, 312, 144 1032 | sampling positions: 7, img_0623.jpg, 8, 184 1033 | sampling positions: 8, img_0636.jpg, 320, 576 1034 | sampling positions: 9, img_0637.jpg, 72, 536 1035 | sampling positions: 10, img_0638.jpg, 480, 168 1036 | sampling positions: 11, img_0639.jpg, 104, 616 1037 | sampling positions: 12, img_0646.jpg, 104, 224 1038 | sampling positions: 13, img_0654.jpg, 64, 112 1039 | sampling positions: 14, img_0669.jpg, 192, 176 1040 | sampling positions: 15, img_0670.jpg, 168, 232 1041 | sampling positions: 16, img_0674.jpg, 152, 304 1042 | sampling positions: 17, img_0677.jpg, 144, 320 1043 | sampling positions: 18, img_0678.jpg, 16, 424 1044 | sampling positions: 19, img_0682.jpg, 256, 32 1045 | sampling positions: 20, img_0690.jpg, 328, 704 1046 | sampling positions: 21, img_0700.jpg, 144, 280 1047 | sampling positions: 22, img_0702.jpg, 136, 80 1048 | sampling positions: 23, img_0721.jpg, 104, 776 1049 | sampling positions: 24, img_0733.jpg, 120, 616 1050 | sampling positions: 25, img_0742.jpg, 40, 592 1051 | sampling positions: 26, img_0754.jpg, 152, 536 1052 | sampling positions: 27, img_0756.jpg, 248, 696 1053 | sampling positions: 28, img_0762.jpg, 144, 392 1054 | sampling positions: 29, img_0769.jpg, 352, 16 1055 | sampling positions: 30, img_0777.jpg, 320, 712 1056 | sampling positions: 31, img_0784.jpg, 184, 96 1057 | sampling positions: 0, img_0786.jpg, 384, 680 1058 | sampling positions: 1, img_0791.jpg, 368, 312 1059 | sampling positions: 2, img_0792.jpg, 104, 152 1060 | sampling positions: 3, img_0796.jpg, 24, 112 1061 | sampling positions: 4, img_0803.jpg, 64, 280 1062 | sampling positions: 5, img_0808.jpg, 184, 440 1063 | sampling positions: 6, img_0809.jpg, 408, 312 1064 | sampling positions: 7, img_0812.jpg, 128, 248 1065 | sampling positions: 8, img_0814.jpg, 200, 600 1066 | sampling positions: 9, img_0821.jpg, 440, 104 1067 | sampling positions: 10, img_0834.jpg, 400, 504 1068 | sampling positions: 11, img_0840.jpg, 208, 272 1069 | sampling positions: 12, img_0843.jpg, 336, 376 1070 | sampling positions: 13, img_0853.jpg, 104, 
200 1071 | sampling positions: 14, img_0855.jpg, 176, 664 1072 | sampling positions: 15, img_0859.jpg, 240, 72 1073 | sampling positions: 16, img_0860.jpg, 48, 664 1074 | sampling positions: 17, img_0862.jpg, 48, 208 1075 | sampling positions: 18, img_0864.jpg, 168, 552 1076 | sampling positions: 19, img_0903.jpg, 472, 592 1077 | sampling positions: 20, img_0905.jpg, 88, 528 1078 | sampling positions: 21, img_0906.jpg, 0, 408 1079 | sampling positions: 22, img_0913.jpg, 112, 552 1080 | sampling positions: 23, img_0915.jpg, 424, 768 1081 | sampling positions: 24, img_0917.jpg, 304, 400 1082 | sampling positions: 25, img_0923.jpg, 352, 176 1083 | sampling positions: 26, img_0926.jpg, 360, 56 1084 | sampling positions: 27, img_0927.jpg, 288, 448 1085 | sampling positions: 28, img_0928.jpg, 384, 264 1086 | sampling positions: 29, img_0929.jpg, 136, 744 1087 | sampling positions: 30, img_0937.jpg, 88, 80 1088 | sampling positions: 31, img_0938.jpg, 344, 448 1089 | sampling positions: 0, img_0948.jpg, 144, 640 1090 | sampling positions: 1, img_0951.jpg, 128, 328 1091 | sampling positions: 2, img_0953.jpg, 8, 656 1092 | sampling positions: 3, img_0954.jpg, 216, 144 1093 | sampling positions: 4, img_0963.jpg, 328, 0 1094 | sampling positions: 5, img_0970.jpg, 96, 704 1095 | sampling positions: 6, img_0973.jpg, 104, 712 1096 | sampling positions: 7, img_0975.jpg, 256, 784 1097 | sampling positions: 8, img_0982.jpg, 216, 344 1098 | sampling positions: 9, img_0985.jpg, 128, 56 1099 | sampling positions: 10, img_0988.jpg, 280, 272 1100 | sampling positions: 11, img_0989.jpg, 184, 760 1101 | sampling positions: 12, img_0991.jpg, 0, 112 1102 | sampling positions: 13, img_0996.jpg, 144, 328 1103 | sampling positions: 14, img_0998.jpg, 336, 168 1104 | sampling positions: 15, img_1006.jpg, 16, 400 1105 | sampling positions: 16, img_1019.jpg, 384, 200 1106 | sampling positions: 17, img_1021.jpg, 16, 416 1107 | sampling positions: 18, img_1024.jpg, 232, 704 1108 | sampling positions: 19, img_1041.jpg, 200, 120 1109 | sampling positions: 20, img_1043.jpg, 200, 720 1110 | sampling positions: 21, img_1049.jpg, 216, 568 1111 | sampling positions: 22, img_1050.jpg, 456, 296 1112 | sampling positions: 23, img_1051.jpg, 136, 792 1113 | sampling positions: 24, img_1054.jpg, 352, 0 1114 | sampling positions: 25, img_1062.jpg, 216, 384 1115 | sampling positions: 26, img_1063.jpg, 144, 440 1116 | sampling positions: 27, img_1064.jpg, 288, 472 1117 | sampling positions: 28, img_1066.jpg, 168, 128 1118 | sampling positions: 29, img_1069.jpg, 200, 632 1119 | sampling positions: 30, img_1083.jpg, 256, 560 1120 | sampling positions: 31, img_1088.jpg, 248, 504 1121 | -------------------------------------------------------------------------------- /sinkhorn.py: -------------------------------------------------------------------------------- 1 | """ 2 | Source: https://gist.github.com/wohlert/8589045ab544082560cc5f8915cc90bd 3 | """ 4 | import torch 5 | import torch.nn as nn 6 | from pdb import set_trace as bp 7 | 8 | class SinkhornSolver(nn.Module): 9 | """ 10 | Optimal Transport solver under entropic regularisation. 11 | Based on the code of Gabriel Peyré. 
12 | """ 13 | def __init__(self, epsilon, iterations=100, ground_metric=lambda x: torch.pow(x, 2)): 14 | super(SinkhornSolver, self).__init__() 15 | self.epsilon = epsilon 16 | self.iterations = iterations 17 | self.ground_metric = ground_metric 18 | 19 | def sinkhorn_loss(self, x, y): 20 | num_x = x.size(-2) 21 | num_y = y.size(-2) 22 | 23 | batch_size = 1 if x.dim() == 2 else x.size(0) 24 | 25 | # Marginal densities are empirical measures 26 | a = x.new_ones((batch_size, num_x), requires_grad=False) / num_x 27 | b = y.new_ones((batch_size, num_y), requires_grad=False) / num_y 28 | a = a.squeeze() 29 | b = b.squeeze() 30 | 31 | # Initialise approximation vectors in log domain 32 | u = torch.zeros_like(a) 33 | v = torch.zeros_like(b) 34 | 35 | # Stopping criterion 36 | threshold = 1e-1 37 | 38 | # Cost matrix 39 | C = self._compute_cost(x, y) 40 | 41 | # Sinkhorn iterations 42 | for i in range(self.iterations): 43 | u0, v0 = u, v 44 | 45 | # u^{l+1} = a / (K v^l) 46 | K = self._log_boltzmann_kernel(u, v, C) 47 | u_ = torch.log(a + 1e-8) - torch.logsumexp(K, dim=1) 48 | u = self.epsilon * u_ + u 49 | 50 | # v^{l+1} = b / (K^T u^(l+1)) 51 | K_t = self._log_boltzmann_kernel(u, v, C).transpose(-2, -1) 52 | v_ = torch.log(b + 1e-8) - torch.logsumexp(K_t, dim=1) 53 | v = self.epsilon * v_ + v 54 | 55 | # Size of the change we have performed on u 56 | diff = torch.sum(torch.abs(u - u0), dim=-1) + torch.sum(torch.abs(v - v0), dim=-1) 57 | mean_diff = torch.mean(diff) 58 | 59 | if mean_diff.item() < threshold: 60 | break 61 | 62 | # print("Finished computing transport plan in {} iterations".format(i)) 63 | 64 | # Transport plan pi = diag(a)*K*diag(b) 65 | K = self._log_boltzmann_kernel(u, v, C) 66 | pi = torch.exp(K) 67 | 68 | # Sinkhorn distance 69 | cost = torch.sum(pi * C, dim=(-2, -1)) 70 | 71 | return cost 72 | 73 | def sinkhorn_normalized(self, x, y): 74 | Wxy = self.sinkhorn_loss(x, y) 75 | Wxx = self.sinkhorn_loss(x, x) 76 | Wyy = self.sinkhorn_loss(y, y) 77 | return 2 * Wxy - Wxx - Wyy 78 | 79 | def forward(self, x, y): 80 | # return self.sinkhorn_normalized(x,y) 81 | return self.sinkhorn_loss(x, y) 82 | 83 | def _compute_cost(self, x, y): 84 | x_ = x.unsqueeze(-2) 85 | y_ = y.unsqueeze(-3) 86 | C = torch.sum(self.ground_metric(x_ - y_), dim=-1) 87 | return C 88 | 89 | def _log_boltzmann_kernel(self, u, v, C=None): 90 | C = self._compute_cost(x, y) if C is None else C 91 | kernel = -C + u.unsqueeze(-1) + v.unsqueeze(-2) 92 | kernel /= self.epsilon 93 | return kernel -------------------------------------------------------------------------------- /stage1_main.py: -------------------------------------------------------------------------------- 1 | """ 2 | stage1_main.py: stage 1 training script 3 | """ 4 | 5 | import argparse 6 | import datetime 7 | import os 8 | import pickle 9 | import random 10 | 11 | import cv2 12 | import matplotlib 13 | import numpy as np 14 | import torch 15 | from matplotlib import pyplot as plt 16 | from torch import nn as nn 17 | from torch import optim as optim 18 | 19 | from crowd_dataset import CrowdDataset 20 | from models import Stage1CountingNet 21 | 22 | matplotlib.use('Agg') 23 | 24 | rotation_angles = [0, 90, 180, 270] 25 | rotation_angles_cv2 = [0, cv2.ROTATE_90_COUNTERCLOCKWISE, 26 | cv2.ROTATE_180, cv2.ROTATE_90_CLOCKWISE] 27 | num_rotations = len(rotation_angles) 28 | image_new_crop_size = 112 29 | 30 | parser = argparse.ArgumentParser(description='CSS-CCNN Stage-1 Training') 31 | parser.add_argument('--epochs', default=1000, type=int, metavar='N', 32 
| help='number of total epochs to run') 33 | parser.add_argument('--gpu', default=0, type=int, 34 | help='GPU number') 35 | parser.add_argument('-b', '--batch-size', default=4, type=int, metavar='N', 36 | help='mini-batch size (default: 4),only used for train') 37 | parser.add_argument('--patches', default=1, type=int, metavar='N', 38 | help='number of patches per image') 39 | parser.add_argument('--dataset', default="parta", type=str, 40 | help='dataset to train on') 41 | parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, 42 | metavar='LR', help='initial learning rate') 43 | parser.add_argument('--momentum', default=0.9, type=float, 44 | metavar='M', help='momentum') 45 | parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', 46 | help='weight decay (default: 1e-4)') 47 | 48 | def log(f, txt, do_print=1): 49 | txt = str(datetime.datetime.now()) + ': ' + txt 50 | if do_print == 1: 51 | print(txt) 52 | f.write(txt + '\n') 53 | 54 | def get_filename(net_name, epochs_over): 55 | return net_name + "_epoch_" + str(epochs_over) + ".pth" 56 | 57 | def save_checkpoint(state, fdir, name='checkpoint.pth'): 58 | filepath = os.path.join(fdir, name) 59 | torch.save(state, filepath) 60 | 61 | def print_graph(maps, title, save_path): 62 | fig = plt.figure() 63 | st = fig.suptitle(title) 64 | for i, (map, args) in enumerate(maps): 65 | plt.subplot(1, len(maps), i + 1) 66 | if len(map.shape) > 2 and map.shape[0] == 3: 67 | plt.imshow(map.transpose((1, 2, 0)).astype( 68 | np.uint8), aspect='equal', **args) 69 | else: 70 | plt.imshow(map, aspect='equal', **args) 71 | plt.axis('off') 72 | plt.savefig(save_path + ".png", bbox_inches='tight', pad_inches=0) 73 | fig.clf() 74 | plt.clf() 75 | plt.close() 76 | 77 | def train_function(Xs, Ys, network, optimizer): 78 | network = network.cuda() 79 | optimizer.zero_grad() 80 | 81 | X = torch.autograd.Variable(torch.from_numpy(Xs)).cuda() 82 | Y = torch.autograd.Variable(torch.LongTensor(Ys)).cuda() 83 | 84 | outputs = network(X) 85 | assert(outputs.shape == (X.shape[0], num_rotations)) # (B,4) 86 | losses = [] 87 | 88 | loss_criterion = nn.CrossEntropyLoss(size_average=True) 89 | loss_ = loss_criterion(outputs, Y) 90 | loss = loss_ 91 | assert(loss.grad_fn != None) 92 | loss.backward() 93 | optimizer.step() 94 | losses.append(loss.item()) 95 | matches, actual_angle_dist, matches_by_angle = calculate_per_rot_acc( 96 | outputs, Y) 97 | return losses, matches, actual_angle_dist, matches_by_angle 98 | 99 | 100 | @torch.no_grad() 101 | def test_function(X, Y, network): 102 | X = torch.autograd.Variable(torch.from_numpy(X)).cuda() 103 | Y = torch.autograd.Variable(torch.from_numpy(Y)).cuda().long() 104 | 105 | network = network.cuda() 106 | network.eval() 107 | output = network(X) 108 | 109 | loss_criterion = nn.CrossEntropyLoss(size_average=True) 110 | loss_ = loss_criterion(output, Y) 111 | loss = loss_ 112 | 113 | matches, actual_angle_dist, matches_by_angle = calculate_per_rot_acc( 114 | output, Y) 115 | 116 | network.train() 117 | return loss.data, matches, actual_angle_dist, matches_by_angle 118 | 119 | 120 | def calculate_per_rot_acc(rotation_prediction, rotation_gt): 121 | out_argmax = torch.argmax(nn.functional.softmax( 122 | rotation_prediction, dim=1), dim=1) # (B,) 123 | Yss_argmax = rotation_gt # (B,) 124 | 125 | equat_mat = out_argmax == Yss_argmax 126 | matches = torch.sum(out_argmax == Yss_argmax).item() 127 | 128 | actual_angle_dist = np.array([torch.sum(Yss_argmax == rot_idx).item( 129 | ) for rot_idx in 
range(num_rotations)]) # len of n 130 | matches_by_angle = np.array([torch.sum( 131 | equat_mat[out_argmax == rot_idx]).item() for rot_idx in range(num_rotations)]) 132 | 133 | assert(np.sum(matches_by_angle) == matches) 134 | return matches, actual_angle_dist, matches_by_angle 135 | 136 | 137 | def test_network(dataset, set_name, network, print_output=False): 138 | global test_loss 139 | global counter 140 | test_loss = 0. 141 | counter = 0. 142 | metrics_test = {} 143 | metrics_ = ['new_mae', 'mle', 'mse', 'loss1'] 144 | for k in metrics_: 145 | metrics_test[k] = 0.0 146 | 147 | if isinstance(print_output, str): 148 | print_path = print_output 149 | elif isinstance(print_output, bool) and print_output: 150 | print_path = './models/dump' 151 | else: 152 | print_path = None 153 | 154 | total_matches_count = 0 155 | total_per_angle_count = np.zeros(num_rotations) 156 | total_per_angle_match_count = np.zeros(num_rotations) 157 | 158 | for idx, data in enumerate(dataset.test_get_data(set_name)): 159 | image_name, Xs, _ = data 160 | image = Xs[0].transpose((1, 2, 0)) 161 | 162 | # 1. Crop out the 112x112 image, Xs[0] (3,h,w) 163 | image_h, image_w = Xs[0].shape[-2:] 164 | image_center = np.array([image_h // 2, image_w // 2]) 165 | image_crop_start_loc = image_center - (image_new_crop_size//2) 166 | image_crop_start_loc[image_crop_start_loc < 0] = 0 167 | assert(image_h >= image_new_crop_size and image_w >= image_new_crop_size) 168 | cropped_Xs = Xs[0][:, image_crop_start_loc[0]: image_crop_start_loc[0] + image_new_crop_size, 169 | image_crop_start_loc[1]: image_crop_start_loc[1] + image_new_crop_size] # (3,h',w') 170 | assert(cropped_Xs.shape == (3, image_new_crop_size, image_new_crop_size)) 171 | 172 | # 2. Do all the rotations for image and form the batch of rotation 173 | new_images_input = np.zeros( 174 | (num_rotations,) + cropped_Xs.shape, dtype=Xs.dtype) # (num_rotations,3,h',w') 175 | new_image_rotation_gt = np.zeros( 176 | (num_rotations, ), dtype=np.int32) # (B, ) 177 | cropped_image = np.transpose(cropped_Xs, (1, 2, 0)) # (h',w',3) 178 | for i in range(num_rotations): 179 | rot_cropped_image = cropped_image.copy() 180 | if i != 0: 181 | rot_cropped_image = cv2.rotate( 182 | rot_cropped_image, rotation_angles_cv2[i]) 183 | new_images_input[i] = np.transpose(rot_cropped_image, (2, 0, 1)) 184 | assert (np.sum(cropped_Xs) == np.sum(rot_cropped_image)) 185 | new_image_rotation_gt[i] = i 186 | 187 | assert(new_images_input.shape == (num_rotations, 3, 188 | image_new_crop_size, image_new_crop_size)) 189 | 190 | loss, num_matches, actual_angle_dist, matches_by_angle = test_function(new_images_input, new_image_rotation_gt, 191 | network) 192 | total_matches_count += num_matches 193 | total_per_angle_count += actual_angle_dist 194 | total_per_angle_match_count += matches_by_angle 195 | 196 | test_loss += loss 197 | counter += 1 198 | 199 | rotation_match_acc = total_matches_count/(counter * num_rotations) 200 | per_rot_match_acc = total_per_angle_match_count/total_per_angle_count 201 | 202 | assert (np.sum(total_per_angle_count) == (counter * num_rotations)) 203 | 204 | metrics_test['loss1'] = test_loss / float(counter) 205 | txt = '' 206 | txt += '%s: %s ' % ('loss1', metrics_test['loss1']) 207 | 208 | return metrics_test, txt, rotation_match_acc, per_rot_match_acc 209 | 210 | 211 | def train_network(): 212 | network = Stage1CountingNet() 213 | model_save_dir = './models_stage_1' 214 | model_save_path = os.path.join(model_save_dir, 'train2') 215 | if not os.path.exists(model_save_path): 216 
| os.makedirs(model_save_path) 217 | os.makedirs(os.path.join(model_save_path, 'snapshots')) 218 | global f 219 | snapshot_path = os.path.join(model_save_path, 'snapshots') 220 | f = open(os.path.join(model_save_path, 'train0.log'), 'w') 221 | 222 | # -- Logging Parameters 223 | log(f, 'args: ' + str(args)) 224 | log(f, 'model: ' + str(network), False) 225 | log(f, 'Stage1..') 226 | log(f, 'LR: %.12f.' % (args.lr)) 227 | 228 | start_epoch = 0 229 | num_epochs = args.epochs 230 | valid_losses = {} 231 | train_losses = {} 232 | for metric in ['loss1', 'new_mae']: 233 | valid_losses[metric] = [] 234 | 235 | for metric in ['loss1']: 236 | train_losses[metric] = [] 237 | 238 | batch_size = args.batch_size 239 | num_train_images = len(dataset.data_files['train']) 240 | num_patches_per_image = args.patches 241 | num_batches_per_epoch = num_patches_per_image * num_train_images // batch_size 242 | 243 | optimizer = optim.SGD(filter(lambda p: p.requires_grad, network.parameters()), 244 | lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) 245 | 246 | # -- Main Training Loop 247 | all_epoch_test_valid_accs = [] 248 | all_epoch_test_valid_per_rot_accs = [] 249 | for e_i, epoch in enumerate(range(start_epoch, num_epochs)): 250 | avg_loss = [0.0 for _ in range(1)] 251 | 252 | # b_i - batch index 253 | total_match_count = 0 254 | total_count = 0 255 | total_per_angle_count = np.zeros(num_rotations) 256 | total_per_angle_match_count = np.zeros(num_rotations) 257 | for b_i in range(num_batches_per_epoch): 258 | # Generate next training sample 259 | Xs, _ = dataset.train_get_data(batch_size=args.batch_size) 260 | 261 | # 1. Crop image to 112x112 . Xs shape: (B,3,h,w) 262 | image_size = Xs.shape[-1] 263 | crop_start_loc = [image_size // 4, image_size // 4] 264 | 265 | Xs = Xs[:, :, crop_start_loc[0]: crop_start_loc[0] + image_new_crop_size, 266 | crop_start_loc[1]: crop_start_loc[1] + image_new_crop_size] 267 | 268 | # 2 . Randomly rotate each image 269 | new_images_input = np.zeros_like(Xs, dtype=Xs.dtype) # (B,3,h',w') 270 | new_image_rotation_gt = np.zeros( 271 | (Xs.shape[0], ), dtype=np.int32) # (B,4) 272 | images = np.transpose(Xs, (0, 2, 3, 1)) # (B,h',w',3) 273 | for i in range(images.shape[0]): 274 | image = images[i] # (h',w',3) 275 | chosen_index = np.random.choice(num_rotations, 1)[0] 276 | chosen_angle = rotation_angles[chosen_index] 277 | if chosen_angle != 0: 278 | image = cv2.rotate( 279 | image, rotation_angles_cv2[chosen_index]) 280 | new_images_input[i, :, :, :] = np.transpose(image, (2, 0, 1)) 281 | new_image_rotation_gt[i] = chosen_index 282 | 283 | losses, matches, actual_angle_dist, matches_by_angle = train_function(new_images_input, 284 | new_image_rotation_gt, 285 | network, optimizer) 286 | total_match_count += matches 287 | total_count += args.batch_size 288 | assert(total_match_count <= total_count) 289 | 290 | total_per_angle_count += actual_angle_dist 291 | total_per_angle_match_count += matches_by_angle 292 | 293 | assert(np.sum(total_per_angle_count) == total_count) 294 | for scale_idx in range(1): 295 | avg_loss[scale_idx] = avg_loss[scale_idx] + losses[scale_idx] 296 | 297 | # Logging losses after 1k iterations. 298 | if b_i % 100 == 0: 299 | log(f, 'Epoch %d [%d]: %s loss: %s.' % 300 | (epoch, b_i, [network.name], losses)) 301 | log(f, 'Epoch %d [%d]: %s rot acc: %s.' % ( 302 | epoch, b_i, [network.name], (total_match_count/total_count))) 303 | log(f, 'Epoch %d [%d]: %s rot acc(0,90,180,270): %s.' 
% (epoch, b_i, [network.name], 304 | (total_per_angle_match_count / total_per_angle_count))) 305 | 306 | # -- Stats update 307 | avg_loss = [al / num_batches_per_epoch for al in avg_loss] 308 | avg_loss = [av for av in avg_loss] 309 | 310 | train_losses['loss1'].append(avg_loss) 311 | 312 | torch.cuda.empty_cache() 313 | log(f, 'Validating...') 314 | 315 | epoch_val_losses, txt, rot_acc_valid, per_rot_acc_valid = test_network( 316 | dataset, 'test_valid', network, False) 317 | log(f, 'Valid epoch: ' + str(epoch) + ' ' + txt) 318 | log(f, 'Valid epoch: ' + str(epoch) + 319 | 'total rotation acc:' + str(rot_acc_valid)) 320 | log(f, 'Valid epoch: ' + str(epoch) + 321 | 'per rotation acc:' + str(per_rot_acc_valid)) 322 | all_epoch_test_valid_accs.append(rot_acc_valid) 323 | all_epoch_test_valid_per_rot_accs.append(per_rot_acc_valid) 324 | 325 | best_epoch = np.argmax(np.array(all_epoch_test_valid_accs)) 326 | best_valid_test_acc = np.array(all_epoch_test_valid_accs).max() 327 | log(f, 'Best valid rot acc so far epoch : {} , acc : {}'.format( 328 | best_epoch, best_valid_test_acc)) 329 | 330 | for metric in ['loss1', 'new_mae']: 331 | valid_losses[metric].append(epoch_val_losses[metric]) 332 | 333 | min_valid_epoch = np.argmin(valid_losses['new_mae']) 334 | 335 | # Save networks 336 | save_checkpoint({ 337 | 'epoch': epoch + 1, 338 | 'state_dict': network.state_dict(), 339 | 'optimizer': optimizer.state_dict(), 340 | }, snapshot_path, get_filename(network.name, epoch + 1)) 341 | 342 | print('saving graphs...') 343 | with open(os.path.join(snapshot_path, 'losses.pkl'), 'wb') as lossfile: 344 | pickle.dump((train_losses, valid_losses), 345 | lossfile, protocol=2) 346 | 347 | for metric in train_losses.keys(): 348 | if "maxima_split" not in metric: 349 | if isinstance(train_losses[metric][0], list): 350 | for i in range(len(train_losses[metric][0])): 351 | plt.plot([a[i] for a in train_losses[metric]]) 352 | plt.savefig(os.path.join(snapshot_path, 353 | 'train_%s_%d.png' % (metric, i))) 354 | plt.clf() 355 | plt.close() 356 | plt.plot(train_losses[metric]) 357 | plt.savefig(os.path.join( 358 | snapshot_path, 'train_%s.png' % metric)) 359 | plt.clf() 360 | plt.close() 361 | 362 | for metric in valid_losses.keys(): 363 | if isinstance(valid_losses[metric][0], list): 364 | for i in range(len(valid_losses[metric][0])): 365 | plt.plot([a[i] for a in valid_losses[metric]]) 366 | plt.savefig(os.path.join(snapshot_path, 367 | 'valid_%s_%d.png' % (metric, i))) 368 | plt.clf() 369 | plt.close() 370 | plt.plot(valid_losses[metric]) 371 | plt.savefig(os.path.join(snapshot_path, 'valid_%s.png' % metric)) 372 | plt.clf() 373 | plt.close() 374 | 375 | all_epoch_test_valid_accs = np.array(all_epoch_test_valid_accs) 376 | best_epoch = np.argmax(all_epoch_test_valid_accs) 377 | best_valid_test_acc = all_epoch_test_valid_accs.max() 378 | 379 | log(f, 'Best valid rot acc epoch : {} , acc : {}'.format( 380 | best_epoch, best_valid_test_acc)) 381 | 382 | # Plotting the valid accuracies 383 | plt.plot(np.array(all_epoch_test_valid_accs)) 384 | for i in range(num_rotations): 385 | plt.plot(np.array(all_epoch_test_valid_per_rot_accs)[:, i]) 386 | plt.legend(['overall acc', '0 deg acc', '90 deg acc', 387 | '180 deg acc', '270 deg acc'], loc='upper right') 388 | plt.savefig(os.path.join(snapshot_path, 'test_valid_all_rot_acc.png')) 389 | plt.clf() 390 | plt.close() 391 | 392 | # this is to be consistent with the file name written 393 | filename = get_filename(network.name, best_epoch + 1) 394 | with 
open(os.path.join(snapshot_path, 'unsup_vgg_best_model_meta.pkl'), 'wb') as unsup_file: 395 | pickle.dump(filename, unsup_file, protocol=2) 396 | log(f, 'Exiting train...') 397 | f.close() 398 | return 399 | 400 | 401 | if __name__ == '__main__': 402 | args = parser.parse_args() 403 | # -- Assign GPU 404 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) 405 | 406 | # -- Assertions 407 | assert (args.dataset) 408 | 409 | # -- Check if requirements satisfied 410 | assert(np.__version__=="1.15.4") 411 | assert(cv2.__version__=="3.4.3") 412 | assert(torch.__version__=="0.4.1") 413 | assert("9.0" in torch.version.cuda) 414 | 415 | # -- Setting seeds for reproducability 416 | np.random.seed(11) 417 | random.seed(11) 418 | torch.manual_seed(11) 419 | torch.backends.cudnn.enabled = False 420 | torch.backends.cudnn.deterministic = True 421 | torch.backends.cudnn.benchmark = False 422 | torch.cuda.manual_seed(11) 423 | torch.cuda.manual_seed_all(11) 424 | 425 | # -- Dataset paths 426 | if args.dataset == "parta": 427 | validation_set = 30 428 | path = "../../dataset/ST_partA/" 429 | output_downscale = 4 430 | dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set, 431 | gt_downscale_factor=output_downscale, stage_1=True) 432 | elif args.dataset == "ucfqnrf": 433 | validation_set = 240 434 | output_downscale = 4 435 | path = "../../dataset/UCF-QNRF_ECCV18" 436 | dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set, 437 | gt_downscale_factor=output_downscale, stage_1=True, image_size_max=768) 438 | 439 | model_save_dir = './models' 440 | batch_size = args.batch_size 441 | 442 | print(dataset.data_files['test_valid'], 443 | len(dataset.data_files['test_valid'])) 444 | print(dataset.data_files['train'], len(dataset.data_files['train'])) 445 | 446 | # -- Train the model 447 | train_network() 448 | -------------------------------------------------------------------------------- /stage2_main++.py: -------------------------------------------------------------------------------- 1 | """ 2 | stage2_main.py: stage 2 training script 3 | """ 4 | 5 | import argparse 6 | import datetime 7 | import os 8 | import pickle 9 | import random 10 | 11 | import cv2 12 | import matplotlib 13 | import numpy as np 14 | import powerlaw 15 | import torch 16 | from matplotlib import pyplot as plt 17 | from mpmath import gammainc 18 | from torch import nn as nn 19 | from torch import optim as optim 20 | 21 | from crowd_dataset import CrowdDataset 22 | from models import (Stage2CountingNet, check_BN_no_gradient_change, 23 | check_conv_no_gradient_change, load_net, 24 | load_rot_model_blocks, set_batch_norm_to_eval) 25 | from sinkhorn import SinkhornSolver 26 | 27 | matplotlib.use('Agg') 28 | 29 | 30 | parser = argparse.ArgumentParser(description='CSS-CSNN++ Stage-2 Training') 31 | parser.add_argument('--epochs', default=600, type=int, metavar='N', 32 | help='number of total epochs to run') 33 | parser.add_argument('--gpu', default=0, type=int, 34 | help='GPU number') 35 | parser.add_argument('-b', '--batch-size', default=32, type=int, metavar='N', 36 | help='mini-batch size (default: 32),only used for train') 37 | parser.add_argument('--patches', default=1, type=int, metavar='N', 38 | help='number of patches per image') 39 | parser.add_argument('--dataset', default="parta", type=str, 40 | help='dataset to train on') 41 | parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float, 42 | metavar='LR', help='initial learning rate') 43 | parser.add_argument('--momentum', 
default=0.9, type=float, 44 | metavar='M', help='momentum') 45 | parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', 46 | help='weight decay (default: 1e-4)') 47 | parser.add_argument('--loss', default='sinkhorn', type=str, 48 | help="loss to use: mse or sinkhorn") 49 | parser.add_argument('--kernel_size', default=8, type=int, 50 | help="kernel size for summing counts") 51 | parser.add_argument('--sinkhorn_epsilon', default=0.1, type=float, 52 | help="entropy regularisation weight in sinkhorn") 53 | parser.add_argument('--sbs', '--sinkhorn_batch_size', default=32, 54 | type=int, help="points to sample from distribution") 55 | parser.add_argument('--sinkhorn_iterations', default=1000, 56 | type=int, help="no of iterations in sinkhorn") 57 | parser.add_argument('--seed', default=11, type=int, help="seed to use") 58 | parser.add_argument('--alpha', default=2.0, type=float, help="shape parameter of power law distribution") 59 | parser.add_argument('--cmax', default=3500, type=int, help="the maximum value") 60 | parser.add_argument('--scrop', default=4, type=int, help="patch approximation parameter") 61 | parser.add_argument('--num_samples', default=482, type=int, help="number of samples") 62 | parser.add_argument('--patience', default=300, type=int, help="epochs to train before stopping") 63 | parser.add_argument('--ma_window', default=10, type=int, help="window for computing moving average") 64 | parser.add_argument('--percentile_thresh', default=0.3, type=float, help="percentile for splitting regios") 65 | parser.add_argument('--dense_weight', default=0.1, type=float, help="weight for dense") 66 | sampled_GT = None 67 | blur_sigma = None 68 | 69 | # -- Compute CDF for Truncated Power Law Distribution 70 | def get_cdf(x, alpha, Lambda): 71 | CDF = ( (gammainc(1-alpha, Lambda*x)) / 72 | Lambda**(1-alpha) 73 | ) 74 | return 1-CDF 75 | 76 | # -- Obtain Lambda from max count 77 | def get_lambda(): 78 | m, n = 4, 4 79 | max_value = args.cmax / (args.scrop * m * n) 80 | 81 | for Lambda_t in np.arange(0.001, 0.1, 0.001): 82 | cdf = get_cdf(max_value, args.alpha, Lambda_t) 83 | if cdf > 1 - 1. 
/ args.num_samples: 84 | return Lambda_t 85 | 86 | # -- Get shift thresh 87 | def get_shift_thresh(): 88 | Lambda = get_lambda() 89 | for value in np.arange(1.00001, 10, 0.01): 90 | cdf = get_cdf(value, args.alpha, Lambda) 91 | if cdf > 0.28: 92 | return value 93 | 94 | def log(f, txt, do_print=1): 95 | txt = str(datetime.datetime.now()) + ': ' + txt 96 | if do_print == 1: 97 | print(txt) 98 | f.write(txt + '\n') 99 | 100 | def get_filename(net_name, epochs_over): 101 | return net_name + "_epoch_" + str(epochs_over) + ".pth" 102 | 103 | 104 | def save_checkpoint(state, fdir, name='checkpoint.pth'): 105 | filepath = os.path.join(fdir, name) 106 | torch.save(state, filepath) 107 | 108 | 109 | def print_graph(maps, title, save_path): 110 | fig = plt.figure() 111 | st = fig.suptitle(title) 112 | for i, (map, args) in enumerate(maps): 113 | plt.subplot(1, len(maps), i + 1) 114 | if len(map.shape) > 2 and map.shape[0] == 3: 115 | plt.imshow(map.transpose((1, 2, 0)).astype( 116 | np.uint8), aspect='equal', **args) 117 | else: 118 | plt.imshow(map, aspect='equal', **args) 119 | plt.axis('off') 120 | plt.savefig(save_path + ".png", bbox_inches='tight', pad_inches=0) 121 | fig.clf() 122 | plt.clf() 123 | plt.close() 124 | 125 | 126 | excluded_layers = ['conv4_1', 'conv4_2', 'conv5_1'] 127 | 128 | 129 | def get_loss_criterion(): 130 | if args.loss == 'mse': 131 | return nn.MSELoss(size_average=True) 132 | elif args.loss == 'sinkhorn': 133 | return SinkhornSolver(epsilon=args.sinkhorn_epsilon, iterations=args.sinkhorn_iterations) 134 | else: 135 | raise NotImplementedError 136 | 137 | # -- Create edge density maps 138 | def create_pseudo_density(Xs): 139 | global blur_sigma 140 | kernal_size_from_actual = 5 141 | 142 | pseudo_density_maps = [] 143 | for i in range(Xs.shape[0]): 144 | image = Xs[i].transpose((1,2,0)).astype('uint8')#(224,224,3) 145 | 146 | gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 147 | gray = cv2.Canny(gray, 225, 250) 148 | gray = cv2.resize(gray,(gray.shape[1]//output_downscale, gray.shape[0]//output_downscale)) 149 | 150 | blur = cv2.GaussianBlur(gray,(kernal_size_from_actual,kernal_size_from_actual), sigmaX = blur_sigma) 151 | 152 | orig_blur = blur.copy() 153 | blur = blur.astype('float32') / 255 154 | blur = blur * 0.8 / 10 155 | 156 | pseudo_density_maps.append(blur[None,...]) 157 | 158 | pseudo_density_maps = np.array(pseudo_density_maps) 159 | pseudo_density_maps = pseudo_density_maps / np.max(pseudo_density_maps) 160 | return pseudo_density_maps 161 | 162 | def train_function(Xs, Ys, network, optimizer): 163 | network = network.cuda() 164 | optimizer.zero_grad() 165 | 166 | X = torch.autograd.Variable(torch.from_numpy(Xs)).cuda() 167 | Y = torch.autograd.Variable(torch.FloatTensor(Ys)).cuda() 168 | 169 | outputs = network(X) 170 | 171 | losses = [] 172 | loss = 0.0 173 | loss_criterion = get_loss_criterion() 174 | 175 | pseudo_density_maps = create_pseudo_density(Xs) 176 | pseudo_density_maps = torch.from_numpy(pseudo_density_maps).cuda() 177 | 178 | avg_pool = nn.AvgPool2d(kernel_size=args.kernel_size, 179 | stride=args.kernel_size) 180 | output_reshape_ = avg_pool(outputs) * (args.kernel_size * args.kernel_size) 181 | pseudo_reshape_ = avg_pool(pseudo_density_maps) * (args.kernel_size * args.kernel_size) 182 | output_reshape = output_reshape_.view(-1, 1) 183 | pseudo_reshape = pseudo_reshape_.view(-1, 1) 184 | 185 | # -- Split predictions into sparse, dense using percentile_thresh 186 | pseudo_median = 
pseudo_reshape.topk(int(args.percentile_thresh*len(pseudo_reshape)), dim=0)[0][-1:][0] 187 | Y_median = Y.topk(int(args.percentile_thresh*(len(Y))), dim=0)[0][-1:][0] 188 | a_output_indices = pseudo_reshape < pseudo_median 189 | a_Y_indices = Y < Y_median 190 | 191 | if a_output_indices.sum() > 2: 192 | loss_sparse = loss_criterion(output_reshape[a_output_indices].view(-1, 1), Y[a_Y_indices].view(-1, 1)) 193 | loss_dense = loss_criterion(output_reshape[~a_output_indices].view(-1, 1), Y[~a_Y_indices].view(-1, 1)) 194 | loss = (loss_sparse + loss_dense)* 0.01 195 | else: 196 | loss = loss_criterion(output_reshape, Y.view(-1, 1)) * 0.01 197 | 198 | assert(loss.grad_fn != None) 199 | loss.backward() 200 | optimizer.step() 201 | losses.append(loss.item()) 202 | 203 | return loss.item() 204 | 205 | 206 | @torch.no_grad() 207 | def test_function(Xs, Ys, network, set_name=None): 208 | assert(set_name is not None) 209 | 210 | X = torch.autograd.Variable(torch.from_numpy(Xs)).cuda() 211 | Y = torch.autograd.Variable(torch.from_numpy(Ys)).float().cuda() 212 | 213 | network = network.cuda() 214 | network.eval() 215 | output = network(X) # (B,1,h,w) 216 | 217 | loss = 0.0 218 | loss_criterion = get_loss_criterion() 219 | 220 | avg_pool = nn.AvgPool2d(kernel_size=args.kernel_size, 221 | stride=args.kernel_size) 222 | 223 | output_reshape_ = avg_pool(output) * (args.kernel_size * args.kernel_size) 224 | 225 | if set_name == 'test_valid': 226 | pseudo_density_maps = create_pseudo_density(Xs) 227 | pseudo_density_maps = torch.from_numpy(pseudo_density_maps).cuda() 228 | 229 | pseudo_reshape_ = avg_pool(pseudo_density_maps) * (args.kernel_size * args.kernel_size) 230 | output_reshape = output_reshape_.view(-1, 1) 231 | pseudo_reshape = pseudo_reshape_.view(-1, 1) 232 | 233 | pseudo_median = pseudo_reshape.topk(int(args.percentile_thresh*len(pseudo_reshape)), dim=0)[0][-1:][0] 234 | Y_median = Y.topk(int(args.percentile_thresh*(len(Y))), dim=0)[0][-1:][0] 235 | a_output_indices = pseudo_reshape < pseudo_median 236 | a_Y_indices = Y < Y_median 237 | 238 | if a_output_indices.sum() > 2: 239 | loss_sparse = loss_criterion(output_reshape[a_output_indices].view(-1, 1), Y[a_Y_indices].view(-1, 1)) 240 | loss_dense = loss_criterion(output_reshape[~a_output_indices].view(-1, 1), Y[~a_Y_indices].view(-1, 1)) 241 | loss = (loss_sparse + loss_dense)* 0.01 242 | else: 243 | loss = loss_criterion(output_reshape, Y.view(-1, 1)) * 0.01 244 | else: 245 | output_reshape = output_reshape_.view(-1, 1) 246 | loss = loss_criterion(output_reshape, Y.view(-1, 1)) * 0.01 247 | count_error = torch.abs(torch.sum(Y.view(Y.size(0), -1), dim=1) - torch.sum(output.view(output.size(0), -1), dim=1)) 248 | 249 | network.train() 250 | network = set_batch_norm_to_eval(network) 251 | 252 | if set_name == "test_valid": 253 | return loss.item(), loss_sparse.item(), loss_dense.item(), output.cpu().detach().numpy() 254 | else: 255 | return loss.item(), output.cpu().detach().numpy(), count_error.cpu().detach().numpy() 256 | 257 | def test_network(dataset, set_name, network, print_output=False): 258 | assert(set_name == "test") 259 | if isinstance(print_output, str): 260 | print_path = print_output 261 | elif isinstance(print_output, bool) and print_output: 262 | print_path = './models_stage_2/dump' 263 | else: 264 | print_path = None 265 | 266 | loss_list = [] 267 | count_error_list = [] 268 | for idx, data in enumerate(dataset.test_get_data(set_name)): 269 | image_name, Xs, Ys = data 270 | image = Xs[0].transpose((1, 2, 0)) 271 | image = 
cv2.resize( 272 | image, (image.shape[1] // output_downscale, image.shape[0] // output_downscale)) 273 | 274 | loss, pred_dmap, count_error = test_function(Xs, Ys, network, set_name) 275 | max_val = max( 276 | np.max(pred_dmap[0, 0].reshape(-1)), np.max(Ys[0, 0].reshape(-1))) 277 | maps = [(np.transpose(image, (2, 0, 1)), {}), 278 | (pred_dmap[0, 0], {'cmap': 'jet', 279 | 'vmin': 0., 'vmax': max_val}), 280 | (Ys[0, 0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val})] 281 | 282 | loss_list.append(loss) 283 | count_error_list.append(count_error) 284 | 285 | # -- Plotting visualisations 286 | if print_path: 287 | print_graph(maps, "Gt:{},Pred:{}".format(np.sum(Ys), np.sum( 288 | pred_dmap)), os.path.join(print_path, image_name)) 289 | 290 | loss = np.mean(loss_list) 291 | mae = np.mean(count_error_list) 292 | return {'loss1': loss, 'new_mae': mae}, mae 293 | 294 | 295 | def val_network(dataset, set_name, network, print_output=False): 296 | assert(set_name == "test_valid") 297 | 298 | loss_list = [] 299 | loss_sparse_list, loss_dense_list = [], [] 300 | count_error_list = [] 301 | 302 | num_batches_per_epoch = 5 * len(dataset.data_files['test_valid']) // batch_size 303 | dataset.val_pos_counter = 0 304 | dataset.val_iterator = None 305 | 306 | for b_i in range(num_batches_per_epoch): 307 | Xs, _ = dataset.val_get_data(min(validation_set, args.batch_size)) 308 | 309 | loss, loss_sparse, loss_dense, pred_dmap = test_function(Xs, sampled_GT, network, set_name) 310 | 311 | loss_list.append(loss) 312 | loss_sparse_list.append(loss_sparse) 313 | loss_dense_list.append(loss_dense) 314 | 315 | loss = np.mean(loss_list) 316 | loss_s = np.mean(loss_sparse_list) 317 | loss_d = np.mean(loss_dense_list) 318 | return {'loss1': loss, 'loss_sparse': loss_s, 'loss_dense': loss_d}, None 319 | 320 | 321 | def train_network(): 322 | network = Stage2CountingNet() 323 | model_save_dir = './models_stage_2' 324 | model_save_path = os.path.join(model_save_dir, 'train2') 325 | if not os.path.exists(model_save_path): 326 | os.makedirs(model_save_path) 327 | os.makedirs(os.path.join(model_save_path, 'snapshots')) 328 | os.makedirs(os.path.join(model_save_dir, 'dump')) 329 | os.makedirs(os.path.join(model_save_dir, 'dump_test')) 330 | global f 331 | snapshot_path = os.path.join(model_save_path, 'snapshots') 332 | f = open(os.path.join(model_save_path, 'train0.log'), 'w') 333 | 334 | # -- Logging Parameters 335 | log(f, 'args: ' + str(args)) 336 | log(f, 'model: ' + str(network), False) 337 | log(f, 'Stage2...') 338 | log(f, 'LR: %.12f.' 
% (args.lr)) 339 | 340 | start_epoch = 0 341 | num_epochs = args.epochs 342 | valid_losses = {} 343 | train_losses = {} 344 | for metric in ['loss1', 'loss_sparse', 'loss_dense']: 345 | valid_losses[metric] = [] 346 | 347 | for metric in ['loss1']: 348 | train_losses[metric] = [] 349 | 350 | batch_size = args.batch_size 351 | args.percentile_thresh = float("{0:.2f}".format(1 - args.percentile_thresh)) 352 | num_train_images = len(dataset.data_files['train']) 353 | num_patches_per_image = args.patches 354 | assert(batch_size < (num_patches_per_image * num_train_images)) 355 | num_batches_per_epoch = num_patches_per_image * num_train_images // batch_size 356 | assert(num_batches_per_epoch >= 1) 357 | 358 | optimizer = optim.SGD(filter(lambda p: p.requires_grad, network.parameters()), 359 | lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) 360 | 361 | network = load_rot_model_blocks( 362 | network, snapshot_path='models_stage_1/train2/snapshots/', excluded_layers=excluded_layers) 363 | 364 | shift_thresh = get_shift_thresh() 365 | Lambda = get_lambda() 366 | 367 | log(f, "Shift thresh: {}, Lambda: {}".format(shift_thresh, Lambda)) 368 | 369 | # -- Main Training Loop 370 | min_valid_loss = 100. 371 | min_valid_sparse_loss = 100. 372 | min_valid_epoch = -1 373 | min_valid_sparse_loss_epoch = -1 374 | 375 | before_BN_weights_sum = check_BN_no_gradient_change( 376 | network, exclude_list=excluded_layers) 377 | before_conv_weights_sum = check_conv_no_gradient_change( 378 | network, exclude_list=excluded_layers) 379 | 380 | stop_training = False 381 | 382 | global sampled_GT 383 | 384 | for e_i, epoch in enumerate(range(start_epoch, num_epochs)): 385 | avg_loss = [] 386 | 387 | # b_i - batch index 388 | for b_i in range(num_batches_per_epoch): 389 | # Generate next training sample 390 | Xs, _Ys = dataset.train_get_data(batch_size=args.batch_size) 391 | 392 | after_conv_weights_sum = check_conv_no_gradient_change( 393 | network, exclude_list=excluded_layers) 394 | assert (np.all(before_conv_weights_sum == after_conv_weights_sum)) 395 | 396 | sampled_GT = None 397 | sampled_GT_shape = args.sbs * 7 * 7 * \ 398 | (8 // args.kernel_size) * (8 // args.kernel_size) 399 | 400 | sampling_parameters = [args.alpha, Lambda] 401 | sampled_GT = powerlaw.Truncated_Power_Law( 402 | parameters=sampling_parameters).generate_random(sampled_GT_shape) 403 | 404 | for s_i, s_val in enumerate(sampled_GT): 405 | if s_val < shift_thresh: 406 | sampled_GT[s_i] = np.random.uniform(0, shift_thresh) 407 | assert(sampled_GT.shape[0] == ( 408 | sampled_GT_shape) and sampled_GT.ndim == 1) 409 | 410 | train_loss = train_function( 411 | Xs, sampled_GT, network, optimizer) 412 | avg_loss.append(train_loss) 413 | 414 | # Logging losses after 1k iterations. 415 | if b_i % 1 == 0: 416 | log(f, 'Epoch %d [%d]: %s loss: %s.' 
% 417 | (epoch, b_i, [network.name], train_loss)) 418 | after_BN_weights_sum = check_BN_no_gradient_change( 419 | network, exclude_list=excluded_layers) 420 | after_conv_weights_sum = check_conv_no_gradient_change( 421 | network, exclude_list=excluded_layers) 422 | 423 | assert (np.all(before_BN_weights_sum == after_BN_weights_sum)) 424 | assert (np.all(before_conv_weights_sum == after_conv_weights_sum)) 425 | 426 | # -- Stats update 427 | avg_loss = np.mean(np.array(avg_loss)) 428 | train_losses['loss1'].append(avg_loss) 429 | log(f, 'TRAIN epoch: ' + str(epoch) + 430 | ' train mean loss1:' + str(avg_loss)) 431 | 432 | torch.cuda.empty_cache() 433 | 434 | log(f, 'Validating...') 435 | 436 | epoch_val_losses, valid_mae = val_network( 437 | dataset, 'test_valid', network) 438 | log(f, 'TEST valid epoch: ' + str(epoch) + 439 | ' test valid loss1, mae' + str(epoch_val_losses)) 440 | 441 | for metric in ['loss1', 'loss_sparse', 'loss_dense']: 442 | valid_losses[metric].append(epoch_val_losses[metric]) 443 | 444 | if e_i > args.ma_window: 445 | valid_losses_sparse_smooth = np.mean(valid_losses['loss_sparse'][-args.ma_window:]) 446 | valid_losses_dense_smooth = np.mean(valid_losses['loss_dense'][-args.ma_window:]) 447 | valid_losses_smooth = valid_losses_sparse_smooth + args.dense_weight * valid_losses_dense_smooth 448 | 449 | if valid_losses_sparse_smooth < min_valid_sparse_loss: 450 | min_valid_sparse_loss = valid_losses_sparse_smooth 451 | min_valid_sparse_loss_epoch = e_i 452 | #Check out for divergence in sparse loss 453 | if valid_losses_sparse_smooth > (min_valid_sparse_loss + 1.): 454 | stop_training = True 455 | min_valid_epoch = min_valid_sparse_loss_epoch 456 | elif valid_losses_smooth < min_valid_loss: 457 | min_valid_loss = valid_losses_smooth 458 | min_valid_epoch = e_i 459 | count = 0 460 | else: 461 | count = count + 1 462 | if count > args.patience: 463 | stop_training = True 464 | 465 | log(f, 'Best valid so far epoch: {}, valid_loss: {}'.format(min_valid_epoch, 466 | valid_losses['loss1'][min_valid_epoch])) 467 | # Save networks 468 | save_checkpoint({ 469 | 'epoch': epoch + 1, 470 | 'state_dict': network.state_dict(), 471 | 'optimizer': optimizer.state_dict(), 472 | }, snapshot_path, get_filename(network.name, epoch + 1)) 473 | 474 | print('saving graphs...') 475 | with open(os.path.join(snapshot_path, 'losses.pkl'), 'wb') as lossfile: 476 | pickle.dump((train_losses, valid_losses), 477 | lossfile, protocol=2) 478 | 479 | for metric in train_losses.keys(): 480 | if "maxima_split" not in metric: 481 | if isinstance(train_losses[metric][0], list): 482 | for i in range(len(train_losses[metric][0])): 483 | plt.plot([a[i] for a in train_losses[metric]]) 484 | plt.savefig(os.path.join(snapshot_path, 485 | 'train_%s_%d.png' % (metric, i))) 486 | plt.clf() 487 | plt.close() 488 | plt.plot(train_losses[metric]) 489 | plt.savefig(os.path.join( 490 | snapshot_path, 'train_%s.png' % metric)) 491 | plt.clf() 492 | plt.close() 493 | 494 | for metric in valid_losses.keys(): 495 | if isinstance(valid_losses[metric][0], list): 496 | for i in range(len(valid_losses[metric][0])): 497 | plt.plot([a[i] for a in valid_losses[metric]]) 498 | plt.savefig(os.path.join(snapshot_path, 499 | 'valid_%s_%d.png' % (metric, i))) 500 | plt.clf() 501 | plt.close() 502 | plt.plot(valid_losses[metric]) 503 | plt.savefig(os.path.join(snapshot_path, 'valid_%s.png' % metric)) 504 | plt.clf() 505 | plt.close() 506 | 507 | if stop_training: 508 | break 509 | 510 | network = load_net(network, snapshot_path, 
get_filename( 511 | network.name, min_valid_epoch)) 512 | log(f, 'Testing on best model {}'.format(min_valid_epoch)) 513 | epoch_test_losses, mae = test_network( 514 | dataset, 'test', network, print_output=os.path.join(model_save_dir, 'dump_test')) 515 | log(f, 'TEST epoch: ' + str(epoch) + 516 | ' test loss1, mae:' + str(epoch_test_losses) + ", " + str(mae)) 517 | log(f, 'Exiting train...') 518 | f.close() 519 | return 520 | 521 | 522 | if __name__ == '__main__': 523 | args = parser.parse_args() 524 | # -- Assign GPU 525 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) 526 | 527 | # -- Assertions 528 | assert (args.dataset) 529 | 530 | # -- Check if requirements satisfied 531 | assert(np.__version__=="1.15.4") 532 | assert(cv2.__version__=="3.4.3") 533 | assert(torch.__version__=="0.4.1") 534 | assert(powerlaw.__version__=="1.4.4") 535 | assert("9.0" in torch.version.cuda) 536 | 537 | # -- Setting seeds for reproducability 538 | seed = args.seed 539 | np.random.seed(seed) 540 | random.seed(seed) 541 | torch.manual_seed(seed) 542 | torch.backends.cudnn.enabled = False 543 | torch.backends.cudnn.deterministic = True 544 | torch.backends.cudnn.benchmark = False 545 | torch.cuda.manual_seed(seed) 546 | torch.cuda.manual_seed_all(seed) 547 | 548 | # -- Dataset paths 549 | if args.dataset == "parta": 550 | validation_set = 30 551 | output_downscale = 8 552 | blur_sigma = 2 553 | path = '../../dataset/ST_partA/' 554 | elif args.dataset == "ucfqnrf": 555 | validation_set = 240 556 | output_downscale = 8 557 | blur_sigma = 2 558 | args.dense_weight = 0.01 559 | path = "../../dataset/UCF-QNRF_ECCV18" 560 | 561 | model_save_dir = './models' 562 | 563 | batch_size = args.batch_size 564 | 565 | dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set, 566 | gt_downscale_factor=output_downscale) 567 | 568 | print(dataset.data_files['test_valid'], 569 | len(dataset.data_files['test_valid'])) 570 | print(dataset.data_files['train'], len(dataset.data_files['train'])) 571 | 572 | # -- Train the model 573 | train_network() 574 | -------------------------------------------------------------------------------- /stage2_main.py: -------------------------------------------------------------------------------- 1 | """ 2 | stage2_main.py: stage 2 training script 3 | """ 4 | 5 | import argparse 6 | import datetime 7 | import os 8 | import pickle 9 | import random 10 | 11 | import cv2 12 | import matplotlib 13 | import numpy as np 14 | import powerlaw 15 | import torch 16 | from matplotlib import pyplot as plt 17 | from mpmath import gammainc 18 | from torch import nn as nn 19 | from torch import optim as optim 20 | 21 | from crowd_dataset import CrowdDataset 22 | from models import (Stage2CountingNet, check_BN_no_gradient_change, 23 | check_conv_no_gradient_change, load_net, 24 | load_rot_model_blocks, set_batch_norm_to_eval) 25 | from sinkhorn import SinkhornSolver 26 | 27 | matplotlib.use('Agg') 28 | 29 | 30 | parser = argparse.ArgumentParser(description='CSS-CSNN Stage-2 Training') 31 | parser.add_argument('--epochs', default=600, type=int, metavar='N', 32 | help='number of total epochs to run') 33 | parser.add_argument('--gpu', default=0, type=int, 34 | help='GPU number') 35 | parser.add_argument('-b', '--batch-size', default=32, type=int, metavar='N', 36 | help='mini-batch size (default: 32),only used for train') 37 | parser.add_argument('--patches', default=1, type=int, metavar='N', 38 | help='number of patches per image') 39 | parser.add_argument('--dataset', default="parta", 
type=str, 40 | help='dataset to train on') 41 | parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float, 42 | metavar='LR', help='initial learning rate') 43 | parser.add_argument('--momentum', default=0.9, type=float, 44 | metavar='M', help='momentum') 45 | parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', 46 | help='weight decay (default: 1e-4)') 47 | parser.add_argument('--loss', default='sinkhorn', type=str, 48 | help="loss to use: mse or sinkhorn") 49 | parser.add_argument('--kernel_size', default=8, type=int, 50 | help="kernel size for summing counts") 51 | parser.add_argument('--sinkhorn_epsilon', default=0.1, type=float, 52 | help="entropy regularisation weight in sinkhorn") 53 | parser.add_argument('--sbs', '--sinkhorn_batch_size', default=32, 54 | type=int, help="points to sample from distribution") 55 | parser.add_argument('--sinkhorn_iterations', default=1000, 56 | type=int, help="no of iterations in sinkhorn") 57 | parser.add_argument('--seed', default=11, type=int, help="seed to use") 58 | parser.add_argument('--alpha', default=2.0, type=float, help="shape parameter of power law distribution") 59 | parser.add_argument('--cmax', default=3000, type=int, help="the maximum value") 60 | parser.add_argument('--scrop', default=4, type=int, help="patch approximation parameter") 61 | parser.add_argument('--num_samples', default=482, type=int, help="number of samples") 62 | parser.add_argument('--patience', default=300, type=int, help="epochs to train before stopping") 63 | parser.add_argument('--ma_window', default=5, type=int, help="window for computing moving average") 64 | sampled_GT = None 65 | 66 | # -- Compute CDF for Truncated Power Law Distribution 67 | def get_cdf(x, alpha, Lambda): 68 | CDF = ( (gammainc(1-alpha, Lambda*x)) / 69 | Lambda**(1-alpha) 70 | ) 71 | return 1-CDF 72 | 73 | # -- Obtain Lambda from max count 74 | def get_lambda(): 75 | m, n = 4, 4 76 | max_value = args.cmax / (args.scrop * m * n) 77 | 78 | for Lambda_t in np.arange(0.001, 0.1, 0.001): 79 | cdf = get_cdf(max_value, args.alpha, Lambda_t) 80 | if cdf > 1 - 1. 
/ args.num_samples: 81 | return Lambda_t 82 | 83 | # -- Get shift thresh 84 | def get_shift_thresh(): 85 | Lambda = get_lambda() 86 | for value in np.arange(1.01, 10, 0.01): 87 | cdf = get_cdf(value, args.alpha, Lambda) 88 | if cdf > 0.28: 89 | return float("{0:.2f}".format(value)) 90 | 91 | def log(f, txt, do_print=1): 92 | txt = str(datetime.datetime.now()) + ': ' + txt 93 | if do_print == 1: 94 | print(txt) 95 | f.write(txt + '\n') 96 | 97 | def get_filename(net_name, epochs_over): 98 | return net_name + "_epoch_" + str(epochs_over) + ".pth" 99 | 100 | 101 | def save_checkpoint(state, fdir, name='checkpoint.pth'): 102 | filepath = os.path.join(fdir, name) 103 | torch.save(state, filepath) 104 | 105 | 106 | def print_graph(maps, title, save_path): 107 | fig = plt.figure() 108 | st = fig.suptitle(title) 109 | for i, (map, args) in enumerate(maps): 110 | plt.subplot(1, len(maps), i + 1) 111 | if len(map.shape) > 2 and map.shape[0] == 3: 112 | plt.imshow(map.transpose((1, 2, 0)).astype( 113 | np.uint8), aspect='equal', **args) 114 | else: 115 | plt.imshow(map, aspect='equal', **args) 116 | plt.axis('off') 117 | plt.savefig(save_path + ".png", bbox_inches='tight', pad_inches=0) 118 | fig.clf() 119 | plt.clf() 120 | plt.close() 121 | 122 | 123 | excluded_layers = ['conv4_1', 'conv4_2', 'conv5_1'] 124 | 125 | 126 | def get_loss_criterion(): 127 | if args.loss == 'mse': 128 | return nn.MSELoss(size_average=True) 129 | elif args.loss == 'sinkhorn': 130 | return SinkhornSolver(epsilon=args.sinkhorn_epsilon, iterations=args.sinkhorn_iterations) 131 | else: 132 | raise NotImplementedError 133 | 134 | 135 | def train_function(Xs, Ys, network, optimizer): 136 | network = network.cuda() 137 | optimizer.zero_grad() 138 | 139 | X = torch.autograd.Variable(torch.from_numpy(Xs)).cuda() 140 | Y = torch.autograd.Variable(torch.FloatTensor(Ys)).cuda() 141 | 142 | outputs = network(X) 143 | 144 | losses = [] 145 | loss = 0.0 146 | loss_criterion = get_loss_criterion() 147 | 148 | avg_pool = nn.AvgPool2d(kernel_size=args.kernel_size, 149 | stride=args.kernel_size) 150 | output_reshape = avg_pool(outputs) * (args.kernel_size * args.kernel_size) 151 | 152 | loss = loss_criterion(output_reshape.view(-1, 1), Y.view(-1, 1)) * 0.01 153 | assert(loss.grad_fn != None) 154 | loss.backward() 155 | optimizer.step() 156 | losses.append(loss.item()) 157 | 158 | return loss.item() 159 | 160 | 161 | @torch.no_grad() 162 | def test_function(X, Y, network): 163 | X = torch.autograd.Variable(torch.from_numpy(X)).cuda() 164 | Y = torch.autograd.Variable(torch.from_numpy(Y)).cuda() 165 | 166 | network = network.cuda() 167 | network.eval() 168 | output = network(X) # (B,1,h,w) 169 | 170 | loss = 0.0 171 | loss_criterion = get_loss_criterion() 172 | avg_pool = nn.AvgPool2d(kernel_size=args.kernel_size, 173 | stride=args.kernel_size) 174 | 175 | output_reshape = avg_pool(output) * (args.kernel_size * args.kernel_size) 176 | 177 | loss = loss_criterion(output_reshape.view(-1, 1), 178 | torch.cuda.FloatTensor(sampled_GT).view(-1, 1)) * 0.01 179 | count_error = torch.abs(torch.sum(Y.view( 180 | Y.size(0), -1), dim=1) - torch.sum(output.view(output.size(0), -1), dim=1)) 181 | 182 | network.train() 183 | network = set_batch_norm_to_eval(network) 184 | return loss.item(), output.cpu().detach().numpy(), count_error.cpu().detach().numpy() 185 | 186 | 187 | def test_network(dataset, set_name, network, print_output=False): 188 | if isinstance(print_output, str): 189 | print_path = print_output 190 | elif isinstance(print_output, bool) and 
print_output: 191 | print_path = './models_stage_2/dump' 192 | else: 193 | print_path = None 194 | 195 | loss_list = [] 196 | count_error_list = [] 197 | for idx, data in enumerate(dataset.test_get_data(set_name)): 198 | image_name, Xs, Ys = data 199 | image = Xs[0].transpose((1, 2, 0)) 200 | image = cv2.resize( 201 | image, (image.shape[1] // output_downscale, image.shape[0] // output_downscale)) 202 | 203 | loss, pred_dmap, count_error = test_function(Xs, Ys, network) 204 | max_val = max( 205 | np.max(pred_dmap[0, 0].reshape(-1)), np.max(Ys[0, 0].reshape(-1))) 206 | maps = [(np.transpose(image, (2, 0, 1)), {}), 207 | (pred_dmap[0, 0], {'cmap': 'jet', 208 | 'vmin': 0., 'vmax': max_val}), 209 | (Ys[0, 0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val})] 210 | 211 | loss_list.append(loss) 212 | count_error_list.append(count_error) 213 | 214 | # -- Plotting visualisations 215 | if print_path: 216 | print_graph(maps, "Gt:{},Pred:{}".format(np.sum(Ys), np.sum( 217 | pred_dmap)), os.path.join(print_path, image_name)) 218 | 219 | loss = np.mean(loss_list) 220 | mae = np.mean(count_error_list) 221 | 222 | if set_name == "test": 223 | return {'loss1': loss, 'new_mae': mae}, mae 224 | else: 225 | # -- not returning MAE for validation split 226 | return {'loss1': loss, 'new_mae': None}, None 227 | 228 | def train_network(): 229 | network = Stage2CountingNet() 230 | model_save_dir = './models_stage_2' 231 | model_save_path = os.path.join(model_save_dir, 'train2') 232 | if not os.path.exists(model_save_path): 233 | os.makedirs(model_save_path) 234 | os.makedirs(os.path.join(model_save_path, 'snapshots')) 235 | os.makedirs(os.path.join(model_save_dir, 'dump')) 236 | os.makedirs(os.path.join(model_save_dir, 'dump_test')) 237 | global f 238 | snapshot_path = os.path.join(model_save_path, 'snapshots') 239 | f = open(os.path.join(model_save_path, 'train0.log'), 'w') 240 | 241 | # -- Logging Parameters 242 | log(f, 'args: ' + str(args)) 243 | log(f, 'model: ' + str(network), False) 244 | log(f, 'Stage2...') 245 | log(f, 'LR: %.12f.' % (args.lr)) 246 | 247 | start_epoch = 0 248 | num_epochs = args.epochs 249 | valid_losses = {} 250 | train_losses = {} 251 | for metric in ['loss1', 'new_mae']: 252 | valid_losses[metric] = [] 253 | 254 | for metric in ['loss1']: 255 | train_losses[metric] = [] 256 | 257 | batch_size = args.batch_size 258 | num_train_images = len(dataset.data_files['train']) 259 | num_patches_per_image = args.patches 260 | assert(batch_size < (num_patches_per_image * num_train_images)) 261 | num_batches_per_epoch = num_patches_per_image * num_train_images // batch_size 262 | assert(num_batches_per_epoch >= 1) 263 | 264 | optimizer = optim.SGD(filter(lambda p: p.requires_grad, network.parameters()), 265 | lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) 266 | 267 | network = load_rot_model_blocks( 268 | network, snapshot_path='models_stage_1/train2/snapshots/', excluded_layers=excluded_layers) 269 | 270 | shift_thresh = get_shift_thresh() 271 | Lambda = get_lambda() 272 | log(f, "Shift Thresh: {}, Lambda: {}".format(shift_thresh, Lambda)) 273 | 274 | # -- Main Training Loop 275 | min_valid_loss = 100. 
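# min_valid_loss / min_valid_epoch (below) record the best moving-average validation loss and the epoch it was reached;
# further down in the loop, training stops early once this smoothed loss (window --ma_window) has not improved for --patience consecutive epochs.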
276 | min_valid_epoch = -1 277 | 278 | before_BN_weights_sum = check_BN_no_gradient_change( 279 | network, exclude_list=excluded_layers) 280 | before_conv_weights_sum = check_conv_no_gradient_change( 281 | network, exclude_list=excluded_layers) 282 | 283 | stop_training = False 284 | 285 | global sampled_GT 286 | 287 | for e_i, epoch in enumerate(range(start_epoch, num_epochs)): 288 | avg_loss = [] 289 | 290 | # b_i - batch index 291 | for b_i in range(num_batches_per_epoch): 292 | # Generate next training sample 293 | Xs, _ = dataset.train_get_data(batch_size=args.batch_size) 294 | 295 | after_conv_weights_sum = check_conv_no_gradient_change( 296 | network, exclude_list=excluded_layers) 297 | assert (np.all(before_conv_weights_sum == after_conv_weights_sum)) 298 | 299 | sampled_GT = None 300 | sampled_GT_shape = args.sbs * 7 * 7 * \ 301 | (8 // args.kernel_size) * (8 // args.kernel_size) 302 | 303 | sampling_parameters = [args.alpha, Lambda] 304 | sampled_GT = powerlaw.Truncated_Power_Law( 305 | parameters=sampling_parameters).generate_random(sampled_GT_shape) 306 | 307 | for s_i, s_val in enumerate(sampled_GT): 308 | if s_val < shift_thresh: 309 | sampled_GT[s_i] = np.random.uniform(0, shift_thresh) 310 | assert(sampled_GT.shape[0] == ( 311 | sampled_GT_shape) and sampled_GT.ndim == 1) 312 | 313 | train_loss = train_function( 314 | Xs, sampled_GT, network, optimizer) 315 | avg_loss.append(train_loss) 316 | 317 | # Logging losses after each iteration. 318 | if b_i % 1 == 0: 319 | log(f, 'Epoch %d [%d]: %s loss: %s.' % 320 | (epoch, b_i, [network.name], train_loss)) 321 | after_BN_weights_sum = check_BN_no_gradient_change( 322 | network, exclude_list=excluded_layers) 323 | after_conv_weights_sum = check_conv_no_gradient_change( 324 | network, exclude_list=excluded_layers) 325 | 326 | assert (np.all(before_BN_weights_sum == after_BN_weights_sum)) 327 | assert (np.all(before_conv_weights_sum == after_conv_weights_sum)) 328 | 329 | # -- Stats update 330 | avg_loss = np.mean(np.array(avg_loss)) 331 | train_losses['loss1'].append(avg_loss) 332 | log(f, 'TRAIN epoch: ' + str(epoch) + 333 | ' train mean loss1:' + str(avg_loss)) 334 | 335 | torch.cuda.empty_cache() 336 | 337 | log(f, 'Validating...') 338 | 339 | epoch_val_losses, valid_mae = test_network( 340 | dataset, 'test_valid', network, True) 341 | log(f, 'TEST valid epoch: ' + str(epoch) + 342 | ' test valid loss1, mae' + str(epoch_val_losses)) 343 | 344 | for metric in ['loss1', 'new_mae']: 345 | valid_losses[metric].append(epoch_val_losses[metric]) 346 | 347 | if e_i > args.ma_window: 348 | valid_losses_smooth = np.mean(valid_losses['loss1'][-args.ma_window:]) 349 | if valid_losses_smooth < min_valid_loss: 350 | min_valid_loss = valid_losses_smooth 351 | min_valid_epoch = e_i 352 | count = 0 353 | else: 354 | count = count + 1 355 | if count > args.patience: 356 | stop_training = True 357 | 358 | log(f, 'Best valid so far epoch: {}, valid_loss: {}'.format(min_valid_epoch, 359 | valid_losses['loss1'][min_valid_epoch])) 360 | # Save networks 361 | save_checkpoint({ 362 | 'epoch': epoch + 1, 363 | 'state_dict': network.state_dict(), 364 | 'optimizer': optimizer.state_dict(), 365 | }, snapshot_path, get_filename(network.name, epoch + 1)) 366 | 367 | print('saving graphs...') 368 | with open(os.path.join(snapshot_path, 'losses.pkl'), 'wb') as lossfile: 369 | pickle.dump((train_losses, valid_losses), 370 | lossfile, protocol=2) 371 | 372 | for metric in train_losses.keys(): 373 | if "maxima_split" not in metric: 374 | if 
isinstance(train_losses[metric][0], list): 375 | for i in range(len(train_losses[metric][0])): 376 | plt.plot([a[i] for a in train_losses[metric]]) 377 | plt.savefig(os.path.join(snapshot_path, 378 | 'train_%s_%d.png' % (metric, i))) 379 | plt.clf() 380 | plt.close() 381 | plt.plot(train_losses[metric]) 382 | plt.savefig(os.path.join( 383 | snapshot_path, 'train_%s.png' % metric)) 384 | plt.clf() 385 | plt.close() 386 | 387 | for metric in valid_losses.keys(): 388 | if isinstance(valid_losses[metric][0], list): 389 | for i in range(len(valid_losses[metric][0])): 390 | plt.plot([a[i] for a in valid_losses[metric]]) 391 | plt.savefig(os.path.join(snapshot_path, 392 | 'valid_%s_%d.png' % (metric, i))) 393 | plt.clf() 394 | plt.close() 395 | plt.plot(valid_losses[metric]) 396 | plt.savefig(os.path.join(snapshot_path, 'valid_%s.png' % metric)) 397 | plt.clf() 398 | plt.close() 399 | 400 | if stop_training: 401 | break 402 | 403 | network = load_net(network, snapshot_path, get_filename( 404 | network.name, min_valid_epoch + 1)) 405 | log(f, 'Testing on best model {}'.format(min_valid_epoch)) 406 | epoch_test_losses, mae = test_network( 407 | dataset, 'test', network, print_output=os.path.join(model_save_dir, 'dump_test')) 408 | log(f, 'TEST epoch: ' + str(epoch) + 409 | ' test loss1, mae:' + str(epoch_test_losses) + ", " + str(mae)) 410 | log(f, 'Exiting train...') 411 | f.close() 412 | return 413 | 414 | 415 | if __name__ == '__main__': 416 | args = parser.parse_args() 417 | # -- Assign GPU 418 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) 419 | 420 | # -- Assertions 421 | assert (args.dataset) 422 | 423 | # -- Check if requirements satisfied 424 | assert(np.__version__=="1.15.4") 425 | assert(cv2.__version__=="3.4.3") 426 | assert(torch.__version__=="0.4.1") 427 | assert(powerlaw.__version__=="1.4.4") 428 | assert("9.0" in torch.version.cuda) 429 | 430 | # -- Setting seeds for reproducability 431 | seed = args.seed 432 | np.random.seed(seed) 433 | random.seed(seed) 434 | torch.manual_seed(seed) 435 | torch.backends.cudnn.enabled = False 436 | torch.backends.cudnn.deterministic = True 437 | torch.backends.cudnn.benchmark = False 438 | torch.cuda.manual_seed(seed) 439 | torch.cuda.manual_seed_all(seed) 440 | 441 | # -- Dataset paths 442 | if args.dataset == "parta": 443 | validation_set = 30 444 | path = '../../dataset/ST_partA/' 445 | output_downscale = 8 446 | elif args.dataset == "ucfqnrf": 447 | validation_set = 240 448 | output_downscale = 8 449 | args.patience = 100 450 | path = "../../dataset/UCF-QNRF_ECCV18" 451 | 452 | model_save_dir = './models' 453 | 454 | batch_size = args.batch_size 455 | 456 | dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set, 457 | gt_downscale_factor=output_downscale) 458 | 459 | print(dataset.data_files['test_valid'], 460 | len(dataset.data_files['test_valid'])) 461 | print(dataset.data_files['train'], len(dataset.data_files['train'])) 462 | 463 | # -- Train the model 464 | train_network() 465 | -------------------------------------------------------------------------------- /test_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | test_model.py: testing script 3 | """ 4 | 5 | import argparse 6 | import datetime 7 | import os 8 | import random 9 | 10 | import cv2 11 | import matplotlib 12 | import numpy as np 13 | import torch 14 | from matplotlib import pyplot as plt 15 | 16 | from crowd_dataset import CrowdDataset 17 | from models import Stage2CountingNet, load_net, 
set_batch_norm_to_eval 18 | 19 | matplotlib.use('Agg') 20 | 21 | 22 | 23 | parser = argparse.ArgumentParser(description='Test CSS-CCNN Model') 24 | parser.add_argument('--epochs', default=200, type=int, metavar='N', 25 | help='number of total epochs to run') 26 | parser.add_argument('--gpu', default=1, type=int, 27 | help='GPU number') 28 | parser.add_argument('-b', '--batch-size', default=4, type=int, metavar='N', 29 | help='mini-batch size (default: 4),only used for train') 30 | parser.add_argument('--patches', default=1, type=int, metavar='N', 31 | help='number of patches per image') 32 | parser.add_argument('--dataset', default="parta", type=str, 33 | help='dataset to train on') 34 | parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, 35 | metavar='LR', help='initial learning rate') 36 | parser.add_argument('--momentum', default=0.9, type=float, 37 | metavar='M', help='momentum') 38 | parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', 39 | help='weight decay (default: 1e-4)') 40 | parser.add_argument('--trained-model', default='', type=str, metavar='PATH', help='filename of model to load', 41 | nargs='+') 42 | parser.add_argument('--loss', default='sinkhorn', type=str, help="loss to use: mse or sinkhorn") 43 | parser.add_argument('--kernel_size', default=8, type=int, help="kernel size for summing counts") 44 | parser.add_argument('--sinkhorn_epsilon', default=0.1, type=float, help="entropy regularisation weight in sinkhorn") 45 | parser.add_argument('--sinkhorn_batch_size', default=4, type=int, help="points to sample from distribution") 46 | parser.add_argument('--sinkhorn_iterations', default=1000, type=int, help="no of iterations in sinkhorn") 47 | parser.add_argument('--best_model_name', default=None, type=str, help="name of the best model checkpoint") 48 | 49 | sampled_GT = None 50 | 51 | def log(f, txt, do_print=1): 52 | txt = str(datetime.datetime.now()) + ': ' + txt 53 | if do_print == 1: 54 | print(txt) 55 | f.write(txt + '\n') 56 | 57 | def get_filename(net_name, epochs_over): 58 | return net_name + "_epoch_" + str(epochs_over) + ".pth" 59 | 60 | def print_graph(maps, title, save_path): 61 | fig = plt.figure() 62 | st = fig.suptitle(title) 63 | for i, (map, args) in enumerate(maps): 64 | plt.subplot(1, len(maps), i + 1) 65 | if len(map.shape) > 2 and map.shape[0] == 3: 66 | plt.imshow(map.transpose((1, 2, 0)).astype(np.uint8),aspect='equal', **args) 67 | else: 68 | plt.imshow(map, aspect='equal', **args) 69 | plt.axis('off') 70 | plt.savefig(save_path + ".png", bbox_inches='tight', pad_inches = 0) 71 | fig.clf() 72 | plt.clf() 73 | plt.close() 74 | 75 | 76 | @torch.no_grad() 77 | def test_function(X, Y, network): 78 | X = torch.autograd.Variable(torch.from_numpy(X)).cuda() 79 | Y = torch.autograd.Variable(torch.from_numpy(Y)).cuda() 80 | 81 | network = network.cuda() 82 | network.eval() 83 | output = network(X) # (B,1,h,w) 84 | 85 | count_error = torch.abs(torch.sum(Y.view(Y.size(0), -1), dim=1) - torch.sum(output.view(output.size(0), -1), dim=1)) 86 | 87 | network.train() 88 | network = set_batch_norm_to_eval(network) 89 | return output.cpu().detach().numpy(), count_error.cpu().detach().numpy() 90 | 91 | 92 | def test_network(dataset, set_name, network, print_output=False): 93 | if isinstance(print_output, str): 94 | print_path = print_output 95 | elif isinstance(print_output, bool) and print_output: 96 | print_path = './models_stage_2/dump' 97 | else: 98 | print_path = None 99 | 100 | count_error_list = [] 101 | for idx, 
data in enumerate(dataset.test_get_data(set_name)): 102 | image_name, Xs, Ys = data 103 | image = Xs[0].transpose((1, 2, 0)) 104 | image = cv2.resize(image, (image.shape[1] // output_downscale, image.shape[0] // output_downscale)) 105 | 106 | pred_dmap, count_error = test_function(Xs, Ys, network) 107 | 108 | max_val = max(np.max(pred_dmap[0, 0].reshape(-1)), np.max(Ys[0, 0].reshape(-1))) 109 | maps = [(np.transpose(image,(2,0,1)), {}), 110 | (pred_dmap[0,0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val}), 111 | (Ys[0,0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val})] 112 | 113 | count_error_list.append(count_error) 114 | 115 | if print_path: 116 | print_graph(maps, "Gt:{},Pred:{}".format(np.sum(Ys),np.sum(pred_dmap)), os.path.join(print_path, image_name)) 117 | 118 | mae = np.mean(count_error_list) 119 | mse = np.sqrt(np.mean(np.square(count_error_list))) 120 | return {'mae':mae, 'mse': mse}, mae 121 | 122 | def train_network(): 123 | network = Stage2CountingNet() 124 | model_save_dir = './models_stage_2' 125 | model_save_path = os.path.join(model_save_dir, 'train2') 126 | if not os.path.exists(model_save_path): 127 | os.makedirs(model_save_path) 128 | os.makedirs(os.path.join(model_save_path, 'snapshots')) 129 | os.makedirs(os.path.join(model_save_dir,'dump')) 130 | os.makedirs(os.path.join(model_save_dir,'dump_test')) 131 | global f 132 | snapshot_path = os.path.join(model_save_path, 'snapshots') 133 | 134 | network = load_net(network, snapshot_path, get_filename(network.name, args.best_model_name)) 135 | print(network) 136 | epoch_test_losses, mae = test_network(dataset, 'test', network, print_output=os.path.join(model_save_dir,'dump_test')) 137 | print('TEST mae, mse:' + str(epoch_test_losses)) 138 | return 139 | 140 | 141 | if __name__ == '__main__': 142 | args = parser.parse_args() 143 | # -- Assign GPU 144 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) 145 | 146 | # -- Assertions 147 | assert (args.dataset) 148 | 149 | # -- Setting seeds for reproducability 150 | np.random.seed(11) 151 | random.seed(11) 152 | torch.manual_seed(11) 153 | torch.backends.cudnn.enabled = False 154 | torch.backends.cudnn.deterministic = True 155 | torch.backends.cudnn.benchmark = False 156 | torch.cuda.manual_seed(11) 157 | torch.cuda.manual_seed_all(11) 158 | 159 | # -- Dataset paths 160 | if args.dataset == "parta": 161 | validation_set = 30 162 | output_downscale = 8 163 | path = '../../dataset/ST_partA/' 164 | elif args.dataset == "ucfqnrf": 165 | validation_set = 240 166 | output_downscale = 8 167 | path = "../../dataset/UCF-QNRF_ECCV18" 168 | 169 | model_save_dir = './models' 170 | 171 | batch_size = args.batch_size 172 | 173 | dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set, 174 | gt_downscale_factor=output_downscale) 175 | print(dataset.data_files['test_valid'], len(dataset.data_files['test_valid'])) 176 | print(dataset.data_files['train'], len(dataset.data_files['train'])) 177 | 178 | # -- Train the model 179 | train_network() 180 | --------------------------------------------------------------------------------
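Taken together, sinkhorn.py provides the optimal-transport loss and the stage-2 scripts draw their per-cell targets from a truncated power law. The snippet below is a minimal sketch of that pairing and is not part of the repository: the alpha and Lambda values and the random stand-in for the network's pooled density predictions are illustrative assumptions, while the powerlaw and SinkhornSolver calls mirror the ones used in stage2_main.py.

import powerlaw
import torch

from sinkhorn import SinkhornSolver

# Assumed prior parameters; stage2_main.py derives Lambda from --cmax via get_lambda()
alpha, Lambda = 2.0, 0.01
num_cells = 32 * 7 * 7  # sampled_GT_shape for --sbs 32 and --kernel_size 8

# Per-cell "ground truth" counts sampled from the truncated power-law prior,
# exactly as the stage-2 training loop does
prior_counts = powerlaw.Truncated_Power_Law(
    parameters=[alpha, Lambda]).generate_random(num_cells)
prior = torch.FloatTensor(prior_counts).view(-1, 1)

# Stand-in for avg_pool(network(X)) * kernel_size**2: any tensor of predicted
# per-cell counts with this shape works for the illustration
pred = torch.rand(num_cells, 1) * 10
pred.requires_grad_(True)

solver = SinkhornSolver(epsilon=0.1, iterations=1000)
loss = solver(pred, prior) * 0.01  # same 0.01 scaling as in train_function
loss.backward()                    # gradients w.r.t. the predictions drive the regressor
print(loss.item())

As in train_function, only the distribution of pooled counts is constrained; no per-image annotation enters the loss.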