├── .gitignore
├── ICNet.png
├── LICENSE
├── README.md
├── ckpt
│   ├── .gitignore
│   ├── icnet_resnet50_evaluate_log.txt
│   └── icnet_resnet50_log.txt
├── configs
│   ├── .gitignore
│   └── icnet.yaml
├── dataset
│   ├── .gitignore
│   ├── __init__.py
│   ├── cityscapes.py
│   └── segbase.py
├── demo
│   ├── frankfurt_000001_057181_leftImg8bit_label.png
│   ├── frankfurt_000001_057181_leftImg8bit_mIoU_0.680.png
│   ├── frankfurt_000001_057181_leftImg8bit_mIoU_0.716.png
│   ├── frankfurt_000001_057181_leftImg8bit_mIoU_0.727.png
│   ├── frankfurt_000001_057181_leftImg8bit_src.png
│   ├── lindau_000005_000019_leftImg8bit_label.png
│   ├── lindau_000005_000019_leftImg8bit_mIoU_0.657.png
│   ├── lindau_000005_000019_leftImg8bit_mIoU_0.700.png
│   ├── lindau_000005_000019_leftImg8bit_mIoU_0.705.png
│   ├── lindau_000005_000019_leftImg8bit_src.png
│   ├── munster_000061_000019_leftImg8bit_label.png
│   ├── munster_000061_000019_leftImg8bit_mIoU_0.672.png
│   ├── munster_000061_000019_leftImg8bit_mIoU_0.692.png
│   ├── munster_000061_000019_leftImg8bit_mIoU_0.704.png
│   ├── munster_000061_000019_leftImg8bit_src.png
│   ├── munster_000075_000019_leftImg8bit_label.png
│   ├── munster_000075_000019_leftImg8bit_mIoU_0.672.png
│   ├── munster_000075_000019_leftImg8bit_mIoU_0.690.png
│   ├── munster_000075_000019_leftImg8bit_mIoU_0.703.png
│   ├── munster_000075_000019_leftImg8bit_src.png
│   ├── munster_000106_000019_leftImg8bit_label.png
│   ├── munster_000106_000019_leftImg8bit_mIoU_0.672.png
│   ├── munster_000106_000019_leftImg8bit_mIoU_0.690.png
│   ├── munster_000106_000019_leftImg8bit_mIoU_0.703.png
│   ├── munster_000106_000019_leftImg8bit_src.png
│   ├── munster_000121_000019_leftImg8bit_label.png
│   ├── munster_000121_000019_leftImg8bit_mIoU_0.660.png
│   ├── munster_000121_000019_leftImg8bit_mIoU_0.678.png
│   ├── munster_000121_000019_leftImg8bit_mIoU_0.694.png
│   ├── munster_000121_000019_leftImg8bit_src.png
│   ├── munster_000124_000019_leftImg8bit_label.png
│   ├── munster_000124_000019_leftImg8bit_mIoU_0.660.png
│   ├── munster_000124_000019_leftImg8bit_mIoU_0.695.png
│   ├── munster_000124_000019_leftImg8bit_mIoU_0.696.png
│   ├── munster_000124_000019_leftImg8bit_src.png
│   ├── munster_000150_000019_leftImg8bit_label.png
│   ├── munster_000150_000019_leftImg8bit_mIoU_0.660.png
│   ├── munster_000150_000019_leftImg8bit_mIoU_0.695.png
│   ├── munster_000150_000019_leftImg8bit_mIoU_0.696.png
│   ├── munster_000150_000019_leftImg8bit_src.png
│   ├── munster_000158_000019_leftImg8bit_label.png
│   ├── munster_000158_000019_leftImg8bit_mIoU_0.658.png
│   ├── munster_000158_000019_leftImg8bit_mIoU_0.676.png
│   ├── munster_000158_000019_leftImg8bit_mIoU_0.692.png
│   └── munster_000158_000019_leftImg8bit_src.png
├── evaluate.py
├── models
│   ├── __init__.py
│   ├── base_models
│   │   ├── __init__.py
│   │   └── resnetv1b.py
│   ├── icnet.py
│   ├── model_store.py
│   └── segbase.py
├── requirements.txt
├── train.py
└── utils
    ├── __init__.py
    ├── download.py
    ├── logger.py
    ├── loss.py
    ├── lr_scheduler.py
    ├── metric.py
    └── visualize.py

/.gitignore:
--------------------------------------------------------------------------------
1 | #
2 | .DS_Store
3 | train_customize.py
4 | 
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 | 
10 | # C extensions
11 | *.so
12 | 
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | *.idea
30 | MANIFEST
31 | 
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 | 
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 | 
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | .hypothesis/
52 | .pytest_cache/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | 
63 | # Flask stuff:
64 | instance/
65 | .webassets-cache
66 | 
67 | # Scrapy stuff:
68 | .scrapy
69 | 
70 | # Sphinx documentation
71 | docs/_build/
72 | 
73 | # PyBuilder
74 | target/
75 | 
76 | # Jupyter Notebook
77 | .ipynb_checkpoints
78 | 
79 | # pyenv
80 | .python-version
81 | 
82 | # celery beat schedule file
83 | celerybeat-schedule
84 | 
85 | # SageMath parsed files
86 | *.sage.py
87 | 
88 | # Environments
89 | .env
90 | .venv
91 | env/
92 | venv/
93 | ENV/
94 | env.bak/
95 | venv.bak/
96 | 
97 | # Spyder project settings
98 | .spyderproject
99 | .spyproject
100 | 
101 | # Rope project settings
102 | .ropeproject
103 | 
104 | # mkdocs documentation
105 | /site
106 | 
107 | # mypy
108 | .mypy_cache/
109 | 
110 | # pycharm
111 | 
112 | # premodel
113 | weights/
114 | *.pkl
115 | *.pth
116 | 
117 | # dataset
118 | datasets/
119 | VOCdevket/
120 | eval/
121 | 
122 | # overfitting test
123 | 
124 | # run result
125 | 
126 | # model
127 | /models/hrnet.py
128 | /models/psanet_old.py
129 | /scripts/debug.py
130 | 
131 | # nn
132 | nn/sync_bn/
--------------------------------------------------------------------------------
/ICNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/ICNet.png
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2019 Limin Zhang
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Description
2 | This repo contains a PyTorch implementation of ICNet, based on the [paper](https://arxiv.org/abs/1704.08545) by Hengshuang Zhao et al. (ECCV'18).
3 | Training and evaluation are done on the [Cityscapes dataset](https://www.cityscapes-dataset.com/) by default.
4 | 
5 | # Requirements
6 | Python 3.6 or later, with the following packages (install them via `pip3 install -r requirements.txt`):
7 | - torch==1.1.0
8 | - torchsummary==1.5.1
9 | - torchvision==0.3.0
10 | - numpy==1.17.0
11 | - Pillow==6.0.0
12 | - PyYAML==5.1.2
13 | 
14 | # Updates
15 | - 2019.11.15: changed `crop_size` to 960; the best mIoU increased to 71.0%. Training took about 2 days. Get [icnet_resnet50_197_0.710_best_model.pth]()
16 | 
17 | # Performance
18 | | Method | mIoU(%) | Time(ms) | FPS | Memory(GB) | GPU |
19 | |:---:|:---:|:---:|:---:|:---:|:---:|
20 | | ICNet(paper) | 67.7% | 33ms | 30.3 | **1.6** | TitanX |
21 | | ICNet(ours) | **71.0%** | **19ms** | **52.6** | 1.86 | GTX 1080Ti |
22 | - Results are based on the Cityscapes dataset: trained only on the training set and tested on the validation set, using a single GTX 1080Ti card; the input size in the test phase is 2048x1024x3. A minimal timing sketch follows the demo table below.
23 | - For the performance of the original paper, see "Table 2" in the [paper](https://arxiv.org/abs/1704.08545).
24 | 
25 | # Demo
26 | |image|predict|
27 | |:---:|:---:|
28 | |![src](https://github.com/liminn/ICNet/raw/master/demo/frankfurt_000001_057181_leftImg8bit_src.png)|![predict](https://github.com/liminn/ICNet/raw/master/demo/frankfurt_000001_057181_leftImg8bit_mIoU_0.716.png)|
29 | |![src](https://github.com/liminn/ICNet/raw/master/demo/lindau_000005_000019_leftImg8bit_src.png)|![predict](https://github.com/liminn/ICNet/raw/master/demo/lindau_000005_000019_leftImg8bit_mIoU_0.700.png)|
30 | |![src](https://github.com/liminn/ICNet/raw/master/demo/munster_000061_000019_leftImg8bit_src.png)|![predict](https://github.com/liminn/ICNet/raw/master/demo/munster_000061_000019_leftImg8bit_mIoU_0.692.png)|
31 | |![src](https://github.com/liminn/ICNet/raw/master/demo/munster_000075_000019_leftImg8bit_src.png)|![predict](https://github.com/liminn/ICNet/raw/master/demo/munster_000075_000019_leftImg8bit_mIoU_0.690.png)|
32 | |![src](https://github.com/liminn/ICNet/raw/master/demo/munster_000106_000019_leftImg8bit_src.png)|![predict](https://github.com/liminn/ICNet/raw/master/demo/munster_000106_000019_leftImg8bit_mIoU_0.690.png)|
33 | |![src](https://github.com/liminn/ICNet/raw/master/demo/munster_000121_000019_leftImg8bit_src.png)|![predict](https://github.com/liminn/ICNet/raw/master/demo/munster_000121_000019_leftImg8bit_mIoU_0.678.png)|
34 | |![src](https://github.com/liminn/ICNet/raw/master/demo/munster_000124_000019_leftImg8bit_src.png)|![predict](https://github.com/liminn/ICNet/raw/master/demo/munster_000124_000019_leftImg8bit_mIoU_0.695.png)|
35 | - All the input images come from the Cityscapes validation set; switch to the `demo/` directory to check more demo results.
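For reference, the `Time(ms)` and `FPS` numbers above can be measured with a simple timing loop. Below is a minimal sketch, assuming the model class is `ICNet` in `models/icnet.py` and that it takes an `nclass` argument; check the actual constructor in the repo before running.

```python
# Minimal inference-speed measurement sketch (class name and constructor
# arguments are assumptions; adapt them to models/icnet.py).
import time

import torch

from models.icnet import ICNet

model = ICNet(nclass=19).cuda().eval()        # 19 Cityscapes classes
image = torch.randn(1, 3, 1024, 2048).cuda()  # 2048x1024x3 test-phase input

with torch.no_grad():
    for _ in range(10):            # warm-up, so CUDA initialization is not timed
        model(image)
    torch.cuda.synchronize()       # wait for all queued kernels to finish
    start = time.time()
    for _ in range(100):
        model(image)
    torch.cuda.synchronize()
    per_image = (time.time() - start) / 100

print("{:.1f} ms per image, {:.1f} FPS".format(per_image * 1000, 1.0 / per_image))
```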
36 | 
37 | # Usage
38 | ## Training
39 | First, modify the configuration in the `configs/icnet.yaml` file:
40 | ```yaml
41 | ### 3.Training
42 | train:
43 |   specific_gpu_num: "1"   # for example: "0", "1" or "0, 1"
44 |   train_batch_size: 7     # adjust according to gpu resources
45 |   cityscapes_root: "/home/datalab/ex_disk1/open_dataset/Cityscapes/"
46 |   ckpt_dir: "./ckpt/"     # ckpt and training log will be saved here
47 | ```
48 | Then, run: `python3 train.py`
49 | 
50 | ## Evaluation
51 | First, modify the configuration in the `configs/icnet.yaml` file:
52 | ```yaml
53 | ### 4.Test
54 | test:
55 |   ckpt_path: "./ckpt/icnet_resnet50_197_0.710_best_model.pth"  # set the pretrained model path correctly
56 | ```
57 | Then, run: `python3 evaluate.py`
58 | 
59 | # Discussion
60 | ![ICNet](https://github.com/liminn/ICNet/raw/master/ICNet.png)
61 | The structure of ICNet is mainly composed of `sub4`, `sub2`, `sub1` and `head`:
62 | - `sub4`: basically a PSPNet; the biggest difference is a modified `pyramid pooling module`.
63 | - `sub2`: the first three stages of convolutional layers of `sub4`; `sub2` and `sub4` share these three stages.
64 | - `sub1`: three consecutive strided convolutional layers that quickly downsample the original large-size input image.
65 | - `head`: the outputs of the three cascaded branches (`sub4`, `sub2` and `sub1`) are fused through the `CFF` (cascade feature fusion) module. Finally, a 1x1 convolution and interpolation produce the output.
66 | 
67 | During training, I found that the `pyramid pooling module` in `sub4` is very important: it can significantly improve the performance of lightweight models such as this network.
68 | 
69 | The most important thing in the data preprocessing phase is to set `crop_size` reasonably: set `crop_size` as close as possible to the input size of the prediction phase. Here are my experiments:
70 | - I set `base_size` to 520, which means the shorter side of the image is resized to between 520x0.5 and 520x2, and set `crop_size` to 480, which means 480x480 patches are randomly cropped for training. The final best mIoU is 66.7%.
71 | - I set `base_size` to 1024, which means the shorter side of the image is resized to between 1024x0.5 and 1024x2, and set `crop_size` to 720, which means 720x720 patches are randomly cropped for training. The final best mIoU is 69.9%.
72 | - Because our target dataset is Cityscapes, whose image size is 2048x1024, the larger `crop_size` (720x720) is better. I have not tried an even larger `crop_size` (such as 960x960 or 1024x1024) yet, because it would result in a very small batch size and be very time-consuming; in addition, the current mIoU is already high. But I believe that a larger `crop_size` would bring a higher mIoU.
73 | 
74 | In addition, I found that a small training technique can improve the performance of the model:
75 | - set the learning rate of `sub4` to the original initial learning rate (0.01), because it has pretrained backbone weights.
76 | - set the learning rate of `sub1` and `head` to 10 times the initial learning rate (0.1), because they have no pretrained weights.
77 | 
78 | This small training technique is really effective: it can improve mIoU by 1~2 percentage points. A minimal sketch of it follows at the end of this section.
79 | 
80 | Any other questions, or mistakes of mine, can be fed back in the comments section; I will reply as soon as possible.
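As a concrete illustration of the learning-rate trick above, the pretrained and the randomly initialized parts can be placed into separate optimizer parameter groups. This is only a sketch: the submodule names (`backbone`, `conv_sub1`, `head`) are placeholders, so map them to the modules actually defined in `models/icnet.py`.

```python
# Sketch of the 10x learning-rate trick via optimizer parameter groups.
# Submodule names below are illustrative, not the repo's exact attributes.
import torch

from models.icnet import ICNet  # class name assumed

model = ICNet(nclass=19)
base_lr = 0.01

param_groups = [
    # sub4 branch: pretrained backbone -> original learning rate
    {"params": model.backbone.parameters(), "lr": base_lr},
    # sub1 branch and head: trained from scratch -> 10x learning rate
    {"params": model.conv_sub1.parameters(), "lr": base_lr * 10},
    {"params": model.head.parameters(), "lr": base_lr * 10},
]
optimizer = torch.optim.SGD(param_groups, lr=base_lr,
                            momentum=0.9, weight_decay=1e-4)
```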
81 | 82 | # Reference 83 | - [ICNet for Real-Time Semantic Segmentation on High-Resolution Images](https://arxiv.org/abs/1704.08545) 84 | - [awesome-semantic-segmentation-pytorch](https://github.com/Tramac/awesome-semantic-segmentation-pytorch) 85 | - [Human-Segmentation-PyTorch](https://github.com/thuyngch/Human-Segmentation-PyTorch) -------------------------------------------------------------------------------- /ckpt/.gitignore: -------------------------------------------------------------------------------- 1 | evaluate_output/ -------------------------------------------------------------------------------- /ckpt/icnet_resnet50_evaluate_log.txt: -------------------------------------------------------------------------------- 1 | 2019-11-15 19:51:21,330 semantic_segmentation INFO: Start validation, Total sample: 500 2 | 2019-11-15 19:51:21,656 semantic_segmentation INFO: Sample: 1, validation pixAcc: 98.339, mIoU: 37.200, time: 0.025s 3 | 2019-11-15 19:51:22,920 semantic_segmentation INFO: Sample: 2, validation pixAcc: 98.370, mIoU: 43.898, time: 0.019s 4 | 2019-11-15 19:51:24,240 semantic_segmentation INFO: Sample: 3, validation pixAcc: 97.839, mIoU: 46.334, time: 0.019s 5 | 2019-11-15 19:51:25,512 semantic_segmentation INFO: Sample: 4, validation pixAcc: 97.203, mIoU: 51.326, time: 0.019s 6 | 2019-11-15 19:51:26,792 semantic_segmentation INFO: Sample: 5, validation pixAcc: 97.274, mIoU: 51.458, time: 0.019s 7 | 2019-11-15 19:51:28,069 semantic_segmentation INFO: Sample: 6, validation pixAcc: 97.466, mIoU: 56.477, time: 0.019s 8 | 2019-11-15 19:51:29,354 semantic_segmentation INFO: Sample: 7, validation pixAcc: 97.256, mIoU: 56.383, time: 0.019s 9 | 2019-11-15 19:51:30,598 semantic_segmentation INFO: Sample: 8, validation pixAcc: 97.226, mIoU: 57.070, time: 0.019s 10 | 2019-11-15 19:51:31,862 semantic_segmentation INFO: Sample: 9, validation pixAcc: 97.061, mIoU: 59.249, time: 0.019s 11 | 2019-11-15 19:51:33,150 semantic_segmentation INFO: Sample: 10, validation pixAcc: 96.884, mIoU: 66.572, time: 0.019s 12 | 2019-11-15 19:51:34,466 semantic_segmentation INFO: Sample: 11, validation pixAcc: 96.867, mIoU: 65.707, time: 0.020s 13 | 2019-11-15 19:51:35,758 semantic_segmentation INFO: Sample: 12, validation pixAcc: 96.719, mIoU: 66.369, time: 0.019s 14 | 2019-11-15 19:51:37,080 semantic_segmentation INFO: Sample: 13, validation pixAcc: 96.748, mIoU: 66.708, time: 0.019s 15 | 2019-11-15 19:51:38,383 semantic_segmentation INFO: Sample: 14, validation pixAcc: 96.646, mIoU: 66.521, time: 0.019s 16 | 2019-11-15 19:51:39,719 semantic_segmentation INFO: Sample: 15, validation pixAcc: 96.717, mIoU: 66.623, time: 0.019s 17 | 2019-11-15 19:51:41,040 semantic_segmentation INFO: Sample: 16, validation pixAcc: 96.743, mIoU: 68.513, time: 0.019s 18 | 2019-11-15 19:51:42,320 semantic_segmentation INFO: Sample: 17, validation pixAcc: 96.806, mIoU: 68.297, time: 0.019s 19 | 2019-11-15 19:51:43,633 semantic_segmentation INFO: Sample: 18, validation pixAcc: 96.674, mIoU: 67.215, time: 0.019s 20 | 2019-11-15 19:51:45,009 semantic_segmentation INFO: Sample: 19, validation pixAcc: 96.505, mIoU: 69.249, time: 0.019s 21 | 2019-11-15 19:51:46,322 semantic_segmentation INFO: Sample: 20, validation pixAcc: 96.502, mIoU: 68.721, time: 0.019s 22 | 2019-11-15 19:51:47,657 semantic_segmentation INFO: Sample: 21, validation pixAcc: 96.567, mIoU: 70.777, time: 0.019s 23 | 2019-11-15 19:51:48,903 semantic_segmentation INFO: Sample: 22, validation pixAcc: 96.514, mIoU: 70.309, time: 0.019s 24 | 2019-11-15 19:51:50,215 
semantic_segmentation INFO: Sample: 23, validation pixAcc: 96.451, mIoU: 70.566, time: 0.019s 25 | 2019-11-15 19:51:51,528 semantic_segmentation INFO: Sample: 24, validation pixAcc: 96.435, mIoU: 71.592, time: 0.019s 26 | 2019-11-15 19:51:52,758 semantic_segmentation INFO: Sample: 25, validation pixAcc: 96.509, mIoU: 71.633, time: 0.019s 27 | 2019-11-15 19:51:54,021 semantic_segmentation INFO: Sample: 26, validation pixAcc: 96.483, mIoU: 72.118, time: 0.019s 28 | 2019-11-15 19:51:55,320 semantic_segmentation INFO: Sample: 27, validation pixAcc: 96.477, mIoU: 71.979, time: 0.019s 29 | 2019-11-15 19:51:56,579 semantic_segmentation INFO: Sample: 28, validation pixAcc: 96.477, mIoU: 72.165, time: 0.019s 30 | 2019-11-15 19:51:57,931 semantic_segmentation INFO: Sample: 29, validation pixAcc: 96.452, mIoU: 72.322, time: 0.019s 31 | 2019-11-15 19:51:59,292 semantic_segmentation INFO: Sample: 30, validation pixAcc: 96.466, mIoU: 71.993, time: 0.019s 32 | 2019-11-15 19:52:00,578 semantic_segmentation INFO: Sample: 31, validation pixAcc: 96.444, mIoU: 71.515, time: 0.019s 33 | 2019-11-15 19:52:01,909 semantic_segmentation INFO: Sample: 32, validation pixAcc: 96.428, mIoU: 71.063, time: 0.019s 34 | 2019-11-15 19:52:03,219 semantic_segmentation INFO: Sample: 33, validation pixAcc: 96.381, mIoU: 70.991, time: 0.019s 35 | 2019-11-15 19:52:04,554 semantic_segmentation INFO: Sample: 34, validation pixAcc: 96.377, mIoU: 70.887, time: 0.019s 36 | 2019-11-15 19:52:05,842 semantic_segmentation INFO: Sample: 35, validation pixAcc: 96.378, mIoU: 70.676, time: 0.019s 37 | 2019-11-15 19:52:07,144 semantic_segmentation INFO: Sample: 36, validation pixAcc: 96.363, mIoU: 70.394, time: 0.019s 38 | 2019-11-15 19:52:08,425 semantic_segmentation INFO: Sample: 37, validation pixAcc: 96.326, mIoU: 70.208, time: 0.019s 39 | 2019-11-15 19:52:09,776 semantic_segmentation INFO: Sample: 38, validation pixAcc: 96.328, mIoU: 70.817, time: 0.019s 40 | 2019-11-15 19:52:11,116 semantic_segmentation INFO: Sample: 39, validation pixAcc: 96.293, mIoU: 70.562, time: 0.019s 41 | 2019-11-15 19:52:12,452 semantic_segmentation INFO: Sample: 40, validation pixAcc: 96.295, mIoU: 70.443, time: 0.019s 42 | 2019-11-15 19:52:13,800 semantic_segmentation INFO: Sample: 41, validation pixAcc: 96.269, mIoU: 70.346, time: 0.019s 43 | 2019-11-15 19:52:15,009 semantic_segmentation INFO: Sample: 42, validation pixAcc: 96.281, mIoU: 70.215, time: 0.019s 44 | 2019-11-15 19:52:16,145 semantic_segmentation INFO: Sample: 43, validation pixAcc: 96.285, mIoU: 70.272, time: 0.019s 45 | 2019-11-15 19:52:17,448 semantic_segmentation INFO: Sample: 44, validation pixAcc: 96.256, mIoU: 70.263, time: 0.019s 46 | 2019-11-15 19:52:18,731 semantic_segmentation INFO: Sample: 45, validation pixAcc: 96.192, mIoU: 69.539, time: 0.019s 47 | 2019-11-15 19:52:20,003 semantic_segmentation INFO: Sample: 46, validation pixAcc: 96.168, mIoU: 69.602, time: 0.019s 48 | 2019-11-15 19:52:21,313 semantic_segmentation INFO: Sample: 47, validation pixAcc: 96.192, mIoU: 69.603, time: 0.019s 49 | 2019-11-15 19:52:22,672 semantic_segmentation INFO: Sample: 48, validation pixAcc: 96.225, mIoU: 69.755, time: 0.019s 50 | 2019-11-15 19:52:23,997 semantic_segmentation INFO: Sample: 49, validation pixAcc: 96.189, mIoU: 69.308, time: 0.020s 51 | 2019-11-15 19:52:25,293 semantic_segmentation INFO: Sample: 50, validation pixAcc: 96.216, mIoU: 69.449, time: 0.019s 52 | 2019-11-15 19:52:26,591 semantic_segmentation INFO: Sample: 51, validation pixAcc: 96.228, mIoU: 69.301, time: 0.020s 53 | 2019-11-15 
19:52:27,921 semantic_segmentation INFO: Sample: 52, validation pixAcc: 96.222, mIoU: 69.260, time: 0.019s 54 | 2019-11-15 19:52:29,235 semantic_segmentation INFO: Sample: 53, validation pixAcc: 96.214, mIoU: 69.256, time: 0.019s 55 | 2019-11-15 19:52:30,527 semantic_segmentation INFO: Sample: 54, validation pixAcc: 96.223, mIoU: 69.135, time: 0.019s 56 | 2019-11-15 19:52:31,920 semantic_segmentation INFO: Sample: 55, validation pixAcc: 96.243, mIoU: 69.112, time: 0.020s 57 | 2019-11-15 19:52:33,258 semantic_segmentation INFO: Sample: 56, validation pixAcc: 96.251, mIoU: 69.269, time: 0.019s 58 | 2019-11-15 19:52:34,588 semantic_segmentation INFO: Sample: 57, validation pixAcc: 96.290, mIoU: 69.310, time: 0.020s 59 | 2019-11-15 19:52:35,933 semantic_segmentation INFO: Sample: 58, validation pixAcc: 96.254, mIoU: 69.302, time: 0.019s 60 | 2019-11-15 19:52:37,214 semantic_segmentation INFO: Sample: 59, validation pixAcc: 96.269, mIoU: 69.543, time: 0.019s 61 | 2019-11-15 19:52:38,564 semantic_segmentation INFO: Sample: 60, validation pixAcc: 96.289, mIoU: 69.551, time: 0.019s 62 | 2019-11-15 19:52:39,906 semantic_segmentation INFO: Sample: 61, validation pixAcc: 96.300, mIoU: 69.548, time: 0.019s 63 | 2019-11-15 19:52:41,225 semantic_segmentation INFO: Sample: 62, validation pixAcc: 96.300, mIoU: 69.529, time: 0.019s 64 | 2019-11-15 19:52:42,555 semantic_segmentation INFO: Sample: 63, validation pixAcc: 96.257, mIoU: 69.389, time: 0.019s 65 | 2019-11-15 19:52:43,891 semantic_segmentation INFO: Sample: 64, validation pixAcc: 96.247, mIoU: 69.164, time: 0.019s 66 | 2019-11-15 19:52:45,168 semantic_segmentation INFO: Sample: 65, validation pixAcc: 96.216, mIoU: 69.115, time: 0.019s 67 | 2019-11-15 19:52:46,516 semantic_segmentation INFO: Sample: 66, validation pixAcc: 96.233, mIoU: 69.421, time: 0.020s 68 | 2019-11-15 19:52:47,857 semantic_segmentation INFO: Sample: 67, validation pixAcc: 96.238, mIoU: 69.509, time: 0.019s 69 | 2019-11-15 19:52:49,219 semantic_segmentation INFO: Sample: 68, validation pixAcc: 96.222, mIoU: 69.613, time: 0.019s 70 | 2019-11-15 19:52:50,588 semantic_segmentation INFO: Sample: 69, validation pixAcc: 96.238, mIoU: 69.577, time: 0.019s 71 | 2019-11-15 19:52:51,886 semantic_segmentation INFO: Sample: 70, validation pixAcc: 96.230, mIoU: 69.373, time: 0.019s 72 | 2019-11-15 19:52:53,181 semantic_segmentation INFO: Sample: 71, validation pixAcc: 96.201, mIoU: 69.436, time: 0.019s 73 | 2019-11-15 19:52:54,518 semantic_segmentation INFO: Sample: 72, validation pixAcc: 96.203, mIoU: 69.441, time: 0.019s 74 | 2019-11-15 19:52:55,807 semantic_segmentation INFO: Sample: 73, validation pixAcc: 96.173, mIoU: 69.530, time: 0.019s 75 | 2019-11-15 19:52:57,115 semantic_segmentation INFO: Sample: 74, validation pixAcc: 96.183, mIoU: 69.727, time: 0.019s 76 | 2019-11-15 19:52:58,400 semantic_segmentation INFO: Sample: 75, validation pixAcc: 96.158, mIoU: 69.884, time: 0.019s 77 | 2019-11-15 19:52:59,637 semantic_segmentation INFO: Sample: 76, validation pixAcc: 96.139, mIoU: 69.742, time: 0.020s 78 | 2019-11-15 19:53:00,835 semantic_segmentation INFO: Sample: 77, validation pixAcc: 96.162, mIoU: 69.758, time: 0.019s 79 | 2019-11-15 19:53:02,184 semantic_segmentation INFO: Sample: 78, validation pixAcc: 96.153, mIoU: 69.965, time: 0.019s 80 | 2019-11-15 19:53:03,543 semantic_segmentation INFO: Sample: 79, validation pixAcc: 96.172, mIoU: 69.959, time: 0.019s 81 | 2019-11-15 19:53:04,854 semantic_segmentation INFO: Sample: 80, validation pixAcc: 96.189, mIoU: 70.018, time: 0.019s 82 
| 2019-11-15 19:53:06,168 semantic_segmentation INFO: Sample: 81, validation pixAcc: 96.191, mIoU: 70.311, time: 0.020s 83 | 2019-11-15 19:53:07,492 semantic_segmentation INFO: Sample: 82, validation pixAcc: 96.222, mIoU: 70.281, time: 0.019s 84 | 2019-11-15 19:53:08,805 semantic_segmentation INFO: Sample: 83, validation pixAcc: 96.236, mIoU: 70.295, time: 0.019s 85 | 2019-11-15 19:53:10,174 semantic_segmentation INFO: Sample: 84, validation pixAcc: 96.256, mIoU: 70.298, time: 0.020s 86 | 2019-11-15 19:53:11,522 semantic_segmentation INFO: Sample: 85, validation pixAcc: 96.248, mIoU: 70.307, time: 0.020s 87 | 2019-11-15 19:53:12,841 semantic_segmentation INFO: Sample: 86, validation pixAcc: 96.263, mIoU: 70.380, time: 0.019s 88 | 2019-11-15 19:53:14,163 semantic_segmentation INFO: Sample: 87, validation pixAcc: 96.252, mIoU: 70.429, time: 0.019s 89 | 2019-11-15 19:53:15,455 semantic_segmentation INFO: Sample: 88, validation pixAcc: 96.243, mIoU: 70.329, time: 0.019s 90 | 2019-11-15 19:53:16,609 semantic_segmentation INFO: Sample: 89, validation pixAcc: 96.261, mIoU: 70.350, time: 0.020s 91 | 2019-11-15 19:53:17,930 semantic_segmentation INFO: Sample: 90, validation pixAcc: 96.277, mIoU: 70.433, time: 0.019s 92 | 2019-11-15 19:53:19,294 semantic_segmentation INFO: Sample: 91, validation pixAcc: 96.274, mIoU: 70.440, time: 0.019s 93 | 2019-11-15 19:53:20,645 semantic_segmentation INFO: Sample: 92, validation pixAcc: 96.249, mIoU: 70.613, time: 0.020s 94 | 2019-11-15 19:53:21,937 semantic_segmentation INFO: Sample: 93, validation pixAcc: 96.254, mIoU: 70.625, time: 0.019s 95 | 2019-11-15 19:53:23,244 semantic_segmentation INFO: Sample: 94, validation pixAcc: 96.257, mIoU: 70.589, time: 0.019s 96 | 2019-11-15 19:53:24,591 semantic_segmentation INFO: Sample: 95, validation pixAcc: 96.248, mIoU: 70.479, time: 0.019s 97 | 2019-11-15 19:53:25,950 semantic_segmentation INFO: Sample: 96, validation pixAcc: 96.267, mIoU: 70.478, time: 0.019s 98 | 2019-11-15 19:53:27,251 semantic_segmentation INFO: Sample: 97, validation pixAcc: 96.273, mIoU: 70.460, time: 0.019s 99 | 2019-11-15 19:53:28,520 semantic_segmentation INFO: Sample: 98, validation pixAcc: 96.285, mIoU: 70.437, time: 0.019s 100 | 2019-11-15 19:53:29,847 semantic_segmentation INFO: Sample: 99, validation pixAcc: 96.301, mIoU: 70.525, time: 0.019s 101 | 2019-11-15 19:53:31,233 semantic_segmentation INFO: Sample: 100, validation pixAcc: 96.307, mIoU: 70.583, time: 0.019s 102 | 2019-11-15 19:53:32,542 semantic_segmentation INFO: Sample: 101, validation pixAcc: 96.318, mIoU: 70.567, time: 0.019s 103 | 2019-11-15 19:53:33,821 semantic_segmentation INFO: Sample: 102, validation pixAcc: 96.323, mIoU: 70.543, time: 0.019s 104 | 2019-11-15 19:53:35,109 semantic_segmentation INFO: Sample: 103, validation pixAcc: 96.338, mIoU: 70.538, time: 0.019s 105 | 2019-11-15 19:53:36,406 semantic_segmentation INFO: Sample: 104, validation pixAcc: 96.342, mIoU: 70.553, time: 0.019s 106 | 2019-11-15 19:53:37,697 semantic_segmentation INFO: Sample: 105, validation pixAcc: 96.349, mIoU: 70.536, time: 0.019s 107 | 2019-11-15 19:53:38,963 semantic_segmentation INFO: Sample: 106, validation pixAcc: 96.348, mIoU: 70.526, time: 0.019s 108 | 2019-11-15 19:53:40,261 semantic_segmentation INFO: Sample: 107, validation pixAcc: 96.343, mIoU: 70.420, time: 0.019s 109 | 2019-11-15 19:53:41,527 semantic_segmentation INFO: Sample: 108, validation pixAcc: 96.327, mIoU: 70.386, time: 0.019s 110 | 2019-11-15 19:53:42,847 semantic_segmentation INFO: Sample: 109, validation pixAcc: 
96.327, mIoU: 70.340, time: 0.019s 111 | 2019-11-15 19:53:44,105 semantic_segmentation INFO: Sample: 110, validation pixAcc: 96.331, mIoU: 70.327, time: 0.019s 112 | 2019-11-15 19:53:45,331 semantic_segmentation INFO: Sample: 111, validation pixAcc: 96.337, mIoU: 70.308, time: 0.019s 113 | 2019-11-15 19:53:46,575 semantic_segmentation INFO: Sample: 112, validation pixAcc: 96.337, mIoU: 70.255, time: 0.020s 114 | 2019-11-15 19:53:47,877 semantic_segmentation INFO: Sample: 113, validation pixAcc: 96.347, mIoU: 70.257, time: 0.020s 115 | 2019-11-15 19:53:49,165 semantic_segmentation INFO: Sample: 114, validation pixAcc: 96.359, mIoU: 70.311, time: 0.019s 116 | 2019-11-15 19:53:50,500 semantic_segmentation INFO: Sample: 115, validation pixAcc: 96.361, mIoU: 70.485, time: 0.019s 117 | 2019-11-15 19:53:51,770 semantic_segmentation INFO: Sample: 116, validation pixAcc: 96.377, mIoU: 70.504, time: 0.019s 118 | 2019-11-15 19:53:53,106 semantic_segmentation INFO: Sample: 117, validation pixAcc: 96.366, mIoU: 70.457, time: 0.019s 119 | 2019-11-15 19:53:54,425 semantic_segmentation INFO: Sample: 118, validation pixAcc: 96.363, mIoU: 70.394, time: 0.019s 120 | 2019-11-15 19:53:55,748 semantic_segmentation INFO: Sample: 119, validation pixAcc: 96.376, mIoU: 70.400, time: 0.019s 121 | 2019-11-15 19:53:57,034 semantic_segmentation INFO: Sample: 120, validation pixAcc: 96.382, mIoU: 70.407, time: 0.019s 122 | 2019-11-15 19:53:58,281 semantic_segmentation INFO: Sample: 121, validation pixAcc: 96.380, mIoU: 70.372, time: 0.019s 123 | 2019-11-15 19:53:59,565 semantic_segmentation INFO: Sample: 122, validation pixAcc: 96.389, mIoU: 70.360, time: 0.020s 124 | 2019-11-15 19:54:00,867 semantic_segmentation INFO: Sample: 123, validation pixAcc: 96.377, mIoU: 70.322, time: 0.019s 125 | 2019-11-15 19:54:02,178 semantic_segmentation INFO: Sample: 124, validation pixAcc: 96.357, mIoU: 70.266, time: 0.020s 126 | 2019-11-15 19:54:03,489 semantic_segmentation INFO: Sample: 125, validation pixAcc: 96.369, mIoU: 70.358, time: 0.019s 127 | 2019-11-15 19:54:04,797 semantic_segmentation INFO: Sample: 126, validation pixAcc: 96.364, mIoU: 70.330, time: 0.019s 128 | 2019-11-15 19:54:06,110 semantic_segmentation INFO: Sample: 127, validation pixAcc: 96.370, mIoU: 70.320, time: 0.019s 129 | 2019-11-15 19:54:07,403 semantic_segmentation INFO: Sample: 128, validation pixAcc: 96.336, mIoU: 70.299, time: 0.019s 130 | 2019-11-15 19:54:08,764 semantic_segmentation INFO: Sample: 129, validation pixAcc: 96.335, mIoU: 70.290, time: 0.019s 131 | 2019-11-15 19:54:10,097 semantic_segmentation INFO: Sample: 130, validation pixAcc: 96.339, mIoU: 70.234, time: 0.019s 132 | 2019-11-15 19:54:11,372 semantic_segmentation INFO: Sample: 131, validation pixAcc: 96.320, mIoU: 70.067, time: 0.019s 133 | 2019-11-15 19:54:12,674 semantic_segmentation INFO: Sample: 132, validation pixAcc: 96.320, mIoU: 70.054, time: 0.019s 134 | 2019-11-15 19:54:13,967 semantic_segmentation INFO: Sample: 133, validation pixAcc: 96.324, mIoU: 70.091, time: 0.019s 135 | 2019-11-15 19:54:15,294 semantic_segmentation INFO: Sample: 134, validation pixAcc: 96.332, mIoU: 70.048, time: 0.019s 136 | 2019-11-15 19:54:16,545 semantic_segmentation INFO: Sample: 135, validation pixAcc: 96.326, mIoU: 69.976, time: 0.019s 137 | 2019-11-15 19:54:17,886 semantic_segmentation INFO: Sample: 136, validation pixAcc: 96.330, mIoU: 69.986, time: 0.019s 138 | 2019-11-15 19:54:19,246 semantic_segmentation INFO: Sample: 137, validation pixAcc: 96.346, mIoU: 69.991, time: 0.019s 139 | 2019-11-15 
19:54:20,506 semantic_segmentation INFO: Sample: 138, validation pixAcc: 96.348, mIoU: 69.994, time: 0.019s 140 | 2019-11-15 19:54:21,797 semantic_segmentation INFO: Sample: 139, validation pixAcc: 96.340, mIoU: 69.780, time: 0.019s 141 | 2019-11-15 19:54:23,130 semantic_segmentation INFO: Sample: 140, validation pixAcc: 96.347, mIoU: 69.760, time: 0.020s 142 | 2019-11-15 19:54:24,515 semantic_segmentation INFO: Sample: 141, validation pixAcc: 96.320, mIoU: 69.747, time: 0.019s 143 | 2019-11-15 19:54:25,931 semantic_segmentation INFO: Sample: 142, validation pixAcc: 96.319, mIoU: 69.788, time: 0.019s 144 | 2019-11-15 19:54:27,252 semantic_segmentation INFO: Sample: 143, validation pixAcc: 96.322, mIoU: 69.811, time: 0.020s 145 | 2019-11-15 19:54:28,476 semantic_segmentation INFO: Sample: 144, validation pixAcc: 96.327, mIoU: 69.702, time: 0.019s 146 | 2019-11-15 19:54:29,762 semantic_segmentation INFO: Sample: 145, validation pixAcc: 96.315, mIoU: 69.818, time: 0.019s 147 | 2019-11-15 19:54:31,094 semantic_segmentation INFO: Sample: 146, validation pixAcc: 96.313, mIoU: 69.803, time: 0.019s 148 | 2019-11-15 19:54:32,449 semantic_segmentation INFO: Sample: 147, validation pixAcc: 96.261, mIoU: 69.770, time: 0.019s 149 | 2019-11-15 19:54:33,774 semantic_segmentation INFO: Sample: 148, validation pixAcc: 96.264, mIoU: 69.841, time: 0.019s 150 | 2019-11-15 19:54:35,070 semantic_segmentation INFO: Sample: 149, validation pixAcc: 96.268, mIoU: 69.831, time: 0.019s 151 | 2019-11-15 19:54:36,452 semantic_segmentation INFO: Sample: 150, validation pixAcc: 96.277, mIoU: 69.900, time: 0.019s 152 | 2019-11-15 19:54:37,707 semantic_segmentation INFO: Sample: 151, validation pixAcc: 96.271, mIoU: 69.750, time: 0.019s 153 | 2019-11-15 19:54:38,988 semantic_segmentation INFO: Sample: 152, validation pixAcc: 96.263, mIoU: 69.993, time: 0.019s 154 | 2019-11-15 19:54:40,339 semantic_segmentation INFO: Sample: 153, validation pixAcc: 96.266, mIoU: 69.901, time: 0.019s 155 | 2019-11-15 19:54:41,666 semantic_segmentation INFO: Sample: 154, validation pixAcc: 96.230, mIoU: 69.566, time: 0.019s 156 | 2019-11-15 19:54:42,981 semantic_segmentation INFO: Sample: 155, validation pixAcc: 96.227, mIoU: 69.588, time: 0.019s 157 | 2019-11-15 19:54:44,255 semantic_segmentation INFO: Sample: 156, validation pixAcc: 96.223, mIoU: 69.584, time: 0.019s 158 | 2019-11-15 19:54:45,618 semantic_segmentation INFO: Sample: 157, validation pixAcc: 96.210, mIoU: 69.564, time: 0.019s 159 | 2019-11-15 19:54:46,967 semantic_segmentation INFO: Sample: 158, validation pixAcc: 96.207, mIoU: 69.595, time: 0.019s 160 | 2019-11-15 19:54:48,337 semantic_segmentation INFO: Sample: 159, validation pixAcc: 96.206, mIoU: 69.592, time: 0.019s 161 | 2019-11-15 19:54:49,696 semantic_segmentation INFO: Sample: 160, validation pixAcc: 96.205, mIoU: 69.598, time: 0.019s 162 | 2019-11-15 19:54:50,998 semantic_segmentation INFO: Sample: 161, validation pixAcc: 96.210, mIoU: 69.607, time: 0.020s 163 | 2019-11-15 19:54:52,280 semantic_segmentation INFO: Sample: 162, validation pixAcc: 96.208, mIoU: 69.565, time: 0.019s 164 | 2019-11-15 19:54:53,589 semantic_segmentation INFO: Sample: 163, validation pixAcc: 96.210, mIoU: 69.647, time: 0.019s 165 | 2019-11-15 19:54:54,872 semantic_segmentation INFO: Sample: 164, validation pixAcc: 96.223, mIoU: 69.637, time: 0.019s 166 | 2019-11-15 19:54:56,238 semantic_segmentation INFO: Sample: 165, validation pixAcc: 96.204, mIoU: 69.724, time: 0.019s 167 | 2019-11-15 19:54:57,629 semantic_segmentation INFO: Sample: 166, 
validation pixAcc: 96.205, mIoU: 69.733, time: 0.020s 168 | 2019-11-15 19:54:58,978 semantic_segmentation INFO: Sample: 167, validation pixAcc: 96.202, mIoU: 69.715, time: 0.020s 169 | 2019-11-15 19:55:00,308 semantic_segmentation INFO: Sample: 168, validation pixAcc: 96.207, mIoU: 69.709, time: 0.020s 170 | 2019-11-15 19:55:01,707 semantic_segmentation INFO: Sample: 169, validation pixAcc: 96.216, mIoU: 69.711, time: 0.019s 171 | 2019-11-15 19:55:03,098 semantic_segmentation INFO: Sample: 170, validation pixAcc: 96.224, mIoU: 69.734, time: 0.019s 172 | 2019-11-15 19:55:04,411 semantic_segmentation INFO: Sample: 171, validation pixAcc: 96.225, mIoU: 69.701, time: 0.019s 173 | 2019-11-15 19:55:05,748 semantic_segmentation INFO: Sample: 172, validation pixAcc: 96.228, mIoU: 69.688, time: 0.019s 174 | 2019-11-15 19:55:07,078 semantic_segmentation INFO: Sample: 173, validation pixAcc: 96.216, mIoU: 69.596, time: 0.019s 175 | 2019-11-15 19:55:08,366 semantic_segmentation INFO: Sample: 174, validation pixAcc: 96.227, mIoU: 69.599, time: 0.019s 176 | 2019-11-15 19:55:09,689 semantic_segmentation INFO: Sample: 175, validation pixAcc: 96.222, mIoU: 69.698, time: 0.019s 177 | 2019-11-15 19:55:11,089 semantic_segmentation INFO: Sample: 176, validation pixAcc: 96.203, mIoU: 69.677, time: 0.020s 178 | 2019-11-15 19:55:12,545 semantic_segmentation INFO: Sample: 177, validation pixAcc: 96.170, mIoU: 69.615, time: 0.019s 179 | 2019-11-15 19:55:14,115 semantic_segmentation INFO: Sample: 178, validation pixAcc: 96.169, mIoU: 70.134, time: 0.019s 180 | 2019-11-15 19:55:15,700 semantic_segmentation INFO: Sample: 179, validation pixAcc: 96.149, mIoU: 70.055, time: 0.019s 181 | 2019-11-15 19:55:17,117 semantic_segmentation INFO: Sample: 180, validation pixAcc: 96.155, mIoU: 70.047, time: 0.019s 182 | 2019-11-15 19:55:18,484 semantic_segmentation INFO: Sample: 181, validation pixAcc: 96.154, mIoU: 70.075, time: 0.019s 183 | 2019-11-15 19:55:20,051 semantic_segmentation INFO: Sample: 182, validation pixAcc: 96.117, mIoU: 70.125, time: 0.020s 184 | 2019-11-15 19:55:21,367 semantic_segmentation INFO: Sample: 183, validation pixAcc: 96.092, mIoU: 69.996, time: 0.019s 185 | 2019-11-15 19:55:22,757 semantic_segmentation INFO: Sample: 184, validation pixAcc: 96.087, mIoU: 69.989, time: 0.019s 186 | 2019-11-15 19:55:24,180 semantic_segmentation INFO: Sample: 185, validation pixAcc: 96.088, mIoU: 69.969, time: 0.020s 187 | 2019-11-15 19:55:25,508 semantic_segmentation INFO: Sample: 186, validation pixAcc: 96.086, mIoU: 69.945, time: 0.019s 188 | 2019-11-15 19:55:26,905 semantic_segmentation INFO: Sample: 187, validation pixAcc: 96.098, mIoU: 69.950, time: 0.019s 189 | 2019-11-15 19:55:28,276 semantic_segmentation INFO: Sample: 188, validation pixAcc: 96.103, mIoU: 69.951, time: 0.019s 190 | 2019-11-15 19:55:29,677 semantic_segmentation INFO: Sample: 189, validation pixAcc: 96.111, mIoU: 70.711, time: 0.020s 191 | 2019-11-15 19:55:31,146 semantic_segmentation INFO: Sample: 190, validation pixAcc: 96.103, mIoU: 70.612, time: 0.019s 192 | 2019-11-15 19:55:32,483 semantic_segmentation INFO: Sample: 191, validation pixAcc: 96.098, mIoU: 70.601, time: 0.019s 193 | 2019-11-15 19:55:33,836 semantic_segmentation INFO: Sample: 192, validation pixAcc: 96.091, mIoU: 70.592, time: 0.019s 194 | 2019-11-15 19:55:35,254 semantic_segmentation INFO: Sample: 193, validation pixAcc: 96.069, mIoU: 70.548, time: 0.019s 195 | 2019-11-15 19:55:36,777 semantic_segmentation INFO: Sample: 194, validation pixAcc: 96.081, mIoU: 70.532, time: 0.019s 
196 | 2019-11-15 19:55:38,128 semantic_segmentation INFO: Sample: 195, validation pixAcc: 96.089, mIoU: 70.532, time: 0.020s 197 | 2019-11-15 19:55:39,544 semantic_segmentation INFO: Sample: 196, validation pixAcc: 96.087, mIoU: 70.535, time: 0.019s 198 | 2019-11-15 19:55:40,969 semantic_segmentation INFO: Sample: 197, validation pixAcc: 96.084, mIoU: 70.499, time: 0.019s 199 | 2019-11-15 19:55:42,394 semantic_segmentation INFO: Sample: 198, validation pixAcc: 96.075, mIoU: 70.446, time: 0.020s 200 | 2019-11-15 19:55:43,665 semantic_segmentation INFO: Sample: 199, validation pixAcc: 96.067, mIoU: 70.734, time: 0.019s 201 | 2019-11-15 19:55:44,876 semantic_segmentation INFO: Sample: 200, validation pixAcc: 96.060, mIoU: 70.999, time: 0.019s 202 | 2019-11-15 19:55:46,251 semantic_segmentation INFO: Sample: 201, validation pixAcc: 95.975, mIoU: 70.730, time: 0.019s 203 | 2019-11-15 19:55:47,650 semantic_segmentation INFO: Sample: 202, validation pixAcc: 95.973, mIoU: 70.695, time: 0.019s 204 | 2019-11-15 19:55:49,083 semantic_segmentation INFO: Sample: 203, validation pixAcc: 95.974, mIoU: 70.697, time: 0.019s 205 | 2019-11-15 19:55:50,522 semantic_segmentation INFO: Sample: 204, validation pixAcc: 95.974, mIoU: 70.726, time: 0.020s 206 | 2019-11-15 19:55:51,838 semantic_segmentation INFO: Sample: 205, validation pixAcc: 95.980, mIoU: 70.725, time: 0.019s 207 | 2019-11-15 19:55:53,190 semantic_segmentation INFO: Sample: 206, validation pixAcc: 95.982, mIoU: 70.730, time: 0.019s 208 | 2019-11-15 19:55:54,563 semantic_segmentation INFO: Sample: 207, validation pixAcc: 95.984, mIoU: 70.718, time: 0.019s 209 | 2019-11-15 19:55:55,948 semantic_segmentation INFO: Sample: 208, validation pixAcc: 95.987, mIoU: 70.710, time: 0.019s 210 | 2019-11-15 19:55:57,253 semantic_segmentation INFO: Sample: 209, validation pixAcc: 95.980, mIoU: 70.668, time: 0.019s 211 | 2019-11-15 19:55:58,596 semantic_segmentation INFO: Sample: 210, validation pixAcc: 95.959, mIoU: 70.339, time: 0.019s 212 | 2019-11-15 19:55:59,975 semantic_segmentation INFO: Sample: 211, validation pixAcc: 95.949, mIoU: 70.458, time: 0.019s 213 | 2019-11-15 19:56:01,445 semantic_segmentation INFO: Sample: 212, validation pixAcc: 95.931, mIoU: 70.488, time: 0.020s 214 | 2019-11-15 19:56:02,823 semantic_segmentation INFO: Sample: 213, validation pixAcc: 95.939, mIoU: 70.481, time: 0.019s 215 | 2019-11-15 19:56:04,178 semantic_segmentation INFO: Sample: 214, validation pixAcc: 95.926, mIoU: 70.412, time: 0.019s 216 | 2019-11-15 19:56:05,542 semantic_segmentation INFO: Sample: 215, validation pixAcc: 95.908, mIoU: 70.619, time: 0.019s 217 | 2019-11-15 19:56:07,101 semantic_segmentation INFO: Sample: 216, validation pixAcc: 95.903, mIoU: 70.594, time: 0.020s 218 | 2019-11-15 19:56:08,454 semantic_segmentation INFO: Sample: 217, validation pixAcc: 95.892, mIoU: 70.554, time: 0.019s 219 | 2019-11-15 19:56:09,796 semantic_segmentation INFO: Sample: 218, validation pixAcc: 95.862, mIoU: 70.802, time: 0.019s 220 | 2019-11-15 19:56:11,129 semantic_segmentation INFO: Sample: 219, validation pixAcc: 95.870, mIoU: 70.805, time: 0.019s 221 | 2019-11-15 19:56:12,396 semantic_segmentation INFO: Sample: 220, validation pixAcc: 95.857, mIoU: 70.710, time: 0.019s 222 | 2019-11-15 19:56:13,983 semantic_segmentation INFO: Sample: 221, validation pixAcc: 95.846, mIoU: 70.813, time: 0.019s 223 | 2019-11-15 19:56:15,425 semantic_segmentation INFO: Sample: 222, validation pixAcc: 95.847, mIoU: 70.769, time: 0.019s 224 | 2019-11-15 19:56:16,869 semantic_segmentation 
INFO: Sample: 223, validation pixAcc: 95.858, mIoU: 70.778, time: 0.020s 225 | 2019-11-15 19:56:18,329 semantic_segmentation INFO: Sample: 224, validation pixAcc: 95.842, mIoU: 70.739, time: 0.019s 226 | 2019-11-15 19:56:19,701 semantic_segmentation INFO: Sample: 225, validation pixAcc: 95.813, mIoU: 70.873, time: 0.019s 227 | 2019-11-15 19:56:21,226 semantic_segmentation INFO: Sample: 226, validation pixAcc: 95.818, mIoU: 70.858, time: 0.019s 228 | 2019-11-15 19:56:22,470 semantic_segmentation INFO: Sample: 227, validation pixAcc: 95.785, mIoU: 70.717, time: 0.019s 229 | 2019-11-15 19:56:23,857 semantic_segmentation INFO: Sample: 228, validation pixAcc: 95.767, mIoU: 70.626, time: 0.019s 230 | 2019-11-15 19:56:25,160 semantic_segmentation INFO: Sample: 229, validation pixAcc: 95.780, mIoU: 70.658, time: 0.019s 231 | 2019-11-15 19:56:26,454 semantic_segmentation INFO: Sample: 230, validation pixAcc: 95.751, mIoU: 70.546, time: 0.019s 232 | 2019-11-15 19:56:27,800 semantic_segmentation INFO: Sample: 231, validation pixAcc: 95.755, mIoU: 70.503, time: 0.019s 233 | 2019-11-15 19:56:29,098 semantic_segmentation INFO: Sample: 232, validation pixAcc: 95.744, mIoU: 70.497, time: 0.019s 234 | 2019-11-15 19:56:30,395 semantic_segmentation INFO: Sample: 233, validation pixAcc: 95.751, mIoU: 70.506, time: 0.019s 235 | 2019-11-15 19:56:31,709 semantic_segmentation INFO: Sample: 234, validation pixAcc: 95.741, mIoU: 73.590, time: 0.019s 236 | 2019-11-15 19:56:33,021 semantic_segmentation INFO: Sample: 235, validation pixAcc: 95.740, mIoU: 73.629, time: 0.019s 237 | 2019-11-15 19:56:34,361 semantic_segmentation INFO: Sample: 236, validation pixAcc: 95.740, mIoU: 73.605, time: 0.019s 238 | 2019-11-15 19:56:35,688 semantic_segmentation INFO: Sample: 237, validation pixAcc: 95.730, mIoU: 73.379, time: 0.019s 239 | 2019-11-15 19:56:37,021 semantic_segmentation INFO: Sample: 238, validation pixAcc: 95.732, mIoU: 73.369, time: 0.019s 240 | 2019-11-15 19:56:38,323 semantic_segmentation INFO: Sample: 239, validation pixAcc: 95.733, mIoU: 73.340, time: 0.019s 241 | 2019-11-15 19:56:39,601 semantic_segmentation INFO: Sample: 240, validation pixAcc: 95.720, mIoU: 73.283, time: 0.019s 242 | 2019-11-15 19:56:41,016 semantic_segmentation INFO: Sample: 241, validation pixAcc: 95.712, mIoU: 73.268, time: 0.019s 243 | 2019-11-15 19:56:42,403 semantic_segmentation INFO: Sample: 242, validation pixAcc: 95.711, mIoU: 73.768, time: 0.020s 244 | 2019-11-15 19:56:43,738 semantic_segmentation INFO: Sample: 243, validation pixAcc: 95.702, mIoU: 73.762, time: 0.019s 245 | 2019-11-15 19:56:45,030 semantic_segmentation INFO: Sample: 244, validation pixAcc: 95.700, mIoU: 74.197, time: 0.019s 246 | 2019-11-15 19:56:46,323 semantic_segmentation INFO: Sample: 245, validation pixAcc: 95.706, mIoU: 74.189, time: 0.019s 247 | 2019-11-15 19:56:47,619 semantic_segmentation INFO: Sample: 246, validation pixAcc: 95.705, mIoU: 74.229, time: 0.019s 248 | 2019-11-15 19:56:48,940 semantic_segmentation INFO: Sample: 247, validation pixAcc: 95.709, mIoU: 74.295, time: 0.019s 249 | 2019-11-15 19:56:50,166 semantic_segmentation INFO: Sample: 248, validation pixAcc: 95.714, mIoU: 74.282, time: 0.019s 250 | 2019-11-15 19:56:51,459 semantic_segmentation INFO: Sample: 249, validation pixAcc: 95.716, mIoU: 74.264, time: 0.019s 251 | 2019-11-15 19:56:52,695 semantic_segmentation INFO: Sample: 250, validation pixAcc: 95.713, mIoU: 74.565, time: 0.019s 252 | 2019-11-15 19:56:53,983 semantic_segmentation INFO: Sample: 251, validation pixAcc: 95.685, mIoU: 
74.587, time: 0.019s 253 | 2019-11-15 19:56:55,291 semantic_segmentation INFO: Sample: 252, validation pixAcc: 95.685, mIoU: 74.571, time: 0.020s 254 | 2019-11-15 19:56:56,615 semantic_segmentation INFO: Sample: 253, validation pixAcc: 95.683, mIoU: 74.578, time: 0.019s 255 | 2019-11-15 19:56:57,937 semantic_segmentation INFO: Sample: 254, validation pixAcc: 95.612, mIoU: 73.020, time: 0.019s 256 | 2019-11-15 19:56:59,238 semantic_segmentation INFO: Sample: 255, validation pixAcc: 95.620, mIoU: 73.017, time: 0.019s 257 | 2019-11-15 19:57:00,493 semantic_segmentation INFO: Sample: 256, validation pixAcc: 95.624, mIoU: 73.007, time: 0.019s 258 | 2019-11-15 19:57:01,842 semantic_segmentation INFO: Sample: 257, validation pixAcc: 95.629, mIoU: 73.112, time: 0.019s 259 | 2019-11-15 19:57:03,109 semantic_segmentation INFO: Sample: 258, validation pixAcc: 95.593, mIoU: 72.916, time: 0.019s 260 | 2019-11-15 19:57:04,444 semantic_segmentation INFO: Sample: 259, validation pixAcc: 95.586, mIoU: 72.903, time: 0.019s 261 | 2019-11-15 19:57:05,778 semantic_segmentation INFO: Sample: 260, validation pixAcc: 95.575, mIoU: 72.855, time: 0.019s 262 | 2019-11-15 19:57:07,091 semantic_segmentation INFO: Sample: 261, validation pixAcc: 95.567, mIoU: 72.844, time: 0.019s 263 | 2019-11-15 19:57:08,394 semantic_segmentation INFO: Sample: 262, validation pixAcc: 95.572, mIoU: 72.801, time: 0.019s 264 | 2019-11-15 19:57:09,622 semantic_segmentation INFO: Sample: 263, validation pixAcc: 95.547, mIoU: 72.800, time: 0.019s 265 | 2019-11-15 19:57:10,875 semantic_segmentation INFO: Sample: 264, validation pixAcc: 95.540, mIoU: 72.862, time: 0.019s 266 | 2019-11-15 19:57:12,333 semantic_segmentation INFO: Sample: 265, validation pixAcc: 95.515, mIoU: 72.744, time: 0.020s 267 | 2019-11-15 19:57:13,635 semantic_segmentation INFO: Sample: 266, validation pixAcc: 95.522, mIoU: 72.748, time: 0.019s 268 | 2019-11-15 19:57:14,929 semantic_segmentation INFO: Sample: 267, validation pixAcc: 95.519, mIoU: 72.791, time: 0.019s 269 | 2019-11-15 19:57:16,251 semantic_segmentation INFO: Sample: 268, validation pixAcc: 95.499, mIoU: 72.749, time: 0.019s 270 | 2019-11-15 19:57:17,568 semantic_segmentation INFO: Sample: 269, validation pixAcc: 95.500, mIoU: 72.754, time: 0.019s 271 | 2019-11-15 19:57:18,901 semantic_segmentation INFO: Sample: 270, validation pixAcc: 95.490, mIoU: 72.718, time: 0.019s 272 | 2019-11-15 19:57:20,282 semantic_segmentation INFO: Sample: 271, validation pixAcc: 95.498, mIoU: 72.712, time: 0.019s 273 | 2019-11-15 19:57:21,668 semantic_segmentation INFO: Sample: 272, validation pixAcc: 95.440, mIoU: 72.539, time: 0.019s 274 | 2019-11-15 19:57:22,984 semantic_segmentation INFO: Sample: 273, validation pixAcc: 95.438, mIoU: 72.498, time: 0.019s 275 | 2019-11-15 19:57:24,344 semantic_segmentation INFO: Sample: 274, validation pixAcc: 95.427, mIoU: 72.737, time: 0.020s 276 | 2019-11-15 19:57:25,667 semantic_segmentation INFO: Sample: 275, validation pixAcc: 95.427, mIoU: 72.726, time: 0.019s 277 | 2019-11-15 19:57:26,957 semantic_segmentation INFO: Sample: 276, validation pixAcc: 95.413, mIoU: 72.615, time: 0.019s 278 | 2019-11-15 19:57:28,220 semantic_segmentation INFO: Sample: 277, validation pixAcc: 95.416, mIoU: 72.608, time: 0.019s 279 | 2019-11-15 19:57:29,524 semantic_segmentation INFO: Sample: 278, validation pixAcc: 95.406, mIoU: 72.547, time: 0.019s 280 | 2019-11-15 19:57:30,758 semantic_segmentation INFO: Sample: 279, validation pixAcc: 95.396, mIoU: 72.552, time: 0.019s 281 | 2019-11-15 19:57:32,008 
semantic_segmentation INFO: Sample: 280, validation pixAcc: 95.383, mIoU: 72.511, time: 0.019s 282 | 2019-11-15 19:57:33,300 semantic_segmentation INFO: Sample: 281, validation pixAcc: 95.379, mIoU: 72.343, time: 0.019s 283 | 2019-11-15 19:57:34,664 semantic_segmentation INFO: Sample: 282, validation pixAcc: 95.373, mIoU: 72.305, time: 0.020s 284 | 2019-11-15 19:57:36,084 semantic_segmentation INFO: Sample: 283, validation pixAcc: 95.380, mIoU: 72.310, time: 0.019s 285 | 2019-11-15 19:57:37,412 semantic_segmentation INFO: Sample: 284, validation pixAcc: 95.375, mIoU: 72.290, time: 0.020s 286 | 2019-11-15 19:57:38,702 semantic_segmentation INFO: Sample: 285, validation pixAcc: 95.366, mIoU: 72.279, time: 0.019s 287 | 2019-11-15 19:57:39,961 semantic_segmentation INFO: Sample: 286, validation pixAcc: 95.367, mIoU: 72.289, time: 0.019s 288 | 2019-11-15 19:57:41,233 semantic_segmentation INFO: Sample: 287, validation pixAcc: 95.369, mIoU: 72.279, time: 0.019s 289 | 2019-11-15 19:57:42,577 semantic_segmentation INFO: Sample: 288, validation pixAcc: 95.364, mIoU: 72.247, time: 0.019s 290 | 2019-11-15 19:57:44,112 semantic_segmentation INFO: Sample: 289, validation pixAcc: 95.358, mIoU: 72.154, time: 0.019s 291 | 2019-11-15 19:57:45,482 semantic_segmentation INFO: Sample: 290, validation pixAcc: 95.359, mIoU: 72.165, time: 0.020s 292 | 2019-11-15 19:57:46,887 semantic_segmentation INFO: Sample: 291, validation pixAcc: 95.353, mIoU: 72.158, time: 0.019s 293 | 2019-11-15 19:57:48,260 semantic_segmentation INFO: Sample: 292, validation pixAcc: 95.357, mIoU: 72.235, time: 0.019s 294 | 2019-11-15 19:57:49,579 semantic_segmentation INFO: Sample: 293, validation pixAcc: 95.355, mIoU: 72.226, time: 0.019s 295 | 2019-11-15 19:57:50,890 semantic_segmentation INFO: Sample: 294, validation pixAcc: 95.354, mIoU: 72.220, time: 0.019s 296 | 2019-11-15 19:57:52,232 semantic_segmentation INFO: Sample: 295, validation pixAcc: 95.336, mIoU: 72.207, time: 0.019s 297 | 2019-11-15 19:57:53,490 semantic_segmentation INFO: Sample: 296, validation pixAcc: 95.339, mIoU: 72.203, time: 0.020s 298 | 2019-11-15 19:57:54,808 semantic_segmentation INFO: Sample: 297, validation pixAcc: 95.341, mIoU: 72.179, time: 0.019s 299 | 2019-11-15 19:57:56,157 semantic_segmentation INFO: Sample: 298, validation pixAcc: 95.343, mIoU: 72.251, time: 0.019s 300 | 2019-11-15 19:57:57,509 semantic_segmentation INFO: Sample: 299, validation pixAcc: 95.346, mIoU: 72.202, time: 0.019s 301 | 2019-11-15 19:57:58,826 semantic_segmentation INFO: Sample: 300, validation pixAcc: 95.337, mIoU: 72.202, time: 0.019s 302 | 2019-11-15 19:58:00,203 semantic_segmentation INFO: Sample: 301, validation pixAcc: 95.326, mIoU: 72.156, time: 0.019s 303 | 2019-11-15 19:58:01,553 semantic_segmentation INFO: Sample: 302, validation pixAcc: 95.330, mIoU: 72.164, time: 0.019s 304 | 2019-11-15 19:58:02,894 semantic_segmentation INFO: Sample: 303, validation pixAcc: 95.327, mIoU: 72.130, time: 0.019s 305 | 2019-11-15 19:58:04,266 semantic_segmentation INFO: Sample: 304, validation pixAcc: 95.326, mIoU: 72.123, time: 0.020s 306 | 2019-11-15 19:58:05,633 semantic_segmentation INFO: Sample: 305, validation pixAcc: 95.307, mIoU: 72.082, time: 0.019s 307 | 2019-11-15 19:58:07,024 semantic_segmentation INFO: Sample: 306, validation pixAcc: 95.315, mIoU: 72.083, time: 0.019s 308 | 2019-11-15 19:58:08,334 semantic_segmentation INFO: Sample: 307, validation pixAcc: 95.313, mIoU: 72.263, time: 0.019s 309 | 2019-11-15 19:58:09,673 semantic_segmentation INFO: Sample: 308, validation 
pixAcc: 95.315, mIoU: 72.290, time: 0.019s 310 | 2019-11-15 19:58:11,040 semantic_segmentation INFO: Sample: 309, validation pixAcc: 95.302, mIoU: 72.293, time: 0.018s 311 | 2019-11-15 19:58:12,404 semantic_segmentation INFO: Sample: 310, validation pixAcc: 95.299, mIoU: 72.243, time: 0.019s 312 | 2019-11-15 19:58:13,708 semantic_segmentation INFO: Sample: 311, validation pixAcc: 95.295, mIoU: 72.221, time: 0.019s 313 | 2019-11-15 19:58:14,986 semantic_segmentation INFO: Sample: 312, validation pixAcc: 95.293, mIoU: 72.198, time: 0.019s 314 | 2019-11-15 19:58:16,325 semantic_segmentation INFO: Sample: 313, validation pixAcc: 95.287, mIoU: 72.189, time: 0.019s 315 | 2019-11-15 19:58:17,645 semantic_segmentation INFO: Sample: 314, validation pixAcc: 95.269, mIoU: 72.120, time: 0.019s 316 | 2019-11-15 19:58:18,901 semantic_segmentation INFO: Sample: 315, validation pixAcc: 95.267, mIoU: 72.085, time: 0.019s 317 | 2019-11-15 19:58:20,230 semantic_segmentation INFO: Sample: 316, validation pixAcc: 95.264, mIoU: 72.353, time: 0.019s 318 | 2019-11-15 19:58:21,504 semantic_segmentation INFO: Sample: 317, validation pixAcc: 95.264, mIoU: 72.360, time: 0.019s 319 | 2019-11-15 19:58:22,786 semantic_segmentation INFO: Sample: 318, validation pixAcc: 95.255, mIoU: 72.363, time: 0.018s 320 | 2019-11-15 19:58:24,025 semantic_segmentation INFO: Sample: 319, validation pixAcc: 95.244, mIoU: 72.428, time: 0.019s 321 | 2019-11-15 19:58:25,388 semantic_segmentation INFO: Sample: 320, validation pixAcc: 95.245, mIoU: 72.400, time: 0.019s 322 | 2019-11-15 19:58:26,699 semantic_segmentation INFO: Sample: 321, validation pixAcc: 95.247, mIoU: 72.437, time: 0.019s 323 | 2019-11-15 19:58:28,073 semantic_segmentation INFO: Sample: 322, validation pixAcc: 95.239, mIoU: 72.483, time: 0.019s 324 | 2019-11-15 19:58:29,461 semantic_segmentation INFO: Sample: 323, validation pixAcc: 95.240, mIoU: 72.540, time: 0.019s 325 | 2019-11-15 19:58:30,848 semantic_segmentation INFO: Sample: 324, validation pixAcc: 95.229, mIoU: 72.472, time: 0.019s 326 | 2019-11-15 19:58:32,192 semantic_segmentation INFO: Sample: 325, validation pixAcc: 95.187, mIoU: 72.425, time: 0.019s 327 | 2019-11-15 19:58:33,494 semantic_segmentation INFO: Sample: 326, validation pixAcc: 95.186, mIoU: 72.452, time: 0.019s 328 | 2019-11-15 19:58:34,813 semantic_segmentation INFO: Sample: 327, validation pixAcc: 95.154, mIoU: 72.343, time: 0.019s 329 | 2019-11-15 19:58:36,102 semantic_segmentation INFO: Sample: 328, validation pixAcc: 95.157, mIoU: 72.331, time: 0.019s 330 | 2019-11-15 19:58:37,438 semantic_segmentation INFO: Sample: 329, validation pixAcc: 95.163, mIoU: 72.339, time: 0.019s 331 | 2019-11-15 19:58:38,688 semantic_segmentation INFO: Sample: 330, validation pixAcc: 95.162, mIoU: 72.346, time: 0.019s 332 | 2019-11-15 19:58:40,002 semantic_segmentation INFO: Sample: 331, validation pixAcc: 95.163, mIoU: 72.344, time: 0.019s 333 | 2019-11-15 19:58:41,265 semantic_segmentation INFO: Sample: 332, validation pixAcc: 95.156, mIoU: 72.352, time: 0.019s 334 | 2019-11-15 19:58:42,618 semantic_segmentation INFO: Sample: 333, validation pixAcc: 95.103, mIoU: 72.316, time: 0.019s 335 | 2019-11-15 19:58:43,905 semantic_segmentation INFO: Sample: 334, validation pixAcc: 95.107, mIoU: 72.333, time: 0.019s 336 | 2019-11-15 19:58:45,225 semantic_segmentation INFO: Sample: 335, validation pixAcc: 95.108, mIoU: 72.335, time: 0.019s 337 | 2019-11-15 19:58:46,568 semantic_segmentation INFO: Sample: 336, validation pixAcc: 95.103, mIoU: 72.300, time: 0.019s 338 | 
2019-11-15 19:58:47,928 semantic_segmentation INFO: Sample: 337, validation pixAcc: 95.085, mIoU: 72.269, time: 0.020s 339 | 2019-11-15 19:58:49,219 semantic_segmentation INFO: Sample: 338, validation pixAcc: 95.086, mIoU: 72.271, time: 0.018s 340 | 2019-11-15 19:58:50,469 semantic_segmentation INFO: Sample: 339, validation pixAcc: 95.090, mIoU: 72.325, time: 0.019s 341 | 2019-11-15 19:58:51,757 semantic_segmentation INFO: Sample: 340, validation pixAcc: 95.092, mIoU: 72.325, time: 0.019s 342 | 2019-11-15 19:58:53,111 semantic_segmentation INFO: Sample: 341, validation pixAcc: 95.094, mIoU: 72.286, time: 0.018s 343 | 2019-11-15 19:58:54,405 semantic_segmentation INFO: Sample: 342, validation pixAcc: 95.099, mIoU: 72.323, time: 0.019s 344 | 2019-11-15 19:58:55,743 semantic_segmentation INFO: Sample: 343, validation pixAcc: 95.101, mIoU: 72.613, time: 0.019s 345 | 2019-11-15 19:58:56,871 semantic_segmentation INFO: Sample: 344, validation pixAcc: 95.098, mIoU: 72.880, time: 0.019s 346 | 2019-11-15 19:58:58,216 semantic_segmentation INFO: Sample: 345, validation pixAcc: 95.103, mIoU: 72.899, time: 0.024s 347 | 2019-11-15 19:58:59,651 semantic_segmentation INFO: Sample: 346, validation pixAcc: 95.094, mIoU: 72.914, time: 0.019s 348 | 2019-11-15 19:59:00,975 semantic_segmentation INFO: Sample: 347, validation pixAcc: 95.084, mIoU: 72.872, time: 0.019s 349 | 2019-11-15 19:59:02,340 semantic_segmentation INFO: Sample: 348, validation pixAcc: 95.086, mIoU: 72.854, time: 0.018s 350 | 2019-11-15 19:59:03,612 semantic_segmentation INFO: Sample: 349, validation pixAcc: 95.079, mIoU: 72.853, time: 0.018s 351 | 2019-11-15 19:59:04,823 semantic_segmentation INFO: Sample: 350, validation pixAcc: 95.086, mIoU: 72.849, time: 0.018s 352 | 2019-11-15 19:59:06,072 semantic_segmentation INFO: Sample: 351, validation pixAcc: 95.075, mIoU: 72.863, time: 0.018s 353 | 2019-11-15 19:59:07,429 semantic_segmentation INFO: Sample: 352, validation pixAcc: 95.071, mIoU: 72.836, time: 0.018s 354 | 2019-11-15 19:59:08,697 semantic_segmentation INFO: Sample: 353, validation pixAcc: 95.073, mIoU: 72.828, time: 0.019s 355 | 2019-11-15 19:59:10,050 semantic_segmentation INFO: Sample: 354, validation pixAcc: 95.078, mIoU: 72.831, time: 0.019s 356 | 2019-11-15 19:59:11,406 semantic_segmentation INFO: Sample: 355, validation pixAcc: 95.083, mIoU: 72.828, time: 0.018s 357 | 2019-11-15 19:59:12,730 semantic_segmentation INFO: Sample: 356, validation pixAcc: 95.083, mIoU: 72.832, time: 0.018s 358 | 2019-11-15 19:59:14,001 semantic_segmentation INFO: Sample: 357, validation pixAcc: 95.071, mIoU: 72.851, time: 0.019s 359 | 2019-11-15 19:59:15,317 semantic_segmentation INFO: Sample: 358, validation pixAcc: 95.073, mIoU: 72.840, time: 0.019s 360 | 2019-11-15 19:59:16,644 semantic_segmentation INFO: Sample: 359, validation pixAcc: 95.073, mIoU: 72.842, time: 0.019s 361 | 2019-11-15 19:59:17,940 semantic_segmentation INFO: Sample: 360, validation pixAcc: 95.076, mIoU: 72.838, time: 0.018s 362 | 2019-11-15 19:59:19,233 semantic_segmentation INFO: Sample: 361, validation pixAcc: 95.074, mIoU: 72.827, time: 0.018s 363 | 2019-11-15 19:59:20,502 semantic_segmentation INFO: Sample: 362, validation pixAcc: 95.070, mIoU: 72.834, time: 0.018s 364 | 2019-11-15 19:59:21,782 semantic_segmentation INFO: Sample: 363, validation pixAcc: 95.068, mIoU: 72.784, time: 0.018s 365 | 2019-11-15 19:59:23,104 semantic_segmentation INFO: Sample: 364, validation pixAcc: 95.070, mIoU: 72.836, time: 0.018s 366 | 2019-11-15 19:59:24,418 semantic_segmentation INFO: 
Sample: 365, validation pixAcc: 95.062, mIoU: 72.789, time: 0.019s 367 | 2019-11-15 19:59:25,802 semantic_segmentation INFO: Sample: 366, validation pixAcc: 95.059, mIoU: 72.775, time: 0.018s 368 | 2019-11-15 19:59:27,103 semantic_segmentation INFO: Sample: 367, validation pixAcc: 95.058, mIoU: 72.758, time: 0.019s 369 | 2019-11-15 19:59:28,418 semantic_segmentation INFO: Sample: 368, validation pixAcc: 95.057, mIoU: 72.773, time: 0.018s 370 | 2019-11-15 19:59:29,802 semantic_segmentation INFO: Sample: 369, validation pixAcc: 95.062, mIoU: 72.784, time: 0.018s 371 | 2019-11-15 19:59:30,993 semantic_segmentation INFO: Sample: 370, validation pixAcc: 95.066, mIoU: 72.780, time: 0.018s 372 | 2019-11-15 19:59:32,294 semantic_segmentation INFO: Sample: 371, validation pixAcc: 95.064, mIoU: 72.770, time: 0.018s 373 | 2019-11-15 19:59:33,648 semantic_segmentation INFO: Sample: 372, validation pixAcc: 95.059, mIoU: 72.861, time: 0.018s 374 | 2019-11-15 19:59:35,001 semantic_segmentation INFO: Sample: 373, validation pixAcc: 95.040, mIoU: 72.812, time: 0.019s 375 | 2019-11-15 19:59:36,374 semantic_segmentation INFO: Sample: 374, validation pixAcc: 95.044, mIoU: 72.806, time: 0.018s 376 | 2019-11-15 19:59:37,748 semantic_segmentation INFO: Sample: 375, validation pixAcc: 95.037, mIoU: 72.836, time: 0.019s 377 | 2019-11-15 19:59:39,089 semantic_segmentation INFO: Sample: 376, validation pixAcc: 95.038, mIoU: 72.813, time: 0.018s 378 | 2019-11-15 19:59:40,430 semantic_segmentation INFO: Sample: 377, validation pixAcc: 95.026, mIoU: 72.830, time: 0.018s 379 | 2019-11-15 19:59:41,760 semantic_segmentation INFO: Sample: 378, validation pixAcc: 95.024, mIoU: 72.813, time: 0.019s 380 | 2019-11-15 19:59:43,125 semantic_segmentation INFO: Sample: 379, validation pixAcc: 95.019, mIoU: 72.835, time: 0.018s 381 | 2019-11-15 19:59:44,435 semantic_segmentation INFO: Sample: 380, validation pixAcc: 95.023, mIoU: 72.834, time: 0.018s 382 | 2019-11-15 19:59:45,789 semantic_segmentation INFO: Sample: 381, validation pixAcc: 95.021, mIoU: 72.826, time: 0.018s 383 | 2019-11-15 19:59:47,171 semantic_segmentation INFO: Sample: 382, validation pixAcc: 95.014, mIoU: 72.789, time: 0.018s 384 | 2019-11-15 19:59:48,464 semantic_segmentation INFO: Sample: 383, validation pixAcc: 95.008, mIoU: 72.789, time: 0.018s 385 | 2019-11-15 19:59:49,787 semantic_segmentation INFO: Sample: 384, validation pixAcc: 95.011, mIoU: 72.792, time: 0.019s 386 | 2019-11-15 19:59:51,126 semantic_segmentation INFO: Sample: 385, validation pixAcc: 95.015, mIoU: 72.792, time: 0.018s 387 | 2019-11-15 19:59:52,404 semantic_segmentation INFO: Sample: 386, validation pixAcc: 95.018, mIoU: 72.777, time: 0.018s 388 | 2019-11-15 19:59:53,663 semantic_segmentation INFO: Sample: 387, validation pixAcc: 95.025, mIoU: 72.772, time: 0.018s 389 | 2019-11-15 19:59:54,952 semantic_segmentation INFO: Sample: 388, validation pixAcc: 95.024, mIoU: 72.736, time: 0.018s 390 | 2019-11-15 19:59:56,271 semantic_segmentation INFO: Sample: 389, validation pixAcc: 95.028, mIoU: 72.744, time: 0.018s 391 | 2019-11-15 19:59:57,630 semantic_segmentation INFO: Sample: 390, validation pixAcc: 95.028, mIoU: 72.767, time: 0.018s 392 | 2019-11-15 19:59:58,974 semantic_segmentation INFO: Sample: 391, validation pixAcc: 95.026, mIoU: 72.784, time: 0.018s 393 | 2019-11-15 20:00:00,296 semantic_segmentation INFO: Sample: 392, validation pixAcc: 95.028, mIoU: 72.772, time: 0.018s 394 | 2019-11-15 20:00:01,547 semantic_segmentation INFO: Sample: 393, validation pixAcc: 95.028, mIoU: 72.764, 
time: 0.019s 395 | 2019-11-15 20:00:02,845 semantic_segmentation INFO: Sample: 394, validation pixAcc: 95.031, mIoU: 72.750, time: 0.019s 396 | 2019-11-15 20:00:04,070 semantic_segmentation INFO: Sample: 395, validation pixAcc: 95.035, mIoU: 72.794, time: 0.019s 397 | 2019-11-15 20:00:05,400 semantic_segmentation INFO: Sample: 396, validation pixAcc: 95.035, mIoU: 72.786, time: 0.018s 398 | 2019-11-15 20:00:06,750 semantic_segmentation INFO: Sample: 397, validation pixAcc: 95.031, mIoU: 72.745, time: 0.019s 399 | 2019-11-15 20:00:08,119 semantic_segmentation INFO: Sample: 398, validation pixAcc: 95.029, mIoU: 72.737, time: 0.019s 400 | 2019-11-15 20:00:09,482 semantic_segmentation INFO: Sample: 399, validation pixAcc: 95.035, mIoU: 72.738, time: 0.018s 401 | 2019-11-15 20:00:10,762 semantic_segmentation INFO: Sample: 400, validation pixAcc: 95.032, mIoU: 72.711, time: 0.019s 402 | 2019-11-15 20:00:12,145 semantic_segmentation INFO: Sample: 401, validation pixAcc: 95.028, mIoU: 72.708, time: 0.019s 403 | 2019-11-15 20:00:13,460 semantic_segmentation INFO: Sample: 402, validation pixAcc: 95.026, mIoU: 72.702, time: 0.018s 404 | 2019-11-15 20:00:14,794 semantic_segmentation INFO: Sample: 403, validation pixAcc: 95.021, mIoU: 72.704, time: 0.018s 405 | 2019-11-15 20:00:16,121 semantic_segmentation INFO: Sample: 404, validation pixAcc: 95.016, mIoU: 72.708, time: 0.019s 406 | 2019-11-15 20:00:17,363 semantic_segmentation INFO: Sample: 405, validation pixAcc: 95.019, mIoU: 72.754, time: 0.018s 407 | 2019-11-15 20:00:18,655 semantic_segmentation INFO: Sample: 406, validation pixAcc: 95.018, mIoU: 72.813, time: 0.018s 408 | 2019-11-15 20:00:19,961 semantic_segmentation INFO: Sample: 407, validation pixAcc: 95.013, mIoU: 72.799, time: 0.018s 409 | 2019-11-15 20:00:21,270 semantic_segmentation INFO: Sample: 408, validation pixAcc: 95.012, mIoU: 72.801, time: 0.019s 410 | 2019-11-15 20:00:22,590 semantic_segmentation INFO: Sample: 409, validation pixAcc: 95.003, mIoU: 72.787, time: 0.019s 411 | 2019-11-15 20:00:23,840 semantic_segmentation INFO: Sample: 410, validation pixAcc: 94.997, mIoU: 72.780, time: 0.018s 412 | 2019-11-15 20:00:25,133 semantic_segmentation INFO: Sample: 411, validation pixAcc: 94.989, mIoU: 72.769, time: 0.019s 413 | 2019-11-15 20:00:26,454 semantic_segmentation INFO: Sample: 412, validation pixAcc: 94.988, mIoU: 72.776, time: 0.018s 414 | 2019-11-15 20:00:27,762 semantic_segmentation INFO: Sample: 413, validation pixAcc: 94.979, mIoU: 72.629, time: 0.018s 415 | 2019-11-15 20:00:29,039 semantic_segmentation INFO: Sample: 414, validation pixAcc: 94.982, mIoU: 72.678, time: 0.018s 416 | 2019-11-15 20:00:30,381 semantic_segmentation INFO: Sample: 415, validation pixAcc: 94.985, mIoU: 72.659, time: 0.018s 417 | 2019-11-15 20:00:31,752 semantic_segmentation INFO: Sample: 416, validation pixAcc: 94.982, mIoU: 72.657, time: 0.018s 418 | 2019-11-15 20:00:32,995 semantic_segmentation INFO: Sample: 417, validation pixAcc: 94.986, mIoU: 72.640, time: 0.018s 419 | 2019-11-15 20:00:34,307 semantic_segmentation INFO: Sample: 418, validation pixAcc: 94.991, mIoU: 72.631, time: 0.018s 420 | 2019-11-15 20:00:35,619 semantic_segmentation INFO: Sample: 419, validation pixAcc: 94.996, mIoU: 72.628, time: 0.018s 421 | 2019-11-15 20:00:36,891 semantic_segmentation INFO: Sample: 420, validation pixAcc: 95.002, mIoU: 72.631, time: 0.019s 422 | 2019-11-15 20:00:38,217 semantic_segmentation INFO: Sample: 421, validation pixAcc: 94.999, mIoU: 72.631, time: 0.018s 423 | 2019-11-15 20:00:39,562 
semantic_segmentation INFO: Sample: 422, validation pixAcc: 95.002, mIoU: 72.607, time: 0.018s 424 | 2019-11-15 20:00:40,870 semantic_segmentation INFO: Sample: 423, validation pixAcc: 94.997, mIoU: 72.590, time: 0.018s 425 | 2019-11-15 20:00:42,145 semantic_segmentation INFO: Sample: 424, validation pixAcc: 94.984, mIoU: 72.557, time: 0.019s 426 | 2019-11-15 20:00:43,457 semantic_segmentation INFO: Sample: 425, validation pixAcc: 94.986, mIoU: 72.556, time: 0.018s 427 | 2019-11-15 20:00:44,785 semantic_segmentation INFO: Sample: 426, validation pixAcc: 94.989, mIoU: 72.565, time: 0.019s 428 | 2019-11-15 20:00:46,140 semantic_segmentation INFO: Sample: 427, validation pixAcc: 94.991, mIoU: 72.556, time: 0.019s 429 | 2019-11-15 20:00:47,678 semantic_segmentation INFO: Sample: 428, validation pixAcc: 94.987, mIoU: 72.534, time: 0.019s 430 | 2019-11-15 20:00:48,963 semantic_segmentation INFO: Sample: 429, validation pixAcc: 94.984, mIoU: 72.520, time: 0.020s 431 | 2019-11-15 20:00:50,302 semantic_segmentation INFO: Sample: 430, validation pixAcc: 94.983, mIoU: 72.547, time: 0.019s 432 | 2019-11-15 20:00:51,596 semantic_segmentation INFO: Sample: 431, validation pixAcc: 94.987, mIoU: 72.580, time: 0.019s 433 | 2019-11-15 20:00:52,858 semantic_segmentation INFO: Sample: 432, validation pixAcc: 94.980, mIoU: 72.606, time: 0.020s 434 | 2019-11-15 20:00:54,113 semantic_segmentation INFO: Sample: 433, validation pixAcc: 94.978, mIoU: 72.638, time: 0.020s 435 | 2019-11-15 20:00:55,509 semantic_segmentation INFO: Sample: 434, validation pixAcc: 94.980, mIoU: 72.627, time: 0.020s 436 | 2019-11-15 20:00:56,834 semantic_segmentation INFO: Sample: 435, validation pixAcc: 94.984, mIoU: 72.622, time: 0.019s 437 | 2019-11-15 20:00:58,218 semantic_segmentation INFO: Sample: 436, validation pixAcc: 94.991, mIoU: 72.651, time: 0.020s 438 | 2019-11-15 20:00:59,572 semantic_segmentation INFO: Sample: 437, validation pixAcc: 94.992, mIoU: 72.663, time: 0.020s 439 | 2019-11-15 20:01:00,877 semantic_segmentation INFO: Sample: 438, validation pixAcc: 94.978, mIoU: 72.665, time: 0.019s 440 | 2019-11-15 20:01:02,197 semantic_segmentation INFO: Sample: 439, validation pixAcc: 94.969, mIoU: 72.634, time: 0.019s 441 | 2019-11-15 20:01:03,570 semantic_segmentation INFO: Sample: 440, validation pixAcc: 94.972, mIoU: 72.640, time: 0.019s 442 | 2019-11-15 20:01:04,832 semantic_segmentation INFO: Sample: 441, validation pixAcc: 94.971, mIoU: 72.656, time: 0.019s 443 | 2019-11-15 20:01:06,163 semantic_segmentation INFO: Sample: 442, validation pixAcc: 94.967, mIoU: 72.602, time: 0.019s 444 | 2019-11-15 20:01:07,465 semantic_segmentation INFO: Sample: 443, validation pixAcc: 94.968, mIoU: 72.599, time: 0.019s 445 | 2019-11-15 20:01:08,797 semantic_segmentation INFO: Sample: 444, validation pixAcc: 94.967, mIoU: 72.587, time: 0.019s 446 | 2019-11-15 20:01:10,129 semantic_segmentation INFO: Sample: 445, validation pixAcc: 94.935, mIoU: 71.477, time: 0.019s 447 | 2019-11-15 20:01:11,429 semantic_segmentation INFO: Sample: 446, validation pixAcc: 94.928, mIoU: 71.466, time: 0.019s 448 | 2019-11-15 20:01:12,675 semantic_segmentation INFO: Sample: 447, validation pixAcc: 94.919, mIoU: 71.434, time: 0.019s 449 | 2019-11-15 20:01:13,991 semantic_segmentation INFO: Sample: 448, validation pixAcc: 94.913, mIoU: 71.454, time: 0.020s 450 | 2019-11-15 20:01:15,331 semantic_segmentation INFO: Sample: 449, validation pixAcc: 94.911, mIoU: 71.433, time: 0.019s 451 | 2019-11-15 20:01:16,587 semantic_segmentation INFO: Sample: 450, validation 
pixAcc: 94.915, mIoU: 71.433, time: 0.019s 452 | 2019-11-15 20:01:17,813 semantic_segmentation INFO: Sample: 451, validation pixAcc: 94.915, mIoU: 71.465, time: 0.019s 453 | 2019-11-15 20:01:19,130 semantic_segmentation INFO: Sample: 452, validation pixAcc: 94.913, mIoU: 71.467, time: 0.020s 454 | 2019-11-15 20:01:20,434 semantic_segmentation INFO: Sample: 453, validation pixAcc: 94.909, mIoU: 71.482, time: 0.019s 455 | 2019-11-15 20:01:21,792 semantic_segmentation INFO: Sample: 454, validation pixAcc: 94.912, mIoU: 71.454, time: 0.019s 456 | 2019-11-15 20:01:23,167 semantic_segmentation INFO: Sample: 455, validation pixAcc: 94.896, mIoU: 71.404, time: 0.020s 457 | 2019-11-15 20:01:24,553 semantic_segmentation INFO: Sample: 456, validation pixAcc: 94.897, mIoU: 71.398, time: 0.019s 458 | 2019-11-15 20:01:25,930 semantic_segmentation INFO: Sample: 457, validation pixAcc: 94.892, mIoU: 71.377, time: 0.019s 459 | 2019-11-15 20:01:27,252 semantic_segmentation INFO: Sample: 458, validation pixAcc: 94.890, mIoU: 71.406, time: 0.020s 460 | 2019-11-15 20:01:28,631 semantic_segmentation INFO: Sample: 459, validation pixAcc: 94.891, mIoU: 71.406, time: 0.020s 461 | 2019-11-15 20:01:29,976 semantic_segmentation INFO: Sample: 460, validation pixAcc: 94.894, mIoU: 71.404, time: 0.019s 462 | 2019-11-15 20:01:31,365 semantic_segmentation INFO: Sample: 461, validation pixAcc: 94.898, mIoU: 71.413, time: 0.019s 463 | 2019-11-15 20:01:32,728 semantic_segmentation INFO: Sample: 462, validation pixAcc: 94.890, mIoU: 71.458, time: 0.019s 464 | 2019-11-15 20:01:34,249 semantic_segmentation INFO: Sample: 463, validation pixAcc: 94.884, mIoU: 71.454, time: 0.018s 465 | 2019-11-15 20:01:35,559 semantic_segmentation INFO: Sample: 464, validation pixAcc: 94.886, mIoU: 71.452, time: 0.018s 466 | 2019-11-15 20:01:36,815 semantic_segmentation INFO: Sample: 465, validation pixAcc: 94.884, mIoU: 71.441, time: 0.018s 467 | 2019-11-15 20:01:38,069 semantic_segmentation INFO: Sample: 466, validation pixAcc: 94.882, mIoU: 71.437, time: 0.018s 468 | 2019-11-15 20:01:39,315 semantic_segmentation INFO: Sample: 467, validation pixAcc: 94.874, mIoU: 71.402, time: 0.018s 469 | 2019-11-15 20:01:40,640 semantic_segmentation INFO: Sample: 468, validation pixAcc: 94.870, mIoU: 71.412, time: 0.018s 470 | 2019-11-15 20:01:42,003 semantic_segmentation INFO: Sample: 469, validation pixAcc: 94.872, mIoU: 71.389, time: 0.018s 471 | 2019-11-15 20:01:43,250 semantic_segmentation INFO: Sample: 470, validation pixAcc: 94.876, mIoU: 71.381, time: 0.019s 472 | 2019-11-15 20:01:44,477 semantic_segmentation INFO: Sample: 471, validation pixAcc: 94.875, mIoU: 71.373, time: 0.018s 473 | 2019-11-15 20:01:45,718 semantic_segmentation INFO: Sample: 472, validation pixAcc: 94.873, mIoU: 71.410, time: 0.018s 474 | 2019-11-15 20:01:47,028 semantic_segmentation INFO: Sample: 473, validation pixAcc: 94.878, mIoU: 71.404, time: 0.018s 475 | 2019-11-15 20:01:48,350 semantic_segmentation INFO: Sample: 474, validation pixAcc: 94.880, mIoU: 71.400, time: 0.019s 476 | 2019-11-15 20:01:49,622 semantic_segmentation INFO: Sample: 475, validation pixAcc: 94.878, mIoU: 71.310, time: 0.019s 477 | 2019-11-15 20:01:50,887 semantic_segmentation INFO: Sample: 476, validation pixAcc: 94.874, mIoU: 71.345, time: 0.018s 478 | 2019-11-15 20:01:52,191 semantic_segmentation INFO: Sample: 477, validation pixAcc: 94.875, mIoU: 71.388, time: 0.019s 479 | 2019-11-15 20:01:53,500 semantic_segmentation INFO: Sample: 478, validation pixAcc: 94.871, mIoU: 71.389, time: 0.018s 480 | 
2019-11-15 20:01:54,788 semantic_segmentation INFO: Sample: 479, validation pixAcc: 94.868, mIoU: 71.393, time: 0.018s 481 | 2019-11-15 20:01:56,098 semantic_segmentation INFO: Sample: 480, validation pixAcc: 94.868, mIoU: 71.391, time: 0.019s 482 | 2019-11-15 20:01:57,388 semantic_segmentation INFO: Sample: 481, validation pixAcc: 94.873, mIoU: 71.391, time: 0.018s 483 | 2019-11-15 20:01:58,699 semantic_segmentation INFO: Sample: 482, validation pixAcc: 94.869, mIoU: 71.357, time: 0.018s 484 | 2019-11-15 20:02:00,002 semantic_segmentation INFO: Sample: 483, validation pixAcc: 94.874, mIoU: 71.357, time: 0.018s 485 | 2019-11-15 20:02:01,313 semantic_segmentation INFO: Sample: 484, validation pixAcc: 94.880, mIoU: 71.355, time: 0.018s 486 | 2019-11-15 20:02:02,581 semantic_segmentation INFO: Sample: 485, validation pixAcc: 94.859, mIoU: 71.319, time: 0.018s 487 | 2019-11-15 20:02:03,891 semantic_segmentation INFO: Sample: 486, validation pixAcc: 94.851, mIoU: 71.297, time: 0.018s 488 | 2019-11-15 20:02:05,221 semantic_segmentation INFO: Sample: 487, validation pixAcc: 94.849, mIoU: 71.287, time: 0.018s 489 | 2019-11-15 20:02:06,552 semantic_segmentation INFO: Sample: 488, validation pixAcc: 94.845, mIoU: 71.289, time: 0.019s 490 | 2019-11-15 20:02:07,930 semantic_segmentation INFO: Sample: 489, validation pixAcc: 94.822, mIoU: 71.256, time: 0.018s 491 | 2019-11-15 20:02:09,279 semantic_segmentation INFO: Sample: 490, validation pixAcc: 94.821, mIoU: 71.286, time: 0.019s 492 | 2019-11-15 20:02:10,612 semantic_segmentation INFO: Sample: 491, validation pixAcc: 94.823, mIoU: 71.287, time: 0.018s 493 | 2019-11-15 20:02:11,915 semantic_segmentation INFO: Sample: 492, validation pixAcc: 94.823, mIoU: 71.304, time: 0.018s 494 | 2019-11-15 20:02:13,223 semantic_segmentation INFO: Sample: 493, validation pixAcc: 94.822, mIoU: 71.297, time: 0.019s 495 | 2019-11-15 20:02:14,571 semantic_segmentation INFO: Sample: 494, validation pixAcc: 94.823, mIoU: 71.292, time: 0.018s 496 | 2019-11-15 20:02:15,895 semantic_segmentation INFO: Sample: 495, validation pixAcc: 94.818, mIoU: 71.282, time: 0.018s 497 | 2019-11-15 20:02:17,210 semantic_segmentation INFO: Sample: 496, validation pixAcc: 94.818, mIoU: 71.296, time: 0.019s 498 | 2019-11-15 20:02:18,520 semantic_segmentation INFO: Sample: 497, validation pixAcc: 94.817, mIoU: 71.287, time: 0.019s 499 | 2019-11-15 20:02:19,814 semantic_segmentation INFO: Sample: 498, validation pixAcc: 94.809, mIoU: 71.218, time: 0.018s 500 | 2019-11-15 20:02:21,192 semantic_segmentation INFO: Sample: 499, validation pixAcc: 94.811, mIoU: 71.219, time: 0.019s 501 | 2019-11-15 20:02:22,524 semantic_segmentation INFO: Sample: 500, validation pixAcc: 94.814, mIoU: 71.242, time: 0.018s 502 | 2019-11-15 20:02:23,630 semantic_segmentation INFO: Evaluate: Average mIoU: 0.710, Average pixAcc: 0.957, Average time: 0.019 503 |
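The pixAcc/mIoU columns in this log are running scores: the metric object is updated once per validation sample and re-read after each update (see evaluate.py further down). A minimal sketch of one standard way to compute the two scores from a confusion matrix (an illustration only, not the repo's utils/metric.py; all names are hypothetical):

import numpy as np

NUM_CLASSES = 19  # Cityscapes train classes

def update_confusion(conf, pred, target, ignore_index=-1):
    # accumulate the confusion matrix over one sample; pred/target are
    # flat integer arrays of per-pixel class ids
    valid = target != ignore_index
    idx = NUM_CLASSES * target[valid] + pred[valid]
    conf += np.bincount(idx, minlength=NUM_CLASSES ** 2).reshape(NUM_CLASSES, NUM_CLASSES)
    return conf

def pixacc_miou(conf):
    # pixAcc = correctly labeled pixels / all labeled pixels;
    # IoU per class = TP / (TP + FP + FN); mIoU averages the defined classes
    pix_acc = np.diag(conf).sum() / conf.sum()
    iou = np.diag(conf) / (conf.sum(axis=1) + conf.sum(axis=0) - np.diag(conf))
    return pix_acc, np.nanmean(iou)

conf = np.zeros((NUM_CLASSES, NUM_CLASSES), dtype=np.int64)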
-------------------------------------------------------------------------------- /configs/.gitignore: -------------------------------------------------------------------------------- 1 | icnet_customize.yaml -------------------------------------------------------------------------------- /configs/icnet.yaml: -------------------------------------------------------------------------------- 1 | ### 1.Model 2 | model: 3 | name: "icnet" 4 | backbone: "resnet50" 5 | base_size: 1024 # during augmentation, the shorter side will be resized to a value in [base_size*0.5, base_size*2.0] 6 | crop_size: 960 # at the end of augmentation, crop to crop_size for training 7 | 8 | ### 2.Optimizer 9 | optimizer: 10 | init_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | 14 | ### 3.Training 15 | train: 16 | specific_gpu_num: "1" # for example: "0", "1" or "0, 1" 17 | train_batch_size: 7 # adjust according to gpu resources 18 | valid_batch_size: 1 19 | cityscapes_root: "/home/datalab/ex_disk1/bulang/data/Segmentation/Cityscapes" 20 | epochs: 200 21 | log_iter: 10 # print log every log-iter 22 | val_epoch: 1 # run validation every val-epoch 23 | ckpt_dir: "./ckpt/" # ckpt and training log will be saved here 24 | 25 | ### 4.Test 26 | test: 27 | ckpt_path: "./ckpt/icnet_resnet50_197_0.710_best_model.pth" # set the pretrained model path correctly
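train.py and evaluate.py consume this file through PyYAML; a minimal sketch of how the values above are read (illustrative, mirroring the config loading shown in evaluate.py below):

import yaml

with open("./configs/icnet.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["model"]["backbone"])         # "resnet50"
print(cfg["train"]["train_batch_size"]) # 7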
-------------------------------------------------------------------------------- /dataset/.gitignore: -------------------------------------------------------------------------------- 1 | customization.py 2 | *.png -------------------------------------------------------------------------------- /dataset/__init__.py: -------------------------------------------------------------------------------- 1 | from .cityscapes import CityscapesDataset 2 | from .customization import CustomizedDataset 3 | 4 | -------------------------------------------------------------------------------- /dataset/cityscapes.py: -------------------------------------------------------------------------------- 1 | """Prepare Cityscapes dataset""" 2 | import os 3 | import torch 4 | import numpy as np 5 | 6 | from PIL import Image 7 | from torchvision import transforms 8 | from .segbase import SegmentationDataset 9 | 10 | class CityscapesDataset(SegmentationDataset): 11 | NUM_CLASS = 19 12 | IGNORE_INDEX = -1 13 | NAME = "cityscapes" 14 | 15 | # image transform 16 | """ 17 | transforms.ToTensor(): 18 | Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. 19 | Converts a PIL Image or numpy.ndarray (H x W x C) in the range 20 | [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]. 21 | """ 22 | input_transform = transforms.Compose([ 23 | transforms.ToTensor(), 24 | transforms.Normalize([.485, .456, .406], [.229, .224, .225])]) 25 | 26 | def __init__(self, root='./datasets/Cityscapes', split='train', base_size=1024, crop_size=720, mode=None, transform=input_transform): 27 | """ 28 | Parameters 29 | root : string 30 | Path to Cityscapes folder. Default is './datasets/Cityscapes' 31 | split: string 32 | 'train', 'val' or 'test' 33 | transform : callable, optional 34 | A function that transforms the image 35 | """ 36 | super(CityscapesDataset, self).__init__(root, split, mode, transform, base_size, crop_size) 37 | assert os.path.exists(self.root), "Error: data root path is wrong!" 38 | self.images, self.mask_paths = _get_city_pairs(self.root, self.split) 39 | assert (len(self.images) == len(self.mask_paths)) 40 | if len(self.images) == 0: 41 | raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") 42 | # valid pixel values (label ids) among [-1, 33] in _gtFine_labelIds.png 43 | self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 44 | 23, 24, 25, 26, 27, 28, 31, 32, 33] 45 | # reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py 46 | # train-class index assigned to each pixel value in [-1, 33] of _gtFine_labelIds.png 47 | self._key = np.array([-1, -1, -1, -1, -1, -1, 48 | -1, -1, 0, 1, -1, -1, 49 | 2, 3, 4, -1, -1, -1, 50 | 5, -1, 6, 7, 8, 9, 51 | 10, 11, 12, 13, 14, 15, 52 | -1, -1, 16, 17, 18]) 53 | # [-1, ..., 33] 54 | self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32') 55 | 56 | def _class_to_index(self, mask): 57 | # assert that every value in the mask is a known label id 58 | values = np.unique(mask) 59 | for value in values: 60 | assert (value in self._mapping) 61 | # index of each pixel value of the mask within _mapping 62 | index = np.digitize(mask.ravel(), self._mapping, right=True) 63 | # map those indices through _key to obtain the train-id mask 64 | return self._key[index].reshape(mask.shape) 65 | 66 | def __getitem__(self, index): 67 | img = Image.open(self.images[index]).convert('RGB') 68 | if self.mode == 'test': 69 | if self.transform is not None: 70 | img = self.transform(img) 71 | return img, os.path.basename(self.images[index]) 72 | mask = Image.open(self.mask_paths[index]) 73 | # synchronized transform 74 | if self.mode == 'train': 75 | img, mask = self._sync_transform(img, mask) 76 | elif self.mode == 'val': 77 | img, mask = self._val_sync_transform(img, mask) 78 | else: 79 | assert self.mode == 'testval' 80 | img, mask = self._img_transform(img), self._mask_transform(mask) 81 | # general normalize and toTensor 82 | if self.transform is not None: 83 | img = self.transform(img) 84 | return img, mask, os.path.basename(self.images[index]) 85 | 86 | # overrides the base class _mask_transform method 87 | def _mask_transform(self, mask): 88 | target = self._class_to_index(np.array(mask).astype('int32')) 89 | return torch.LongTensor(np.array(target).astype('int32')) 90 | 91 | def __len__(self): 92 | return len(self.images) 93 | 94 | @property 95 | def pred_offset(self): 96 | return 0 97 | 98 | """ 99 | Cityscapes folder structure: 100 | Cityscapes: 101 | - leftImg8bit 102 | - train 103 | - aachen 104 | - aachen_xxx_leftImg8bit.png 105 | - ... 106 | - .... 107 | - val 108 | - frankfurt 109 | - frankfurt_xxx_leftImg8bit.png 110 | - ... 111 | - ... 112 | - test 113 | - berlin 114 | - berlin_xxx_leftImg8bit.png 115 | - ... 116 | - ... 117 | - gtFine 118 | - train 119 | - aachen 120 | - aachen_xxx_gtFine_color.png 121 | - aachen_xxx_gtFine_labelIds.png 122 | - ... 123 | - .... 124 | - val 125 | - frankfurt 126 | - frankfurt_xxx_gtFine_color.png 127 | - frankfurt_xxx_gtFine_labelIds.png 128 | - ... 129 | - ... 130 | - test 131 | - berlin 132 | - berlin_xxx_gtFine_color.png 133 | - berlin_xxx_gtFine_labelIds.png 134 | - ... 135 | - ...
136 | - trainImages.txt 137 | - trainLabels.txt 138 | - valImages.txt 139 | - valLabels.txt 140 | - testImages.txt 141 | - testLabels.txt 142 | """ 143 | 144 | def _get_city_pairs(folder, split='train'): 145 | def get_path_pairs(img_folder, mask_folder): 146 | img_paths = [] 147 | mask_paths = [] 148 | for root, _, files in os.walk(img_folder): 149 | for filename in files: 150 | if filename.endswith('.png'): 151 | """ 152 | Example: 153 | root = "./Cityscapes/leftImg8bit/train/aachen" 154 | filename = "aachen_xxx_leftImg8bit.png" 155 | imgpath = "./Cityscapes/leftImg8bit/train/aachen/aachen_xxx_leftImg8bit.png" 156 | foldername = "aachen" 157 | maskname = "aachen_xxx_gtFine_labelIds.png" 158 | maskpath = "./Cityscapes/gtFine/train/aachen/aachen_xxx_gtFine_labelIds.png" 159 | """ 160 | imgpath = os.path.join(root, filename) 161 | foldername = os.path.basename(os.path.dirname(imgpath)) 162 | maskname = filename.replace('leftImg8bit', 'gtFine_labelIds') 163 | maskpath = os.path.join(mask_folder, foldername, maskname) 164 | if os.path.isfile(imgpath) and os.path.isfile(maskpath): 165 | img_paths.append(imgpath) 166 | mask_paths.append(maskpath) 167 | else: 168 | print('cannot find the mask or image:', imgpath, maskpath) 169 | print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) 170 | return img_paths, mask_paths 171 | 172 | if split in ('train', 'val'): 173 | # "./Cityscapes/leftImg8bit/train" or "./Cityscapes/leftImg8bit/val" 174 | img_folder = os.path.join(folder, 'leftImg8bit/' + split) 175 | # "./Cityscapes/gtFine/train" or "./Cityscapes/gtFine/val" 176 | mask_folder = os.path.join(folder, 'gtFine/' + split) 177 | # img_paths and mask_paths are kept in one-to-one order 178 | img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) 179 | return img_paths, mask_paths 180 | else: 181 | assert split == 'trainval' 182 | print('trainval set') 183 | train_img_folder = os.path.join(folder, 'leftImg8bit/train') 184 | train_mask_folder = os.path.join(folder, 'gtFine/train') 185 | val_img_folder = os.path.join(folder, 'leftImg8bit/val') 186 | val_mask_folder = os.path.join(folder, 'gtFine/val') 187 | train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder) 188 | val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder) 189 | img_paths = train_img_paths + val_img_paths 190 | mask_paths = train_mask_paths + val_mask_paths 191 | return img_paths, mask_paths 192 | 193 | 194 | if __name__ == '__main__': 195 | pass 196 |
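A minimal usage sketch for the dataset above (the root path is illustrative; in 'train' mode __getitem__ returns the normalized image tensor, the train-id mask, and the file name):

import torch.utils.data as data
from dataset.cityscapes import CityscapesDataset

train_set = CityscapesDataset(root='./datasets/Cityscapes', split='train',
                              base_size=1024, crop_size=960, mode='train')
train_loader = data.DataLoader(train_set, batch_size=7, shuffle=True)
img, mask, name = train_set[0]
print(img.shape, mask.shape)  # torch.Size([3, 960, 960]) torch.Size([960, 960])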
-------------------------------------------------------------------------------- /dataset/segbase.py: -------------------------------------------------------------------------------- 1 | """Base segmentation dataset""" 2 | import random 3 | import numpy as np 4 | 5 | from PIL import Image, ImageOps, ImageFilter 6 | 7 | __all__ = ['SegmentationDataset'] 8 | 9 | class SegmentationDataset(object): 10 | """Segmentation Base Dataset""" 11 | 12 | def __init__(self, root, split, mode, transform, base_size=1024, crop_size=720): 13 | """ 14 | root: string 15 | path to the dataset folder 16 | split: string 17 | 'train', 'val' or 'test' 18 | mode: string, optional 19 | 'train', 'val', 'test' or 'testval'; defaults to split 20 | transform: callable, optional 21 | A function that transforms the image 22 | base_size: 23 | the shorter side will be resized between [base_size*0.5, base_size*2.0] 24 | crop_size: 25 | final (square) crop size after augmentation 26 | """ 27 | super(SegmentationDataset, self).__init__() 28 | self.root = root 29 | self.transform = transform 30 | self.split = split 31 | self.mode = mode if mode is not None else split 32 | self.base_size = base_size 33 | self.crop_size = crop_size 34 | 35 | def _val_sync_transform(self, img, mask): 36 | """ 37 | outsize = self.crop_size 38 | short_size = outsize 39 | w, h = img.size 40 | if w > h: 41 | oh = short_size 42 | ow = int(1.0 * w * oh / h) 43 | else: 44 | ow = short_size 45 | oh = int(1.0 * h * ow / w) 46 | img = img.resize((ow, oh), Image.BILINEAR) 47 | mask = mask.resize((ow, oh), Image.NEAREST) 48 | # center crop 49 | w, h = img.size 50 | x1 = int(round((w - outsize) / 2.)) 51 | y1 = int(round((h - outsize) / 2.)) 52 | img = img.crop((x1, y1, x1 + outsize, y1 + outsize)) 53 | mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize)) 54 | """ 55 | # final transform 56 | img, mask = self._img_transform(img), self._mask_transform(mask) 57 | return img, mask 58 | 59 | def _sync_transform(self, img, mask): 60 | # random mirror 61 | if random.random() < 0.5: 62 | img = img.transpose(Image.FLIP_LEFT_RIGHT) 63 | mask = mask.transpose(Image.FLIP_LEFT_RIGHT) 64 | crop_size = self.crop_size 65 | # random scale (short edge) 66 | short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0)) 67 | w, h = img.size 68 | if h > w: 69 | ow = short_size 70 | oh = int(1.0 * h * ow / w) 71 | else: 72 | oh = short_size 73 | ow = int(1.0 * w * oh / h) 74 | img = img.resize((ow, oh), Image.BILINEAR) 75 | mask = mask.resize((ow, oh), Image.NEAREST) 76 | # pad crop 77 | if short_size < crop_size: 78 | padh = crop_size - oh if oh < crop_size else 0 79 | padw = crop_size - ow if ow < crop_size else 0 80 | img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0) 81 | mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0) 82 | # random crop crop_size 83 | w, h = img.size 84 | x1 = random.randint(0, w - crop_size) 85 | y1 = random.randint(0, h - crop_size) 86 | img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size)) 87 | mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size)) 88 | # gaussian blur as in PSP 89 | if random.random() < 0.5: 90 | img = img.filter(ImageFilter.GaussianBlur(radius=random.random())) 91 | # final transform 92 | img, mask = self._img_transform(img), self._mask_transform(mask) 93 | return img, mask 94 |
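# Note on _sync_transform above: with base_size=1024 the short edge is resized to a
# random length in [512, 2048]; whenever that length is smaller than crop_size, the
# image/mask pair is zero-padded on the right/bottom first, so the subsequent random
# crop_size x crop_size crop always stays inside the (padded) image.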
95 | """ 96 | PIL.Image.open 97 | for an RGB image: 98 | - after reading: (cols, rows, 3), RGB channel order; 99 | - after np.array(): (rows, cols, 3), i.e. (H x W x C) 100 | - after torchvision.transforms.ToTensor(): torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]. 101 | for a grayscale image: 102 | - after reading: (cols, rows); 103 | - after np.array(): (rows, cols) 104 | 105 | cv2.imread 106 | for an RGB image: 107 | - after reading: (rows, cols, 3) in BGR order, i.e. (H x W x C); already an np.array 108 | for a grayscale image: 109 | - after reading: (rows, cols); already an np.array 110 | """ 111 | def _img_transform(self, img): 112 | return np.array(img) 113 | 114 | def _mask_transform(self, mask): 115 | return np.array(mask).astype('int32') 116 | 117 | @property 118 | def num_class(self): 119 | """Number of categories.""" 120 | return self.NUM_CLASS 121 | 122 | @property 123 | def pred_offset(self): 124 | return 0 125 | -------------------------------------------------------------------------------- /demo/frankfurt_000001_057181_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/frankfurt_000001_057181_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/frankfurt_000001_057181_leftImg8bit_mIoU_0.680.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/frankfurt_000001_057181_leftImg8bit_mIoU_0.680.png -------------------------------------------------------------------------------- /demo/frankfurt_000001_057181_leftImg8bit_mIoU_0.716.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/frankfurt_000001_057181_leftImg8bit_mIoU_0.716.png -------------------------------------------------------------------------------- /demo/frankfurt_000001_057181_leftImg8bit_mIoU_0.727.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/frankfurt_000001_057181_leftImg8bit_mIoU_0.727.png -------------------------------------------------------------------------------- /demo/frankfurt_000001_057181_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/frankfurt_000001_057181_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/lindau_000005_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/lindau_000005_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/lindau_000005_000019_leftImg8bit_mIoU_0.657.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/lindau_000005_000019_leftImg8bit_mIoU_0.657.png -------------------------------------------------------------------------------- /demo/lindau_000005_000019_leftImg8bit_mIoU_0.700.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/lindau_000005_000019_leftImg8bit_mIoU_0.700.png
-------------------------------------------------------------------------------- /demo/lindau_000005_000019_leftImg8bit_mIoU_0.705.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/lindau_000005_000019_leftImg8bit_mIoU_0.705.png -------------------------------------------------------------------------------- /demo/lindau_000005_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/lindau_000005_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/munster_000061_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000061_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/munster_000061_000019_leftImg8bit_mIoU_0.672.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000061_000019_leftImg8bit_mIoU_0.672.png -------------------------------------------------------------------------------- /demo/munster_000061_000019_leftImg8bit_mIoU_0.692.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000061_000019_leftImg8bit_mIoU_0.692.png -------------------------------------------------------------------------------- /demo/munster_000061_000019_leftImg8bit_mIoU_0.704.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000061_000019_leftImg8bit_mIoU_0.704.png -------------------------------------------------------------------------------- /demo/munster_000061_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000061_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/munster_000075_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000075_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/munster_000075_000019_leftImg8bit_mIoU_0.672.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000075_000019_leftImg8bit_mIoU_0.672.png -------------------------------------------------------------------------------- /demo/munster_000075_000019_leftImg8bit_mIoU_0.690.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000075_000019_leftImg8bit_mIoU_0.690.png -------------------------------------------------------------------------------- /demo/munster_000075_000019_leftImg8bit_mIoU_0.703.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000075_000019_leftImg8bit_mIoU_0.703.png -------------------------------------------------------------------------------- /demo/munster_000075_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000075_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/munster_000106_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000106_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/munster_000106_000019_leftImg8bit_mIoU_0.672.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000106_000019_leftImg8bit_mIoU_0.672.png -------------------------------------------------------------------------------- /demo/munster_000106_000019_leftImg8bit_mIoU_0.690.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000106_000019_leftImg8bit_mIoU_0.690.png -------------------------------------------------------------------------------- /demo/munster_000106_000019_leftImg8bit_mIoU_0.703.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000106_000019_leftImg8bit_mIoU_0.703.png -------------------------------------------------------------------------------- /demo/munster_000106_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000106_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/munster_000121_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000121_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/munster_000121_000019_leftImg8bit_mIoU_0.660.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000121_000019_leftImg8bit_mIoU_0.660.png -------------------------------------------------------------------------------- /demo/munster_000121_000019_leftImg8bit_mIoU_0.678.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000121_000019_leftImg8bit_mIoU_0.678.png -------------------------------------------------------------------------------- /demo/munster_000121_000019_leftImg8bit_mIoU_0.694.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000121_000019_leftImg8bit_mIoU_0.694.png -------------------------------------------------------------------------------- /demo/munster_000121_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000121_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/munster_000124_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000124_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/munster_000124_000019_leftImg8bit_mIoU_0.660.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000124_000019_leftImg8bit_mIoU_0.660.png -------------------------------------------------------------------------------- /demo/munster_000124_000019_leftImg8bit_mIoU_0.695.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000124_000019_leftImg8bit_mIoU_0.695.png -------------------------------------------------------------------------------- /demo/munster_000124_000019_leftImg8bit_mIoU_0.696.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000124_000019_leftImg8bit_mIoU_0.696.png -------------------------------------------------------------------------------- /demo/munster_000124_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000124_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/munster_000150_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000150_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/munster_000150_000019_leftImg8bit_mIoU_0.660.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000150_000019_leftImg8bit_mIoU_0.660.png 
-------------------------------------------------------------------------------- /demo/munster_000150_000019_leftImg8bit_mIoU_0.695.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000150_000019_leftImg8bit_mIoU_0.695.png -------------------------------------------------------------------------------- /demo/munster_000150_000019_leftImg8bit_mIoU_0.696.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000150_000019_leftImg8bit_mIoU_0.696.png -------------------------------------------------------------------------------- /demo/munster_000150_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000150_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /demo/munster_000158_000019_leftImg8bit_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000158_000019_leftImg8bit_label.png -------------------------------------------------------------------------------- /demo/munster_000158_000019_leftImg8bit_mIoU_0.658.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000158_000019_leftImg8bit_mIoU_0.658.png -------------------------------------------------------------------------------- /demo/munster_000158_000019_leftImg8bit_mIoU_0.676.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000158_000019_leftImg8bit_mIoU_0.676.png -------------------------------------------------------------------------------- /demo/munster_000158_000019_leftImg8bit_mIoU_0.692.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000158_000019_leftImg8bit_mIoU_0.692.png -------------------------------------------------------------------------------- /demo/munster_000158_000019_leftImg8bit_src.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liminn/ICNet-pytorch/da394fc44f4fbaff1b47ab83ce7121a96f375b03/demo/munster_000158_000019_leftImg8bit_src.png -------------------------------------------------------------------------------- /evaluate.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import datetime 4 | import yaml 5 | import shutil 6 | import torch 7 | import numpy as np 8 | import torch.nn as nn 9 | import torch.utils.data as data 10 | 11 | from PIL import Image 12 | from torchvision import transforms 13 | from models import ICNet 14 | from dataset import CityscapesDataset 15 | from utils import ICNetLoss, IterationPolyLR, SegmentationMetric, SetupLogger, get_color_pallete 16 | 17 | class Evaluator(object): 18 | 
def __init__(self, cfg): 19 | self.cfg = cfg 20 | self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 21 | 22 | # get valid dataset images and targets 23 | self.image_paths, self.mask_paths = _get_city_pairs(cfg["train"]["cityscapes_root"], "val") 24 | 25 | # create network 26 | self.model = ICNet(nclass = 19, backbone='resnet50').to(self.device) 27 | 28 | # load ckpt 29 | pretrained_net = torch.load(cfg["test"]["ckpt_path"]) 30 | self.model.load_state_dict(pretrained_net) 31 | 32 | # evaluation metrics 33 | self.metric = SegmentationMetric(19) 34 | 35 | def eval(self): 36 | self.metric.reset() 37 | self.model.eval() 38 | model = self.model 39 | 40 | logger.info("Start validation, Total sample: {:d}".format(len(self.image_paths))) 41 | list_time = [] 42 | list_pixAcc = [] 43 | list_mIoU = [] 44 | 45 | for i in range(len(self.image_paths)): 46 | 47 | image = Image.open(self.image_paths[i]).convert('RGB') # image shape: (W,H,3) 48 | mask = Image.open(self.mask_paths[i]) # mask shape: (W,H) 49 | 50 | image = self._img_transform(image) # image shape: (3,H,W) [0,1] 51 | mask = self._mask_transform(mask) # mask shape: (H,W) 52 | 53 | image = image.to(self.device) 54 | mask = mask.to(self.device) 55 | 56 | image = torch.unsqueeze(image, 0) # image shape: (1,3,H,W) [0,1] 57 | 58 | with torch.no_grad(): 59 | start_time = time.time() 60 | outputs = model(image) 61 | end_time = time.time() 62 | step_time = end_time-start_time 63 | self.metric.update(outputs[0], mask) 64 | pixAcc, mIoU = self.metric.get() 65 | list_time.append(step_time) 66 | list_pixAcc.append(pixAcc) 67 | list_mIoU.append(mIoU) 68 | logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, time: {:.3f}s".format( 69 | i + 1, pixAcc * 100, mIoU * 100, step_time)) 70 | 71 | filename = os.path.basename(self.image_paths[i]) 72 | prefix = filename.split('.')[0] 73 | 74 | # save pred 75 | pred = torch.argmax(outputs[0], 1) 76 | pred = pred.cpu().data.numpy() 77 | pred = pred.squeeze(0) 78 | pred = get_color_pallete(pred, "citys") 79 | pred.save(os.path.join(outdir, prefix + "_mIoU_{:.3f}.png".format(mIoU))) 80 | 81 | # save image 82 | image = Image.open(self.image_paths[i]).convert('RGB') # image shape: (W,H,3) 83 | image.save(os.path.join(outdir, prefix + '_src.png')) 84 | 85 | # save target 86 | mask = Image.open(self.mask_paths[i]) # mask shape: (W,H) 87 | mask = self._class_to_index(np.array(mask).astype('int32')) 88 | mask = get_color_pallete(mask, "citys") 89 | mask.save(os.path.join(outdir, prefix + '_label.png')) 90 | 91 | average_pixAcc = sum(list_pixAcc)/len(list_pixAcc) 92 | average_mIoU = sum(list_mIoU)/len(list_mIoU) 93 | average_time = sum(list_time)/len(list_time) 94 | self.current_mIoU = average_mIoU 95 | logger.info("Evaluate: Average mIoU: {:.3f}, Average pixAcc: {:.3f}, Average time: {:.3f}".format(average_mIoU, average_pixAcc, average_time)) 96 | 97 | def _img_transform(self, image): 98 | image_transform = transforms.Compose([ 99 | transforms.ToTensor(), 100 | transforms.Normalize([.485, .456, .406], [.229, .224, .225])]) 101 | image = image_transform(image) 102 | return image 103 | 104 | def _mask_transform(self, mask): 105 | mask = self._class_to_index(np.array(mask).astype('int32')) 106 | return torch.LongTensor(np.array(mask).astype('int32')) 107 |
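# _class_to_index below duplicates the label-id -> train-id mapping from
# dataset/cityscapes.py: e.g. raw label id 7 ('road') maps to train id 0 and raw
# label id 26 ('car') maps to train id 13, while every id mapped to -1 in _key
# is ignored when scoring.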
108 | def _class_to_index(self, mask): 109 | # assert that every value in the mask is a known label id 110 | values = np.unique(mask) 111 | self._key = np.array([-1, -1, -1, -1, -1, -1, 112 | -1, -1, 0, 1, -1, -1, 113 | 2, 3, 4, -1, -1, -1, 114 | 5, -1, 6, 7, 8, 9, 115 | 10, 11, 12, 13, 14, 15, 116 | -1, -1, 16, 17, 18]) 117 | self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32') 118 | for value in values: 119 | assert (value in self._mapping) 120 | # index of each pixel value of the mask within _mapping 121 | index = np.digitize(mask.ravel(), self._mapping, right=True) 122 | # map those indices through _key to obtain the train-id mask 123 | return self._key[index].reshape(mask.shape) 124 | 125 | def _get_city_pairs(folder, split='train'): 126 | def get_path_pairs(img_folder, mask_folder): 127 | img_paths = [] 128 | mask_paths = [] 129 | for root, _, files in os.walk(img_folder): 130 | for filename in files: 131 | if filename.endswith('.png'): 132 | """ 133 | For example: 134 | root = "./Cityscapes/leftImg8bit/train/aachen" 135 | filename = "aachen_xxx_leftImg8bit.png" 136 | imgpath = "./Cityscapes/leftImg8bit/train/aachen/aachen_xxx_leftImg8bit.png" 137 | foldername = "aachen" 138 | maskname = "aachen_xxx_gtFine_labelIds.png" 139 | maskpath = "./Cityscapes/gtFine/train/aachen/aachen_xxx_gtFine_labelIds.png" 140 | """ 141 | imgpath = os.path.join(root, filename) 142 | foldername = os.path.basename(os.path.dirname(imgpath)) 143 | maskname = filename.replace('leftImg8bit', 'gtFine_labelIds') 144 | maskpath = os.path.join(mask_folder, foldername, maskname) 145 | if os.path.isfile(imgpath) and os.path.isfile(maskpath): 146 | img_paths.append(imgpath) 147 | mask_paths.append(maskpath) 148 | else: 149 | print('cannot find the mask or image:', imgpath, maskpath) 150 | print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) 151 | return img_paths, mask_paths 152 | 153 | if split in ('train', 'val'): 154 | # "./Cityscapes/leftImg8bit/train" or "./Cityscapes/leftImg8bit/val" 155 | img_folder = os.path.join(folder, 'leftImg8bit/' + split) 156 | # "./Cityscapes/gtFine/train" or "./Cityscapes/gtFine/val" 157 | mask_folder = os.path.join(folder, 'gtFine/' + split) 158 | # img_paths and mask_paths are kept in one-to-one order 159 | img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) 160 | return img_paths, mask_paths 161 | 162 | 163 | if __name__ == '__main__': 164 | # Set config file 165 | config_path = "./configs/icnet.yaml" 166 | with open(config_path, "r") as yaml_file: 167 | cfg = yaml.safe_load(yaml_file) 168 | #print(cfg) 169 | #print(cfg["model"]["backbone"]) 170 | print(cfg["train"]["specific_gpu_num"]) 171 | 172 | # Use specific GPU 173 | os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg["train"]["specific_gpu_num"]) 174 | num_gpus = len(cfg["train"]["specific_gpu_num"].split(',')) 175 | print("torch.cuda.is_available(): {}".format(torch.cuda.is_available())) 176 | print("torch.cuda.device_count(): {}".format(torch.cuda.device_count())) 177 | print("torch.cuda.current_device(): {}".format(torch.cuda.current_device())) 178 | 179 | outdir = os.path.join(cfg["train"]["ckpt_dir"], "evaluate_output") 180 | if not os.path.exists(outdir): 181 | os.makedirs(outdir) 182 | 183 | logger = SetupLogger(name = "semantic_segmentation", 184 | save_dir = cfg["train"]["ckpt_dir"], 185 | distributed_rank = 0, 186 | filename='{}_{}_evaluate_log.txt'.format(cfg["model"]["name"], cfg["model"]["backbone"])) 187 | 188 | evaluator = Evaluator(cfg) 189 | evaluator.eval() -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | from .icnet import ICNet -------------------------------------------------------------------------------- /models/base_models/__init__.py:
-------------------------------------------------------------------------------- 1 | from .resnetv1b import * 2 | -------------------------------------------------------------------------------- /models/base_models/resnetv1b.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.model_zoo as model_zoo 4 | 5 | __all__ = ['ResNetV1b', 'resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', 6 | 'resnet101_v1b', 'resnet152_v1b', 'resnet152_v1s', 'resnet101_v1s', 'resnet50_v1s'] 7 | 8 | model_urls = { 9 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 10 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 11 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 12 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 13 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 14 | } 15 | 16 | 17 | class BasicBlockV1b(nn.Module): 18 | expansion = 1 19 | 20 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, 21 | previous_dilation=1, norm_layer=nn.BatchNorm2d): 22 | super(BasicBlockV1b, self).__init__() 23 | self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, 24 | dilation, dilation, bias=False) 25 | self.bn1 = norm_layer(planes) 26 | self.relu = nn.ReLU(True) 27 | self.conv2 = nn.Conv2d(planes, planes, 3, 1, previous_dilation, 28 | dilation=previous_dilation, bias=False) 29 | self.bn2 = norm_layer(planes) 30 | self.downsample = downsample 31 | self.stride = stride 32 | 33 | def forward(self, x): 34 | identity = x 35 | 36 | out = self.conv1(x) 37 | out = self.bn1(out) 38 | out = self.relu(out) 39 | 40 | out = self.conv2(out) 41 | out = self.bn2(out) 42 | 43 | if self.downsample is not None: 44 | identity = self.downsample(x) 45 | 46 | out += identity 47 | out = self.relu(out) 48 | 49 | return out 50 | 51 | 52 | class BottleneckV1b(nn.Module): 53 | expansion = 4 54 | 55 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, 56 | previous_dilation=1, norm_layer=nn.BatchNorm2d): 57 | super(BottleneckV1b, self).__init__() 58 | self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) 59 | self.bn1 = norm_layer(planes) 60 | self.conv2 = nn.Conv2d(planes, planes, 3, stride, 61 | dilation, dilation, bias=False) 62 | self.bn2 = norm_layer(planes) 63 | self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) 64 | self.bn3 = norm_layer(planes * self.expansion) 65 | self.relu = nn.ReLU(True) 66 | self.downsample = downsample 67 | self.stride = stride 68 | 69 | def forward(self, x): 70 | identity = x 71 | 72 | out = self.conv1(x) 73 | out = self.bn1(out) 74 | out = self.relu(out) 75 | 76 | out = self.conv2(out) 77 | out = self.bn2(out) 78 | out = self.relu(out) 79 | 80 | out = self.conv3(out) 81 | out = self.bn3(out) 82 | 83 | if self.downsample is not None: 84 | identity = self.downsample(x) 85 | 86 | out += identity 87 | out = self.relu(out) 88 | 89 | return out 90 | 91 | 92 | class ResNetV1b(nn.Module): 93 | 94 | def __init__(self, block, layers, num_classes=1000, dilated=True, deep_stem=False, 95 | zero_init_residual=False, norm_layer=nn.BatchNorm2d): 96 | self.inplanes = 128 if deep_stem else 64 97 | super(ResNetV1b, self).__init__() 98 | if deep_stem: 99 | self.conv1 = nn.Sequential( 100 | nn.Conv2d(3, 64, 3, 2, 1, bias=False), 101 | norm_layer(64), 102 | nn.ReLU(True), 103 | nn.Conv2d(64, 64, 3, 1, 1, bias=False), 104 | norm_layer(64), 105 | 
nn.ReLU(True), 106 | nn.Conv2d(64, 128, 3, 1, 1, bias=False) 107 | ) 108 | else: 109 | self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False) 110 | self.bn1 = norm_layer(self.inplanes) 111 | self.relu = nn.ReLU(True) 112 | self.maxpool = nn.MaxPool2d(3, 2, 1) 113 | self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) 114 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) 115 | if dilated: 116 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer) 117 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer) 118 | else: 119 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) 120 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) 121 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 122 | self.fc = nn.Linear(512 * block.expansion, num_classes) 123 | 124 | for m in self.modules(): 125 | if isinstance(m, nn.Conv2d): 126 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 127 | elif isinstance(m, nn.BatchNorm2d): 128 | nn.init.constant_(m.weight, 1) 129 | nn.init.constant_(m.bias, 0) 130 | 131 | if zero_init_residual: 132 | for m in self.modules(): 133 | if isinstance(m, BottleneckV1b): 134 | nn.init.constant_(m.bn3.weight, 0) 135 | elif isinstance(m, BasicBlockV1b): 136 | nn.init.constant_(m.bn2.weight, 0) 137 | 138 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d): 139 | downsample = None 140 | if stride != 1 or self.inplanes != planes * block.expansion: 141 | downsample = nn.Sequential( 142 | nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False), 143 | norm_layer(planes * block.expansion), 144 | ) 145 | 146 | layers = [] 147 | if dilation in (1, 2): 148 | layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample, 149 | previous_dilation=dilation, norm_layer=norm_layer)) 150 | elif dilation == 4: 151 | layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample, 152 | previous_dilation=dilation, norm_layer=norm_layer)) 153 | else: 154 | raise RuntimeError("=> unknown dilation size: {}".format(dilation)) 155 | self.inplanes = planes * block.expansion 156 | for _ in range(1, blocks): 157 | layers.append(block(self.inplanes, planes, dilation=dilation, 158 | previous_dilation=dilation, norm_layer=norm_layer)) 159 | 160 | return nn.Sequential(*layers) 161 | 162 | def forward(self, x): 163 | x = self.conv1(x) 164 | x = self.bn1(x) 165 | x = self.relu(x) 166 | x = self.maxpool(x) 167 | 168 | x = self.layer1(x) 169 | x = self.layer2(x) 170 | x = self.layer3(x) 171 | x = self.layer4(x) 172 | 173 | x = self.avgpool(x) 174 | x = x.view(x.size(0), -1) 175 | x = self.fc(x) 176 | 177 | return x 178 | 179 | 180 | def resnet18_v1b(pretrained=False, **kwargs): 181 | model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], **kwargs) 182 | if pretrained: 183 | old_dict = model_zoo.load_url(model_urls['resnet18']) 184 | model_dict = model.state_dict() 185 | old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} 186 | model_dict.update(old_dict) 187 | model.load_state_dict(model_dict) 188 | return model 189 | 190 | 191 | def resnet34_v1b(pretrained=False, **kwargs): 192 | model = ResNetV1b(BasicBlockV1b, [3, 4, 6, 3], **kwargs) 193 | if pretrained: 194 | old_dict = model_zoo.load_url(model_urls['resnet34']) 195 | model_dict = model.state_dict() 196 | old_dict = 
{k: v for k, v in old_dict.items() if (k in model_dict)} 197 | model_dict.update(old_dict) 198 | model.load_state_dict(model_dict) 199 | return model 200 | 201 | 202 | def resnet50_v1b(pretrained=False, **kwargs): 203 | model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], **kwargs) 204 | if pretrained: 205 | old_dict = model_zoo.load_url(model_urls['resnet50']) 206 | model_dict = model.state_dict() 207 | old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} 208 | model_dict.update(old_dict) 209 | model.load_state_dict(model_dict) 210 | return model 211 | 212 | 213 | def resnet101_v1b(pretrained=False, **kwargs): 214 | model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], **kwargs) 215 | if pretrained: 216 | old_dict = model_zoo.load_url(model_urls['resnet101']) 217 | model_dict = model.state_dict() 218 | old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} 219 | model_dict.update(old_dict) 220 | model.load_state_dict(model_dict) 221 | return model 222 | 223 | 224 | def resnet152_v1b(pretrained=False, **kwargs): 225 | model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], **kwargs) 226 | if pretrained: 227 | old_dict = model_zoo.load_url(model_urls['resnet152']) 228 | model_dict = model.state_dict() 229 | old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} 230 | model_dict.update(old_dict) 231 | model.load_state_dict(model_dict) 232 | return model 233 | 234 | 235 | def resnet50_v1s(pretrained=False, root='~/.torch/models', **kwargs): 236 | model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, **kwargs) 237 | if pretrained: 238 | from ..model_store import get_resnet_file 239 | model.load_state_dict(torch.load(get_resnet_file('resnet50', root=root)), strict=False) 240 | return model 241 | 242 | 243 | def resnet101_v1s(pretrained=False, root='~/.torch/models', **kwargs): 244 | model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, **kwargs) 245 | if pretrained: 246 | from ..model_store import get_resnet_file 247 | model.load_state_dict(torch.load(get_resnet_file('resnet101', root=root)), strict=False) 248 | return model 249 | 250 | 251 | def resnet152_v1s(pretrained=False, root='~/.torch/models', **kwargs): 252 | model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], deep_stem=True, **kwargs) 253 | if pretrained: 254 | from ..model_store import get_resnet_file 255 | model.load_state_dict(torch.load(get_resnet_file('resnet152', root=root)), strict=False) 256 | return model 257 | 258 | 259 | if __name__ == '__main__': 260 | import torch 261 | 262 | img = torch.randn(4, 3, 224, 224) 263 | model = resnet50_v1b(True) 264 | output = model(img) 265 | -------------------------------------------------------------------------------- /models/icnet.py: -------------------------------------------------------------------------------- 1 | """Image Cascade Network""" 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | from .segbase import SegBaseModel 7 | from torchsummary import summary 8 | 9 | __all__ = ['ICNet', 'get_icnet', 'get_icnet_resnet50_citys', 10 | 'get_icnet_resnet101_citys', 'get_icnet_resnet152_citys'] 11 | 12 | class ICNet(SegBaseModel): 13 | """Image Cascade Network""" 14 | 15 | def __init__(self, nclass = 19, backbone='resnet50', pretrained_base=True): 16 | super(ICNet, self).__init__(nclass,backbone, pretrained_base=pretrained_base) 17 | self.conv_sub1 = nn.Sequential( 18 | _ConvBNReLU(3, 32, 3, 2), 19 | _ConvBNReLU(32, 32, 3, 2), 20 | _ConvBNReLU(32, 64, 3, 2) 21 | ) 22 | 23 | self.ppm = PyramidPoolingModule() 24 | 25 
| self.head = _ICHead(nclass) 26 | 27 | self.__setattr__('exclusive', ['conv_sub1', 'head']) 28 | 29 | def forward(self, x): 30 | # sub 1 31 | x_sub1 = self.conv_sub1(x) 32 | 33 | # sub 2 34 | x_sub2 = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=True) 35 | _, x_sub2, _, _ = self.base_forward(x_sub2) 36 | 37 | # sub 4 38 | x_sub4 = F.interpolate(x, scale_factor=0.25, mode='bilinear', align_corners=True) 39 | _, _, _, x_sub4 = self.base_forward(x_sub4) 40 | # add PyramidPoolingModule 41 | x_sub4 = self.ppm(x_sub4) 42 | 43 | outputs = self.head(x_sub1, x_sub2, x_sub4) 44 | 45 | return tuple(outputs) 46 | 47 | class PyramidPoolingModule(nn.Module): 48 | def __init__(self, pyramids=[1,2,3,6]): 49 | super(PyramidPoolingModule, self).__init__() 50 | self.pyramids = pyramids 51 | 52 | def forward(self, input): 53 | feat = input 54 | height, width = input.shape[2:] 55 | for bin_size in self.pyramids: 56 | x = F.adaptive_avg_pool2d(input, output_size=bin_size) 57 | x = F.interpolate(x, size=(height, width), mode='bilinear', align_corners=True) 58 | feat = feat + x 59 | return feat 60 | 61 | class _ICHead(nn.Module): 62 | def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): 63 | super(_ICHead, self).__init__() 64 | #self.cff_12 = CascadeFeatureFusion(512, 64, 128, nclass, norm_layer, **kwargs) 65 | self.cff_12 = CascadeFeatureFusion(128, 64, 128, nclass, norm_layer, **kwargs) 66 | self.cff_24 = CascadeFeatureFusion(2048, 512, 128, nclass, norm_layer, **kwargs) 67 | 68 | self.conv_cls = nn.Conv2d(128, nclass, 1, bias=False) 69 | 70 | def forward(self, x_sub1, x_sub2, x_sub4): 71 | outputs = list() 72 | x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2) 73 | outputs.append(x_24_cls) 74 | # x_cff_12, x_12_cls = self.cff_12(x_sub2, x_sub1) 75 | x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1) 76 | outputs.append(x_12_cls) 77 | 78 | up_x2 = F.interpolate(x_cff_12, scale_factor=2, mode='bilinear', align_corners=True) 79 | up_x2 = self.conv_cls(up_x2) 80 | outputs.append(up_x2) 81 | up_x8 = F.interpolate(up_x2, scale_factor=4, mode='bilinear', align_corners=True) 82 | outputs.append(up_x8) 83 | # 1 -> 1/4 -> 1/8 -> 1/16 84 | outputs.reverse() 85 | 86 | return outputs 87 | 88 | 89 | class _ConvBNReLU(nn.Module): 90 | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, 91 | groups=1, norm_layer=nn.BatchNorm2d, bias=False, **kwargs): 92 | super(_ConvBNReLU, self).__init__() 93 | self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) 94 | self.bn = norm_layer(out_channels) 95 | self.relu = nn.ReLU(True) 96 | 97 | def forward(self, x): 98 | x = self.conv(x) 99 | x = self.bn(x) 100 | x = self.relu(x) 101 | return x 102 | 103 | 104 | class CascadeFeatureFusion(nn.Module): 105 | """CFF Unit""" 106 | 107 | def __init__(self, low_channels, high_channels, out_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): 108 | super(CascadeFeatureFusion, self).__init__() 109 | self.conv_low = nn.Sequential( 110 | nn.Conv2d(low_channels, out_channels, 3, padding=2, dilation=2, bias=False), 111 | norm_layer(out_channels) 112 | ) 113 | self.conv_high = nn.Sequential( 114 | nn.Conv2d(high_channels, out_channels, 1, bias=False), 115 | norm_layer(out_channels) 116 | ) 117 | self.conv_low_cls = nn.Conv2d(out_channels, nclass, 1, bias=False) 118 | 119 | def forward(self, x_low, x_high): 120 | x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True) 121 | x_low = 
self.conv_low(x_low) 122 | x_high = self.conv_high(x_high) 123 | x = x_low + x_high 124 | x = F.relu(x, inplace=True) 125 | x_low_cls = self.conv_low_cls(x_low) 126 | 127 | return x, x_low_cls 128 | 129 | 130 | if __name__ == '__main__': 131 | # #img = torch.randn(1, 3, 256, 256) 132 | # model = get_icnet_resnet50_citys() 133 | # #outputs = model(img) 134 | 135 | # inputs = torch.randn(1, 3, 720, 720) 136 | # with torch.no_grad(): 137 | # outputs = model(inputs) 138 | # print(len(outputs)) # 4 139 | # print(outputs[0].size()) # torch.Size([1, 19, 200, 200]) 140 | # print(outputs[1].size()) # torch.Size([1, 19, 100, 100]) 141 | # print(outputs[2].size()) # torch.Size([1, 19, 50, 50]) 142 | # print(outputs[3].size()) # torch.Size([1, 19, 50, 50]) 143 | pass 144 | 145 | -------------------------------------------------------------------------------- /models/model_store.py: -------------------------------------------------------------------------------- 1 | """Model store which provides pretrained models.""" 2 | from __future__ import print_function 3 | 4 | import os 5 | import zipfile 6 | 7 | from utils.download import download, check_sha1 8 | 9 | __all__ = ['get_model_file', 'get_resnet_file'] 10 | 11 | _model_sha1 = {name: checksum for checksum, name in [ 12 | ('25c4b50959ef024fcc050213a06b614899f94b3d', 'resnet50'), 13 | ('2a57e44de9c853fa015b172309a1ee7e2d0e4e2a', 'resnet101'), 14 | ('0d43d698c66aceaa2bc0309f55efdd7ff4b143af', 'resnet152'), 15 | ]} 16 | 17 | encoding_repo_url = 'https://hangzh.s3.amazonaws.com/' 18 | _url_format = '{repo_url}encoding/models/{file_name}.zip' 19 | 20 | 21 | def short_hash(name): 22 | if name not in _model_sha1: 23 | raise ValueError('Pretrained model for {name} is not available.'.format(name=name)) 24 | return _model_sha1[name][:8] 25 | 26 | 27 | def get_resnet_file(name, root='~/.torch/models'): 28 | file_name = '{name}-{short_hash}'.format(name=name, short_hash=short_hash(name)) 29 | root = os.path.expanduser(root) 30 | 31 | file_path = os.path.join(root, file_name + '.pth') 32 | sha1_hash = _model_sha1[name] 33 | if os.path.exists(file_path): 34 | if check_sha1(file_path, sha1_hash): 35 | return file_path 36 | else: 37 | print(('Mismatch in the content of model file {} detected.' + 38 | ' Downloading again.').format(file_path)) 39 | else: 40 | print('Model file {} is not found. Downloading.'.format(file_path)) 41 | 42 | if not os.path.exists(root): 43 | os.makedirs(root) 44 | 45 | zip_file_path = os.path.join(root, file_name + '.zip') 46 | repo_url = os.environ.get('ENCODING_REPO', encoding_repo_url) 47 | if repo_url[-1] != '/': 48 | repo_url = repo_url + '/' 49 | download(_url_format.format(repo_url=repo_url, file_name=file_name), 50 | path=zip_file_path, 51 | overwrite=True) 52 | with zipfile.ZipFile(zip_file_path) as zf: 53 | zf.extractall(root) 54 | os.remove(zip_file_path) 55 | 56 | if check_sha1(file_path, sha1_hash): 57 | return file_path 58 | else: 59 | raise ValueError('Downloaded file has different hash. Please try again.') 60 | 61 | 62 | def get_model_file(name, root='~/.torch/models'): 63 | root = os.path.expanduser(root) 64 | file_path = os.path.join(root, name + '.pth') 65 | if os.path.exists(file_path): 66 | return file_path 67 | else: 68 | raise ValueError('Model file is not found.
 Please download or train it first.') 69 | -------------------------------------------------------------------------------- /models/segbase.py: -------------------------------------------------------------------------------- 1 | """Base Model for Semantic Segmentation""" 2 | import torch.nn as nn 3 | from .base_models.resnetv1b import resnet50_v1s, resnet101_v1s, resnet152_v1s 4 | 5 | __all__ = ['SegBaseModel'] 6 | 7 | class SegBaseModel(nn.Module): 8 | r"""Base Model for Semantic Segmentation 9 | 10 | Parameters 11 | ---------- 12 | backbone : string 13 | Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', 14 | 'resnet101' or 'resnet152'). 15 | """ 16 | 17 | def __init__(self, nclass, backbone='resnet50', pretrained_base=True, **kwargs): 18 | super(SegBaseModel, self).__init__() 19 | dilated = True 20 | self.nclass = nclass 21 | if backbone == 'resnet50': 22 | self.pretrained = resnet50_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) 23 | elif backbone == 'resnet101': 24 | self.pretrained = resnet101_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) 25 | elif backbone == 'resnet152': 26 | self.pretrained = resnet152_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) 27 | else: 28 | raise RuntimeError('unknown backbone: {}'.format(backbone)) 29 | 30 | def base_forward(self, x): 31 | """forwarding pre-trained network""" 32 | x = self.pretrained.conv1(x) 33 | x = self.pretrained.bn1(x) 34 | x = self.pretrained.relu(x) 35 | x = self.pretrained.maxpool(x) 36 | c1 = self.pretrained.layer1(x) 37 | c2 = self.pretrained.layer2(c1) 38 | c3 = self.pretrained.layer3(c2) 39 | c4 = self.pretrained.layer4(c3) 40 | 41 | return c1, c2, c3, c4 42 | 43 | def evaluate(self, x): 44 | """evaluate the network: return only the highest-resolution output""" 45 | return self.forward(x)[0] 46 | 47 | def demo(self, x): 48 | pred = self.forward(x) 49 | return pred 50 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.1.0 2 | torchsummary==1.5.1 3 | torchvision==0.3.0 4 | numpy==1.17.0 5 | Pillow==6.0.0 6 | PyYAML==5.1.2 7 | requests==2.22.0 8 | tqdm==4.38.0 9 | opencv-python 10 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import datetime 4 | import yaml 5 | import torch 6 | import torch.nn as nn 7 | import torch.utils.data as data 8 | 9 | from dataset import CityscapesDataset 10 | from models import ICNet 11 | from utils import ICNetLoss, IterationPolyLR, SegmentationMetric, SetupLogger 12 | 13 | class Trainer(object): 14 | def __init__(self, cfg): 15 | self.cfg = cfg 16 | self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 17 | self.dataparallel = torch.cuda.device_count() > 1 18 | 19 | # dataset and dataloader 20 | train_dataset = CityscapesDataset(root = cfg["train"]["cityscapes_root"], 21 | split='train', 22 | base_size=cfg["model"]["base_size"], 23 | crop_size=cfg["model"]["crop_size"]) 24 | val_dataset = CityscapesDataset(root = cfg["train"]["cityscapes_root"], 25 | split='val', 26 | base_size=cfg["model"]["base_size"], 27 | crop_size=cfg["model"]["crop_size"]) 28 | self.train_dataloader = data.DataLoader(dataset=train_dataset, 29 | batch_size=cfg["train"]["train_batch_size"], 30 | shuffle=True, 31 | num_workers=4, 32 | pin_memory=True, 33 | drop_last=False) 34 |
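# validation loader: shuffle=False keeps the Cityscapes order, so the filenames yielded per batch stay aligned with their predictions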
self.val_dataloader = data.DataLoader(dataset=val_dataset, 35 | batch_size=cfg["train"]["valid_batch_size"], 36 | shuffle=False, 37 | num_workers=4, 38 | pin_memory=True, 39 | drop_last=False) 40 | 41 | self.iters_per_epoch = len(self.train_dataloader) 42 | self.max_iters = cfg["train"]["epochs"] * self.iters_per_epoch 43 | 44 | # create network 45 | self.model = ICNet(nclass = train_dataset.NUM_CLASS, backbone='resnet50').to(self.device) 46 | 47 | # create criterion 48 | self.criterion = ICNetLoss(ignore_index=train_dataset.IGNORE_INDEX).to(self.device) 49 | 50 | # optimizer: parameter groups cover the pretrained backbone (base lr) and the 'exclusive' modules, i.e. conv_sub1 and head (10x lr) 51 | params_list = list() 52 | if hasattr(self.model, 'pretrained'): 53 | params_list.append({'params': self.model.pretrained.parameters(), 'lr': cfg["optimizer"]["init_lr"]}) 54 | if hasattr(self.model, 'exclusive'): 55 | for module in self.model.exclusive: 56 | params_list.append({'params': getattr(self.model, module).parameters(), 'lr': cfg["optimizer"]["init_lr"] * 10}) 57 | self.optimizer = torch.optim.SGD(params = params_list, 58 | lr = cfg["optimizer"]["init_lr"], 59 | momentum=cfg["optimizer"]["momentum"], 60 | weight_decay=cfg["optimizer"]["weight_decay"]) 61 | # self.optimizer = torch.optim.SGD(params = self.model.parameters(), 62 | # lr = cfg["optimizer"]["init_lr"], 63 | # momentum=cfg["optimizer"]["momentum"], 64 | # weight_decay=cfg["optimizer"]["weight_decay"]) 65 | 66 | # lr scheduler 67 | self.lr_scheduler = IterationPolyLR(self.optimizer, 68 | max_iters=self.max_iters, 69 | power=0.9) 70 | # dataparallel 71 | if self.dataparallel: 72 | self.model = nn.DataParallel(self.model) 73 | 74 | # evaluation metrics 75 | self.metric = SegmentationMetric(train_dataset.NUM_CLASS) 76 | 77 | self.current_mIoU = 0.0 78 | self.best_mIoU = 0.0 79 | 80 | self.epochs = cfg["train"]["epochs"] 81 | self.current_epoch = 0 82 | self.current_iteration = 0 83 | 84 | def train(self): 85 | epochs, max_iters = self.epochs, self.max_iters 86 | log_per_iters = self.cfg["train"]["log_iter"] 87 | val_per_iters = self.cfg["train"]["val_epoch"] * self.iters_per_epoch 88 | 89 | start_time = time.time() 90 | logger.info('Start training, Total Epochs: {:d}, Total Iterations: {:d}'.format(epochs, max_iters)) 91 | 92 | self.model.train() 93 | 94 | for _ in range(self.epochs): 95 | self.current_epoch += 1 96 | list_pixAcc = [] 97 | list_mIoU = [] 98 | list_loss = [] 99 | self.metric.reset() 100 | for i, (images, targets, _) in enumerate(self.train_dataloader): 101 | self.current_iteration += 1 102 | 103 | self.lr_scheduler.step()  # iteration-based poly decay 104 | 105 | images = images.to(self.device) 106 | targets = targets.to(self.device) 107 | 108 | outputs = self.model(images) 109 | loss = self.criterion(outputs, targets) 110 | 111 | self.metric.update(outputs[0], targets) 112 | pixAcc, mIoU = self.metric.get() 113 | list_pixAcc.append(pixAcc) 114 | list_mIoU.append(mIoU) 115 | list_loss.append(loss.item()) 116 | 117 | self.optimizer.zero_grad() 118 | loss.backward() 119 | self.optimizer.step() 120 | 121 | eta_seconds = ((time.time() - start_time) / self.current_iteration) * (max_iters - self.current_iteration) 122 | eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) 123 | 124 | if self.current_iteration % log_per_iters == 0: 125 | logger.info( 126 | "Epochs: {:d}/{:d} || Iters: {:d}/{:d} || Lr: {:.6f} || Loss: {:.4f} || mIoU: {:.4f} || Cost Time: {} || Estimated Time: {}".format( 127 | self.current_epoch, self.epochs, 128 | self.current_iteration, max_iters, 129 |
self.optimizer.param_groups[0]['lr'], 130 | loss.item(), 131 | mIoU, 132 | str(datetime.timedelta(seconds=int(time.time() - start_time))), 133 | eta_string)) 134 | 135 | average_pixAcc = sum(list_pixAcc)/len(list_pixAcc) 136 | average_mIoU = sum(list_mIoU)/len(list_mIoU) 137 | average_loss = sum(list_loss)/len(list_loss) 138 | logger.info("Epochs: {:d}/{:d}, Average loss: {:.3f}, Average mIoU: {:.3f}, Average pixAcc: {:.3f}".format(self.current_epoch, self.epochs, average_loss, average_mIoU, average_pixAcc)) 139 | 140 | if self.current_iteration % val_per_iters == 0: 141 | self.validation() 142 | self.model.train() 143 | 144 | total_training_time = time.time() - start_time 145 | total_training_str = str(datetime.timedelta(seconds=total_training_time)) 146 | logger.info( 147 | "Total training time: {} ({:.4f}s / it)".format( 148 | total_training_str, total_training_time / max_iters)) 149 | 150 | def validation(self): 151 | is_best = False 152 | self.metric.reset() 153 | if self.dataparallel: 154 | model = self.model.module 155 | else: 156 | model = self.model 157 | model.eval() 158 | list_pixAcc = [] 159 | list_mIoU = [] 160 | list_loss = [] 161 | for i, (image, targets, filename) in enumerate(self.val_dataloader): 162 | image = image.to(self.device) 163 | targets = targets.to(self.device) 164 | 165 | with torch.no_grad(): 166 | outputs = model(image) 167 | loss = self.criterion(outputs, targets) 168 | self.metric.update(outputs[0], targets) 169 | pixAcc, mIoU = self.metric.get() 170 | list_pixAcc.append(pixAcc) 171 | list_mIoU.append(mIoU) 172 | list_loss.append(loss.item()) 173 | 174 | average_pixAcc = sum(list_pixAcc)/len(list_pixAcc) 175 | average_mIoU = sum(list_mIoU)/len(list_mIoU) 176 | average_loss = sum(list_loss)/len(list_loss) 177 | self.current_mIoU = average_mIoU 178 | logger.info("Validation: Average loss: {:.3f}, Average mIoU: {:.3f}, Average pixAcc: {:.3f}".format(average_loss, average_mIoU, average_pixAcc)) 179 | 180 | if self.current_mIoU > self.best_mIoU: 181 | is_best = True 182 | self.best_mIoU = self.current_mIoU 183 | if is_best: 184 | save_checkpoint(self.model, self.cfg, self.current_epoch, is_best, self.current_mIoU, self.dataparallel) 185 | 186 | def save_checkpoint(model, cfg, epoch = 0, is_best=False, mIoU = 0.0, dataparallel = False): 187 | """Save Checkpoint""" 188 | directory = os.path.expanduser(cfg["train"]["ckpt_dir"]) 189 | if not os.path.exists(directory): 190 | os.makedirs(directory) 191 | filename = '{}_{}_{}_{:.3f}.pth'.format(cfg["model"]["name"], cfg["model"]["backbone"],epoch,mIoU) 192 | filename = os.path.join(directory, filename)  # NOTE: currently unused; only the best model is written below 193 | if dataparallel: 194 | model = model.module 195 | if is_best: 196 | best_filename = '{}_{}_{}_{:.3f}_best_model.pth'.format(cfg["model"]["name"], cfg["model"]["backbone"],epoch,mIoU) 197 | best_filename = os.path.join(directory, best_filename) 198 | torch.save(model.state_dict(), best_filename) 199 | 200 | 201 | if __name__ == '__main__': 202 | # Set config file 203 | config_path = "./configs/icnet.yaml" 204 | with open(config_path, "r") as yaml_file: 205 | cfg = yaml.safe_load(yaml_file) 206 | #print(cfg) 207 | #print(cfg["model"]["backbone"]) 208 | #print(cfg["train"]["specific_gpu_num"]) 209 | 210 | # Use specific GPU 211 | os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg["train"]["specific_gpu_num"]) 212 | num_gpus = len(cfg["train"]["specific_gpu_num"].split(',')) 213 | print("torch.cuda.is_available(): {}".format(torch.cuda.is_available())) 214 | print("torch.cuda.device_count():
{}".format(torch.cuda.device_count())) 215 | print("torch.cuda.current_device(): {}".format(torch.cuda.current_device())) 216 | 217 | # Set logger 218 | logger = SetupLogger(name = "semantic_segmentation", 219 | save_dir = cfg["train"]["ckpt_dir"], 220 | distributed_rank = 0, 221 | filename='{}_{}_log.txt'.format(cfg["model"]["name"], cfg["model"]["backbone"])) 222 | logger.info("Using {} GPUs".format(num_gpus)) 223 | logger.info("torch.cuda.is_available(): {}".format(torch.cuda.is_available())) 224 | logger.info("torch.cuda.device_count(): {}".format(torch.cuda.device_count())) 225 | logger.info("torch.cuda.current_device(): {}".format(torch.cuda.current_device())) 226 | logger.info(cfg) 227 | 228 | # Start train 229 | trainer = Trainer(cfg) 230 | trainer.train() 231 | 232 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utility functions.""" 2 | from .loss import ICNetLoss 3 | from .lr_scheduler import IterationPolyLR 4 | from .metric import SegmentationMetric 5 | from .logger import SetupLogger 6 | from .visualize import get_color_pallete -------------------------------------------------------------------------------- /utils/download.py: -------------------------------------------------------------------------------- 1 | """Download files with progress bar.""" 2 | import os 3 | import hashlib 4 | import requests 5 | from tqdm import tqdm 6 | 7 | def check_sha1(filename, sha1_hash): 8 | """Check whether the sha1 hash of the file content matches the expected hash. 9 | Parameters 10 | ---------- 11 | filename : str 12 | Path to the file. 13 | sha1_hash : str 14 | Expected sha1 hash in hexadecimal digits. 15 | Returns 16 | ------- 17 | bool 18 | Whether the file content matches the expected hash. 19 | """ 20 | sha1 = hashlib.sha1() 21 | with open(filename, 'rb') as f: 22 | while True: 23 | data = f.read(1048576) 24 | if not data: 25 | break 26 | sha1.update(data) 27 | 28 | sha1_file = sha1.hexdigest() 29 | l = min(len(sha1_file), len(sha1_hash)) 30 | return sha1.hexdigest()[0:l] == sha1_hash[0:l] 31 | 32 | def download(url, path=None, overwrite=False, sha1_hash=None): 33 | """Download an given URL 34 | Parameters 35 | ---------- 36 | url : str 37 | URL to download 38 | path : str, optional 39 | Destination path to store downloaded file. By default stores to the 40 | current directory with same name as in url. 41 | overwrite : bool, optional 42 | Whether to overwrite destination file if already exists. 43 | sha1_hash : str, optional 44 | Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified 45 | but doesn't match. 46 | Returns 47 | ------- 48 | str 49 | The file path of the downloaded file. 
50 | """ 51 | if path is None: 52 | fname = url.split('/')[-1] 53 | else: 54 | path = os.path.expanduser(path) 55 | if os.path.isdir(path): 56 | fname = os.path.join(path, url.split('/')[-1]) 57 | else: 58 | fname = path 59 | 60 | if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)): 61 | dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) 62 | if not os.path.exists(dirname): 63 | os.makedirs(dirname) 64 | 65 | print('Downloading %s from %s...'%(fname, url)) 66 | r = requests.get(url, stream=True) 67 | if r.status_code != 200: 68 | raise RuntimeError("Failed downloading url %s"%url) 69 | total_length = r.headers.get('content-length') 70 | with open(fname, 'wb') as f: 71 | if total_length is None: # no content length header 72 | for chunk in r.iter_content(chunk_size=1024): 73 | if chunk: # filter out keep-alive new chunks 74 | f.write(chunk) 75 | else: 76 | total_length = int(total_length) 77 | for chunk in tqdm(r.iter_content(chunk_size=1024), 78 | total=int(total_length / 1024. + 0.5), 79 | unit='KB', unit_scale=False, dynamic_ncols=True): 80 | f.write(chunk) 81 | 82 | if sha1_hash and not check_sha1(fname, sha1_hash): 83 | raise UserWarning('File {} is downloaded but the content hash does not match. ' \ 84 | 'The repo may be outdated or download may be incomplete. ' \ 85 | 'If the "repo_url" is overridden, consider switching to ' \ 86 | 'the default repo.'.format(fname)) 87 | 88 | return fname -------------------------------------------------------------------------------- /utils/logger.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | import logging 3 | import os 4 | import sys 5 | 6 | __all__ = ['SetupLogger'] 7 | 8 | # reference from: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/logger.py 9 | def SetupLogger(name, save_dir, distributed_rank, filename="log.txt", mode='w'): 10 | logger = logging.getLogger(name) 11 | logger.setLevel(logging.DEBUG) 12 | # don't log results for the non-master process 13 | if distributed_rank > 0: 14 | return logger 15 | ch = logging.StreamHandler(stream=sys.stdout) 16 | ch.setLevel(logging.DEBUG) 17 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 18 | ch.setFormatter(formatter) 19 | logger.addHandler(ch) 20 | 21 | if save_dir: 22 | if not os.path.exists(save_dir): 23 | os.makedirs(save_dir) 24 | fh = logging.FileHandler(os.path.join(save_dir, filename), mode=mode) # 'a+' for add, 'w' for overwrite 25 | fh.setLevel(logging.DEBUG) 26 | fh.setFormatter(formatter) 27 | logger.addHandler(fh) 28 | 29 | return logger 30 | -------------------------------------------------------------------------------- /utils/loss.py: -------------------------------------------------------------------------------- 1 | """Custom losses.""" 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | from torch.autograd import Variable 7 | 8 | __all__ = ['ICNetLoss'] 9 | 10 | # TODO: optim function 11 | class ICNetLoss(nn.CrossEntropyLoss): 12 | """Cross Entropy Loss for ICNet""" 13 | 14 | def __init__(self, aux_weight=0.4, ignore_index=-1): 15 | super(ICNetLoss, self).__init__(ignore_index=ignore_index) 16 | self.aux_weight = aux_weight 17 | 18 | def forward(self, *inputs): 19 | preds, target = tuple(inputs) 20 | inputs = tuple(list(preds) + [target]) 21 | 22 | pred, pred_sub4, pred_sub8, pred_sub16, 
target = tuple(inputs) 23 | # [batch, H, W] -> [batch, 1, H, W] 24 | target = target.unsqueeze(1).float() 25 | target_sub4 = F.interpolate(target, pred_sub4.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long() 26 | target_sub8 = F.interpolate(target, pred_sub8.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long() 27 | target_sub16 = F.interpolate(target, pred_sub16.size()[2:], mode='bilinear', align_corners=True).squeeze( 28 | 1).long() 29 | loss1 = super(ICNetLoss, self).forward(pred_sub4, target_sub4) 30 | loss2 = super(ICNetLoss, self).forward(pred_sub8, target_sub8) 31 | loss3 = super(ICNetLoss, self).forward(pred_sub16, target_sub16) 32 | #return dict(loss=loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight) 33 | return loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight 34 | 35 | -------------------------------------------------------------------------------- /utils/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | """Popular Learning Rate Schedulers""" 2 | from __future__ import division 3 | import math 4 | import torch 5 | 6 | from bisect import bisect_right 7 | 8 | __all__ = ['IterationPolyLR'] 9 | 10 | class IterationPolyLR(torch.optim.lr_scheduler._LRScheduler): 11 | def __init__(self, optimizer, target_lr=0, max_iters=0, power=0.9, last_epoch=-1): 12 | self.target_lr = target_lr 13 | self.max_iters = max_iters 14 | self.power = power 15 | super(IterationPolyLR, self).__init__(optimizer, last_epoch) 16 | 17 | def get_lr(self): 18 | N = self.max_iters 19 | T = self.last_epoch 20 | factor = pow(1 - T / N, self.power) 21 | # https://blog.csdn.net/mieleizhi0522/article/details/83113824 22 | return [self.target_lr + (base_lr - self.target_lr) * factor for base_lr in self.base_lrs] 23 | 24 | -------------------------------------------------------------------------------- /utils/metric.py: -------------------------------------------------------------------------------- 1 | """Evaluation Metrics for Semantic Segmentation""" 2 | import torch 3 | import numpy as np 4 | 5 | __all__ = ['SegmentationMetric', 'batch_pix_accuracy', 'batch_intersection_union', 6 | 'pixelAccuracy', 'intersectionAndUnion', 'hist_info', 'compute_score'] 7 | 8 | 9 | class SegmentationMetric(object): 10 | """Computes pixAcc and mIoU metric scores 11 | """ 12 | 13 | def __init__(self, nclass): 14 | super(SegmentationMetric, self).__init__() 15 | self.nclass = nclass 16 | self.reset() 17 | 18 | def update(self, preds, labels): 19 | """Updates the internal evaluation result. 20 | 21 | Parameters 22 | ---------- 23 | labels : 'NumpyArray' or list of `NumpyArray` 24 | The labels of the data. 25 | preds : 'NumpyArray' or list of `NumpyArray` 26 | Predicted values. 27 | """ 28 | 29 | def evaluate_worker(self, pred, label): 30 | correct, labeled = batch_pix_accuracy(pred, label) 31 | inter, union = batch_intersection_union(pred, label, self.nclass) 32 | 33 | self.total_correct += correct 34 | self.total_label += labeled 35 | if self.total_inter.device != inter.device: 36 | self.total_inter = self.total_inter.to(inter.device) 37 | self.total_union = self.total_union.to(union.device) 38 | self.total_inter += inter 39 | self.total_union += union 40 | 41 | if isinstance(preds, torch.Tensor): 42 | evaluate_worker(self, preds, labels) 43 | elif isinstance(preds, (list, tuple)): 44 | for (pred, label) in zip(preds, labels): 45 | evaluate_worker(self, pred, label) 46 | 47 | def get(self): 48 | """Gets the current evaluation result. 
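pixAcc is the overall fraction of correctly classified labeled pixels; mIoU averages per-class intersection-over-union over everything accumulated so far.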
49 | 50 | Returns 51 | ------- 52 | metrics : tuple of float 53 | pixAcc and mIoU 54 | """ 55 | pixAcc = 1.0 * self.total_correct / (2.220446049250313e-16 + self.total_label) # eps == np.spacing(1), inlined to avoid a numpy call 56 | IoU = 1.0 * self.total_inter / (2.220446049250313e-16 + self.total_union) 57 | mIoU = IoU.mean().item() 58 | return pixAcc, mIoU 59 | 60 | def reset(self): 61 | """Resets the internal evaluation result to initial state.""" 62 | self.total_inter = torch.zeros(self.nclass) 63 | self.total_union = torch.zeros(self.nclass) 64 | self.total_correct = 0 65 | self.total_label = 0 66 | 67 | 68 | # pytorch version 69 | def batch_pix_accuracy(output, target): 70 | """PixAcc""" 71 | # inputs are torch tensors: output is 4D (N, C, H, W) logits, target is 3D (N, H, W) labels 72 | predict = torch.argmax(output.long(), 1) + 1 73 | target = target.long() + 1 74 | 75 | pixel_labeled = torch.sum(target > 0).item() 76 | try: 77 | pixel_correct = torch.sum((predict == target) * (target > 0)).item() 78 | except Exception: 79 | print("predict size: {}, target size: {}".format(predict.size(), target.size())); raise 80 | assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled" 81 | return pixel_correct, pixel_labeled 82 | 83 | 84 | def batch_intersection_union(output, target, nclass): 85 | """mIoU""" 86 | # inputs are torch tensors: output is 4D (N, C, H, W) logits, target is 3D (N, H, W) labels 87 | mini = 1 88 | maxi = nclass 89 | nbins = nclass 90 | predict = torch.argmax(output, 1) + 1 # [N,H,W] 91 | target = target.float() + 1 # [N,H,W] 92 | 93 | predict = predict.float() * (target > 0).float() 94 | intersection = predict * (predict == target).float() 95 | # areas of intersection and union 96 | # zeros in intersection mark unlabeled or misclassified pixels; histc over [1, nclass] ignores them, which is the main difference from np.bincount (hence the +1 shift above) 97 | area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi) 98 | area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi) 99 | area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi) 100 | area_union = area_pred + area_lab - area_inter 101 | assert torch.sum(area_inter > area_union).item() == 0, "Intersection area should be smaller than Union area" 102 | return area_inter.float(), area_union.float() 103 | 104 | 105 | def pixelAccuracy(imPred, imLab): 106 | """ 107 | This function takes the prediction and label of a single image, returns pixel-wise accuracy 108 | To compute over many images do: 109 | for i in range(Nimages): 110 | (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = \ 111 | pixelAccuracy(imPred[i], imLab[i]) 112 | mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled)) 113 | """ 114 | # Remove classes from unlabeled pixels in gt image. 115 | # We should not penalize detections in unlabeled portions of the image. 116 | pixel_labeled = np.sum(imLab >= 0) 117 | pixel_correct = np.sum((imPred == imLab) * (imLab >= 0)) 118 | pixel_accuracy = 1.0 * pixel_correct / pixel_labeled 119 | return (pixel_accuracy, pixel_correct, pixel_labeled) 120 | 121 | 122 | def intersectionAndUnion(imPred, imLab, numClass): 123 | """ 124 | This function takes the prediction and label of a single image, 125 | returns intersection and union areas for each class 126 | To compute over many images do: 127 | for i in range(Nimages): 128 | (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i]) 129 | IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1) 130 | """ 131 | # Remove classes from unlabeled pixels in gt image.
132 | # We should not penalize detections in unlabeled portions of the image. 133 | imPred = imPred * (imLab >= 0) 134 | 135 | # Compute area intersection: 136 | intersection = imPred * (imPred == imLab) 137 | (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, numClass)) 138 | 139 | # Compute area union: 140 | (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass)) 141 | (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass)) 142 | area_union = area_pred + area_lab - area_intersection 143 | return (area_intersection, area_union) 144 | 145 | 146 | def hist_info(pred, label, num_cls): 147 | assert pred.shape == label.shape 148 | k = (label >= 0) & (label < num_cls) 149 | labeled = np.sum(k) 150 | correct = np.sum((pred[k] == label[k])) 151 | 152 | return np.bincount(num_cls * label[k].astype(int) + pred[k], minlength=num_cls ** 2).reshape(num_cls, 153 | num_cls), labeled, correct 154 | 155 | 156 | def compute_score(hist, correct, labeled): 157 | iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) 158 | mean_IU = np.nanmean(iu) 159 | mean_IU_no_back = np.nanmean(iu[1:]) 160 | freq = hist.sum(1) / hist.sum() 161 | freq_IU = (iu[freq > 0] * freq[freq > 0]).sum() 162 | mean_pixel_acc = correct / labeled 163 | 164 | return iu, mean_IU, mean_IU_no_back, mean_pixel_acc 165 | -------------------------------------------------------------------------------- /utils/visualize.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from PIL import Image 4 | 5 | __all__ = ['get_color_pallete', 'print_iou', 'set_img_color', 6 | 'show_prediction', 'show_colorful_images', 'save_colorful_images'] 7 | 8 | 9 | def print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False): 10 | n = iu.size 11 | lines = [] 12 | for i in range(n): 13 | if class_names is None: 14 | cls = 'Class %d:' % (i + 1) 15 | else: 16 | cls = '%d %s' % (i + 1, class_names[i]) 17 | # lines.append('%-8s: %.3f%%' % (cls, iu[i] * 100)) 18 | mean_IU = np.nanmean(iu) 19 | mean_IU_no_back = np.nanmean(iu[1:]) 20 | if show_no_back: 21 | lines.append('mean_IU: %.3f%% || mean_IU_no_back: %.3f%% || mean_pixel_acc: %.3f%%' % ( 22 | mean_IU * 100, mean_IU_no_back * 100, mean_pixel_acc * 100)) 23 | else: 24 | lines.append('mean_IU: %.3f%% || mean_pixel_acc: %.3f%%' % (mean_IU * 100, mean_pixel_acc * 100)) 25 | lines.append('=================================================') 26 | line = "\n".join(lines) 27 | 28 | print(line) 29 | 30 | 31 | def set_img_color(img, label, colors, background=0, show255=False): 32 | for i in range(len(colors)): 33 | if i != background: 34 | img[np.where(label == i)] = colors[i] 35 | if show255: 36 | img[np.where(label == 255)] = 255 37 | 38 | return img 39 | 40 | 41 | def show_prediction(img, pred, colors, background=0): 42 | im = np.array(img, np.uint8) 43 | set_img_color(im, pred, colors, background) 44 | out = np.array(im) 45 | 46 | return out 47 | 48 | 49 | def show_colorful_images(prediction, palettes): 50 | im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()]) 51 | im.show() 52 | 53 | 54 | def save_colorful_images(prediction, filename, output_dir, palettes): 55 | ''' 56 | :param prediction: [B, H, W, C] 57 | ''' 58 | im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()]) 59 | fn = os.path.join(output_dir, filename) 60 | out_dir = os.path.split(fn)[0] 61 | if not os.path.exists(out_dir): 62 | os.mkdir(out_dir) 63 | im.save(fn) 64 | 65 | 66 | def 
get_color_pallete(npimg, dataset='pascal_voc'): 67 | """Visualize image. 68 | 69 | Parameters 70 | ---------- 71 | npimg : numpy.ndarray 72 | Single channel image with shape `H, W, 1`. 73 | dataset : str, default: 'pascal_voc' 74 | The dataset that model pretrained on. ('pascal_voc', 'ade20k') 75 | Returns 76 | ------- 77 | out_img : PIL.Image 78 | Image with color pallete 79 | """ 80 | # recovery boundary 81 | if dataset in ('pascal_voc', 'pascal_aug'): 82 | npimg[npimg == -1] = 255 83 | # put colormap 84 | if dataset == 'ade20k': 85 | npimg = npimg + 1 86 | out_img = Image.fromarray(npimg.astype('uint8')) 87 | out_img.putpalette(adepallete) 88 | return out_img 89 | elif dataset == 'citys': 90 | out_img = Image.fromarray(npimg.astype('uint8')) 91 | out_img.putpalette(cityspallete) 92 | return out_img 93 | out_img = Image.fromarray(npimg.astype('uint8')) 94 | out_img.putpalette(vocpallete) 95 | return out_img 96 | 97 | 98 | def _getvocpallete(num_cls): 99 | n = num_cls 100 | pallete = [0] * (n * 3) 101 | for j in range(0, n): 102 | lab = j 103 | pallete[j * 3 + 0] = 0 104 | pallete[j * 3 + 1] = 0 105 | pallete[j * 3 + 2] = 0 106 | i = 0 107 | while (lab > 0): 108 | pallete[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) 109 | pallete[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) 110 | pallete[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) 111 | i = i + 1 112 | lab >>= 3 113 | return pallete 114 | 115 | 116 | vocpallete = _getvocpallete(256) 117 | 118 | adepallete = [ 119 | 0, 0, 0, 120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50, 4, 200, 3, 120, 120, 80, 140, 140, 140, 204, 120 | 5, 255, 230, 230, 230, 4, 250, 7, 224, 5, 255, 235, 255, 7, 150, 5, 61, 120, 120, 70, 8, 255, 51, 255, 6, 82, 121 | 143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3, 0, 102, 200, 61, 230, 250, 255, 6, 51, 11, 102, 255, 255, 122 | 7, 71, 255, 9, 224, 9, 7, 230, 220, 220, 220, 255, 9, 92, 112, 9, 255, 8, 255, 214, 7, 255, 224, 255, 184, 6, 123 | 10, 255, 71, 255, 41, 10, 7, 255, 255, 224, 255, 8, 102, 8, 255, 255, 61, 6, 255, 194, 7, 255, 122, 8, 0, 255, 124 | 20, 255, 8, 41, 255, 5, 153, 6, 51, 255, 235, 12, 255, 160, 150, 20, 0, 163, 255, 140, 140, 140, 250, 10, 15, 125 | 20, 255, 0, 31, 255, 0, 255, 31, 0, 255, 224, 0, 153, 255, 0, 0, 0, 255, 255, 71, 0, 0, 235, 255, 0, 173, 255, 126 | 31, 0, 255, 11, 200, 200, 255, 82, 0, 0, 255, 245, 0, 61, 255, 0, 255, 112, 0, 255, 133, 255, 0, 0, 255, 163, 127 | 0, 255, 102, 0, 194, 255, 0, 0, 143, 255, 51, 255, 0, 0, 82, 255, 0, 255, 41, 0, 255, 173, 10, 0, 255, 173, 255, 128 | 0, 0, 255, 153, 255, 92, 0, 255, 0, 255, 255, 0, 245, 255, 0, 102, 255, 173, 0, 255, 0, 20, 255, 184, 184, 0, 129 | 31, 255, 0, 255, 61, 0, 71, 255, 255, 0, 204, 0, 255, 194, 0, 255, 82, 0, 10, 255, 0, 112, 255, 51, 0, 255, 0, 130 | 194, 255, 0, 122, 255, 0, 255, 163, 255, 153, 0, 0, 255, 10, 255, 112, 0, 143, 255, 0, 82, 0, 255, 163, 255, 131 | 0, 255, 235, 0, 8, 184, 170, 133, 0, 255, 0, 255, 92, 184, 0, 255, 255, 0, 31, 0, 184, 255, 0, 214, 255, 255, 132 | 0, 112, 92, 255, 0, 0, 224, 255, 112, 224, 255, 70, 184, 160, 163, 0, 255, 153, 0, 255, 71, 255, 0, 255, 0, 133 | 163, 255, 204, 0, 255, 0, 143, 0, 255, 235, 133, 255, 0, 255, 0, 235, 245, 0, 255, 255, 0, 122, 255, 245, 0, 134 | 10, 190, 212, 214, 255, 0, 0, 204, 255, 20, 0, 255, 255, 255, 0, 0, 153, 255, 0, 41, 255, 0, 255, 204, 41, 0, 135 | 255, 41, 255, 0, 173, 0, 255, 0, 245, 255, 71, 0, 255, 122, 0, 255, 0, 255, 184, 0, 92, 255, 184, 255, 0, 0, 136 | 133, 255, 255, 214, 0, 25, 194, 194, 102, 255, 0, 92, 0, 255] 137 | 138 | cityspallete = [ 
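# one (R, G, B) triplet per Cityscapes train id; index 0 = road (128, 64, 128), 1 = sidewalk, 2 = building, ...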
139 | 128, 64, 128, 140 | 244, 35, 232, 141 | 70, 70, 70, 142 | 102, 102, 156, 143 | 190, 153, 153, 144 | 153, 153, 153, 145 | 250, 170, 30, 146 | 220, 220, 0, 147 | 107, 142, 35, 148 | 152, 251, 152, 149 | 0, 130, 180, 150 | 220, 20, 60, 151 | 255, 0, 0, 152 | 0, 0, 142, 153 | 0, 0, 70, 154 | 0, 60, 100, 155 | 0, 80, 100, 156 | 0, 0, 230, 157 | 119, 11, 32, 158 | ] 159 | --------------------------------------------------------------------------------
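Putting the pieces together, here is a minimal inference sketch built only from the modules above (the checkpoint path and the random input tensor are hypothetical stand-ins; a real run would load a preprocessed Cityscapes image):

import torch
from models import ICNet
from utils import get_color_pallete

# build ICNet without fetching the pretrained backbone (assumption: weights come from a local checkpoint instead)
model = ICNet(nclass=19, backbone='resnet50', pretrained_base=False)
state_dict = torch.load('./ckpt/icnet_resnet50_example_best_model.pth', map_location='cpu')  # hypothetical file
model.load_state_dict(state_dict)
model.eval()

image = torch.randn(1, 3, 720, 720)  # stand-in for a normalized (N, 3, H, W) Cityscapes crop
with torch.no_grad():
    outputs = model(image)  # tuple of multi-scale logits; outputs[0] is the highest-resolution map
pred = torch.argmax(outputs[0], 1).squeeze(0).cpu().numpy()
mask = get_color_pallete(pred, dataset='citys')  # PIL image colorized with cityspallete
mask.save('prediction_color.png')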