├── .idea
│   └── vcs.xml
├── LICENSE
├── README.md
├── albu-solution
│   ├── Dockerfile
│   ├── README.md
│   ├── docker-build.sh
│   ├── docker-remove.sh
│   ├── docker-run.sh
│   ├── docker-stop.sh
│   ├── requirements.txt
│   ├── src
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── augmentations
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-36.pyc
│   │   │   │   ├── composition.cpython-36.pyc
│   │   │   │   ├── functional.cpython-36.pyc
│   │   │   │   └── transforms.cpython-36.pyc
│   │   │   ├── composition.py
│   │   │   ├── functional.py
│   │   │   └── transforms.py
│   │   ├── config.py
│   │   ├── create_spacenet_masks.py
│   │   ├── dataset
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-35.pyc
│   │   │   │   ├── __init__.cpython-36.pyc
│   │   │   │   ├── abstract_image_provider.cpython-35.pyc
│   │   │   │   ├── abstract_image_provider.cpython-36.pyc
│   │   │   │   ├── abstract_image_type.cpython-35.pyc
│   │   │   │   ├── abstract_image_type.cpython-36.pyc
│   │   │   │   ├── image_cropper.cpython-35.pyc
│   │   │   │   ├── image_cropper.cpython-36.pyc
│   │   │   │   ├── neural_dataset.cpython-35.pyc
│   │   │   │   ├── neural_dataset.cpython-36.pyc
│   │   │   │   ├── raw_image.cpython-35.pyc
│   │   │   │   ├── raw_image.cpython-36.pyc
│   │   │   │   ├── reading_image_provider.cpython-35.pyc
│   │   │   │   └── reading_image_provider.cpython-36.pyc
│   │   │   ├── abstract_image_provider.py
│   │   │   ├── abstract_image_type.py
│   │   │   ├── image_cropper.py
│   │   │   ├── neural_dataset.py
│   │   │   ├── raw_image.py
│   │   │   └── reading_image_provider.py
│   │   ├── folds4.csv
│   │   ├── merge_preds.py
│   │   ├── other_tools
│   │   │   ├── __init__.py
│   │   │   ├── all_dems_min_max.py
│   │   │   ├── apls_tools.py
│   │   │   ├── gen_folds.py
│   │   │   ├── make_submission.py
│   │   │   ├── merge_preds.py
│   │   │   ├── out.txt
│   │   │   └── sknw.py
│   │   ├── pytorch_utils
│   │   │   ├── __init__.py
│   │   │   ├── callbacks.py
│   │   │   ├── concrete_eval.py
│   │   │   ├── eval.py
│   │   │   ├── loss.py
│   │   │   ├── train.py
│   │   │   └── transforms.py
│   │   ├── pytorch_zoo
│   │   │   ├── __init__.py
│   │   │   ├── abstract_model.py
│   │   │   ├── inception.py
│   │   │   ├── resnet.py
│   │   │   └── unet.py
│   │   ├── resnet34_512_02_02.json
│   │   ├── run_training.sh
│   │   ├── skeleton.py
│   │   ├── train.sh
│   │   ├── train_4folds.sh
│   │   ├── train_eval.py
│   │   └── utils.py
│   ├── test.sh
│   ├── train.sh
│   ├── train_single.sh
│   └── weights
│       └── downloadModel.sh
├── cannab-solution
│   ├── Dockerfile
│   ├── README.md
│   ├── create_masks.py
│   ├── create_submission.py
│   ├── docker-build.sh
│   ├── docker-remove.sh
│   ├── docker-run.sh
│   ├── docker-stop.sh
│   ├── docker_cannab.zip
│   ├── download_models.sh
│   ├── inception_resnet_v2_padding_same.py
│   ├── inception_v3_padding_same.py
│   ├── linknet.py
│   ├── models.py
│   ├── predict_inception_520.py
│   ├── predict_inception_small.py
│   ├── predict_inception_smallest.py
│   ├── predict_inception_v3_520.py
│   ├── predict_linknet.py
│   ├── predict_linknet_520.py
│   ├── predict_linknet_small.py
│   ├── predict_resnet_small.py
│   ├── predict_resnet_smallest.py
│   ├── predict_vgg.py
│   ├── predict_vgg_small.py
│   ├── predict_vgg_smallest.py
│   ├── resnet50_padding_same.py
│   ├── test.sh
│   ├── train.sh
│   ├── train_fix.sh
│   ├── train_inc_v2_unet_520.py
│   ├── train_inception3_unet_520.py
│   ├── train_inception_city_small.py
│   ├── train_inception_unet_smallest.py
│   ├── train_inception_unet_smallest_fixed.py
│   ├── train_linknet_520.py
│   ├── train_linknet_city_big.py
│   ├── train_linknet_city_small.py
│   ├── train_patch_cannab.zip
│   ├── train_resnet_linknet_city_small.py
│   ├── train_resnet_unet_smallest.py
│   ├── train_vgg.py
│   ├── train_vgg2_city_small.py
│   ├── train_vgg_unet_smallest.py
│   ├── tune_vgg_city.py
│   └── tune_vgg_city_fixed.py
├── fbastani-solution
│   ├── 1_convertgraphs.go
│   ├── 2_truth_tiles.go
│   ├── Dockerfile
│   ├── README.md
│   ├── common
│   │   ├── bresenham.go
│   │   ├── bresenham_test.go
│   │   ├── geom.go
│   │   ├── geom_test.go
│   │   ├── graph.go
│   │   ├── graph_algo.go
│   │   ├── graph_index.go
│   │   ├── graph_index_test.go
│   │   ├── graph_read.go
│   │   ├── graph_rtree.go
│   │   ├── graph_test.go
│   │   ├── kde.go
│   │   ├── osm.go
│   │   ├── svg.go
│   │   ├── trace.go
│   │   ├── trace_io.go
│   │   ├── viterbi.go
│   │   └── viterbi2.go
│   ├── do_the_training.py
│   ├── download_models.sh
│   ├── graphextract
│   │   ├── __init__.py
│   │   ├── bounding_boxes
│   │   │   └── spacenet.txt
│   │   ├── clean.py
│   │   ├── discoverlib
│   │   │   ├── __init__.py
│   │   │   ├── geom.py
│   │   │   └── graph.py
│   │   ├── graph_extract_good.py
│   │   ├── map2go.py
│   │   ├── pylibs
│   │   │   ├── __init__.py
│   │   │   ├── mathfunclib.py
│   │   │   ├── spatialfunclib.py
│   │   │   └── spatialfunclib_accel.pyx
│   │   ├── skeleton.py
│   │   └── subiterations.pyx
│   ├── model4u.py
│   ├── model4u_big.py
│   ├── prep.sh
│   ├── run_lib.py
│   ├── run_test.py
│   ├── run_train.py
│   ├── skeleton.py
│   ├── test.sh
│   └── train.sh
├── pfr-solution
│   ├── Dockerfile
│   ├── README.md
│   ├── code
│   │   ├── cleanup_directories.sh
│   │   ├── do_unpack.py
│   │   ├── predict.py
│   │   ├── provision.sh
│   │   ├── pytorch_dpn
│   │   │   ├── LICENSE
│   │   │   ├── README.md
│   │   │   ├── adaptive_avgmax_pool.py
│   │   │   ├── convert_from_mxnet.py
│   │   │   ├── dataset.py
│   │   │   ├── dpn.py
│   │   │   ├── inference.py
│   │   │   ├── model_factory.py
│   │   │   └── validate.py
│   │   ├── pytorch_utils.py
│   │   ├── rd.py
│   │   ├── setup_directories.sh
│   │   ├── train.py
│   │   └── vectorize.py
│   ├── model
│   │   ├── download_models.sh
│   │   ├── model01.yaml
│   │   ├── model02.yaml
│   │   ├── model03.yaml
│   │   ├── model04.yaml
│   │   ├── model05.yaml
│   │   ├── model06.yaml
│   │   ├── model07.yaml
│   │   ├── model08.yaml
│   │   └── model09.yaml
│   ├── param
│   │   └── adjust_rgb_v1.csv
│   ├── test.sh
│   └── train.sh
└── selim_sef-solution
    ├── Dockerfile
    ├── README.md
    ├── calculate_stats.py
    ├── datasets
    │   ├── __init__.py
    │   └── spacenet.py
    ├── docker-build.sh
    ├── docker-remove.sh
    ├── docker-run.sh
    ├── docker-stop.sh
    ├── download_models.sh
    ├── generate_submission.py
    ├── inceptionv3_padding.py
    ├── inceptionv3_padding_swish.py
    ├── linknet.py
    ├── losses.py
    ├── model_name_encoder.py
    ├── models.py
    ├── params.py
    ├── predict_all.py
    ├── preprocess_clahe.py
    ├── resnet50_padding.py
    ├── test.sh
    ├── tools
    │   ├── __init__.py
    │   ├── clr.py
    │   ├── metrics.py
    │   ├── mul_img_utils.py
    │   ├── stats.py
    │   ├── tiling.py
    │   └── vectorize.py
    ├── train.py
    ├── train.sh
    └── transormer.py
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="VcsDirectoryMappings">
4 |     <mapping directory="$PROJECT_DIR$" vcs="Git" />
5 |   </component>
6 | </project>
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2018 CosmiQ Works, An In-Q-Tel Lab
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## SpaceNet Road Detection and Routing Challenge Solutions
2 | For more information about the SpaceNet Challenge, visit the [SpaceNet Challenge Website](https://spacenetchallenge.github.io/).
3 |
4 | ### [About the Dataset](https://medium.com/the-downlinq/introducing-the-spacenet-road-detection-and-routing-challenge-and-dataset-7604de39b779)
5 | ### [The Average Path Length Similarity (APLS) Metric, Part 1](https://medium.com/the-downlinq/spacenet-road-detection-and-routing-challenge-part-i-d4f59d55bfce)
6 | ### [The Average Path Length Similarity (APLS) Metric, Part 2](https://medium.com/the-downlinq/spacenet-road-detection-and-routing-challenge-part-ii-apls-implementation-92acd86f4094)
7 |
8 |
9 | ### Competition-specific information:
10 | #### [SpaceNet Challenges hosted on TopCoder](http://crowdsourcing.topcoder.com/spacenet)
11 | #### [Topcoder Competition Page](https://community.topcoder.com/longcontest/?module=ViewProblemStatement&rd=17036&pm=14735)
12 |
13 | Results:
14 |
15 | **Place**|**Entrant**|**Country**|**Average Score**|**Las Vegas**|**Paris**|**Shanghai**|**Khartoum**
16 | :-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:
17 | 1|albu|Russia|0.6663|0.7977|0.604|0.6543|0.6093
18 | 2|cannab|Russia|0.6661|0.7804|0.6446|0.6398|0.5996
19 | 3|pfr|France|0.666|0.8009|0.6008|0.6646|0.5975
20 | 4|selim\_sef|Germany|0.6567|0.7884|0.5991|0.6472|0.5922
21 | 5|fbastani|America|0.6284|0.771|0.5474|0.6326|0.5628
22 | 6|ipraznik|Germany|0.6215|0.7578|0.5668|0.6078|0.5537
23 | 7|tcghanareddy|India|0.6182|0.7591|0.571|0.6014|0.5415
24 | 8|hasan.asyari|Norway|0.6097|0.7407|0.5557|0.5952|0.5472
25 | 9|aveysov|Russia|0.5943|0.7426|0.5805|0.5751|0.4789
26 |
27 |
28 | Top 5 Solutions:
29 | 1. [albu](https://github.com/SpaceNetChallenge/RoadDetector/tree/master/albu-solution/)
30 |
31 | 2. (tie) [cannab](https://github.com/SpaceNetChallenge/RoadDetector/tree/master/cannab-solution/)
32 |
33 | 3. (tie) [pfr](https://github.com/SpaceNetChallenge/RoadDetector/tree/master/pfr-solution)
34 |
35 | 4. [selim_sef](https://github.com/SpaceNetChallenge/RoadDetector/tree/master/selim_sef-solution)
36 |
37 | 5. [fbastani](https://github.com/SpaceNetChallenge/RoadDetector/tree/master/fbastani-solution)
38 |
39 |
--------------------------------------------------------------------------------
/albu-solution/Dockerfile:
--------------------------------------------------------------------------------
1 | # run only on GPU instance and with --ipc=host option
2 | FROM nvidia/cuda:8.0-cudnn6-devel-ubuntu16.04
3 |
4 | RUN apt-get update --fix-missing && apt-get install -y wget libglib2.0 libsm-dev libxrender-dev libjpeg-dev vim tmux libopenblas-dev libxext-dev
5 | ENV PATH "/miniconda/bin:$PATH"
6 | ENV VERSION 4.3.30
7 | RUN wget https://repo.continuum.io/miniconda/Miniconda3-${VERSION}-Linux-x86_64.sh
8 | RUN chmod +x Miniconda3-${VERSION}-Linux-x86_64.sh
9 | RUN ./Miniconda3-${VERSION}-Linux-x86_64.sh -b -f -p /miniconda
10 | RUN mkdir -p /root/.torch/models
11 | RUN wget https://download.pytorch.org/models/resnet34-333f7ec4.pth -P /root/.torch/models
12 | RUN wget http://download.pytorch.org/whl/cu80/torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl
13 | RUN conda install -y GDAL
14 | RUN conda install -y shapely
15 | RUN conda install -y conda=4.4.7
16 | RUN conda install -c conda-forge -y osmnx
17 | RUN pip install torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl
18 | ADD requirements.txt requirements.txt
19 | RUN pip install -r requirements.txt
20 | ENV LD_LIBRARY_PATH /miniconda/lib:${LD_LIBRARY_PATH}
21 | RUN apt install -y libgl1-mesa-glx
22 | ADD weights /results/weights/
23 |
24 | ADD ["train.sh", "test.sh", "train_single.sh", "/"]
25 | ADD src /opt/app/src/
26 |
--------------------------------------------------------------------------------
/albu-solution/docker-build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | nvidia-docker build -t albu .
2 |
--------------------------------------------------------------------------------
/albu-solution/docker-remove.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | docker images -q --filter "dangling=true" | xargs docker rmi
4 |
--------------------------------------------------------------------------------
/albu-solution/docker-run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | nvidia-docker run -v /mnt/disk2/roads/data:/data:ro -v /mnt/disk2/roads/wdata:/wdata --rm -ti --ipc=host albu
4 |
--------------------------------------------------------------------------------
/albu-solution/docker-stop.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | docker stop $(docker ps -a -q)
4 | docker rm $(docker ps -a -q)
--------------------------------------------------------------------------------
/albu-solution/requirements.txt:
--------------------------------------------------------------------------------
1 | cython
2 | numpy==1.13.1
3 | opencv-python
4 | pyproj
5 | matplotlib
6 | networkx==1.11
7 | fiona
8 | scikit-image==0.13.1
9 | scikit-learn
10 | scipy==1.0.0
11 | geopandas
12 | tqdm
13 | torchvision
14 | utm
15 | rtree
16 | tensorboardX
17 | numba==0.36.2
18 | pandas==0.21.0
19 |
--------------------------------------------------------------------------------
/albu-solution/src/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/README.md
--------------------------------------------------------------------------------
/albu-solution/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/__init__.py
--------------------------------------------------------------------------------
/albu-solution/src/augmentations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/augmentations/__init__.py
--------------------------------------------------------------------------------
/albu-solution/src/augmentations/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/augmentations/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/augmentations/__pycache__/composition.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/augmentations/__pycache__/composition.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/augmentations/__pycache__/functional.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/augmentations/__pycache__/functional.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/augmentations/__pycache__/transforms.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/augmentations/__pycache__/transforms.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/augmentations/composition.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 |
4 | class Compose:
5 | """
6 | Compose transforms from a list and apply them sequentially.
7 | """
8 | def __init__(self, transforms):
9 | self.transforms = [t for t in transforms if t is not None]
10 |
11 | def __call__(self, **data):
12 | for t in self.transforms:
13 | data = t(**data)
14 | return data
15 |
16 |
17 | class OneOf:
18 | """
19 | With probability prob, choose one transform from the list and apply it.
20 | """
21 | def __init__(self, transforms, prob=.5):
22 | self.transforms = transforms
23 | self.prob = prob
24 |
25 | def __call__(self, **data):
26 | if random.random() < self.prob:
27 | t = random.choice(self.transforms)
28 | t.prob = 1.
29 | data = t(**data)
30 | return data
31 |
--------------------------------------------------------------------------------
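
Compose and OneOf define the augmentation contract the rest of this solution presumably follows: a transform is any callable that accepts and returns keyword data. A minimal usage sketch (illustrative only, not a file from the repo; ToyFlip is a hypothetical transform in the style of those in transforms.py, and Compose/OneOf are assumed imported from composition.py):

    import random
    import numpy as np

    class ToyFlip:
        # hypothetical transform: flip the image left-right with probability `prob`
        def __init__(self, prob=0.5):
            self.prob = prob

        def __call__(self, **data):
            if random.random() < self.prob:
                data['image'] = data['image'][:, ::-1]
            return data

    pipeline = Compose([ToyFlip(prob=0.5), OneOf([ToyFlip(), ToyFlip()], prob=0.3)])
    out = pipeline(image=np.zeros((8, 8)))  # the dict comes back with transforms applied
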
/albu-solution/src/config.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | Config = namedtuple("Config", [
4 | "dataset_path",
5 | "iter_size",
6 | "folder",
7 | "target_rows",
8 | "target_cols",
9 | "num_channels",
10 | "network",
11 | "loss",
12 | "optimizer",
13 | "lr",
14 | "lr_steps",
15 | "lr_gamma",
16 | "batch_size",
17 | "epoch_size",
18 | "nb_epoch",
19 | "predict_batch_size",
20 | "test_pad",
21 | "results_dir",
22 | "num_classes",
23 | "warmup",
24 | "ignore_target_size"
25 | ])
26 |
27 |
28 |
--------------------------------------------------------------------------------
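
The fields of this namedtuple line up with the keys of JSON configs such as resnet34_512_02_02.json (excerpted at the end of this dump). A minimal loader sketch, assuming the JSON keys match the Config fields (the repo's actual loading code is not shown here):

    import json

    def load_config(path):
        # hypothetical helper: any Config field the JSON omits defaults to None
        with open(path) as f:
            cfg = json.load(f)
        return Config(**{field: cfg.get(field) for field in Config._fields})

    config = load_config('resnet34_512_02_02.json')
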
/albu-solution/src/dataset/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__init__.py
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/abstract_image_provider.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/abstract_image_provider.cpython-35.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/abstract_image_provider.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/abstract_image_provider.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/abstract_image_type.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/abstract_image_type.cpython-35.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/abstract_image_type.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/abstract_image_type.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/image_cropper.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/image_cropper.cpython-35.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/image_cropper.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/image_cropper.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/neural_dataset.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/neural_dataset.cpython-35.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/neural_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/neural_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/raw_image.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/raw_image.cpython-35.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/raw_image.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/raw_image.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/reading_image_provider.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/reading_image_provider.cpython-35.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/__pycache__/reading_image_provider.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/dataset/__pycache__/reading_image_provider.cpython-36.pyc
--------------------------------------------------------------------------------
/albu-solution/src/dataset/abstract_image_provider.py:
--------------------------------------------------------------------------------
1 | from .abstract_image_type import AbstractImageType
2 | from typing import Type, Dict, AnyStr, Callable
3 |
4 | class AbstractImageProvider:
5 | def __init__(self, image_type: Type[AbstractImageType], fn_mapping: Dict[AnyStr, Callable], has_alpha=False):
6 | self.image_type = image_type
7 | self.has_alpha = has_alpha
8 | self.fn_mapping = fn_mapping
9 |
10 | def __getitem__(self, item):
11 | raise NotImplementedError
12 |
13 | def __len__(self):
14 | raise NotImplementedError
15 |
16 |
17 |
--------------------------------------------------------------------------------
/albu-solution/src/dataset/abstract_image_type.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | cv2.setNumThreads(0)
3 | cv2.ocl.setUseOpenCL(False)
4 | import numpy as np
5 |
6 |
7 | class AlphaNotAvailableException(Exception):
8 | pass
9 |
10 | class AbstractImageType:
11 | """
12 | implement read_* methods in concrete image types. see raw_image for example
13 | """
14 | def __init__(self, paths, fn, fn_mapping, has_alpha=False):
15 | self.paths = paths
16 | self.fn = fn
17 | self.has_alpha = has_alpha
18 | self.fn_mapping = fn_mapping
19 | self.cache = {}
20 |
21 | @property
22 | def image(self):
23 | if 'image' not in self.cache:
24 | self.cache['image'] = self.read_image()
25 | return self.cache['image']
26 |
27 | @property
28 | def mask(self):
29 | if 'mask' not in self.cache:
30 | self.cache['mask'] = self.read_mask()
31 | return self.cache['mask']
32 |
33 | @property
34 | def alpha(self):
35 | if not self.has_alpha:
36 | raise AlphaNotAvailableException
37 | if 'alpha' not in self.cache:
38 | self.cache['alpha'] = self.read_alpha()
39 | return self.cache['alpha']
40 |
41 | def read_alpha(self):
42 | raise NotImplementedError
43 |
44 | def read_image(self):
45 | raise NotImplementedError
46 |
47 | def read_mask(self):
48 | raise NotImplementedError
49 |
50 | def reflect_border(self, image, b=12):
51 | return cv2.copyMakeBorder(image, b, b, b, b, cv2.BORDER_REFLECT)
52 |
53 | def pad_image(self, image, rows, cols):
54 | channels = image.shape[2] if len(image.shape) > 2 else None
55 | if image.shape[:2] != (rows, cols):
56 | empty_x = np.zeros((rows, cols, channels), dtype=image.dtype) if channels else np.zeros((rows, cols), dtype=image.dtype)
57 | empty_x[0:image.shape[0],0:image.shape[1],...] = image
58 | image = empty_x
59 | return image
60 |
61 | def finalyze(self, image):
62 | return self.reflect_border(image)
63 |
64 |
65 |
--------------------------------------------------------------------------------
/albu-solution/src/dataset/image_cropper.py:
--------------------------------------------------------------------------------
1 | import random
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | class ImageCropper:
6 | """
7 | generates random or sequential crops of image
8 | """
9 | def __init__(self, img_rows, img_cols, target_rows, target_cols, pad):
10 | self.image_rows = img_rows
11 | self.image_cols = img_cols
12 | self.target_rows = target_rows
13 | self.target_cols = target_cols
14 | self.pad = pad
15 | self.use_crop = (img_rows != target_rows) or (img_cols != target_cols)
16 | self.starts_y = self.sequential_starts(axis=0) if self.use_crop else [0]
17 | self.starts_x = self.sequential_starts(axis=1) if self.use_crop else [0]
18 | self.positions = [(x, y) for x in self.starts_x for y in self.starts_y]
19 | # self.lock = threading.Lock()
20 |
21 | def random_crop_coords(self):
22 | x = random.randint(0, self.image_cols - self.target_cols)
23 | y = random.randint(0, self.image_rows - self.target_rows)
24 | return x, y
25 |
26 | def crop_image(self, image, x, y):
27 | return image[y: y+self.target_rows, x: x+self.target_cols,...] if self.use_crop else image
28 |
29 | def sequential_crops(self, img):
30 | for startx in self.starts_x:
31 | for starty in self.starts_y:
32 | yield self.crop_image(img, startx, starty)
33 |
34 | def sequential_starts(self, axis=0):
35 | """
36 | splits the range uniformly to generate evenly spaced crops with minimal overlap (pad)
37 | """
38 | big_segment = self.image_cols if axis else self.image_rows
39 | small_segment = self.target_cols if axis else self.target_rows
40 | if big_segment == small_segment:
41 | return [0]
42 | steps = np.ceil((big_segment - self.pad) / (small_segment - self.pad)) # how many small segments in big segment
43 | if steps == 1:
44 | return [0]
45 | new_pad = int(np.floor((small_segment * steps - big_segment) / (steps - 1))) # recalculate pad
46 | starts = [i for i in range(0, big_segment - small_segment, small_segment - new_pad)]
47 | starts.append(big_segment - small_segment)
48 | return starts
49 |
50 | # debug helpers for visualizing crop start positions
51 | def starts_to_mpl(starts, t):
52 | ends = np.array(starts) + t
53 | data = []
54 | prev_e = None
55 | for idx, (s, e) in enumerate(zip(starts, ends)):
56 | # if prev_e is not None:
57 | # data.append((prev_e, s))
58 | # data.append((idx-1, idx-1))
59 | # data.append('b')
60 | # data.append((prev_e, s))
61 | # data.append((idx, idx))
62 | # data.append('b')
63 | data.append((s, e))
64 | data.append((idx, idx))
65 | data.append('r')
66 |
67 | prev_e = e
68 | if idx > 0:
69 | data.append((s, s))
70 | data.append((idx-1, idx))
71 | data.append('g--')
72 | if idx < len(starts) - 1:
73 | data.append((e, e))
74 | data.append((idx, idx+1))
75 | data.append('g--')
76 |
77 | return data
78 |
79 | def calc_starts_and_visualize(c, tr, tc):
80 | starts_rows = c.sequential_starts(axis=0)
81 | data_rows = starts_to_mpl(starts_rows, tr)
82 | starts_cols = c.sequential_starts(axis=1)
83 | data_cols = starts_to_mpl(starts_cols, tc)
84 | print(starts_rows)
85 | print(starts_cols)
86 |
87 | f, axarr = plt.subplots(1, 2, sharey=True)
88 | axarr[0].plot(*data_rows)
89 | axarr[0].set_title('rows')
90 | axarr[1].plot(*data_cols)
91 | axarr[1].set_title('cols')
92 | plt.show()
93 |
94 |
95 | if __name__ == '__main__':
96 | opts = 1324, 1324, 768, 768, 0
97 | c = ImageCropper(*opts)
98 | calc_starts_and_visualize(c, opts[2], opts[3])
99 |
--------------------------------------------------------------------------------
/albu-solution/src/dataset/raw_image.py:
--------------------------------------------------------------------------------
1 | import os
2 | from scipy.misc import imread
3 |
4 | from dataset.abstract_image_type import AbstractImageType
5 |
6 |
7 | class RawImageType(AbstractImageType):
8 | """
9 | Constructed by an image provider; the RGB image is read eagerly, the mask and alpha lazily via the caching properties of AbstractImageType.
10 | """
11 | def __init__(self, paths, fn, fn_mapping, has_alpha):
12 | super().__init__(paths, fn, fn_mapping, has_alpha)
13 | self.im = imread(os.path.join(self.paths['images'], self.fn), mode='RGB')
14 |
15 | def read_image(self):
16 | im = self.im[...,:-1] if self.has_alpha else self.im
17 | return self.finalyze(im)
18 |
19 | def read_mask(self):
20 | path = os.path.join(self.paths['masks'], self.fn_mapping['masks'](self.fn))
21 | mask = imread(path, mode='L')
22 | return self.finalyze(mask)
23 |
24 | def read_alpha(self):
25 | return self.finalyze(self.im[...,-1])
26 |
27 | def finalyze(self, data):
28 | return self.reflect_border(data)
29 |
--------------------------------------------------------------------------------
/albu-solution/src/dataset/reading_image_provider.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from .abstract_image_provider import AbstractImageProvider
4 |
5 |
6 | class ReadingImageProvider(AbstractImageProvider):
7 | def __init__(self, image_type, paths, fn_mapping=lambda name: name, image_suffix=None, has_alpha=False):
8 | super(ReadingImageProvider, self).__init__(image_type, fn_mapping, has_alpha=has_alpha)
9 | self.im_names = os.listdir(paths['images'])
10 | if image_suffix is not None:
11 | self.im_names = [n for n in self.im_names if image_suffix in n]
12 |
13 | self.paths = paths
14 |
15 | def get_indexes_by_names(self, names):
16 | return [idx for idx, name in enumerate(self.im_names) if os.path.splitext(name)[0] in names]
17 |
18 | def __getitem__(self, item):
19 | return self.image_type(self.paths, self.im_names[item], self.fn_mapping, self.has_alpha)
20 |
21 | def __len__(self):
22 | return len(self.im_names)
--------------------------------------------------------------------------------
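
A sketch of how the dataset pieces fit together (the paths and the identity mask mapping are assumptions for illustration; the real wiring lives in the training scripts):

    from dataset.raw_image import RawImageType

    paths = {'images': '/wdata/train/images', 'masks': '/wdata/train/masks'}  # assumed layout
    fn_mapping = {'masks': lambda name: name}  # assumed: mask file shares the image filename

    provider = ReadingImageProvider(RawImageType, paths, fn_mapping, image_suffix='RGB')
    item = provider[0]   # a RawImageType instance
    img = item.image     # RGB array, reflect-padded by finalyze()
    mask = item.mask     # mask read lazily and cached by AbstractImageType
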
/albu-solution/src/merge_preds.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tqdm
3 | import numpy as np
4 | import cv2
5 |
6 | def merge_tiffs(root):
7 | os.makedirs(os.path.join(root, 'merged'), exist_ok=True)
8 | prob_files = {f for f in os.listdir(root) if os.path.splitext(f)[1] in ['.tif', '.tiff']}
9 | unfolded = {f[6:] for f in prob_files if f.startswith('fold')}
10 | if not unfolded:
11 | unfolded = prob_files
12 |
13 | for prob_file in tqdm.tqdm(unfolded):
14 | probs = []
15 | for fold in range(4):
16 | prob = os.path.join(root, 'fold{}_'.format(fold) + prob_file)
17 | prob_arr = cv2.imread(prob, cv2.IMREAD_GRAYSCALE)
18 | probs.append(prob_arr)
19 | prob_arr = np.mean(probs, axis=0)
20 |
21 | res_path_geo = os.path.join(root, 'merged', prob_file)
22 | cv2.imwrite(res_path_geo, prob_arr)
23 |
24 | def merge_tiffs_defferent_folders(roots, res):
25 | os.makedirs(os.path.join(res), exist_ok=True)
26 | prob_files = {f for f in os.listdir(roots[0]) if os.path.splitext(f)[1] in ['.tif', '.tiff']}
27 |
28 | for prob_file in tqdm.tqdm(prob_files):
29 | probs = []
30 | for root in roots:
31 | prob_arr = cv2.imread(os.path.join(root, prob_file), cv2.IMREAD_GRAYSCALE)
32 | probs.append(prob_arr)
33 | prob_arr = np.mean(probs, axis=0)
34 | # prob_arr = np.clip(probs[0] * 0.7 + probs[1] * 0.3, 0, 1.)
35 |
36 | res_path_geo = os.path.join(res, prob_file)
37 | cv2.imwrite(res_path_geo, prob_arr)
38 |
39 | if __name__ == "__main__":
40 | root = '/results/results'
41 | merge_tiffs(os.path.join(root, '2m_4fold_512_30e_d0.2_g0.2_test'))
42 |
--------------------------------------------------------------------------------
/albu-solution/src/other_tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/other_tools/__init__.py
--------------------------------------------------------------------------------
/albu-solution/src/other_tools/all_dems_min_max.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from osgeo import gdal
4 |
5 | def minmax():
6 | root = r'D:\tmp\map3d\testing'
7 | all_mean = []
8 | for fn in os.listdir(root):
9 | if 'DSM' not in fn or 'xml' in fn:
10 | continue
11 | dsm = gdal.Open(os.path.join(root, fn))
12 | dtm = gdal.Open(os.path.join(root, fn.replace('DSM', "DTM")))
13 | dsm_band = dsm.GetRasterBand(1)
14 | dtm_band = dtm.GetRasterBand(1)
15 | stats_dsm = dsm_band.GetStatistics(True, True)
16 | mi, ma, mean, std = stats_dsm
17 | stats_dtm = dtm_band.GetStatistics(True, True)
18 | mi2, ma2, mean2, std2 = stats_dtm
19 | print(std)
20 | all_mean.append(ma - ma2)
21 | print(np.mean(all_mean))
22 |
23 |
24 | minmax()
--------------------------------------------------------------------------------
/albu-solution/src/other_tools/gen_folds.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import os
3 | import random
4 | from random import shuffle
5 | random.seed(42)
6 |
7 | files = os.listdir(r'/data/train/images')
8 | shuffle(files)
9 | s = {k[:5] for k in files}
10 | d = {k: [v for v in files if v.startswith(k)] for k in s}
11 | folds = {}
12 |
13 | idx = 0
14 | for v in d.values():
15 | for val in v:
16 | folds[val] = idx % 4
17 | idx += 1
18 |
19 | df = pd.Series(folds, name='fold')
20 | df.to_csv('folds.csv', header=['fold'], index=True)
21 |
--------------------------------------------------------------------------------
/albu-solution/src/other_tools/make_submission.py:
--------------------------------------------------------------------------------
1 | from osgeo import gdal
2 | import os
3 | import numpy as np
4 | from scipy import ndimage as ndi
5 | from skimage.morphology import remove_small_objects, watershed
6 | import tqdm
7 |
8 | def rlencode(x, dropna=False):
9 | """
10 | Run length encoding.
11 | Based on http://stackoverflow.com/a/32681075, which is based on the rle
12 | function from R.
13 |
14 | Parameters
15 | ----------
16 | x : 1D array_like
17 | Input array to encode
18 | dropna: bool, optional
19 | Drop all runs of NaNs.
20 |
21 | Returns
22 | -------
23 | start positions, run lengths, run values
24 |
25 | """
26 | where = np.flatnonzero
27 | x = np.asarray(x)
28 | n = len(x)
29 | if n == 0:
30 | return (np.array([], dtype=int),
31 | np.array([], dtype=int),
32 | np.array([], dtype=x.dtype))
33 |
34 | starts = np.r_[0, where(~np.isclose(x[1:], x[:-1], equal_nan=True)) + 1]
35 | lengths = np.diff(np.r_[starts, n])
36 | values = x[starts]
37 |
38 | if dropna:
39 | mask = ~np.isnan(values)
40 | starts, lengths, values = starts[mask], lengths[mask], values[mask]
41 |
42 | return starts, lengths, values
43 |
44 | def rldecode(starts, lengths, values, minlength=None):
45 | """
46 | Decode a run-length encoding of a 1D array.
47 |
48 | Parameters
49 | ----------
50 | starts, lengths, values : 1D array_like
51 | The run-length encoding.
52 | minlength : int, optional
53 | Minimum length of the output array.
54 |
55 | Returns
56 | -------
57 | 1D array. Missing data will be filled with NaNs.
58 |
59 | """
60 | starts, lengths, values = map(np.asarray, (starts, lengths, values))
61 | ends = starts + lengths
62 | n = ends[-1]
63 | if minlength is not None:
64 | n = max(minlength, n)
65 | x = np.full(n, np.nan)
66 | for lo, hi, val in zip(starts, ends, values):
67 | x[lo:hi] = val
68 | return x
69 |
70 | def rle_to_string(rle):
71 | (starts, lengths, values) = rle
72 | items = []
73 | for i in range(len(starts)):
74 | items.append(str(values[i]))
75 | items.append(str(lengths[i]))
76 | return ",".join(items)
77 |
78 |
79 | def my_watershed(mask1, mask2):
80 | markers = ndi.label(mask2, output=np.uint32)[0]
81 | labels = watershed(mask1, markers, mask=mask1, watershed_line=True)
82 | return labels
83 |
84 |
85 | def make_submission(prediction_dir, data_dir, submission_file):
86 | # 8881 - 0.3 / +0.4 / 100 / 120 test 8935
87 | threshold = 0.3
88 | f_submit = open(submission_file, "w")
89 | strings = []
90 | predictions = list(sorted(os.listdir(prediction_dir)))
91 |
92 | for f in tqdm.tqdm(predictions):
93 | if 'xml' in f:
94 | continue
95 | dsm_ds = gdal.Open(os.path.join(data_dir, f.replace('RGB', 'DSM')), gdal.GA_ReadOnly)
96 | band_dsm = dsm_ds.GetRasterBand(1)
97 | nodata = band_dsm.GetNoDataValue()
98 | dsm = band_dsm.ReadAsArray()
99 | tile_id = f.split('_RGB.tif')[0]
100 | mask_ds = gdal.Open(os.path.join(prediction_dir, f))
101 | mask_img = mask_ds.ReadAsArray()
102 | mask_img[dsm==nodata] = 0
103 |
104 | img_copy = np.copy(mask_img)
105 | img_copy[mask_img <= threshold + 0.4] = 0
106 | img_copy[mask_img > threshold + 0.4] = 1
107 | img_copy = img_copy.astype(np.bool)
108 | img_copy = remove_small_objects(img_copy, 100).astype(np.uint8)
109 |
110 | mask_img[mask_img <= threshold] = 0
111 | mask_img[mask_img > threshold] = 1
112 | mask_img = mask_img.astype(np.bool)
113 | mask_img = remove_small_objects(mask_img, 120).astype(np.uint8)
114 |
115 | labeled_array = my_watershed(mask_img, img_copy)
116 |
117 | # labeled_array = remove_on_boundary(labeled_array)
118 | rle_str = rle_to_string(rlencode(labeled_array.flatten()))
119 | s = "{tile_id}\n2048,2048\n{rle}\n".format(tile_id=tile_id, rle=rle_str)
120 | strings.append(s)
121 |
122 | f_submit.writelines(strings)
123 | f_submit.close()
124 |
125 |
126 |
--------------------------------------------------------------------------------
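
rlencode and rldecode above are exact inverses; a quick round trip illustrates the encoding:

    import numpy as np

    x = np.array([0, 0, 1, 1, 1, 0, 2])
    starts, lengths, values = rlencode(x)
    # starts == [0, 2, 5, 6], lengths == [2, 3, 1, 1], values == [0, 1, 0, 2]
    assert np.array_equal(rldecode(starts, lengths, values), x)
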
/albu-solution/src/other_tools/merge_preds.py:
--------------------------------------------------------------------------------
1 | import os
2 | from osgeo import gdal
3 | from osgeo.gdalnumeric import CopyDatasetInfo
4 | from scipy.spatial.distance import dice
5 | import tqdm
6 | import numpy as np
7 | import cv2
8 |
9 | def merge_tiffs(root):
10 | os.makedirs(os.path.join(root, 'merged'), exist_ok=True)
11 | prob_files = {f for f in os.listdir(root) if os.path.splitext(f)[1] in ['.tif', '.tiff']}
12 | unfolded = {f[6:] for f in prob_files if f.startswith('fold')}
13 | if not unfolded:
14 | unfolded = prob_files
15 |
16 | for prob_file in tqdm.tqdm(unfolded):
17 | probs = []
18 | for fold in range(5):
19 | prob = os.path.join(root, 'fold{}_'.format(fold) + prob_file)
20 | prob_arr = cv2.imread(prob, cv2.IMREAD_GRAYSCALE)
21 | probs.append(prob_arr)
22 | prob_arr = np.mean(probs, axis=0)
23 |
24 | res_path_geo = os.path.join(root, 'merged', prob_file)
25 | cv2.imwrite(res_path_geo, prob_arr)
26 |
27 | def merge_tiffs_defferent_folders(roots, res):
28 | os.makedirs(os.path.join(res), exist_ok=True)
29 | prob_files = {f for f in os.listdir(roots[0]) if os.path.splitext(f)[1] in ['.tif', '.tiff']}
30 |
31 | for prob_file in tqdm.tqdm(prob_files):
32 | probs = []
33 | for root in roots:
34 | prob = gdal.Open(os.path.join(root, prob_file), gdal.GA_ReadOnly)
35 | geotrans = prob.GetGeoTransform()
36 | prob_arr = prob.ReadAsArray()
37 | probs.append(prob_arr)
38 | prob_arr = np.mean(probs, axis=0)
39 | # prob_arr = np.clip(probs[0] * 0.7 + probs[1] * 0.3, 0, 1.)
40 |
41 | res_path_geo = os.path.join(res, prob_file)
42 | driver = gdal.GetDriverByName('GTiff')
43 | outRaster = driver.Create(res_path_geo, prob_arr.shape[1], prob_arr.shape[0], 1, gdal.GDT_Float32)
44 | outRaster.SetGeoTransform(geotrans)
45 | CopyDatasetInfo(prob, outRaster)
46 | outband = outRaster.GetRasterBand(1)
47 | outband.WriteArray(prob_arr)
48 | outRaster.FlushCache()
49 |
50 | def all_dice(pred_path, gt_path):
51 | all_d= []
52 | for im in os.listdir(pred_path):
53 | img_ds = gdal.Open(os.path.join(pred_path, im), gdal.GA_ReadOnly)
54 | img = img_ds.GetRasterBand(1).ReadAsArray()
55 | gt_ds = gdal.Open(os.path.join(gt_path, im.replace('RGB', "GTI")), gdal.GA_ReadOnly)
56 | gt = gt_ds.GetRasterBand(1).ReadAsArray()
57 | dsm_ds = gdal.Open(os.path.join(gt_path, im.replace('RGB', 'DSM')), gdal.GA_ReadOnly)
58 | band_dsm = dsm_ds.GetRasterBand(1)
59 | nodata = band_dsm.GetNoDataValue()
60 | dsm = band_dsm.ReadAsArray()
61 | img[dsm==nodata] = 0
62 | gt[dsm==nodata] = 0
63 |
64 | d = 1 - dice(img.flatten() > .4, gt.flatten() >= 1)
65 | print(im, d)
66 | all_d.append(d)
67 | print(np.mean(all_d))
68 |
69 | if __name__ == "__main__":
70 | #upscale bce adam 8489
71 | root = os.path.join('..', '..')
72 | # all_dice(os.path.join(root, 'results', 'resnet34'), os.path.join('training'))
73 | merge_tiffs(os.path.join(root, 'results', 'results', 'resnet34_test'))
74 |
75 |
--------------------------------------------------------------------------------
/albu-solution/src/other_tools/sknw.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numba import jit
3 | import networkx as nx
4 |
5 |
6 | # offsets of the 3**dim - 1 neighbors of a pixel in the flattened array
7 | def neighbors(shape):
8 | dim = len(shape)
9 | block = np.ones([3] * dim)
10 | block[tuple([1] * dim)] = 0
11 | idx = np.where(block > 0)
12 | idx = np.array(idx, dtype=np.uint8).T
13 | idx = np.array(idx - [1] * dim)
14 | acc = np.cumprod((1,) + shape[::-1][:-1])
15 | return np.dot(idx, acc[::-1])
16 |
17 |
18 | @jit
19 | def mark(img):  # mark pixels: 1 = edge (exactly two neighbors), 2 = node (endpoint or junction)
20 | nbs = neighbors(img.shape)
21 | img = img.ravel()
22 | for p in range(len(img)):
23 | if img[p] == 0: continue
24 | s = 0
25 | for dp in nbs:
26 | if img[p + dp] != 0: s += 1
27 | if s == 2:
28 | img[p] = 1
29 | else:
30 | img[p] = 2
31 |
32 |
33 | @jit  # translate flat indices back to (row, col, ...) coordinates
34 | def idx2rc(idx, acc):
35 | rst = np.zeros((len(idx), len(acc)), dtype=np.int16)
36 | for i in range(len(idx)):
37 | for j in range(len(acc)):
38 | rst[i, j] = idx[i] // acc[j]
39 | idx[i] -= rst[i, j] * acc[j]
40 | rst -= 1
41 | return rst
42 |
43 |
44 | @jit # fill a node (may be two or more points)
45 | def fill(img, p, num, nbs, acc, buf):
46 | back = img[p]
47 | img[p] = num
48 | buf[0] = p
49 | cur = 0
50 | s = 1
51 |
52 | while True:
53 | p = buf[cur]
54 | for dp in nbs:
55 | cp = p + dp
56 | if img[cp] == back:
57 | img[cp] = num
58 | buf[s] = cp
59 | s += 1
60 | cur += 1
61 | if cur == s: break
62 | return idx2rc(buf[:s], acc)
63 |
64 |
65 | @jit  # trace an edge into a preallocated buffer (building Python lists would defeat numba's jit)
66 | def trace(img, p, nbs, acc, buf):
67 | c1 = 0
68 | c2 = 0
69 | newp = 0
70 | cur = 0
71 |
72 | while True:
73 | buf[cur] = p
74 | img[p] = 0
75 | cur += 1
76 | for dp in nbs:
77 | cp = p + dp
78 | if img[cp] >= 10:
79 | if c1 == 0:
80 | c1 = img[cp]
81 | else:
82 | c2 = img[cp]
83 | if img[cp] == 1:
84 | newp = cp
85 | p = newp
86 | if c2 != 0: break
87 | return (c1 - 10, c2 - 10, idx2rc(buf[:cur], acc))
88 |
89 |
90 | @jit # parse the image then get the nodes and edges
91 | def parse_struc(img):
92 | nbs = neighbors(img.shape)
93 | acc = np.cumprod((1,) + img.shape[::-1][:-1])[::-1]
94 | img = img.ravel()
95 | pts = np.array(np.where(img == 2))[0]
96 | buf = np.zeros(131072, dtype=np.int64)
97 | num = 10
98 | nodes = []
99 | for p in pts:
100 | if img[p] == 2:
101 | nds = fill(img, p, num, nbs, acc, buf)
102 | num += 1
103 | nodes.append(nds)
104 |
105 | edges = []
106 | for p in pts:
107 | for dp in nbs:
108 | if img[p + dp] == 1:
109 | edge = trace(img, p + dp, nbs, acc, buf)
110 | edges.append(edge)
111 | return nodes, edges
112 |
113 |
114 | # use nodes and edges build a networkx graph
115 | def build_graph(nodes, edges, multi=False):
116 | graph = nx.MultiGraph() if multi else nx.Graph()
117 | for i in range(len(nodes)):
118 | graph.add_node(i, pts=nodes[i], o=np.int32(nodes[i].mean(axis=0)))
119 | for s, e, pts in edges:
120 | l = np.linalg.norm(pts[1:] - pts[:-1], axis=1).sum()
121 | graph.add_edge(s, e, pts=pts, weight=l)
122 | return graph
123 |
124 |
125 | def buffer(ske):
126 | buf = np.zeros(tuple(np.array(ske.shape) + 2), dtype=np.uint16)
127 | buf[tuple([slice(1, -1)] * buf.ndim)] = ske
128 | return buf
129 |
130 |
131 | def build_sknw(ske, multi=False):
132 | buf = buffer(ske)
133 | mark(buf)
134 | nodes, edges = parse_struc(buf)
135 | return build_graph(nodes, edges, multi)
136 |
137 |
138 | # draw the graph
139 | def draw_graph(img, graph, cn=255, ce=128):
140 | acc = np.cumprod((1,) + img.shape[::-1][:-1])[::-1]
141 | img = img.ravel()
142 | for idx in graph.nodes():
143 | pts = graph.node[idx]['pts']
144 | img[np.dot(pts, acc)] = cn
145 | for (s, e) in graph.edges():
146 | eds = graph[s][e]
147 | for i in eds:
148 | pts = eds[i]['pts']
149 | img[np.dot(pts, acc)] = ce
150 |
151 |
152 | if __name__ == '__main__':
153 | g = nx.MultiGraph()
154 | g.add_nodes_from([1, 2, 3, 4, 5])
155 | g.add_edges_from([(1, 2), (1, 3), (2, 3), (4, 5), (5, 4)])
156 | print(g.nodes())
157 | print(g.edges())
158 | a = g.subgraph(1)
159 | print('d')
160 | print(a)
161 | print('d')
162 |
--------------------------------------------------------------------------------
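
A minimal sketch of build_sknw on a toy skeleton (illustrative; networkx 1.11 API, as pinned in requirements.txt). A straight 5-pixel line has two endpoint pixels, which mark() labels as nodes, and three interior pixels traced as one edge:

    import numpy as np

    ske = np.zeros((5, 7), dtype=np.uint16)
    ske[2, 1:6] = 1                    # 5-pixel horizontal skeleton line
    graph = build_sknw(ske)
    print(graph.number_of_nodes())     # 2 endpoint nodes
    print(graph[0][1]['weight'])       # length of the traced edge
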
/albu-solution/src/pytorch_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/pytorch_utils/__init__.py
--------------------------------------------------------------------------------
/albu-solution/src/pytorch_utils/concrete_eval.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import cv2
4 | cv2.setNumThreads(0)
5 | cv2.ocl.setUseOpenCL(False)
6 | import numpy as np
7 |
8 | from .eval import Evaluator
9 |
10 |
11 | class FullImageEvaluator(Evaluator):
12 | def __init__(self, *args, **kwargs):
13 | super().__init__(*args, **kwargs)
14 |
15 | def process_batch(self, predicted, model, data, prefix=""):
16 | names = data['image_name']
17 | for i in range(len(names)):
18 | self.on_image_constructed(names[i], predicted[i,...], prefix)
19 |
20 | def save(self, name, prediction, prefix=""):
21 | cv2.imwrite(os.path.join(self.save_dir, prefix + name), (prediction * 255).astype(np.uint8))
22 |
23 |
24 | class CropEvaluator(Evaluator):
25 | def __init__(self, *args, **kwargs):
26 | super().__init__(*args, **kwargs)
27 | self.current_mask = None
28 | self.current_prediction = None
29 | self.current_image_name = None
30 |
31 | def process_batch(self, predicted, model, data, prefix=""):
32 | names = data['image_name']
33 | config = self.config
34 | batch_geometry = self.parse_geometry(data['geometry'])
35 | for i in range(len(names)):
36 | name = names[i]
37 | geometry = batch_geometry[i]
38 | sx, sy = geometry['sx'], geometry['sy']
39 | pred = self.cut_border(np.squeeze(predicted[i,...]))
40 | if name != self.current_image_name:
41 | if self.current_image_name is None:
42 | self.current_image_name = name
43 | else:
44 | self.on_image_constructed(self.current_image_name, self.current_prediction / self.current_mask, prefix=prefix)
45 | self.construct_big_image(geometry)
46 | self.current_prediction[sy + self.border:sy + config.target_rows - self.border, sx + self.border:sx + config.target_cols - self.border] += pred
47 | self.current_mask[sy+self.border:sy + config.target_rows - self.border, sx + self.border:sx + config.target_cols - self.border] += 1
48 | self.current_image_name = name
49 |
50 | def parse_geometry(self, batch_geometry):
51 | rows = batch_geometry['rows'].numpy()
52 | cols = batch_geometry['cols'].numpy()
53 | sx = batch_geometry['sx'].numpy()
54 | sy = batch_geometry['sy'].numpy()
55 | geometries = []
56 | for idx in range(rows.shape[0]):
57 | geometry = {'rows': rows[idx],
58 | 'cols': cols[idx],
59 | 'sx': sx[idx],
60 | 'sy': sy[idx]}
61 | geometries.append(geometry)
62 | return geometries
63 |
64 | def construct_big_image(self, geometry):
65 | self.current_mask = np.zeros((geometry['rows'], geometry['cols']), np.uint8)
66 | self.current_prediction = np.zeros((geometry['rows'], geometry['cols']), np.float32)
67 |
68 | def save(self, name, prediction, prefix=""):
69 | cv2.imwrite(os.path.join(self.save_dir, prefix + name), (prediction * 255).astype(np.uint8))
70 |
71 | def post_predict_action(self, prefix):
72 | self.on_image_constructed(self.current_image_name, self.current_prediction / self.current_mask, prefix=prefix)
73 | self.current_image_name = None
74 |
--------------------------------------------------------------------------------
/albu-solution/src/pytorch_utils/eval.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | cv2.setNumThreads(0)
4 | cv2.ocl.setUseOpenCL(False)
5 | import numpy as np
6 | import torch
7 | import torch.nn.functional as F
8 | # torch.backends.cudnn.benchmark = True
9 | import tqdm
10 | from torch.serialization import SourceChangeWarning
11 | import warnings
12 |
13 | from dataset.neural_dataset import SequentialDataset
14 | from torch.utils.data.dataloader import DataLoader as PytorchDataLoader
15 |
16 |
17 | class flip:
18 | FLIP_NONE=0
19 | FLIP_LR=1
20 | FLIP_FULL=2
21 |
22 |
23 | def flip_tensor_lr(batch):
24 | columns = batch.data.size()[-1]
25 | index = torch.autograd.Variable(torch.LongTensor(list(reversed(range(columns)))).cuda())
26 | return batch.index_select(3, index)
27 |
28 |
29 | def flip_tensor_ud(batch):
30 | rows = batch.data.size()[-2]
31 | index = torch.autograd.Variable(torch.LongTensor(list(reversed(range(rows)))).cuda())
32 | return batch.index_select(2, index)
33 |
34 |
35 | def to_numpy(batch):
36 | return np.moveaxis(batch.data.cpu().numpy(), 1, -1)
37 |
38 |
39 | def predict(model, batch, flips=flip.FLIP_NONE):
40 | # predict with flip TTA on gpu; sigmoid is applied uniformly to every member of masks below
41 | pred1 = model(batch)
42 | if flips > flip.FLIP_NONE:
43 | pred2 = flip_tensor_lr(model(flip_tensor_lr(batch)))
44 | masks = [pred1, pred2]
45 | if flips > flip.FLIP_LR:
46 | pred3 = flip_tensor_ud(model(flip_tensor_ud(batch)))
47 | pred4 = flip_tensor_ud(flip_tensor_lr(model(flip_tensor_ud(flip_tensor_lr(batch)))))
48 | masks.extend([pred3, pred4])
49 | masks = list(map(F.sigmoid, masks))
50 | new_mask = torch.mean(torch.stack(masks, 0), 0)
51 | return to_numpy(new_mask)
52 | return to_numpy(F.sigmoid(pred1))
53 |
54 |
55 | def read_model(config, fold):
56 | # model = nn.DataParallel(torch.load(os.path.join('..', 'weights', project, 'fold{}_best.pth'.format(fold))))
57 | with warnings.catch_warnings():
58 | warnings.simplefilter('ignore', SourceChangeWarning)
59 | model = torch.load(os.path.join(config.results_dir, 'weights', config.folder, 'fold{}_best.pth'.format(fold)))
60 | model.eval()
61 | return model
62 |
63 |
64 | class Evaluator:
65 | """
66 | base class for evaluators
67 | """
68 | def __init__(self, config, ds, test=False, flips=0, num_workers=0, border=12, val_transforms=None):
69 | self.config = config
70 | self.ds = ds
71 | self.test = test
72 | self.flips = flips
73 | self.num_workers = num_workers
74 |
75 | self.current_prediction = None
76 | self.need_to_save = False
77 | self.border = border
78 | self.folder = config.folder
79 |
80 | self.save_dir = os.path.join(self.config.results_dir, self.folder + ('_test' if self.test else ''))
81 | self.val_transforms = val_transforms
82 | os.makedirs(self.save_dir, exist_ok=True)
83 |
84 | def predict(self, fold, val_indexes):
85 | prefix = ('fold' + str(fold) + "_") if (self.test and fold is not None) else ""
86 | val_dataset = SequentialDataset(self.ds, val_indexes, stage='test', config=self.config, transforms=self.val_transforms)
87 | val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.predict_batch_size, num_workers=self.num_workers, drop_last=False)
88 | model = read_model(self.config, fold)
89 | pbar = tqdm.tqdm(val_dl, total=len(val_dl))
90 | for data in pbar:
91 | samples = torch.autograd.Variable(data['image'], volatile=True).cuda()
92 | predicted = predict(model, samples, flips=self.flips)
93 | self.process_batch(predicted, model, data, prefix=prefix)
94 | self.post_predict_action(prefix=prefix)
95 |
96 | def cut_border(self, image):
97 | if image is None:
98 | return None
99 | return image if not self.border else image[self.border:-self.border, self.border:-self.border, ...]
100 |
101 | def on_image_constructed(self, name, prediction, prefix=""):
102 | prediction = self.cut_border(prediction)
103 | prediction = np.squeeze(prediction)
104 | self.save(name, prediction, prefix=prefix)
105 |
106 | def save(self, name, prediction, prefix=""):
107 | raise NotImplementedError
108 |
109 | def process_batch(self, predicted, model, data, prefix=""):
110 | raise NotImplementedError
111 |
112 | def post_predict_action(self, prefix):
113 | pass
114 |
--------------------------------------------------------------------------------
/albu-solution/src/pytorch_utils/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | eps = 1
3 |
4 | def dice_round(preds, trues, is_average=True):
5 | preds = torch.round(preds)
6 | return dice(preds, trues, is_average=is_average)
7 |
8 |
9 | def dice(preds, trues, weight=None, is_average=True):
10 | num = preds.size(0)
11 | preds = preds.view(num, -1)
12 | trues = trues.view(num, -1)
13 | if weight is not None:
14 | w = torch.autograd.Variable(weight).view(num, -1)
15 | preds = preds * w
16 | trues = trues * w
17 | intersection = (preds * trues).sum(1)
18 | scores = (2. * intersection + eps) / (preds.sum(1) + trues.sum(1) + eps)
19 |
20 | score = scores.sum()
21 | if is_average:
22 | score /= num
23 | return torch.clamp(score, 0., 1.)
24 |
25 |
--------------------------------------------------------------------------------
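
A toy check of the smoothed Dice above (eps = 1 keeps empty masks from producing 0/0; plain torch 0.3-era tensors, matching the wheel pinned in the Dockerfile):

    import torch

    preds = torch.FloatTensor([[0.9, 0.1, 0.8, 0.2]])  # one sample, already flattened
    trues = torch.FloatTensor([[1.0, 0.0, 1.0, 0.0]])
    # intersection = 1.7, so dice = (2*1.7 + 1) / (2.0 + 2.0 + 1) = 0.88
    print(dice(preds, trues))
    print(dice_round(preds, trues))  # rounding preds gives an exact match -> 1.0
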
/albu-solution/src/pytorch_zoo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/albu-solution/src/pytorch_zoo/__init__.py
--------------------------------------------------------------------------------
/albu-solution/src/pytorch_zoo/abstract_model.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | import torch.nn as nn
4 | import torch.utils.model_zoo as model_zoo
5 | from pytorch_zoo import resnet
6 |
7 |
8 | encoder_params = {
9 | 'resnet34':
10 | {'filters': [64, 64, 128, 256, 512],
11 | 'init_op': resnet.resnet34,
12 | 'url': resnet.model_urls['resnet34']}
13 | }
14 |
15 | class ConvBottleneck(nn.Module):
16 | def __init__(self, in_channels, out_channels):
17 | super().__init__()
18 | self.seq = nn.Sequential(
19 | nn.Conv2d(in_channels, out_channels, 3, padding=1),
20 | nn.ReLU(inplace=True)
21 | )
22 |
23 | def forward(self, dec, enc):
24 | x = torch.cat([dec, enc], dim=1)
25 | return self.seq(x)
26 |
27 | class PlusBottleneck(nn.Module):
28 | def __init__(self, in_channels, out_channels):
29 | super().__init__()
30 |
31 | def forward(self, dec, enc):
32 | return enc + dec
33 |
34 |
35 | class UnetDecoderBlock(nn.Module):
36 | def __init__(self, in_channels, out_channels):
37 | super().__init__()
38 | self.layer = nn.Sequential(
39 | nn.Upsample(scale_factor=2),
40 | nn.Conv2d(in_channels, out_channels, 3, padding=1),
41 | nn.ReLU(inplace=True)
42 | )
43 |
44 | def forward(self, x):
45 | return self.layer(x)
46 |
47 |
48 | class AbstractModel(nn.Module):
49 | def _initialize_weights(self):
50 | for m in self.modules():
51 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
52 | # Kaiming He normal initialization
53 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
54 | m.weight.data.normal_(0, math.sqrt(2. / n))
55 | if m.bias is not None:
56 | m.bias.data.zero_()
57 | elif isinstance(m, nn.BatchNorm2d):
58 | m.weight.data.fill_(1)
59 | m.bias.data.zero_()
60 |
61 | def initialize_encoder(self, model, model_url):
62 | pretrained_dict = model_zoo.load_url(model_url)
63 | model_dict = model.state_dict()
64 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
65 | model.load_state_dict(pretrained_dict)
66 |
67 | def _get_layers_params(layers):
68 | return sum((list(l.parameters()) for l in layers), [])
69 |
70 | class EncoderDecoder(AbstractModel):
71 | def __init__(self, num_classes, num_channels=3, encoder_name='resnet34'):
72 | super().__init__()
73 | self.filters = encoder_params[encoder_name]['filters']
74 | self.num_channels = num_channels
75 | if not hasattr(self, 'bottleneck_type'):
76 | self.bottleneck_type = ConvBottleneck
77 |
78 | self.bottlenecks = nn.ModuleList([self.bottleneck_type(f * 2, f) for f in reversed(self.filters[:-1])])
79 | self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(1, len(self.filters))])
80 |
81 | self.last_upsample = UnetDecoderBlock(self.filters[0], self.filters[0] // 2)
82 | self.final = self.make_final_classifier(self.filters[0] // 2, num_classes)
83 |
84 | self._initialize_weights()
85 |
86 | encoder = encoder_params[encoder_name]['init_op']()
87 | self.encoder_stages = nn.ModuleList([self.get_encoder(encoder, idx) for idx in range(len(self.filters))])
88 | if num_channels == 3 and encoder_params[encoder_name]['url'] is not None:
89 | self.initialize_encoder(encoder, encoder_params[encoder_name]['url'])
90 |
91 | # noinspection PyCallingNonCallable
92 | def forward(self, x):
93 | # Encoder
94 | enc_results = []
95 | for idx, stage in enumerate(self.encoder_stages):
96 | x = stage(x)
97 | if idx < len(self.encoder_stages) - 1:
98 | enc_results.append(x.clone())
99 |
100 | for idx, bottleneck in enumerate(self.bottlenecks):
101 | rev_idx = - (idx + 1)
102 | x = self.decoder_stages[rev_idx](x)
103 | x = bottleneck(x, enc_results[rev_idx])
104 |
105 | x = self.last_upsample(x)
106 | f = self.final(x)
107 |
108 | return f
109 |
110 | def get_decoder(self, layer):
111 | return UnetDecoderBlock(self.filters[layer], self.filters[max(layer - 1, 0)])
112 |
113 | def make_final_classifier(self, in_filters, num_classes):
114 | return nn.Sequential(
115 | nn.Conv2d(in_filters, num_classes, 3, padding=1)
116 | )
117 |
118 | def get_encoder(self, encoder, layer):
119 | raise NotImplementedError
120 |
121 | @property
122 | def first_layer_params_names(self):
123 | raise NotImplementedError
124 |
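
Note on the decoder wiring above: the forward pass stashes every encoder output
except the deepest in enc_results, then walks the decoder stages in reverse; each
UnetDecoderBlock doubles the spatial resolution, and the bottleneck fuses the
result with the matching encoder feature map. ConvBottleneck concatenates the two
tensors (hence the f * 2 input channels on line 78) and mixes them with a 3x3
convolution, while PlusBottleneck simply adds them, trading capacity for memory.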
--------------------------------------------------------------------------------
/albu-solution/src/pytorch_zoo/unet.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from pytorch_zoo.abstract_model import EncoderDecoder
3 |
4 | class Resnet(EncoderDecoder):
5 | def __init__(self, num_classes, num_channels, encoder_name):
6 | super().__init__(num_classes, num_channels, encoder_name)
7 |
8 | def get_encoder(self, encoder, layer):
9 | if layer == 0:
10 | return nn.Sequential(
11 | encoder.conv1,
12 | encoder.bn1,
13 | encoder.relu)
14 | elif layer == 1:
15 | return nn.Sequential(
16 | encoder.maxpool,
17 | encoder.layer1)
18 | elif layer == 2:
19 | return encoder.layer2
20 | elif layer == 3:
21 | return encoder.layer3
22 | elif layer == 4:
23 | return encoder.layer4
24 |
25 |
26 | class Resnet34_upsample(Resnet):
27 | def __init__(self, num_classes, num_channels=3):
28 | super().__init__(num_classes, num_channels, encoder_name='resnet34')
29 |
30 |
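
A minimal smoke test (not from the repo; assumes the resnet34 weights URL is
reachable through model_zoo and a PyTorch version that provides torch.no_grad()):

    import torch
    from pytorch_zoo.unet import Resnet34_upsample

    model = Resnet34_upsample(num_classes=1).eval()  # resnet34 encoder + U-Net decoder
    x = torch.randn(1, 3, 512, 512)                  # one fake 512x512 RGB tile
    with torch.no_grad():
        out = model(x)
    print(out.shape)  # torch.Size([1, 1, 512, 512]): full-resolution mask logits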
--------------------------------------------------------------------------------
/albu-solution/src/resnet34_512_02_02.json:
--------------------------------------------------------------------------------
1 | {
2 | "batch_size": 11,
3 | "iter_size": 1,
4 | "dataset_path": "/wdata",
5 | "folder": "2m_4fold_512_30e_d0.2_g0.2",
6 | "lr": 0.0001,
7 | "lr_steps": [20, 25],
8 | "lr_gamma": 0.2,
9 | "nb_epoch": 30,
10 | "num_channels": 3,
11 | "test_pad": 64,
12 | "epoch_size": 8,
13 | "predict_batch_size": 4,
14 | "results_dir": "/results",
15 | "target_cols": 512,
16 | "target_rows": 512,
17 | "network": "resnet34",
18 | "loss": {"dice": 0.2, "bce": 0.8},
19 | "optimizer": "adam",
20 | "num_classes": 1,
21 | "ignore_target_size": false,
22 | "warmup": 0
23 | }
24 |
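
The "loss" entry weights a composite objective: 0.2 soft-dice plus 0.8 binary
cross-entropy. The actual wiring lives in pytorch_utils (train.py and loss.py),
so treat the following as a plausible sketch rather than the repo's verbatim code:

    import torch
    import torch.nn.functional as F
    from pytorch_utils.loss import dice

    def combined_loss(logits, targets, w_dice=0.2, w_bce=0.8):
        probs = torch.sigmoid(logits)
        dice_loss = 1 - dice(probs, targets)  # soft dice turned into a loss
        bce = F.binary_cross_entropy_with_logits(logits, targets.float())
        return w_dice * dice_loss + w_bce * bce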
--------------------------------------------------------------------------------
/albu-solution/src/run_training.sh:
--------------------------------------------------------------------------------
1 | tmux new-session -d -s albu_train './train_4folds.sh'
2 | tmux attach
3 |
--------------------------------------------------------------------------------
/albu-solution/src/train.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=$1 python train_eval.py resnet34_512_02_02.json --fold=$1 --training
2 |
--------------------------------------------------------------------------------
/albu-solution/src/train_4folds.sh:
--------------------------------------------------------------------------------
1 | tmux set-option remain-on-exit on
2 | tmux split-window -t 0 -p 50 './train.sh 1'
3 |
4 | sleep 20
5 | tmux split-window -t 0 -p 50 './train.sh 2'
6 |
7 | sleep 20
8 | tmux split-window -t 1 -p 50 './train.sh 3'
9 |
10 | sleep 20
11 | ./train.sh 0
12 |
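
Read together with train.sh, this fans the four folds out across four GPUs: each
tmux pane runs ./train.sh N, which exports CUDA_VISIBLE_DEVICES=N and trains only
fold N, while fold 0 runs in the original pane; the sleep 20 calls presumably
just stagger startup so the runs do not contend while initializing.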
--------------------------------------------------------------------------------
/albu-solution/src/train_eval.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import cv2
3 | cv2.setNumThreads(0)
4 | cv2.ocl.setUseOpenCL(False)
5 | import os
6 |
7 | from augmentations.transforms import get_flips_colors_augmentation
8 |
9 | from dataset.reading_image_provider import ReadingImageProvider
10 | from dataset.raw_image import RawImageType
11 | from pytorch_utils.train import train
12 | from pytorch_utils.concrete_eval import FullImageEvaluator
13 | from utils import update_config, get_csv_folds
14 | import argparse
15 | import json
16 | from config import Config
17 |
18 | parser = argparse.ArgumentParser()
19 | parser.add_argument('config_path')
20 | parser.add_argument('--fold', type=int)
21 | parser.add_argument('--training', action='store_true')
22 | args = parser.parse_args()
23 | with open(args.config_path, 'r') as f:
24 | cfg = json.load(f)
25 | config = Config(**cfg)
26 | skip_folds = []
27 |
28 | if args.fold is not None:
29 | skip_folds = [i for i in range(4) if i != int(args.fold)]
30 |
31 | test = not args.training
32 | config = update_config(config, dataset_path=os.path.join(config.dataset_path, 'test' if test else 'train'))
33 |
34 | paths = {
35 | 'masks': os.path.join(config.dataset_path, 'masks2m'),
36 | 'images': os.path.join(config.dataset_path, 'images')
37 | }
38 |
39 | fn_mapping = {
40 | 'masks': lambda name: os.path.splitext(name)[0] + '.png'
41 | }
42 |
43 | image_suffix = 'img'
44 |
45 | def train_roads():
46 | ds = ReadingImageProvider(RawImageType, paths, fn_mapping, image_suffix=image_suffix)
47 |
48 | folds = get_csv_folds('folds4.csv', ds.im_names)
49 | num_workers = 0 if os.name == 'nt' else 2
50 | for fold, (train_idx, val_idx) in enumerate(folds):
51 | if args.fold is not None and int(args.fold) != fold:
52 | continue
53 | train(ds, fold, train_idx, val_idx, config, num_workers=num_workers, transforms=get_flips_colors_augmentation())
54 |
55 | class RawImageTypePad(RawImageType):
56 | def finalyze(self, data):  # [sic] name matches the identically spelled hook in the dataset code
57 | return self.reflect_border(data, 22)
58 |
59 |
60 | def eval_roads():
61 | global config
62 | rows, cols = 1344, 1344
63 | config = update_config(config, target_rows=rows, target_cols=cols)
64 | ds = ReadingImageProvider(RawImageTypePad, paths, fn_mapping, image_suffix=image_suffix)
65 |
66 | folds = [([], list(range(len(ds)))) for i in range(4)]
67 | num_workers = 0 if os.name == 'nt' else 2
68 | keval = FullImageEvaluator(config, ds, test=test, flips=3, num_workers=num_workers, border=22)
69 | for fold, (t, e) in enumerate(folds):
70 | if args.fold is not None and int(args.fold) != fold:
71 | continue
72 | keval.predict(fold, e)
73 |
74 |
75 | if __name__ == "__main__":
76 | if test:
77 | eval_roads()
78 | else:
79 | train_roads()
80 |
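
Usage mirrors train.sh and test.sh: pass the JSON config with --training (and
optionally --fold=N) to train a single fold, or omit --training to predict; in
that case eval_roads() raises the working resolution to 1344x1344 and
RawImageTypePad reflects a 22-pixel border onto each tile, matching the
border=22 passed to FullImageEvaluator so predictions can be cropped back
cleanly.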
--------------------------------------------------------------------------------
/albu-solution/src/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from config import Config
3 | import pandas as pd
4 |
5 |
6 | def get_csv_folds(path, d):
7 | df = pd.read_csv(path, index_col=0)
8 | m = df.max()[0] + 1
9 | train = [[] for i in range(m)]
10 | test = [[] for i in range(m)]
11 |
12 | folds = {}
13 | for i in range(m):
14 | fold_ids = list(df[df['fold'].isin([i])].index)
15 | folds.update({i: [n for n, l in enumerate(d) if l in fold_ids]})
16 |
17 | for k, v in folds.items():
18 | for i in range(m):
19 | if i != k:
20 | train[i].extend(v)
21 | test[k] = v
22 |
23 | return list(zip(np.array(train), np.array(test)))
24 |
25 | def update_config(config, **kwargs):
26 | d = config._asdict()
27 | d.update(**kwargs)
28 | print(d)
29 | return Config(**d)
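
A round-trip sketch of get_csv_folds (invented names and CSV; run with a pandas
of the era, since df.max()[0] relies on positional Series indexing):

    import pandas as pd
    from utils import get_csv_folds

    pd.DataFrame({'name': ['img_a', 'img_b', 'img_c', 'img_d'],
                  'fold': [0, 0, 1, 1]}).to_csv('/tmp/folds_demo.csv', index=False)
    folds = get_csv_folds('/tmp/folds_demo.csv', ['img_a', 'img_b', 'img_c', 'img_d'])
    for k, (train_idx, val_idx) in enumerate(folds):
        print(k, train_idx, val_idx)
    # fold 0 trains on positions [2, 3] and validates on [0, 1]; fold 1 the reverse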
--------------------------------------------------------------------------------
/albu-solution/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 | arglen=$(($#-1))
4 | args=${@:1:$arglen}
5 | last=${!#}
6 |
7 | rm -rf /wdata/*
8 | pushd /opt/app/src
9 | python create_spacenet_masks.py $args
10 | python train_eval.py resnet34_512_02_02.json
11 | python merge_preds.py
12 | python skeleton.py $last
13 | popd
14 |
--------------------------------------------------------------------------------
/albu-solution/train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | rm -rf /results/*
5 | rm -rf /wdata/*
6 | pushd /opt/app/src
7 | python create_spacenet_masks.py $1 $2 $3 $4 --training
8 | ./run_training.sh
9 | popd
10 |
--------------------------------------------------------------------------------
/albu-solution/train_single.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | rm -rf /results/*
5 | rm -rf /wdata/*
6 | pushd /opt/app/src
7 | python create_spacenet_masks.py $1 $2 $3 $4 --training
8 | CUDA_VISIBLE_DEVICES=0 python train_eval.py resnet34_512_02_02.json --training
9 | popd
10 |
--------------------------------------------------------------------------------
/albu-solution/weights/downloadModel.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | mkdir 2m_4fold_512_30e_d0.2_g0.2
4 | aws s3 sync s3://spacenet-dataset/SpaceNet_Roads_Competition/Pretrained_Models/01-Albu/weights/ 2m_4fold_512_30e_d0.2_g0.2
--------------------------------------------------------------------------------
/cannab-solution/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:9.0-cudnn7-runtime-ubuntu16.04
2 |
3 | # Use a fixed apt-get repo to stop intermittent failures due to flaky httpredir connections,
4 | # as described by Lionel Chan at http://stackoverflow.com/a/37426929/5881346
5 | RUN sed -i "s/httpredir.debian.org/debian.uchicago.edu/" /etc/apt/sources.list && \
6 | apt-get update && apt-get install -y build-essential
7 |
8 | RUN apt-get update --fix-missing && apt-get install -y wget bzip2 ca-certificates \
9 | libglib2.0-0 libxext6 libsm6 libxrender1 \
10 | git mercurial subversion zip unzip
11 |
12 | # install Anaconda3
13 | ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
14 |
15 | RUN echo 'export PATH=/opt/conda/bin:$PATH' > /etc/profile.d/conda.sh && \
16 | wget --quiet https://repo.continuum.io/archive/Anaconda3-5.0.1-Linux-x86_64.sh -O ~/anaconda.sh && \
17 | /bin/bash ~/anaconda.sh -b -p /opt/conda && \
18 | rm ~/anaconda.sh
19 |
20 | RUN apt-get install -y curl grep sed dpkg && \
21 | TINI_VERSION=`curl https://github.com/krallin/tini/releases/latest | grep -o "/v.*\"" | sed 's:^..\(.*\).$:\1:'` && \
22 | curl -L "https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini_${TINI_VERSION}.deb" > tini.deb && \
23 | dpkg -i tini.deb && \
24 | rm tini.deb && \
25 | apt-get clean
26 |
27 | ENV PATH /opt/conda/bin:$PATH
28 |
29 | RUN apt-get update && apt-get install -y libglu1
30 |
31 | RUN conda install opencv
32 |
33 | RUN conda install tqdm
34 |
35 | RUN conda install shapely
36 |
37 | RUN conda update --all
38 |
39 | RUN pip install --upgrade tensorflow-gpu
40 |
41 | RUN pip install --upgrade keras
42 |
43 | WORKDIR /work
44 |
45 | # copy the entire build context (the directory containing this Dockerfile) into the container at /work
46 | COPY . /work/
47 |
48 | RUN chmod 777 train.sh
49 | RUN chmod 777 test.sh
50 | RUN chmod 777 download_models.sh
51 |
52 | ENTRYPOINT [ "/usr/bin/tini", "--" ]
53 | CMD [ "/work/download_models.sh" ]
--------------------------------------------------------------------------------
/cannab-solution/create_masks.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from os import path, mkdir
3 | import numpy as np
4 | np.random.seed(1)
5 | import random
6 | random.seed(1)
7 | import pandas as pd
8 | import timeit
9 | import cv2
10 | from tqdm import tqdm
11 | import sys
12 | from shapely.wkt import loads
13 |
14 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
15 |
16 | if __name__ == '__main__':
17 | t0 = timeit.default_timer()
18 |
19 | masks_folder = path.join('/wdata', sys.argv[1])
20 | sz = int(sys.argv[2])
21 | thickness = int(sys.argv[3])
22 | radius = int(0.85 * thickness)
23 | ratio = 1300.0 / sz
24 |
25 | train_folders = []
26 | for i in range(4, len(sys.argv)):
27 | train_folders.append(sys.argv[i])
28 |
29 | if not path.isdir(masks_folder):
30 | mkdir(masks_folder)
31 |
32 | for dd in train_folders:
33 | df = pd.read_csv(path.join(dd, 'summaryData', '{0}.csv'.format(dd.split('/')[-1])))
34 | city = df['ImageId'].values[0].split('_')[2]
35 | print('creating masks for', city)
36 | if not path.isdir(path.join(masks_folder, city)):
37 | mkdir(path.join(masks_folder, city))
38 | for img_id in tqdm(df['ImageId'].unique()):
39 | lines = [loads(s) for s in df[df['ImageId'] == img_id]['WKT_Pix']]
40 | img = np.zeros((sz, sz), np.uint8)
41 | img2 = np.zeros((sz, sz), np.uint8)
42 | img3 = np.zeros((sz, sz), np.uint8)
43 |
44 | d = {}
45 |
46 | for l in lines:
47 | if len(l.coords) == 0:
48 | continue
49 | x, y = l.coords.xy
50 | for i in range(len(x)):
51 | x[i] /= ratio
52 | y[i] /= ratio
53 |
54 | x_int = int(round(x[0] * 10))
55 | y_int = int(round(y[0] * 10))
56 | h = x_int * 100000 + y_int
57 | if not (h in d.keys()):
58 | d[h] = 0
59 | d[h] = d[h] + 1
60 |
61 | for i in range(len(x) - 1):
62 | x_int = int(round(x[i+1] * 10))
63 | y_int = int(round(y[i+1] * 10))
64 | h = x_int * 100000 + y_int
65 | if not (h in d.keys()):
66 | d[h] = 0
67 | if i == len(x) - 2:
68 | d[h] = d[h] + 1
69 | else:
70 | d[h] = d[h] + 2
71 | cv2.line(img, (int(x[i]), int(y[i])), (int(x[i+1]), int(y[i+1])), 255, thickness)
72 | for h in d.keys():
73 | if d[h] > 2:
74 | x_int = int(h / 100000)
75 | y_int = h - x_int * 100000
76 | x_int = int(x_int / 10)
77 | y_int = int(y_int / 10)
78 | cv2.circle(img2, (x_int, y_int), radius, 255, -1)
79 | img = img[..., np.newaxis]
80 | img2 = img2[..., np.newaxis]
81 | img3 = img3[..., np.newaxis]
82 | img = np.concatenate([img, img2, img3], axis=2)
83 | cv2.imwrite(path.join(masks_folder, city, '{0}{1}'.format(img_id, '.png')), img, [cv2.IMWRITE_PNG_COMPRESSION, 9])
84 |
85 | elapsed = timeit.default_timer() - t0
86 | print('Time: {:.3f} min'.format(elapsed / 60))
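
The junction logic above estimates vertex degree by packing each endpoint into a
single integer: coordinates are scaled by 10 (0.1-pixel precision) and hashed as
x_int * 100000 + y_int; any vertex counted more than twice is an intersection and
gets a filled circle in the mask's second channel. A standalone sketch of that
packing (hypothetical coordinates; the script itself truncates back to whole
pixels before drawing):

    def encode(x, y):
        # keep 0.1-pixel precision, then fold both axes into one int
        return int(round(x * 10)) * 100000 + int(round(y * 10))

    def decode(h):
        x_int, y_int = divmod(h, 100000)
        return x_int / 10.0, y_int / 10.0

    h = encode(123.4, 56.7)
    print(h, decode(h))  # 123400567 (123.4, 56.7)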
--------------------------------------------------------------------------------
/cannab-solution/docker-build.sh:
--------------------------------------------------------------------------------
1 | nvidia-docker build -t cannab .
2 |
--------------------------------------------------------------------------------
/cannab-solution/docker-remove.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | docker images -q --filter "dangling=true" | xargs docker rmi
4 |
--------------------------------------------------------------------------------
/cannab-solution/docker-run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | nvidia-docker run -v /local_data/SpaceNet_Roads_Dataset:/data:ro -v /local_data/SpaceNet_Roads_Dataset/results/cannab:/wdata --rm -ti --ipc=host cannab
4 |
--------------------------------------------------------------------------------
/cannab-solution/docker-stop.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | docker stop $(docker ps -a -q)
4 | docker rm $(docker ps -a -q)
--------------------------------------------------------------------------------
/cannab-solution/docker_cannab.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/cannab-solution/docker_cannab.zip
--------------------------------------------------------------------------------
/cannab-solution/download_models.sh:
--------------------------------------------------------------------------------
1 | wget https://s3.us-east-2.amazonaws.com/vdurnov/nn_models.zip -O /wdata/nn_models.zip
2 | unzip -o /wdata/nn_models.zip -d /wdata
3 | /bin/bash
--------------------------------------------------------------------------------
/cannab-solution/predict_inception_520.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_inception_resnet_v2_unet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (544, 544)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'inception_520'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [3]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0, 1]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'inc_v2_520_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_inception_resnet_v2_unet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'inc_v2_520_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | img = cv2.resize(img, (520, 520))
86 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
87 | pan = cv2.resize(pan, (520, 520))
88 | pan = pan[..., np.newaxis]
89 | img = np.concatenate([img, pan], axis=2)
90 | img = cv2.copyMakeBorder(img, 12, 12, 12, 12, cv2.BORDER_REFLECT_101)
91 | inp = []
92 | inp.append(img)
93 | inp.append(np.rot90(img, k=1))
94 | inp = np.asarray(inp)
95 | inp = preprocess_inputs_std(inp, cid)
96 | pred = models[cid].predict(inp)
97 | mask = pred[0] + np.rot90(pred[1], k=3)
98 | mask /= 2
99 | mask = mask[12:532, 12:532, ...]
100 | mask = mask * 255
101 | mask = mask.astype('uint8')
102 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
103 |
104 | elapsed = timeit.default_timer() - t0
105 | print('Time: {:.3f} min'.format(elapsed / 60))
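
All of the predict_* scripts in this solution share two idioms:
preprocess_inputs_std standardizes each of the 9 bands with per-city statistics
while forcing nodata (all-zero) pixels back to zero, and inference averages the
prediction for a tile with the de-rotated prediction of its 90-degree rotation.
A self-contained sketch of that test-time augmentation (dummy model, invented
shapes):

    import numpy as np

    def predict_tta(model_predict, img):
        batch = np.asarray([img, np.rot90(img, k=1)])
        pred = model_predict(batch)
        # rotate the second prediction back before averaging
        return (pred[0] + np.rot90(pred[1], k=3)) / 2

    dummy_model = lambda b: b.mean(axis=-1, keepdims=True)
    mask = predict_tta(dummy_model, np.random.rand(544, 544, 9).astype('float32'))
    print(mask.shape)  # (544, 544, 1)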
--------------------------------------------------------------------------------
/cannab-solution/predict_inception_small.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_inception_resnet_v2_unet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (672, 672)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'inception_small'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [1]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'inception_small_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_inception_resnet_v2_unet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'inception_small_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | img = cv2.resize(img, (650, 650))
86 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
87 | pan = cv2.resize(pan, (650, 650))
88 | pan = pan[..., np.newaxis]
89 | img = np.concatenate([img, pan], axis=2)
90 | img = cv2.copyMakeBorder(img, 11, 11, 11, 11, cv2.BORDER_REFLECT_101)
91 | inp = []
92 | inp.append(img)
93 | inp.append(np.rot90(img, k=1))
94 | inp = np.asarray(inp)
95 | inp = preprocess_inputs_std(inp, cid)
96 | pred = models[cid].predict(inp)
97 | mask = pred[0] + np.rot90(pred[1], k=3)
98 | mask /= 2
99 | mask = mask[11:661, 11:661, ...]
100 | mask = mask * 255
101 | mask = mask.astype('uint8')
102 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
103 |
104 | elapsed = timeit.default_timer() - t0
105 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_inception_smallest.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_inception_resnet_v2_unet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (352, 352)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'inception_smallest'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [2]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0, 1]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'inception_smallest_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_inception_resnet_v2_unet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'inception_smallest_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
86 | pan = cv2.resize(pan, (325, 325))
87 | pan = pan[..., np.newaxis]
88 | img = np.concatenate([img, pan], axis=2)
89 | img = cv2.copyMakeBorder(img, 13, 14, 13, 14, cv2.BORDER_REFLECT_101)
90 | inp = []
91 | inp.append(img)
92 | inp.append(np.rot90(img, k=1))
93 | inp = np.asarray(inp)
94 | inp = preprocess_inputs_std(inp, cid)
95 | pred = models[cid].predict(inp)
96 | mask = pred[0] + np.rot90(pred[1], k=3)
97 | mask /= 2
98 | mask = mask[13:338, 13:338, ...]
99 | mask = mask * 255
100 | mask = mask.astype('uint8')
101 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
102 |
103 | elapsed = timeit.default_timer() - t0
104 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_inception_v3_520.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_inception_v3_unet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (544, 544)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'inception_v3_520'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [3]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0, 1]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'inception3_520_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_inception_v3_unet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'inception3_520_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | img = cv2.resize(img, (520, 520))
86 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
87 | pan = cv2.resize(pan, (520, 520))
88 | pan = pan[..., np.newaxis]
89 | img = np.concatenate([img, pan], axis=2)
90 | img = cv2.copyMakeBorder(img, 12, 12, 12, 12, cv2.BORDER_REFLECT_101)
91 | inp = []
92 | inp.append(img)
93 | inp.append(np.rot90(img, k=1))
94 | inp = np.asarray(inp)
95 | inp = preprocess_inputs_std(inp, cid)
96 | pred = models[cid].predict(inp)
97 | mask = pred[0] + np.rot90(pred[1], k=3)
98 | mask /= 2
99 | mask = mask[12:532, 12:532, ...]
100 | mask = mask * 255
101 | mask = mask.astype('uint8')
102 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
103 |
104 | elapsed = timeit.default_timer() - t0
105 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_linknet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from linknet import get_resnet50_linknet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (1344, 1344)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'linknet_big'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [1, 2]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0, 1]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'linknet_big_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_resnet50_linknet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'linknet_big_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL-PanSharpen')))):
76 | if path.isfile(path.join(d, 'MUL-PanSharpen', f)) and '.tif' in f:
77 | img_id = f.split('PanSharpen_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL-PanSharpen', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
86 | pan = pan[..., np.newaxis]
87 | img = np.concatenate([img, pan], axis=2)
88 | img = cv2.copyMakeBorder(img, 22, 22, 22, 22, cv2.BORDER_REFLECT_101)
89 | inp = []
90 | inp.append(img)
91 | inp.append(np.rot90(img, k=1))
92 | inp = np.asarray(inp)
93 | inp = preprocess_inputs_std(inp, cid)
94 | pred = models[cid].predict(inp)
95 | mask = pred[0] + np.rot90(pred[1], k=3)
96 | mask /= 2
97 | mask = mask[22:1322, 22:1322, ...]
98 | mask = mask * 255
99 | mask = mask.astype('uint8')
100 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
101 |
102 | elapsed = timeit.default_timer() - t0
103 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_linknet_520.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from linknet import get_resnet50_linknet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (544, 544)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'linknet_520'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = []
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0, 1]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'linknet_520_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_resnet50_linknet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'linknet_520_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | img = cv2.resize(img, (520, 520))
86 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
87 | pan = cv2.resize(pan, (520, 520))
88 | pan = pan[..., np.newaxis]
89 | img = np.concatenate([img, pan], axis=2)
90 | img = cv2.copyMakeBorder(img, 12, 12, 12, 12, cv2.BORDER_REFLECT_101)
91 | inp = []
92 | inp.append(img)
93 | inp.append(np.rot90(img, k=1))
94 | inp = np.asarray(inp)
95 | inp = preprocess_inputs_std(inp, cid)
96 | pred = models[cid].predict(inp)
97 | mask = pred[0] + np.rot90(pred[1], k=3)
98 | mask /= 2
99 | mask = mask[12:532, 12:532, ...]
100 | mask = mask * 255
101 | mask = mask.astype('uint8')
102 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
103 |
104 | elapsed = timeit.default_timer() - t0
105 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_linknet_small.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from linknet import get_vgg_linknet_small
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (672, 672)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'linknet_small'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = []
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'linknet_small_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_vgg_linknet_small(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'linknet_small_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | img = cv2.resize(img, (650, 650))
86 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
87 | pan = cv2.resize(pan, (650, 650))
88 | pan = pan[..., np.newaxis]
89 | img = np.concatenate([img, pan], axis=2)
90 | img = cv2.copyMakeBorder(img, 11, 11, 11, 11, cv2.BORDER_REFLECT_101)
91 | inp = []
92 | inp.append(img)
93 | inp.append(np.rot90(img, k=1))
94 | inp = np.asarray(inp)
95 | inp = preprocess_inputs_std(inp, cid)
96 | pred = models[cid].predict(inp)
97 | mask = pred[0] + np.rot90(pred[1], k=3)
98 | mask /= 2
99 | mask = mask[11:661, 11:661, ...]
100 | mask = mask * 255
101 | mask = mask.astype('uint8')
102 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
103 |
104 | elapsed = timeit.default_timer() - t0
105 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_resnet_small.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_resnet_unet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (672, 672)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'resnet_small'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [0, 1]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'resnet_linknet_small_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_resnet_unet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'resnet_linknet_small_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | img = cv2.resize(img, (650, 650))
86 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
87 | pan = cv2.resize(pan, (650, 650))
88 | pan = pan[..., np.newaxis]
89 | img = np.concatenate([img, pan], axis=2)
90 | img = cv2.copyMakeBorder(img, 11, 11, 11, 11, cv2.BORDER_REFLECT_101)
91 | inp = []
92 | inp.append(img)
93 | inp.append(np.rot90(img, k=1))
94 | inp = np.asarray(inp)
95 | inp = preprocess_inputs_std(inp, cid)
96 | pred = models[cid].predict(inp)
97 | mask = pred[0] + np.rot90(pred[1], k=3)
98 | mask /= 2
99 | mask = mask[11:661, 11:661, ...]
100 | mask = mask * 255
101 | mask = mask.astype('uint8')
102 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
103 |
104 | elapsed = timeit.default_timer() - t0
105 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_resnet_smallest.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_resnet_unet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (352, 352)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'resnet_smallest'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [0, 3]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0, 1]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'resnet_smallest_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
68 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
69 | model = get_resnet_unet(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'resnet_smallest_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
86 | pan = cv2.resize(pan, (325, 325))
87 | pan = pan[..., np.newaxis]
88 | img = np.concatenate([img, pan], axis=2)
89 | img = cv2.copyMakeBorder(img, 13, 14, 13, 14, cv2.BORDER_REFLECT_101)
90 | inp = []
91 | inp.append(img)
92 | inp.append(np.rot90(img, k=1))
93 | inp = np.asarray(inp)
94 | inp = preprocess_inputs_std(inp, cid)
95 | pred = models[cid].predict(inp)
96 | mask = pred[0] + np.rot90(pred[1], k=3)
97 | mask /= 2
98 | mask = mask[13:338, 13:338, ...]
99 | mask = mask * 255
100 | mask = mask.astype('uint8')
101 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
102 |
103 | elapsed = timeit.default_timer() - t0
104 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_vgg.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_vgg_unet
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (1344, 1344)
17 |
18 | def preprocess_inputs(x):
19 | zero_msk = (x == 0)
20 | x = x / 8.0
21 | x -= 127.5
22 | x[zero_msk] = 0
23 | return x
24 |
25 | models_folder = '/wdata/nn_models'
26 | pred_folder = '/wdata/predictions'
27 | model_name = 'vgg_big'
28 |
29 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
30 |
31 | ignored_cities = [0]
32 |
33 | if __name__ == '__main__':
34 | t0 = timeit.default_timer()
35 |
36 | test_folders = []
37 |
38 | for i in range(1, len(sys.argv) - 1):
39 | test_folders.append(sys.argv[i])
40 |
41 | if not path.isdir(pred_folder):
42 | mkdir(pred_folder)
43 |
44 | if not path.isdir(path.join(pred_folder, model_name)):
45 | mkdir(path.join(pred_folder, model_name))
46 |
47 | for it in [0, 1]:
48 | models = []
49 |
50 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
51 | mkdir(path.join(pred_folder, model_name, str(it)))
52 |
53 | for i in range(4):
54 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'vgg_model3_weights2_{0}_{1}.h5'.format(cities[i], it))):
55 | models.append(None)
56 | continue
57 | if not path.isdir(path.join(path.join(pred_folder, model_name, str(it), cities[i]))):
58 | mkdir(path.join(path.join(pred_folder, model_name, str(it), cities[i])))
59 | model = get_vgg_unet(input_shape, weights=None)
60 | model.load_weights(path.join(models_folder, 'vgg_model3_weights2_{0}_{1}.h5'.format(cities[i], it)))
61 | models.append(model)
62 |
63 | print('Predicting fold', it)
64 | for d in test_folders:
65 | for f in tqdm(sorted(listdir(path.join(d, 'MUL-PanSharpen')))):
66 | if path.isfile(path.join(d, 'MUL-PanSharpen', f)) and '.tif' in f:
67 | img_id = f.split('PanSharpen_')[1].split('.')[0]
68 | cinp = np.zeros((4,))
69 | cinp[cities.index(img_id.split('_')[2])] = 1.0
70 | cid = cinp.argmax()
71 | if cid in ignored_cities:
72 | continue
73 | fpath = path.join(d, 'MUL-PanSharpen', f)
74 | img = skimage.io.imread(fpath, plugin='tifffile')
75 | img = cv2.copyMakeBorder(img, 22, 22, 22, 22, cv2.BORDER_REFLECT_101)
76 | inp = []
77 | inp.append(img)
78 | inp.append(np.rot90(img, k=1))
79 | inp = np.asarray(inp)
80 | inp = preprocess_inputs(inp)
81 | inp2 = []
82 | inp2.append(cinp)
83 | inp2.append(cinp)
84 | inp2 = np.asarray(inp2)
85 | pred = models[cid].predict([inp, inp2])
86 | mask = pred[0] + np.rot90(pred[1], k=3)
87 | mask /= 2
88 | mask = mask[22:1322, 22:1322, ...]
89 | mask = mask * 255
90 | mask = mask.astype('uint8')
91 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
92 |
93 | elapsed = timeit.default_timer() - t0
94 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
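
All of the predict_* scripts share the same two-view test-time augmentation: predict on the tile and on its 90-degree rotation, rotate the second prediction back, and average. A minimal numpy sketch of the pattern (illustrative, not code from the repository; model stands for any Keras-style model with a predict method):

    import numpy as np

    def predict_with_rot90_tta(model, img):
        # batch the original tile together with its 90-degree rotation
        batch = np.asarray([img, np.rot90(img, k=1)])
        pred = model.predict(batch)
        # rotate the augmented prediction back before averaging
        return (pred[0] + np.rot90(pred[1], k=3)) / 2
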
/cannab-solution/predict_vgg_small.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_vgg_unet_small
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (672, 672)
17 |
18 | def preprocess_inputs(x):
19 | zero_msk = (x == 0)
20 | x = x / 8.0
21 | x -= 127.5
22 | x[zero_msk] = 0
23 | return x
24 |
25 | models_folder = '/wdata/nn_models'
26 | pred_folder = '/wdata/predictions'
27 | model_name = 'vgg_small'
28 |
29 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
30 |
31 | ignored_cities = [1]
32 |
33 | if __name__ == '__main__':
34 | t0 = timeit.default_timer()
35 |
36 | test_folders = []
37 |
38 | for i in range(1, len(sys.argv) - 1):
39 | test_folders.append(sys.argv[i])
40 |
41 | if not path.isdir(pred_folder):
42 | mkdir(pred_folder)
43 |
44 | if not path.isdir(path.join(pred_folder, model_name)):
45 | mkdir(path.join(pred_folder, model_name))
46 |
47 | for it in [0]:
48 | models = []
49 |
50 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
51 | mkdir(path.join(pred_folder, model_name, str(it)))
52 |
53 | for i in range(4):
54 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'vgg2_small_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
55 | models.append(None)
56 | continue
57 | if not path.isdir(path.join(pred_folder, model_name, str(it), cities[i])):
58 | mkdir(path.join(pred_folder, model_name, str(it), cities[i]))
59 | model = get_vgg_unet_small(input_shape, weights=None)
60 | model.load_weights(path.join(models_folder, 'vgg2_small_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
61 | models.append(model)
62 |
63 | print('Predicting fold', it)
64 | for d in test_folders:
65 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
66 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
67 | img_id = f.split('MUL_')[1].split('.')[0]
68 | cinp = np.zeros((4,))
69 | cinp[cities.index(img_id.split('_')[2])] = 1.0
70 | cid = cinp.argmax()
71 | if cid in ignored_cities:
72 | continue
73 | fpath = path.join(d, 'MUL', f)
74 | img = skimage.io.imread(fpath, plugin='tifffile')
75 | img = cv2.resize(img, (650, 650))
76 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
77 | pan = cv2.resize(pan, (650, 650))
78 | pan = pan[..., np.newaxis]
79 | img = np.concatenate([img, pan], axis=2)
80 | img = cv2.copyMakeBorder(img, 11, 11, 11, 11, cv2.BORDER_REFLECT_101)
81 | inp = []
82 | inp.append(img)
83 | inp.append(np.rot90(img, k=1))
84 | inp = np.asarray(inp)
85 | inp = preprocess_inputs(inp)
86 | pred = models[cid].predict(inp)
87 | mask = pred[0] + np.rot90(pred[1], k=3)
88 | mask /= 2
89 | mask = mask[11:661, 11:661, ...]
90 | mask = mask * 255
91 | mask = mask.astype('uint8')
92 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
93 |
94 | elapsed = timeit.default_timer() - t0
95 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
/cannab-solution/predict_vgg_smallest.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | from os import path, listdir, mkdir
4 | import numpy as np
5 | np.random.seed(1)
6 | import random
7 | random.seed(1)
8 | import tensorflow as tf
9 | tf.set_random_seed(1)
10 | import timeit
11 | import cv2
12 | from models import get_vgg_unet_small
13 | import skimage.io
14 | from tqdm import tqdm
15 |
16 | input_shape = (352, 352)
17 |
18 | means = [[290.42, 446.84, 591.88, 442.45, 424.66, 418.13, 554.13, 354.34, 566.86],
19 | [178.33, 260.14, 287.4, 161.44, 211.46, 198.83, 453.27, 228.99, 242.67],
20 | [357.82, 344.64, 436.76, 452.17, 290.35, 439.7, 440.43, 393.6, 452.5],
21 | [386.98, 415.74, 601.29, 755.34, 527.79, 729.95, 641, 611.41, 697.17]]
22 | stds = [[75.42, 177.98, 288.81, 250.24, 260.55, 220.09, 299.67, 191.47, 285.25],
23 | [16.4, 45.69, 79.42, 61.91, 99.64, 81.17, 210.34, 106.31, 80.89],
24 | [35.23, 58, 89.42, 115.7, 90.45, 109.5, 144.61, 136.77, 99.11],
25 | [37.9, 59.95, 99.56, 131.14, 96.26, 107.79, 98.77, 92.2, 107.9]]
26 | def preprocess_inputs_std(x, city_id):
27 | zero_msk = (x == 0)
28 | x = np.asarray(x, dtype='float32')
29 | for i in range(9):
30 | x[..., i] -= means[city_id][i]
31 | x[..., i] /= stds[city_id][i]
32 | x[zero_msk] = 0
33 | return x
34 |
35 | models_folder = '/wdata/nn_models'
36 | pred_folder = '/wdata/predictions'
37 | model_name = 'vgg_smallest'
38 |
39 | cities = ['Vegas', 'Paris', 'Shanghai', 'Khartoum']
40 |
41 | ignored_cities = [1]
42 |
43 | if __name__ == '__main__':
44 | t0 = timeit.default_timer()
45 |
46 | test_folders = []
47 |
48 | for i in range(1, len(sys.argv) - 1):
49 | test_folders.append(sys.argv[i])
50 |
51 | if not path.isdir(pred_folder):
52 | mkdir(pred_folder)
53 |
54 | if not path.isdir(path.join(pred_folder, model_name)):
55 | mkdir(path.join(pred_folder, model_name))
56 |
57 | for it in [0, 1]:
58 | models = []
59 |
60 | if not path.isdir(path.join(pred_folder, model_name, str(it))):
61 | mkdir(path.join(pred_folder, model_name, str(it)))
62 |
63 | for i in range(4):
64 | if i in ignored_cities or not path.isfile(path.join(models_folder, 'vgg_smallest_model_weights4_{0}_{1}.h5'.format(cities[i], it))):
65 | models.append(None)
66 | continue
67 | if not path.isdir(path.join(pred_folder, model_name, str(it), cities[i])):
68 | mkdir(path.join(pred_folder, model_name, str(it), cities[i]))
69 | model = get_vgg_unet_small(input_shape, weights=None)
70 | model.load_weights(path.join(models_folder, 'vgg_smallest_model_weights4_{0}_{1}.h5'.format(cities[i], it)))
71 | models.append(model)
72 |
73 | print('Predicting fold', it)
74 | for d in test_folders:
75 | for f in tqdm(sorted(listdir(path.join(d, 'MUL')))):
76 | if path.isfile(path.join(d, 'MUL', f)) and '.tif' in f:
77 | img_id = f.split('MUL_')[1].split('.')[0]
78 | cinp = np.zeros((4,))
79 | cinp[cities.index(img_id.split('_')[2])] = 1.0
80 | cid = cinp.argmax()
81 | if cid in ignored_cities:
82 | continue
83 | fpath = path.join(d, 'MUL', f)
84 | img = skimage.io.imread(fpath, plugin='tifffile')
85 | pan = skimage.io.imread(path.join(d, 'PAN', 'PAN_{0}.tif'.format(img_id)), plugin='tifffile')
86 | pan = cv2.resize(pan, (325, 325))
87 | pan = pan[..., np.newaxis]
88 | img = np.concatenate([img, pan], axis=2)
89 | img = cv2.copyMakeBorder(img, 13, 14, 13, 14, cv2.BORDER_REFLECT_101)
90 | inp = []
91 | inp.append(img)
92 | inp.append(np.rot90(img, k=1))
93 | inp = np.asarray(inp)
94 | inp = preprocess_inputs_std(inp, cid)
95 | pred = models[cid].predict(inp)
96 | mask = pred[0] + np.rot90(pred[1], k=3)
97 | mask /= 2
98 | mask = mask[13:338, 13:338, ...]
99 | mask = mask * 255
100 | mask = mask.astype('uint8')
101 | cv2.imwrite(path.join(pred_folder, model_name, str(it), cities[cid], '{0}.png'.format(img_id)), mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
102 |
103 | elapsed = timeit.default_timer() - t0
104 | print('Time: {:.3f} min'.format(elapsed / 60))
--------------------------------------------------------------------------------
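
preprocess_inputs_std above standardizes each of the nine bands with per-city statistics while keeping pixels that read exactly zero (nodata) at zero. The same idea, vectorized with broadcasting, as a sketch (assumes mean and std are length-9 arrays for one city):

    import numpy as np

    def standardize_keep_nodata(x, mean, std):
        zero_msk = (x == 0)                        # remember nodata before shifting
        x = (x.astype('float32') - mean) / std     # broadcasts over the band axis
        x[zero_msk] = 0                            # nodata must stay exactly zero
        return x
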
/cannab-solution/test.sh:
--------------------------------------------------------------------------------
1 | python predict_inception_smallest.py "$@"
2 | python predict_vgg_smallest.py "$@"
3 | python predict_resnet_smallest.py "$@"
4 | python predict_resnet_small.py "$@"
5 | python predict_vgg_small.py "$@"
6 | python predict_linknet_small.py "$@"
7 | python predict_inception_small.py "$@"
8 | python predict_linknet_520.py "$@"
9 | python predict_inception_520.py "$@"
10 | python predict_inception_v3_520.py "$@"
11 | python predict_vgg.py "$@"
12 | python predict_linknet.py "$@"
13 | python create_submission.py "$@"
--------------------------------------------------------------------------------
/cannab-solution/train.sh:
--------------------------------------------------------------------------------
1 | echo "Creating masks..."
2 | nohup python create_masks.py masks_small 650 12 "$@" &
3 | nohup python create_masks.py masks_small_9 650 9 "$@" &
4 | nohup python create_masks.py masks_520 520 6 "$@" &
5 | nohup python create_masks.py masks_520_9 520 9 "$@" &
6 | nohup python create_masks.py masks_smallest 325 4 "$@" &
7 | python create_masks.py masks_22 1300 22 "$@"
8 | wait
9 | echo "Masks created"
10 |
11 | CUDA_VISIBLE_DEVICES="0" python train_resnet_unet_smallest.py "$@" &> /wdata/resnet_unet_smallest.out &
12 | CUDA_VISIBLE_DEVICES="1" python train_inception_unet_smallest.py "$@" &> /wdata/inception_unet_smallest.out &
13 | CUDA_VISIBLE_DEVICES="2" python train_inception3_unet_520.py "$@" &> /wdata/inception3_unet_520.out &
14 | CUDA_VISIBLE_DEVICES="3" python train_vgg.py "$@" | tee /wdata/vgg_pretrain.out
15 | echo "Waiting all GPUs to complete..."
16 | wait
17 |
18 | CUDA_VISIBLE_DEVICES="0" python train_linknet_520.py "$@" &> /wdata/linknet_520.out &
19 | CUDA_VISIBLE_DEVICES="1" python train_inc_v2_unet_520.py "$@" &> /wdata/inc_v2_unet_520.out &
20 | CUDA_VISIBLE_DEVICES="2" python tune_vgg_city.py "$@" &> tee /wdata/vgg_tune.out &
21 | CUDA_VISIBLE_DEVICES="3" python train_linknet_city_big.py "$@" | tee /wdata/linknet_big.out
22 | echo "Waiting all GPUs to complete..."
23 | wait
24 |
25 | CUDA_VISIBLE_DEVICES="0" python train_vgg_unet_smallest.py "$@" &> /wdata/gg_unet_smallest.out &
26 | CUDA_VISIBLE_DEVICES="1" python train_vgg2_city_small.py "$@" &> /wdata/vgg2_city_small.out &
27 | CUDA_VISIBLE_DEVICES="2" python train_linknet_city_small.py "$@" &> /wdata/linknet_city_small.out &
28 | CUDA_VISIBLE_DEVICES="3" python train_inception_city_small.py "$@" | tee /wdata/inception_city_small.out
29 | echo "Waiting all GPUs to complete..."
30 | wait
31 |
32 | CUDA_VISIBLE_DEVICES="0" python train_resnet_linknet_city_small.py "$@" | tee /wdata/resnet_linknet_city_small.out
33 | echo "All NNs trained!"
--------------------------------------------------------------------------------
/cannab-solution/train_fix.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES="0" python train_inception_unet_smallest_fixed.py "$@" &> /wdata/inception_unet_smallest_fixed.out &
2 | CUDA_VISIBLE_DEVICES="1" python tune_vgg_city_fixed.py "$@" | tee /wdata/linknet_big_fixed.out
3 | echo "Waiting all GPUs to complete..."
4 | wait
5 |
6 | echo "All NNs trained!"
--------------------------------------------------------------------------------
/cannab-solution/train_patch_cannab.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/cannab-solution/train_patch_cannab.zip
--------------------------------------------------------------------------------
/fbastani-solution/1_convertgraphs.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "./common"
5 |
6 | "fmt"
7 | "io/ioutil"
8 | "os"
9 | "strconv"
10 | "strings"
11 | )
12 |
13 | func main() {
14 | paths := os.Args[1:]
15 | for _, trainpath := range paths {
16 | if trainpath[len(trainpath) - 1] == '/' {
17 | trainpath = trainpath[:len(trainpath) - 1]
18 | }
19 | parts := strings.Split(trainpath, "/")
20 | d := parts[len(parts) - 1]
21 | dparts := strings.Split(d, "_")
22 | city := fmt.Sprintf("%s_%s_%s", dparts[0], dparts[1], dparts[2])
23 |
24 | graphs := make(map[string]*common.Graph)
25 | vertices := make(map[[2]string]*common.Node)
26 |
27 | bytes, err := ioutil.ReadFile(fmt.Sprintf("%s/summaryData/%s.csv", trainpath, d))
28 | if err != nil {
29 | panic(err)
30 | }
31 | for _, line := range strings.Split(string(bytes), "\n") {
32 | line = strings.TrimSpace(line)
33 | if !strings.Contains(line, city) {
34 | continue
35 | }
36 | parts := strings.SplitN(line, ",", 2)
37 | id := strings.Split(parts[0], "img")[1]
38 |
39 | if graphs[id] == nil {
40 | graphs[id] = &common.Graph{}
41 | }
42 |
43 | if strings.Contains(line, "LINESTRING EMPTY") {
44 | continue
45 | }
46 |
47 | pointsStr := strings.Split(strings.Split(strings.Split(parts[1], "(")[1], ")")[0], ", ")
48 |
49 | for _, pointStr := range pointsStr {
50 | if vertices[[2]string{id, pointStr}] == nil {
51 | pointParts := strings.Split(pointStr, " ")
52 | x, _ := strconv.ParseFloat(pointParts[0], 64)
53 | y, _ := strconv.ParseFloat(pointParts[1], 64)
54 | vertices[[2]string{id, pointStr}] = graphs[id].AddNode(common.Point{x, y})
55 | }
56 | }
57 |
58 | for i := 0; i < len(pointsStr) - 1; i++ {
59 | prev := pointsStr[i]
60 | next := pointsStr[i + 1]
61 | graphs[id].AddBidirectionalEdge(vertices[[2]string{id, prev}], vertices[[2]string{id, next}])
62 | }
63 | }
64 |
65 | for id, graph := range graphs {
66 | if err := graph.Write(fmt.Sprintf("/wdata/spacenet2017/favyen/graphs/%s.%s.%s.graph", d, city, id)); err != nil {
67 | panic(err)
68 | }
69 | }
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
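
1_convertgraphs.go parses each WKT LINESTRING row of summaryData/<aoi>.csv into a per-image graph: unique points become vertices, and consecutive points along a linestring become bidirectional edges. A Python sketch of the same parsing (illustrative, not code from the solution):

    def parse_linestring(wkt):
        # "LINESTRING (0.0 1.5, 2.0 3.0)" -> [(0.0, 1.5), (2.0, 3.0)]
        if 'LINESTRING EMPTY' in wkt:
            return []
        inner = wkt.split('(')[1].split(')')[0]
        return [tuple(float(v) for v in p.split()) for p in inner.split(',')]

    def add_linestring(nodes, edges, points):
        for p in points:
            nodes.setdefault(p, len(nodes))        # one vertex per unique point
        for a, b in zip(points, points[1:]):
            edges.append((nodes[a], nodes[b]))     # stored bidirectionally in the Go code
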
/fbastani-solution/2_truth_tiles.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "./common"
5 |
6 | "image"
7 | "image/color"
8 | "image/png"
9 | "io/ioutil"
10 | "fmt"
11 | "math"
12 | "os"
13 | "strings"
14 | )
15 |
16 | func main() {
17 | fmt.Println("initializing tasks")
18 | type Task struct {
19 | Label string
20 | Graph *common.Graph
21 | }
22 | var tasks []Task
23 |
24 | graphDir := "/wdata/spacenet2017/favyen/graphs/"
25 | files, err := ioutil.ReadDir(graphDir)
26 | if err != nil {
27 | panic(err)
28 | }
29 | for _, file := range files {
30 | if !strings.HasSuffix(file.Name(), ".graph") {
31 | continue
32 | }
33 | graph, err := common.ReadGraph(graphDir + file.Name())
34 | if err != nil {
35 | panic(err)
36 | }
37 | tasks = append(tasks, Task{
38 | Label: strings.Split(file.Name(), ".graph")[0],
39 | Graph: graph,
40 | })
41 | }
42 |
43 | processTask := func(task Task, threadID int) {
44 | values := make([][]uint8, 650)
45 | for i := range values {
46 | values[i] = make([]uint8, 650)
47 | }
48 | for _, edge := range task.Graph.Edges {
49 | segment := edge.Segment()
50 | for _, pos := range common.DrawLineOnCells(int(segment.Start.X)/2, int(segment.Start.Y)/2, int(segment.End.X)/2, int(segment.End.Y)/2, 650, 650) {
51 | for i := -4; i <= 4; i++ {
52 | for j := -4; j <= 4; j++ {
53 | d := math.Sqrt(float64(i * i + j * j))
54 | if d > 4 {
55 | continue
56 | }
57 | x := pos[0] + i
58 | y := pos[1] + j
59 | if x >= 0 && x < 650 && y >= 0 && y < 650 {
60 | values[x][y] = 255
61 | }
62 | }
63 | }
64 | }
65 | }
66 |
67 | img := image.NewGray(image.Rect(0, 0, 650, 650))
68 | for i := 0; i < 650; i++ {
69 | for j := 0; j < 650; j++ {
70 | img.SetGray(i, j, color.Gray{values[i][j]})
71 | }
72 | }
73 |
74 | f, err := os.Create(fmt.Sprintf("/wdata/spacenet2017/favyen/truth/%s.png", task.Label))
75 | if err != nil {
76 | panic(err)
77 | }
78 | if err := png.Encode(f, img); err != nil {
79 | panic(err)
80 | }
81 | f.Close()
82 | }
83 |
84 | fmt.Println("launching workers")
85 | n := 8
86 | taskCh := make(chan Task)
87 | doneCh := make(chan bool)
88 | for threadID := 0; threadID < n; threadID++ {
89 | go func(threadID int) {
90 | for task := range taskCh {
91 | processTask(task, threadID)
92 | }
93 | doneCh <- true
94 | }(threadID)
95 | }
96 | fmt.Println("running tasks")
97 | for i, task := range tasks {
98 | if i % 10 == 0 {
99 | fmt.Printf("... task progress: %d/%d\n", i, len(tasks))
100 | }
101 | taskCh <- task
102 | }
103 | close(taskCh)
104 | for threadID := 0; threadID < n; threadID++ {
105 | <- doneCh
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
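
2_truth_tiles.go turns each ground-truth graph into a 650x650 training mask: edge segments are rasterized at half resolution, and a disc of radius 4 is stamped around every line cell. A Python sketch of that rasterization (assumes a draw_line_on_cells helper like the one sketched after bresenham.go below):

    import numpy as np

    def rasterize_edges(edges, size=650, radius=4):
        mask = np.zeros((size, size), dtype=np.uint8)
        for (x0, y0), (x1, y1) in edges:
            # graph coordinates are at 2x resolution, hence the halving
            for cx, cy in draw_line_on_cells(x0 // 2, y0 // 2, x1 // 2, y1 // 2, size, size):
                for i in range(-radius, radius + 1):
                    for j in range(-radius, radius + 1):
                        if i * i + j * j <= radius * radius:
                            x, y = cx + i, cy + j
                            if 0 <= x < size and 0 <= y < size:
                                mask[x, y] = 255
        return mask
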
/fbastani-solution/Dockerfile:
--------------------------------------------------------------------------------
1 | # GPU
2 | FROM tensorflow/tensorflow:latest-gpu
3 | WORKDIR /app
4 | ADD . /app
5 | ENTRYPOINT /bin/bash
6 |
--------------------------------------------------------------------------------
/fbastani-solution/common/bresenham.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | // Use Bresenham's algorithm to get indices of cells to draw a line.
4 | func DrawLineOnCells(startX int, startY int, endX int, endY int, maxX int, maxY int) [][2]int {
5 | abs := func(x int) int {
6 | if x >= 0 {
7 | return x
8 | } else {
9 | return -x
10 | }
11 | }
12 |
13 | // followX indicates whether to move along x or y coordinates
14 | followX := abs(endY - startY) <= abs(endX - startX)
15 | var x0, x1, y0, y1 int
16 | if followX {
17 | x0 = startX
18 | x1 = endX
19 | y0 = startY
20 | y1 = endY
21 | } else {
22 | x0 = startY
23 | x1 = endY
24 | y0 = startX
25 | y1 = endX
26 | }
27 |
28 | deltaX := abs(x1 - x0)
29 | deltaY := abs(y1 - y0)
30 | var currentError int = 0
31 |
32 | var xstep, ystep int
33 | if x0 < x1 {
34 | xstep = 1
35 | } else {
36 | xstep = -1
37 | }
38 | if y0 < y1 {
39 | ystep = 1
40 | } else {
41 | ystep = -1
42 | }
43 |
44 | points := make([][2]int, 0, deltaX + 1)
45 | addPoint := func(x int, y int) {
46 | if x >= 0 && x < maxX && y >= 0 && y < maxY {
47 | points = append(points, [2]int{x, y})
48 | }
49 | }
50 |
51 | x := x0
52 | y := y0
53 |
54 | for x != x1 + xstep {
55 | if followX {
56 | addPoint(x, y)
57 | } else {
58 | addPoint(y, x)
59 | }
60 |
61 | x += xstep
62 | currentError += deltaY
63 | if currentError >= deltaX {
64 | y += ystep
65 | currentError -= deltaX
66 | }
67 | }
68 |
69 | return points
70 | }
71 |
--------------------------------------------------------------------------------
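
A Python transcription of common/bresenham.go (illustrative): integer-only line rasterization that walks the longer axis and accumulates the slope error, stepping the minor axis whenever the error reaches one major-axis step.

    def draw_line_on_cells(sx, sy, ex, ey, max_x, max_y):
        points = []
        follow_x = abs(ey - sy) <= abs(ex - sx)    # iterate along the longer axis
        x0, x1, y0, y1 = (sx, ex, sy, ey) if follow_x else (sy, ey, sx, ex)
        dx, dy = abs(x1 - x0), abs(y1 - y0)
        xstep = 1 if x0 < x1 else -1
        ystep = 1 if y0 < y1 else -1
        x, y, err = x0, y0, 0
        while x != x1 + xstep:
            px, py = (x, y) if follow_x else (y, x)
            if 0 <= px < max_x and 0 <= py < max_y:
                points.append((px, py))
            x += xstep
            err += dy
            if err >= dx:                          # crossed a cell boundary
                y += ystep
                err -= dx
        return points

For example, draw_line_on_cells(5, 5, 10, 15, 20, 20) yields 11 cells running from (5, 5) to (10, 15).
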
/fbastani-solution/common/bresenham_test.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestBresenhamSlope1(t *testing.T) {
8 | points := DrawLineOnCells(5, 5, 10, 15, 20, 20)
9 | if len(points) != 11 || points[0] != ([2]int{5, 5}) || points[len(points)-1] != ([2]int{10, 15}) {
10 | t.Fatalf("unexpected line cells: %v", points)
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/fbastani-solution/common/geom_test.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "math"
5 | "testing"
6 | )
7 |
8 | func TestPointAngleTo(t *testing.T) {
9 | point1 := Point{1, 1}
10 | point2 := Point{1, 0}
11 | point3 := Point{-1, 0}
12 | check := func(a Point, b Point, expected float64) {
13 | got := a.AngleTo(b)
14 | if math.Abs(got - expected) > 0.001 {
15 | t.Fatalf("expected %f for angle from %v to %v, but got %f", expected, a, b, got)
16 | }
17 | }
18 | check(point1, point1, 0)
19 | check(point1, point2, math.Pi / 4)
20 | check(point2, point3, math.Pi)
21 | check(point1, point3, math.Pi * 3 / 4)
22 | check(point3, point1, math.Pi * 3 / 4)
23 | }
24 |
25 | func TestSegmentDistance(t *testing.T) {
26 | segment := Segment{
27 | Point{0, 0},
28 | Point{1, 0},
29 | }
30 | check := func(point Point, expected float64) {
31 | got := segment.Distance(point)
32 | if math.Abs(got - expected) > 0.001 {
33 | t.Fatalf("expected %f for distance to %v, but got %f", expected, point, got)
34 | }
35 | }
36 | check(Point{0, 0}, 0)
37 | check(Point{0.5, 0}, 0)
38 | check(Point{-0.5, 0}, 0.5)
39 | check(Point{0.5, 0.5}, 0.5)
40 | }
41 |
42 | func TestSegmentDistanceToSegment(t *testing.T) {
43 | check := func(a Segment, b Segment, expected float64) {
44 | got := a.DistanceToSegment(b)
45 | if math.Abs(got - expected) > 0.001 {
46 | t.Fatalf("expected %f for distance from %v to %v, but got %f", expected, a, b, got)
47 | }
48 | }
49 | segment1 := Segment{Point{0, 0}, Point{1, 0}}
50 | segment2 := Segment{Point{0, 0.5}, Point{1, -0.5}}
51 | check(segment1, segment2, 0)
52 | segment3 := Segment{Point{0, 1}, Point{1, 1}}
53 | check(segment1, segment3, 1)
54 | check(segment2, segment3, 0.5)
55 | segment4 := Segment{Point{-1, 0}, Point{0, 1}}
56 | check(segment1, segment4, math.Sqrt(0.5))
57 | }
58 |
59 | func TestSegmentIntersection(t *testing.T) {
60 | check := func(a Segment, b Segment, expected *Point) {
61 | got := a.Intersection(b)
62 | if expected == nil && got != nil {
63 | t.Fatalf("expected nil from %v to %v, but got %v", a, b, *got)
64 | } else if expected != nil && got == nil {
65 | t.Fatalf("expected %v from %v to %v, but got nil", *expected, a, b)
66 | } else if expected != nil && got != nil {
67 | ep := *expected
68 | gp := *got
69 | if ep.Distance(gp) > 0.001 {
70 | t.Fatalf("expected %v from %v to %v, but got %v", ep, a, b, gp)
71 | }
72 | }
73 | }
74 | segment1 := Segment{Point{0, 0}, Point{1, 0}}
75 | segment2 := Segment{Point{0, 0.5}, Point{1, -0.5}}
76 | check(segment1, segment2, &Point{0.5, 0})
77 | segment3 := Segment{Point{0, 1}, Point{1, 1}}
78 | check(segment1, segment3, nil)
79 | check(segment2, segment3, nil)
80 | segment4 := Segment{Point{0, -1}, Point{0, 1}}
81 | check(segment1, segment4, &Point{0, 0})
82 | }
83 |
84 | func TestSegmentProjectWithWidth(t *testing.T) {
85 | segment := Segment{Point{10, 10}, Point{20, 10}}
86 | width := 2.0
87 | check := func(p Point, expected Point) {
88 | got := segment.ProjectWithWidth(p, width)
89 | if math.Abs(got.X - expected.X) > 0.001 || math.Abs(got.Y - expected.Y) > 0.001 {
90 | t.Fatalf("expected %v for projection of %v, but got %v", expected, p, got)
91 | }
92 | }
93 | check(Point{10, 5}, Point{10, 9})
94 | check(Point{10, 50}, Point{10, 11})
95 | check(Point{15, 15}, Point{15, 11})
96 | check(Point{15, 10.5}, Point{15, 10.5})
97 | check(Point{100, 10}, Point{21, 10})
98 | }
99 |
100 | func TestLineProjectPoint(t *testing.T) {
101 | line := Line{Point{10, 10}, Point{20, 10}}
102 | check := func(p Point, expected Point) {
103 | got := line.ProjectPoint(p)
104 | if got.Distance(expected) > 0.001 {
105 | t.Fatalf("expected %v for projection of %v, but got %v", expected, p, got)
106 | }
107 | }
108 | check(Point{15, 5}, Point{15, 10})
109 | check(Point{100, 5}, Point{100, 10})
110 | }
111 |
112 | func TestRectangleIntersects(t *testing.T) {
113 | check := func(a Rectangle, b Rectangle, expected bool) {
114 | got := a.Intersects(b)
115 | if got != expected {
116 | t.Fatalf("expected %v for %v.Intersects(%v), but got %v", expected, a, b, got)
117 | }
118 | }
119 | check(Rectangle{Point{0, 0}, Point{1, 1}}, Rectangle{Point{2, 0}, Point{3, 1}}, false)
120 | check(Rectangle{Point{0, 0}, Point{1, 1}}, Rectangle{Point{-1, -1}, Point{0.1, 0.1}}, true)
121 | check(Rectangle{Point{0, 0}, Point{1, 1}}, Rectangle{Point{0.5, 0.5}, Point{0.6, 0.6}}, true)
122 | }
123 |
--------------------------------------------------------------------------------
/fbastani-solution/common/graph_index.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "math"
5 | )
6 |
7 | type GridIndex struct {
8 | gridSize float64
9 | grid map[[2]int][]int
10 | graph *Graph
11 | }
12 |
13 | func (idx GridIndex) eachCell(rect Rectangle, f func(i int, j int)) {
14 | for i := int(math.Floor(rect.Min.X / idx.gridSize)); i <= int(math.Floor(rect.Max.X / idx.gridSize)); i++ {
15 | for j := int(math.Floor(rect.Min.Y / idx.gridSize)); j <= int(math.Floor(rect.Max.Y / idx.gridSize)); j++ {
16 | f(i, j)
17 | }
18 | }
19 | }
20 |
21 | func (idx GridIndex) Search(rect Rectangle) []*Edge {
22 | edgeIDs := make(map[int]bool)
23 | idx.eachCell(rect, func(i int, j int) {
24 | for _, edgeID := range idx.grid[[2]int{i, j}] {
25 | edgeIDs[edgeID] = true
26 | }
27 | })
28 | edges := make([]*Edge, 0, len(edgeIDs))
29 | for edgeID := range edgeIDs {
30 | edges = append(edges, idx.graph.Edges[edgeID])
31 | }
32 | return edges
33 | }
34 |
35 | func (graph *Graph) GridIndex(gridSize float64) GridIndex {
36 | idx := GridIndex{
37 | gridSize: gridSize,
38 | grid: make(map[[2]int][]int),
39 | graph: graph,
40 | }
41 | for _, edge := range graph.Edges {
42 | idx.eachCell(edge.Segment().Bounds(), func(i int, j int) {
43 | idx.grid[[2]int{i, j}] = append(idx.grid[[2]int{i, j}], edge.ID)
44 | })
45 | }
46 | return idx
47 | }
48 |
--------------------------------------------------------------------------------
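
The grid index above registers every edge in each cell its bounding box touches, and a search takes the union of edge ids over the query rectangle's cells. A Python sketch of the same structure (illustrative; rectangles are ((min_x, min_y), (max_x, max_y)) pairs):

    import math
    from collections import defaultdict

    def cells(rect, grid_size):
        (min_x, min_y), (max_x, max_y) = rect
        for i in range(int(math.floor(min_x / grid_size)), int(math.floor(max_x / grid_size)) + 1):
            for j in range(int(math.floor(min_y / grid_size)), int(math.floor(max_y / grid_size)) + 1):
                yield (i, j)

    def build_index(edge_bounds, grid_size):
        grid = defaultdict(list)
        for edge_id, rect in edge_bounds.items():    # edge id -> bounding rectangle
            for cell in cells(rect, grid_size):
                grid[cell].append(edge_id)
        return grid

    def search(grid, rect, grid_size):
        return {edge_id for cell in cells(rect, grid_size) for edge_id in grid.get(cell, [])}
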
/fbastani-solution/common/graph_index_test.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestGridIndexSearch(t *testing.T) {
8 | graph := &Graph{}
9 | v00 := graph.AddNode(Point{0, 0})
10 | v01 := graph.AddNode(Point{1, 0})
11 | v02 := graph.AddNode(Point{2, 0})
12 | v11 := graph.AddNode(Point{1, 1})
13 | e00t01 := graph.AddEdge(v00, v01)
14 | e00t02 := graph.AddEdge(v00, v02)
15 | e00t11 := graph.AddEdge(v00, v11)
16 | e01t11 := graph.AddEdge(v01, v11)
17 | idx := graph.GridIndex(0.3)
18 | check := func(rect Rectangle, edges []*Edge) {
19 | edgeIDs := make(map[int]bool)
20 | for _, edge := range edges {
21 | edgeIDs[edge.ID] = true
22 | }
23 | got := idx.Search(rect)
24 | if len(got) != len(edgeIDs) {
25 | t.Errorf("expected %d edges but got %d for %v", len(edgeIDs), len(got), rect)
26 | return
27 | }
28 | for _, edge := range got {
29 | if !edgeIDs[edge.ID] {
30 | t.Errorf("got edge %d (%v) unexpectedly for %v", edge.ID, edge.Segment(), rect)
31 | return
32 | }
33 | }
34 | }
35 | check(Rectangle{Point{-0.1, -0.1}, Point{0.1, 0.1}}, []*Edge{e00t01, e00t02, e00t11})
36 | check(Rectangle{Point{0.4, 0.4}, Point{0.6, 0.6}}, []*Edge{e00t11})
37 | check(Rectangle{Point{1.7, -0.01}, Point{1.71, 0.01}}, []*Edge{e00t02})
38 | check(Rectangle{Point{0.4, 0.4}, Point{1.1, 0.6}}, []*Edge{e00t11, e01t11})
39 | }
40 |
--------------------------------------------------------------------------------
/fbastani-solution/common/graph_rtree.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "github.com/dhconnelly/rtreego"
5 |
6 | "math"
7 | )
8 |
9 | func RtreegoRect(r Rectangle) *rtreego.Rect {
10 | dx := math.Max(0.00000001, r.Max.X - r.Min.X)
11 | dy := math.Max(0.00000001, r.Max.Y - r.Min.Y)
12 | rect, err := rtreego.NewRect(rtreego.Point{r.Min.X, r.Min.Y}, []float64{dx, dy})
13 | if err != nil {
14 | panic(err)
15 | }
16 | return rect
17 | }
18 |
19 | type edgeSpatial struct {
20 | edge *Edge
21 | rect *rtreego.Rect
22 | }
23 |
24 | func (e *edgeSpatial) Bounds() *rtreego.Rect {
25 | if e.rect == nil {
26 | r := e.edge.Src.Point.Rectangle()
27 | r = r.Extend(e.edge.Dst.Point)
28 | e.rect = RtreegoRect(r)
29 | }
30 | return e.rect
31 | }
32 |
33 | type Rtree struct {
34 | tree *rtreego.Rtree
35 | }
36 |
37 | func (rtree Rtree) Search(rect Rectangle) []*Edge {
38 | spatials := rtree.tree.SearchIntersect(RtreegoRect(rect))
39 | edges := make([]*Edge, len(spatials))
40 | for i := range spatials {
41 | edges[i] = spatials[i].(*edgeSpatial).edge
42 | }
43 | return edges
44 | }
45 |
46 | func (graph *Graph) Rtree() Rtree {
47 | rtree := rtreego.NewTree(2, 25, 50)
48 | for _, edge := range graph.Edges {
49 | rtree.Insert(&edgeSpatial{edge: edge})
50 | }
51 | return Rtree{rtree}
52 | }
53 |
--------------------------------------------------------------------------------
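
graph_rtree.go wraps the dhconnelly/rtreego package; note the tiny epsilon it adds because rtreego rejects zero-extent rectangles. For comparison, the Python rtree package (which prep.sh installs) accepts degenerate boxes directly; an equivalent edge lookup looks roughly like this (illustrative):

    from rtree import index

    idx = index.Index()
    # insert each edge id with the bounding box of its endpoints (minx, miny, maxx, maxy)
    idx.insert(0, (0.0, 0.0, 1.0, 1.0))
    idx.insert(1, (2.0, 2.0, 3.0, 3.0))
    hits = set(idx.intersection((0.5, 0.5, 2.5, 2.5)))   # -> {0, 1}
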
/fbastani-solution/common/graph_test.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "math"
5 | "testing"
6 | )
7 |
8 | func TestEdgeAngleTo(t *testing.T) {
9 | zero := &Node{Point: Point{0, 0}}
10 | v1 := &Node{Point: Point{1, 1}}
11 | v2 := &Node{Point: Point{1, 0}}
12 | v3 := &Node{Point: Point{1, -1}}
13 | e1 := &Edge{Src: zero, Dst: v1}
14 | e2 := &Edge{Src: zero, Dst: v2}
15 | e3 := &Edge{Src: zero, Dst: v3}
16 | e4 := &Edge{Src: v1, Dst: zero}
17 | check := func(label string, expected float64, got float64) {
18 | if math.Abs(expected - got) > 0.001 {
19 | t.Fatalf("%s: expected %f but got %f", label, expected, got)
20 | }
21 | }
22 | check("e1->e2", math.Pi / 4, e1.AngleTo(e2))
23 | check("e2->e1", math.Pi / 4, e2.AngleTo(e1))
24 | check("e1->e3", math.Pi / 2, e1.AngleTo(e3))
25 | check("e1->e4", math.Pi, e1.AngleTo(e4))
26 | }
27 |
--------------------------------------------------------------------------------
/fbastani-solution/common/kde.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "math"
5 | )
6 |
7 | func KDE(traces Traces, cellSize float64, sigma float64) [][]float64 {
8 | // compute histogram
9 | rect := traces.Bounds()
10 | numX := int((rect.Max.X - rect.Min.X) / cellSize + 1)
11 | numY := int((rect.Max.Y - rect.Min.Y) / cellSize + 1)
12 | histogram := make([][]int, numX)
13 | for i := range histogram {
14 | histogram[i] = make([]int, numY)
15 | }
16 |
17 | getHistogramIndices := func(p Point) (int, int) {
18 | return int((p.X - rect.Min.X) / cellSize), int((p.Y - rect.Min.Y) / cellSize)
19 | }
20 |
21 | for _, trace := range traces {
22 | var previousObs *Observation
23 | for _, obs := range trace.Observations {
24 | if previousObs != nil {
25 | startX, startY := getHistogramIndices(previousObs.Point)
26 | endX, endY := getHistogramIndices(obs.Point)
27 | cells := DrawLineOnCells(startX, startY, endX, endY, numX, numY)
28 | for _, cellidx := range cells {
29 | histogram[cellidx[0]][cellidx[1]]++
30 | }
31 | }
32 | previousObs = obs
33 | }
34 | }
35 |
36 | // create Gaussian kernel
37 | kernelRadius := int((2 * sigma) / cellSize + 1)
38 | kernelCells := 2 * kernelRadius + 1
39 | kernel := make([][]float64, kernelCells)
40 | for i := range kernel {
41 | kernel[i] = make([]float64, kernelCells)
42 | for j := range kernel[i] {
43 | dsq := (i - kernelRadius) * (i - kernelRadius) + (j - kernelRadius) * (j - kernelRadius)
44 | kernel[i][j] = math.Exp(float64(-dsq) / (2 * sigma * sigma)) / (math.Pi * 2 * sigma * sigma)
45 | }
46 | }
47 |
48 | // apply kernel
49 | out := make([][]float64, numX)
50 | for i := range out {
51 | out[i] = make([]float64, numY)
52 | for j := range out[i] {
53 | for dx := -kernelRadius; dx <= kernelRadius; dx++ {
54 | for dy := -kernelRadius; dy <= kernelRadius; dy++ {
55 | if i + dx >= 0 && i + dx < numX && j + dy >= 0 && j + dy < numY {
56 | out[i][j] += kernel[dx + kernelRadius][dy + kernelRadius] * float64(histogram[i + dx][j + dy])
57 | }
58 | }
59 | }
60 | }
61 | }
62 |
63 | return out
64 | }
65 |
--------------------------------------------------------------------------------
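
KDE here is a two-step estimate: rasterize every trace segment into a count histogram, then convolve with a Gaussian kernel truncated at roughly two sigmas. A numpy sketch using scipy's gaussian_filter as a stand-in for the explicit kernel (illustrative; the output differs from the Go code by a constant normalization factor, and draw_line_on_cells is the helper sketched earlier):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def kde(segments, bounds, cell_size, sigma):
        (min_x, min_y), (max_x, max_y) = bounds
        nx = int((max_x - min_x) / cell_size) + 1
        ny = int((max_y - min_y) / cell_size) + 1
        hist = np.zeros((nx, ny))
        def to_cell(x, y):
            return int((x - min_x) / cell_size), int((y - min_y) / cell_size)
        for (ax, ay), (bx, by) in segments:
            sx, sy = to_cell(ax, ay)
            ex, ey = to_cell(bx, by)
            for cx, cy in draw_line_on_cells(sx, sy, ex, ey, nx, ny):
                hist[cx, cy] += 1
        # the Go kernel is truncated at radius ~2*sigma, hence truncate=2.0
        return gaussian_filter(hist, sigma=sigma / cell_size, truncate=2.0)
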
/fbastani-solution/common/trace.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | type Observation struct {
8 | Time time.Time
9 | Point Point
10 | Metadata map[string]interface{}
11 | }
12 |
13 | func (obs *Observation) SetMetadata(k string, val interface{}) {
14 | if obs.Metadata == nil {
15 | obs.Metadata = make(map[string]interface{})
16 | }
17 | obs.Metadata[k] = val
18 | }
19 |
20 | func (obs *Observation) GetMetadata(k string) interface{} {
21 | if obs.Metadata == nil {
22 | return nil
23 | } else {
24 | return obs.Metadata[k]
25 | }
26 | }
27 |
28 | type Trace struct {
29 | Name string
30 | Observations []*Observation
31 | }
32 |
33 | func (trace *Trace) LastObservation() *Observation {
34 | if len(trace.Observations) > 0 {
35 | return trace.Observations[len(trace.Observations) - 1]
36 | } else {
37 | return nil
38 | }
39 | }
40 |
41 | type Traces []*Trace
42 |
43 | // Convert coordinate system from longitude/latitude to Cartesian meters.
44 | // This assumes that the GPS sequences cover a small region so that curvature can be ignored.
45 | func (traces Traces) LonLatToMeters(origin Point) {
46 | for _, trace := range traces {
47 | for i := range trace.Observations {
48 | trace.Observations[i].Point = trace.Observations[i].Point.LonLatToMeters(origin)
49 | }
50 | }
51 | }
52 |
53 | func (traces Traces) Bounds() Rectangle {
54 | r := EmptyRectangle
55 | for _, trace := range traces {
56 | for _, obs := range trace.Observations {
57 | r = r.Extend(obs.Point)
58 | }
59 | }
60 | return r
61 | }
62 |
--------------------------------------------------------------------------------
/fbastani-solution/common/viterbi.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "fmt"
5 | "math"
6 | "time"
7 | )
8 |
9 | const VITERBI_NORMALIZE_DELTA time.Duration = time.Second
10 | const VITERBI_SIGMA = 25
11 |
12 | func NormalizeTraces(traces []*Trace) {
13 | for _, trace := range traces {
14 | previous := trace.Observations[0]
15 | newObs := []*Observation{previous}
16 | for _, obs := range trace.Observations[1:] {
17 | dt := obs.Time.Sub(previous.Time)
18 | n := int(dt / VITERBI_NORMALIZE_DELTA) + 1
19 | for i := 1; i < n; i++ {
20 | intermediateTime := previous.Time.Add(dt * time.Duration(i) / time.Duration(n))
21 | intermediatePoint := previous.Point.Add(obs.Point.Sub(previous.Point).Scale(float64(i) / float64(n)))
22 | newObs = append(newObs, &Observation{
23 | Time: intermediateTime,
24 | Point: intermediatePoint,
25 | })
26 | }
27 | newObs = append(newObs, obs)
28 | previous = obs
29 | }
30 | trace.Observations = newObs
31 | }
32 | }
33 |
34 | func Viterbi(graph *Graph, traces []*Trace, tolerance float64) {
35 | transitionProbs := make([]map[int]float64, len(graph.Edges))
36 | for _, edge := range graph.Edges {
37 | probs := make(map[int]float64)
38 | transitionProbs[edge.ID] = probs
39 |
40 | var adjacentEdges []*Edge
41 | for _, other := range edge.Dst.Out {
42 | adjacentEdges = append(adjacentEdges, other)
43 | }
44 |
45 | // set scores and then reweight so that sum is 1
46 | probs[edge.ID] = 30
47 | var totalScore float64 = 30
48 | for _, other := range adjacentEdges {
49 | negAngle := math.Pi / 2 - edge.AngleTo(other)
50 | if negAngle < 0 {
51 | negAngle = 0
52 | }
53 | score := negAngle * negAngle + 0.05
54 | totalScore += score
55 | probs[other.ID] = score
56 | }
57 | for id := range probs {
58 | probs[id] /= totalScore
59 | }
60 | }
61 |
62 | rtree := graph.Rtree()
63 |
64 | // get conditional emission probabilities
65 | emissionProbs := func(point Point, tolerance float64) map[int]float64 {
66 | candidates := rtree.Search(point.RectangleTol(tolerance))
67 | if len(candidates) == 0 {
68 | return nil
69 | }
70 | scores := make(map[int]float64)
71 | var totalScore float64 = 0
72 | for _, edge := range candidates {
73 | distance := edge.Segment().Distance(point)
74 | score := math.Exp(-0.5 * distance * distance / VITERBI_SIGMA / VITERBI_SIGMA)
75 | scores[edge.ID] = score
76 | totalScore += score
77 | }
78 | for i := range scores {
79 | scores[i] /= totalScore
80 | }
81 | return scores
82 | }
83 |
84 | for _, trace := range traces {
85 | // run viterbi
86 | probs := make(map[int]float64)
87 | for _, edge := range rtree.Search(trace.Observations[0].Point.RectangleTol(tolerance)) {
88 | probs[edge.ID] = 0
89 | }
90 | backpointers := make([]map[int]int, len(trace.Observations))
91 | failed := false
92 | for i := 1; i < len(trace.Observations); i++ {
93 | obs := trace.Observations[i]
94 | var nextProbs map[int]float64
95 |
96 | for factor := float64(1); len(nextProbs) < 2 && factor <= 4; factor *= 2 {
97 | backpointers[i] = make(map[int]int)
98 | emissions := emissionProbs(obs.Point, tolerance * factor)
99 | if factor > 1 {
100 | fmt.Printf("viterbi: warning: factor=%f at i=%d, point=%v\n", factor, i, obs.Point)
101 | }
102 | nextProbs = make(map[int]float64)
103 | for prevEdgeID := range probs {
104 | transitions := transitionProbs[prevEdgeID]
105 | for nextEdgeID := range transitions {
106 | if emissions[nextEdgeID] == 0 {
107 | continue
108 | }
109 | prob := probs[prevEdgeID] + math.Log(transitions[nextEdgeID]) + math.Log(emissions[nextEdgeID])
110 | if curProb, ok := nextProbs[nextEdgeID]; !ok || prob > curProb {
111 | nextProbs[nextEdgeID] = prob
112 | backpointers[i][nextEdgeID] = prevEdgeID
113 | }
114 | }
115 | }
116 | }
117 | probs = nextProbs
118 | //fmt.Printf("%d/%d %v %v\n", i, len(trace.Observations), obs.Point, probs)
119 | if len(probs) == 0 {
120 | fmt.Printf("viterbi: warning: failed to find edge, skipping trace: i=%d, point=%v\n", i, obs.Point)
121 | failed = true
122 | break
123 | }
124 | }
125 | if failed {
126 | continue
127 | }
128 |
129 | // collect state sequence and annotate trace with map matched data
130 | bestEdgeID := -1
131 | for edgeID := range probs {
132 | if bestEdgeID == -1 || probs[edgeID] > probs[bestEdgeID] {
133 | bestEdgeID = edgeID
134 | }
135 | }
136 | curEdge := bestEdgeID
137 | for i := len(trace.Observations) - 1; i >= 0; i-- {
138 | edge := graph.Edges[curEdge]
139 | position := edge.Segment().Project(trace.Observations[i].Point, false)
140 | trace.Observations[i].SetMetadata("viterbi", EdgePos{edge, position})
141 | if i > 0 {
142 | curEdge = backpointers[i][curEdge]
143 | }
144 | }
145 | }
146 | }
147 |
--------------------------------------------------------------------------------
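
The map matcher above is a standard Viterbi over (observation, edge) states in log space: transitions favor staying on an edge or turning onto a well-aligned neighbor, emissions fall off with squared distance from the observation, and backpointers recover the best edge sequence. A compact Python sketch of the recurrence (illustrative; trans maps edge id -> {next edge id: probability} and emit(point) returns {edge id: probability}, standing in for the structures the Go code builds):

    import math

    def viterbi_match(observations, initial_edges, trans, emit):
        probs = {e: 0.0 for e in initial_edges}      # edge id -> best log-probability
        backptrs = []
        for point in observations[1:]:
            emissions = emit(point)
            nxt, bp = {}, {}
            for prev, lp in probs.items():
                for e, tp in trans[prev].items():
                    if emissions.get(e, 0) == 0:
                        continue
                    cand = lp + math.log(tp) + math.log(emissions[e])
                    if e not in nxt or cand > nxt[e]:
                        nxt[e], bp[e] = cand, prev
            if not nxt:
                return None                          # lost the trace, as the Go code does
            probs = nxt
            backptrs.append(bp)
        best = max(probs, key=probs.get)             # best final edge
        path = [best]
        for bp in reversed(backptrs):
            path.append(bp[path[-1]])
        return path[::-1]                            # one matched edge per observation
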
/fbastani-solution/do_the_training.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os
3 | import subprocess
4 | import sys
5 |
6 | def runtrain(params):
7 | i, cities, basedirs = params
8 | gpu_env = os.environ.copy()
9 | gpu_env['CUDA_VISIBLE_DEVICES'] = str(i)
10 | for city in cities:
11 | print 'starting member {}/4 for city {}'.format(i, city)
12 | subprocess.call(['python', 'run_train.py', city, str(i)] + basedirs, env=gpu_env)
13 |
14 | basedirs = sys.argv[1:]
15 | for i in xrange(len(basedirs)):
16 | if basedirs[i][-1] == '/':
17 | basedirs[i] = basedirs[i][:-1]
18 |
19 | cities = []
20 | for basedir in basedirs:
21 | d = basedir.split('/')[-1]
22 | parts = d.split('_')
23 | city = '{}_{}_{}'.format(parts[0], parts[1], parts[2])
24 | cities.append(city)
25 |
26 | todo = [(i, cities, basedirs) for i in xrange(4)]
27 | p = multiprocessing.Pool(4)
28 | p.map(runtrain, todo)
29 | p.close()
30 |
--------------------------------------------------------------------------------
/fbastani-solution/download_models.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir models
3 | aws s3 sync s3://spacenet-dataset/SpaceNet_Roads_Competition/Pretrained_Models/05-fbastani/ models/
--------------------------------------------------------------------------------
/fbastani-solution/graphextract/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/fbastani-solution/graphextract/__init__.py
--------------------------------------------------------------------------------
/fbastani-solution/graphextract/bounding_boxes/spacenet.txt:
--------------------------------------------------------------------------------
1 | 36.1666923624 -115.272632937 36.1706330372 -115.268692263
2 |
--------------------------------------------------------------------------------
/fbastani-solution/graphextract/discoverlib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/fbastani-solution/graphextract/discoverlib/__init__.py
--------------------------------------------------------------------------------
/fbastani-solution/graphextract/map2go.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import sys
3 |
4 | my_map = pickle.load(open(sys.argv[1], "rb"))
5 | nodes = my_map[0]
6 | edges = my_map[1]
7 |
8 | with open(sys.argv[2], 'w') as f:
9 | nodemap = {}
10 | counter = 0
11 | for node_id, node in nodes.items():
12 | nodemap[node_id] = counter
13 | counter += 1
14 | f.write("{} {}\n".format(node[1], node[0]))
15 | f.write("\n")
16 | for edge in edges.values():
17 | f.write("{} {}\n".format(nodemap[edge[0]], nodemap[edge[1]]))
18 | f.write("{} {}\n".format(nodemap[edge[1]], nodemap[edge[0]]))
19 |
--------------------------------------------------------------------------------
/fbastani-solution/graphextract/pylibs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/fbastani-solution/graphextract/pylibs/__init__.py
--------------------------------------------------------------------------------
/fbastani-solution/graphextract/pylibs/mathfunclib.py:
--------------------------------------------------------------------------------
1 | #
2 | # Mathematical function library.
3 | # Author: James P. Biagioni (jbiagi1@uic.edu)
4 | # Company: University of Illinois at Chicago
5 | # Created: 12/16/10
6 | #
7 |
8 | import math
9 |
10 | # Normal distribution PDF. Formula obtained from: http://en.wikipedia.org/wiki/Normal_Distribution
11 | def normal_distribution_pdf(x, mu, sigma, numerator=1.0):
12 | return (numerator / math.sqrt(2.0 * math.pi * math.pow(sigma, 2.0))) * math.exp(-1.0 * (math.pow((x - mu), 2.0) / (2.0 * math.pow(sigma, 2.0))))
13 |
14 | # Normal distribution CDF. Formula obtained from: http://en.wikipedia.org/wiki/Normal_Distribution
15 | def normal_distribution_cdf(x, mu, sigma):
16 | return (0.5 * (1.0 + erf( (x - mu) / math.sqrt(2.0 * math.pow(sigma, 2.0)))))
17 |
18 | # Complementary normal distribution CDF. Formula obtained from: http://en.wikipedia.org/wiki/Cumulative_distribution_function
19 | def complementary_normal_distribution_cdf(x, mu, sigma):
20 | return (1.0 - normal_distribution_cdf(x, mu, sigma))
21 |
22 | # Spring force. Formula obtained from: http://en.wikipedia.org/wiki/Hooke%27s_law
23 | def spring_force(x, k):
24 | return ((-1.0 * k) * x)
25 |
26 | # Gaussian error function. Algorithm obtained from: http://www.johndcook.com/python_erf.html
27 | def erf(x):
28 | # constants
29 | a1 = 0.254829592
30 | a2 = -0.284496736
31 | a3 = 1.421413741
32 | a4 = -1.453152027
33 | a5 = 1.061405429
34 | p = 0.3275911
35 |
36 | # Save the sign of x
37 | sign = 1
38 | if x < 0:
39 | sign = -1
40 | x = abs(x)
41 |
42 | # A&S formula 7.1.26
43 | t = 1.0/(1.0 + p*x)
44 | y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
45 |
46 | return sign*y
47 |
--------------------------------------------------------------------------------
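
The erf above is the Abramowitz & Stegun formula 7.1.26 approximation, accurate to about 1.5e-7 over the whole real line. On Python 2.7 and later it can be sanity-checked against the standard library:

    import math
    for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
        assert abs(erf(x) - math.erf(x)) < 1e-6
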
/fbastani-solution/graphextract/pylibs/spatialfunclib_accel.pyx:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | cdef double METERS_PER_DEGREE_LATITUDE, METERS_PER_DEGREE_LONGITUDE
4 |
5 | METERS_PER_DEGREE_LATITUDE = 111070.34306591158
6 | METERS_PER_DEGREE_LONGITUDE = 83044.98918812413
7 |
8 | #
9 | # Returns the distance in meters between two points specified in degrees, using an approximation method.
10 | #
11 | def fast_distance(double a_lat, double a_lon, double b_lat, double b_lon):
12 | if a_lat == b_lat and a_lon==b_lon:
13 | return 0.0
14 |
15 | cdef double y_dist, x_dist
16 | y_dist = METERS_PER_DEGREE_LATITUDE * (a_lat - b_lat)
17 | x_dist = METERS_PER_DEGREE_LONGITUDE * (a_lon - b_lon)
18 |
19 | return math.sqrt((y_dist * y_dist) + (x_dist * x_dist))
20 |
21 |
--------------------------------------------------------------------------------
/fbastani-solution/graphextract/subiterations.pyx:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | cimport numpy as np
3 |
4 | DTYPE = np.int
5 | ctypedef np.int_t DTYPE_t
6 |
7 | def first_subiteration(np.ndarray[DTYPE_t, ndim=2] curr_image, fg_pixels):
8 | cdef int i,j, p2, p3, p4, p5, p6, p7, p8, p9
9 |
10 | zero_pixels = {}
11 | next_pixels = {}
12 |
13 | for (i, j) in fg_pixels:
14 | if curr_image[i][j] != 1: continue
15 |
16 | p2 = curr_image[i - 1][j]
17 | p3 = curr_image[i - 1][j + 1]
18 | p4 = curr_image[i][j + 1]
19 | p5 = curr_image[i + 1][j + 1]
20 | p6 = curr_image[i + 1][j]
21 | p7 = curr_image[i + 1][j - 1]
22 | p8 = curr_image[i][j - 1]
23 | p9 = curr_image[i - 1][j - 1]
24 |
25 | if (2 <= (bool(p2) + bool(p3) + bool(p4) + bool(p5) + bool(p6) + bool(p7) + bool(p8) + bool(p9)) <= 6 and
26 | (p2 * p4 * p6 == 0) and
27 | (p4 * p6 * p8 == 0)):
28 | if (bool(not p2 and p3) + bool(not p3 and p4) + bool(not p4 and p5) + bool(not p5 and p6) + bool(not p6 and p7) + bool(not p7 and p8) + bool(not p8 and p9) + bool(not p9 and p2) == 1):
29 | zero_pixels[(i,j)] = 0
30 | if p2 == 1: next_pixels[(i-1,j)]=0
31 | if p3 == 1: next_pixels[(i-1,j+1)]=0
32 | if p4 == 1: next_pixels[(i,j+1)]=0
33 | if p5 == 1: next_pixels[(i+1,j+1)]=0
34 | if p6 == 1: next_pixels[(i+1,j)]=0
35 | if p7 == 1: next_pixels[(i+1,j-1)]=0
36 | if p8 == 1: next_pixels[(i,j-1)]=0
37 | if p9 == 1: next_pixels[(i-1,j-1)]=0
38 |
39 | return zero_pixels.keys(), next_pixels.keys()
40 |
41 | def second_subiteration(np.ndarray[DTYPE_t, ndim=2] curr_image, fg_pixels):
42 | cdef int i,j, p2, p3, p4, p5, p6, p7, p8, p9
43 |
44 | zero_pixels = {}
45 | next_pixels = {}
46 |
47 | for (i, j) in fg_pixels:
48 | if curr_image[i][j] != 1: continue
49 |
50 | p2 = curr_image[i - 1][j]
51 | p3 = curr_image[i - 1][j + 1]
52 | p4 = curr_image[i][j + 1]
53 | p5 = curr_image[i + 1][j + 1]
54 | p6 = curr_image[i + 1][j]
55 | p7 = curr_image[i + 1][j - 1]
56 | p8 = curr_image[i][j - 1]
57 | p9 = curr_image[i - 1][j - 1]
58 |
59 | if (2 <= (bool(p2) + bool(p3) + bool(p4) + bool(p5) + bool(p6) + bool(p7) + bool(p8) + bool(p9)) <= 6 and
60 | (p2 * p4 * p8 == 0) and
61 | (p2 * p6 * p8 == 0)):
62 | if (bool(not p2 and p3) + bool(not p3 and p4) + bool(not p4 and p5) + bool(not p5 and p6) + bool(not p6 and p7) + bool(not p7 and p8) + bool(not p8 and p9) + bool(not p9 and p2) == 1):
63 | zero_pixels[(i,j)] = 0
64 | if p2 == 1: next_pixels[(i-1,j)]=0
65 | if p3 == 1: next_pixels[(i-1,j+1)]=0
66 | if p4 == 1: next_pixels[(i,j+1)]=0
67 | if p5 == 1: next_pixels[(i+1,j+1)]=0
68 | if p6 == 1: next_pixels[(i+1,j)]=0
69 | if p7 == 1: next_pixels[(i+1,j-1)]=0
70 | if p8 == 1: next_pixels[(i,j-1)]=0
71 | if p9 == 1: next_pixels[(i-1,j-1)]=0
72 |
73 | return zero_pixels.keys(), next_pixels.keys()
74 |
75 |
--------------------------------------------------------------------------------
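
The two subiterations are the two passes of Zhang-Suen thinning: p2..p9 walk the 8-neighborhood clockwise starting at the pixel above, and a pixel is deletable when it has 2-6 foreground neighbors, exactly one 0->1 transition around that ring, and the pass-specific corner conditions hold. A pure-Python sketch of the deletability test (illustrative):

    def zhang_suen_deletable(neighbors, first_pass):
        p2, p3, p4, p5, p6, p7, p8, p9 = neighbors   # clockwise from the pixel above
        ring = [p2, p3, p4, p5, p6, p7, p8, p9]
        b = sum(1 for v in ring if v)                # foreground neighbors
        a = sum(1 for u, v in zip(ring, ring[1:] + ring[:1]) if not u and v)   # 0->1 transitions
        if not (2 <= b <= 6 and a == 1):
            return False
        if first_pass:
            return p2 * p4 * p6 == 0 and p4 * p6 * p8 == 0
        return p2 * p4 * p8 == 0 and p2 * p6 * p8 == 0
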
/fbastani-solution/prep.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | apt-get update
4 | #apt-get dist-upgrade -y
5 | apt-get install -y python2.7 python-gdal python-pip libpython2.7-dev python-tk build-essential golang libgdal-dev libspatialindex-dev git
6 | pip install --upgrade pip
7 | pip install georasters numpy scikit-image scipy cython rtree
8 | #pip install tensorflow-gpu
9 | mkdir -p /wdata/spacenet2017/favyen/graphs/ /wdata/spacenet2017/favyen/truth/
10 |
--------------------------------------------------------------------------------
/fbastani-solution/run_lib.py:
--------------------------------------------------------------------------------
1 | import georasters
2 | import numpy
3 | import os
4 | from PIL import Image
5 | import random
6 | import scipy.ndimage
7 | import subprocess
8 | import sys
9 | import tensorflow as tf
10 | import time
11 |
12 | SIZE = 256
13 | OUTPUT_SCALE = 2
14 | OUTPUT_CHANNELS = 1
15 | TARGETS = '/wdata/spacenet2017/favyen/truth/'
16 |
17 | def load_tile(basedirs, region, skip_truth=False):
18 | d, city, id = region.split('.')
19 | BASEDIR = [x for x in basedirs if city in x][0]
20 |
21 | fname = '{}/MUL-PanSharpen/MUL-PanSharpen_{}_img{}.tif'.format(BASEDIR, city, id)
22 | data1 = georasters.from_file(fname).raster
23 | data1 = data1.filled(0)
24 | data1 = numpy.transpose(data1, (2, 1, 0))
25 |
26 | fname = '{}/PAN/PAN_{}_img{}.tif'.format(BASEDIR, city, id)
27 | data2 = georasters.from_file(fname).raster
28 | data2 = data2.filled(0)
29 | data2 = numpy.transpose(data2, (1, 0))
30 |
31 | input_im = numpy.zeros((1300, 1300, 9), dtype='uint8')
32 | for i in xrange(8):
33 | input_im[:, :, i] = (data1[:, :, i] / 8).astype('uint8')
34 | input_im[:, :, 8] = (data2 / 8).astype('uint8')
35 |
36 | if not skip_truth:
37 | fname = '{}/{}.png'.format(TARGETS, region)
38 | if not os.path.isfile(fname):
39 | return None
40 | output_im = scipy.ndimage.imread(fname)
41 | if len(output_im.shape) == 3:
42 | output_im = 255 - output_im[:, :, 0:1]
43 | output_im = (output_im > 1).astype('uint8') * 255
44 | else:
45 | output_im = numpy.expand_dims(output_im, axis=2)
46 |
47 | else:
48 | output_im = numpy.zeros((1300 / OUTPUT_SCALE, 1300 / OUTPUT_SCALE, OUTPUT_CHANNELS), dtype='uint8')
49 |
50 | return input_im, numpy.swapaxes(output_im, 0, 1)
51 |
52 | def load_tiles_new(basedirs, city):
53 | all_tiles = []
54 | regions = [fname.split('.png')[0] for fname in os.listdir(TARGETS) if '.png' in fname]
55 | regions = [region for region in regions if city == region.split('.')[1]]
56 | counter = 0
57 | for region in regions:
58 | counter += 1
59 | if counter % 20 == 0:
60 | print '... {}/{}'.format(counter, len(regions))
61 | all_tiles.append((region, load_tile(basedirs, region)))
62 | random.shuffle(all_tiles)
63 | val_tiles = all_tiles[:20]
64 | train_tiles = all_tiles[20:]
65 | return val_tiles, train_tiles
66 |
67 | def extract(tile):
68 | input_im, output_im = tile[1]
69 |
70 | i = random.randint(-64, 1300 / OUTPUT_SCALE + 64 - SIZE / OUTPUT_SCALE - 1)
71 | j = random.randint(-64, 1300 / OUTPUT_SCALE + 64 - SIZE / OUTPUT_SCALE - 1)
72 | if i < 0:
73 | i = 0
74 | elif i >= 1300 / OUTPUT_SCALE - SIZE / OUTPUT_SCALE:
75 | i = 1300 / OUTPUT_SCALE - SIZE / OUTPUT_SCALE - 1
76 | if j < 0:
77 | j = 0
78 | elif j >= 1300 / OUTPUT_SCALE - SIZE / OUTPUT_SCALE:
79 | j = 1300 / OUTPUT_SCALE - SIZE / OUTPUT_SCALE - 1
80 |
81 | input_rect = input_im[i*OUTPUT_SCALE:i*OUTPUT_SCALE+SIZE, j*OUTPUT_SCALE:j*OUTPUT_SCALE+SIZE, :]
82 | output_rect = output_im[i:i+SIZE/OUTPUT_SCALE, j:j+SIZE/OUTPUT_SCALE, :]
83 |
84 | rotations = random.randint(0, 3)
85 | if rotations > 0:
86 | input_rect = numpy.rot90(input_rect, k=rotations)
87 | output_rect = numpy.rot90(output_rect, k=rotations)
88 |
89 | return input_rect, output_rect
90 |
91 | def do_test2(m, session, tiles, out_path):
92 | for counter in xrange(len(tiles)):
93 | if counter % 10 == 0:
94 | print '... {}/{}'.format(counter, len(tiles))
95 | region, tile = tiles[counter]
96 |
97 | inputs = numpy.zeros((1, 1408, 1408, 9), dtype='float32')
98 | inputs[0, 54:54+1300, 54:54+1300, :] = tile[0].astype('float32') / 255.0
99 | outputs = session.run(m.outputs, feed_dict={
100 | m.is_training: False,
101 | m.inputs: inputs,
102 | })
103 | tile_output = (outputs[0, 27:27+650, 27:27+650] * 255.0).astype('uint8')
104 | Image.fromarray(numpy.swapaxes(tile_output, 0, 1)).save('{}/{}.png'.format(out_path, region))
105 |
--------------------------------------------------------------------------------
/fbastani-solution/run_train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | import model4u as model
4 |
5 | import georasters
6 | import numpy
7 | import os
8 | from PIL import Image
9 | import random
10 | import scipy.ndimage
11 | import subprocess
12 | import sys
13 | import tensorflow as tf
14 | import time
15 |
16 | from run_lib import *
17 |
18 | basedirs = sys.argv[3:]
19 | city = sys.argv[1]
20 | memid = int(sys.argv[2])
21 | PATH = 'models/{}/mem{}/'.format(city, memid)
22 |
23 | os.makedirs(PATH)
24 | os.mkdir(PATH + '/model_best')
25 | os.mkdir(PATH + '/model_latest')
26 |
27 | m = model.Model(in_channels=9)
28 |
29 | print 'loading tiles'
30 | start_loading_time = time.time()
31 | val_tiles, train_tiles = load_tiles_new(basedirs, city)
32 | print 'loaded {} tiles in {} seconds'.format(len(train_tiles), int(time.time() - start_loading_time))
33 |
34 | start_training_time = time.time()
35 |
36 | def epoch_to_learning_rate(epoch):
37 | num_tiles = len(train_tiles)
38 | if epoch < 50000 / num_tiles:
39 | return 0.0004
40 | elif epoch < 300000 / num_tiles:
41 | return 0.0001
42 | elif epoch < 700000 / num_tiles:
43 | return 0.00001
44 | elif epoch < 1000000 / num_tiles:
45 | return 0.000001
46 | else:
47 | return 0.0000001
48 |
49 | print 'begin training'
50 | session = tf.Session()
51 | session.run(m.init_op)
52 | latest_path = '{}/model_latest/model'.format(PATH)
53 | best_path = '{}/model_best/model'.format(PATH)
54 | best_loss = None
55 |
56 | val_rects = []
57 | for tile in val_tiles:
58 | for _ in xrange(1300*1300/SIZE/SIZE/2):
59 | val_rects.append(extract(tile))
60 |
61 | for epoch in xrange(2000000 / len(train_tiles)):
62 | epoch_time = time.time()
63 | random.shuffle(train_tiles)
64 | train_losses = []
65 | for i in xrange(0, len(train_tiles), model.BATCH_SIZE):
66 | batch_tiles = [extract(t) for t in train_tiles[i:i+model.BATCH_SIZE]]
67 | _, loss = session.run([m.optimizer, m.loss], feed_dict={
68 | m.is_training: True,
69 | m.inputs: [tile[0].astype('float32') / 255.0 for tile in batch_tiles],
70 | m.targets: [tile[1].astype('float32') / 255.0 for tile in batch_tiles],
71 | m.learning_rate: epoch_to_learning_rate(epoch),
72 | })
73 | train_losses.append(loss)
74 | train_loss = numpy.mean(train_losses)
75 | train_time = time.time()
76 |
77 | val_losses = []
78 | for i in xrange(0, len(val_rects), model.BATCH_SIZE):
79 | batch_tiles = val_rects[i:i+model.BATCH_SIZE]
80 | batch_targets = numpy.array([tile[1].astype('float32') / 255.0 for tile in batch_tiles], dtype='float32')
81 | outputs, loss = session.run([m.outputs, m.loss], feed_dict={
82 | m.is_training: False,
83 | m.inputs: [tile[0].astype('float32') / 255.0 for tile in batch_tiles],
84 | m.targets: batch_targets,
85 | })
86 | val_losses.append(loss)
87 |
88 | val_loss = numpy.mean(val_losses)
89 | val_time = time.time()
90 | elapsed = time.time() - start_training_time
91 |
92 | print 'iteration {}: train_time={}, val_time={}, train_loss={}, val_loss={}/{}, elapsed={}'.format(epoch, int(train_time - epoch_time), int(val_time - train_time), train_loss, val_loss, best_loss, int(elapsed))
93 |
94 | if epoch % 10 == 0:
95 | m.saver.save(session, latest_path)
96 | if best_loss is None or val_loss < best_loss:
97 | best_loss = val_loss
98 | m.saver.save(session, best_path)
99 |
100 | if elapsed > 3600 * 40:
101 | break
102 |
--------------------------------------------------------------------------------
/fbastani-solution/skeleton.py:
--------------------------------------------------------------------------------
1 | from discoverlib import geom
2 | from discoverlib import graph
3 |
4 | import math
5 | import numpy
6 | import os
7 | import scipy.ndimage
8 |
9 | PATH = '/data/spacenet2017/favyen/segmentation_model4d3/outputs'
10 | OUT_PATH = '/data/spacenet2017/favyen/segmentation_model4d3_newskeleton/graphs'
11 | TOL = 10
12 | THRESHOLD = 20
13 |
14 | circle_mask = numpy.ones((2*TOL+1, 2*TOL+1), dtype='uint8')
15 | for i in xrange(-TOL, TOL+1):
16 | for j in xrange(-TOL, TOL+1):
17 | d = math.sqrt(i * i + j * j)
18 | if d <= TOL:
19 | circle_mask[i+TOL, j+TOL] = 0
20 |
21 | def get_reachable_points(im, point, iterations):
22 | points = set()
23 | search = set()
24 | r = geom.Rectangle(geom.Point(0, 0), geom.Point(im.shape[0]-1, im.shape[1]-1))
25 | search.add(point)
26 | for _ in xrange(iterations):
27 | next_search = set()
28 | for point in search:
29 | for offset in [(-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]:
30 | adj_point = point.add(geom.Point(offset[0], offset[1]))
31 | if r.contains(adj_point) and adj_point not in points and im[adj_point.x, adj_point.y] > 0:
32 | points.add(adj_point)
33 | next_search.add(adj_point)
34 | search = next_search
35 | return points
36 |
37 | fnames = [fname for fname in os.listdir(PATH) if '.png' in fname]
38 | for fname in fnames:
39 | region = fname.split('.png')[0]
40 | im = numpy.swapaxes(scipy.ndimage.imread(os.path.join(PATH, fname)), 0, 1)
41 | im = (im > THRESHOLD).astype('uint8')
42 |
43 | g = graph.Graph()
44 | im_copy = numpy.zeros((im.shape[0], im.shape[1]), dtype='uint8')
45 | im_copy[:, :] = im[:, :]
46 | point_to_vertex = {}
47 |
48 | while im_copy.max() > 0:
49 | p = numpy.unravel_index(im_copy.argmax(), im_copy.shape)
50 | vertex = g.add_vertex(geom.Point(p[0]*2, p[1]*2))
51 | point_to_vertex[geom.Point(p[0], p[1])] = vertex
52 |
53 | # coordinates below are start/end between -TOL and TOL
54 | sx = max(-TOL, -p[0])
55 | ex = min(TOL + 1, im_copy.shape[0] - p[0])
56 | sy = max(-TOL, -p[1])
57 | ey = min(TOL + 1, im_copy.shape[1] - p[1])
58 | im_copy[sx+p[0]:ex+p[0], sy+p[1]:ey+p[1]] *= circle_mask[sx+TOL:ex+TOL, sy+TOL:ey+TOL]
59 |
60 | for vertex in g.vertices:
61 | for point in get_reachable_points(im, vertex.point.scale(0.5), 2*TOL):
62 | if point in point_to_vertex and point_to_vertex[point] != vertex:
63 | g.add_bidirectional_edge(vertex, point_to_vertex[point])
64 |
65 | g.save(os.path.join(OUT_PATH, region + '.graph'))
66 |
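The circle_mask built at the top of this file (zero inside a TOL-radius disk, one outside) is what spaces the graph vertices apart: multiplying the neighborhood of each chosen peak by the mask suppresses nearby pixels. A behavior-equivalent numpy sketch of the mask construction:

    import numpy as np

    TOL = 10
    ys, xs = np.ogrid[-TOL:TOL + 1, -TOL:TOL + 1]
    circle_mask = (np.hypot(ys, xs) > TOL).astype('uint8')  # 0 inside the disk, 1 outside
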
--------------------------------------------------------------------------------
/fbastani-solution/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | bash prep.sh
4 | python run_test.py $@
5 | rm -rf /wdata/*
6 |
--------------------------------------------------------------------------------
/fbastani-solution/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | rm -rf models/*
4 | bash prep.sh
5 |
6 | # install go packages
7 | export GOPATH=/go
8 | mkdir -p /go
9 | go get github.com/ajstarks/svgo
10 | go get github.com/dhconnelly/rtreego
11 | go get github.com/qedus/osmpbf
12 |
13 | # create graphs, training masks
14 | go run 1_convertgraphs.go $@
15 | go run 2_truth_tiles.go
16 |
17 | # run training
18 | python do_the_training.py $@
19 |
20 | rm -rf /wdata/*
21 |
--------------------------------------------------------------------------------
/pfr-solution/Dockerfile:
--------------------------------------------------------------------------------
1 | # Requirements:
2 | # - host machine with AVX2, 4 GPUs for training or 1 GPU for inference, and nvidia-docker
3 | # Important: nvidia-docker must be run with the option --ipc=host
4 |
5 | FROM nvidia/cuda:8.0-cudnn6-runtime-ubuntu16.04
6 |
7 | RUN apt-get update
8 | RUN apt-get install -y awscli bzip2
9 | RUN apt-get install -y python3-pip
10 | RUN apt-get install -y iotop psmisc
11 | RUN apt-get install -y python3-gdal
12 | RUN pip3 install pip --upgrade
13 | RUN pip3 install boto3
14 | RUN pip3 install requests
15 | RUN pip3 install numpy==1.13.3
16 | RUN pip3 install pandas==0.19.2
17 | RUN pip3 install scipy==1.0.0
18 | RUN pip3 install mxnet==0.11.0
19 | RUN pip3 install tqdm
20 | RUN pip3 install Shapely==1.6.3
21 | RUN pip3 install http://download.pytorch.org/whl/cu80/torch-0.3.0.post4-cp35-cp35m-linux_x86_64.whl
22 | RUN pip3 install torchvision==0.2.0
23 | RUN pip3 install scikit-image==0.13.1
24 | RUN pip3 uninstall pillow -y
25 | RUN apt-get install -y libjpeg-turbo8-dev zlib1g-dev
26 | RUN CC="cc -mavx2" pip3 install -U --force-reinstall pillow-simd
27 | RUN pip3 install gdown
28 | RUN apt-get install -y wget unzip
29 |
30 | # Only needed for train.sh: download generic pretrained models
31 | WORKDIR /workdir/pretrained
32 | RUN gdown 'https://drive.google.com/uc?id=0B_uPUDq5vVcAdkxOZExBSXI0dlU' && test -e dpn92-extra_2017_08_28.tar.gz
33 | RUN tar xvzf dpn92-extra_2017_08_28.tar.gz
34 |
35 | # Only needed for test.sh: download trained models
36 | WORKDIR /workdir/trained_models
37 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model01.pth
38 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model02.pth
39 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model03.pth
40 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model04.pth
41 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model05.pth
42 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model06.pth
43 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model07.pth
44 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model08.pth
45 | RUN wget -nv https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model09.pth
46 |
47 | # Install the code
48 | WORKDIR /workdir
49 | COPY *.sh /workdir/
50 | COPY code /workdir/code
51 | COPY model /workdir/model
52 | COPY param /workdir/param
53 | RUN chmod +x *.sh
54 |
55 | WORKDIR /wdata
56 | WORKDIR /workdir
57 |
--------------------------------------------------------------------------------
/pfr-solution/code/cleanup_directories.sh:
--------------------------------------------------------------------------------
1 | set -eu
2 |
3 | TEMP_PATH=/wdata
4 | rm -rf "$TEMP_PATH"/computed 2>/dev/null || true
5 | rm -f computed 2>/dev/null || true
6 | rm -rf unpacked 2>/dev/null || true
7 |
--------------------------------------------------------------------------------
/pfr-solution/code/do_unpack.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function
2 | from osgeo import gdal
3 | from glob import glob
4 | import os
5 | from tqdm import tqdm
6 |
7 | def handle_file(path, meta_f):
8 | assert path.endswith('.tif')
9 | assert '\\' not in path
10 | key = path.split('/', 1)[1][:-4]
11 | ds = gdal.Open(path)
12 | bands = ds.RasterCount
13 | assert bands in (1,3,8)
14 | geo = ds.GetGeoTransform()
15 | arr_shape = (ds.RasterCount, ds.RasterYSize, ds.RasterXSize)
16 | meta = arr_shape + geo
17 | print(','.join(str(x) for x in (key,) + meta), file=meta_f)
18 | meta_f.flush()
19 |
20 | def scan_zone(zone):
21 | try:
22 | os.makedirs('unpacked/%s' % zone)
23 | except os.error:
24 | pass
25 | with open('unpacked/%s/geo_transform.csv.tmp'%zone, 'w') as meta_f:
26 | patterns = [
27 | 'spacenet/%s/PAN/*.tif' % zone,
28 | 'spacenet/%s/MUL/*.tif' % zone,
29 | #'spacenet/%s/RGB-PanSharpen/*.tif' % zone,
30 | #'spacenet/%s/MUL-PanSharpen/*.tif' % zone,
31 | ]
32 | print('key,bands,height,width,lon,dx_lon,dy_lon,lat,dx_lat,dy_lat', file=meta_f)
33 | for pattern in patterns:
34 | for path in sorted(glob(pattern)):
35 |                 path = path.replace('\\', '/')  # normalize Windows-style path separators
36 | handle_file(path, meta_f)
37 | os.rename('unpacked/%s/geo_transform.csv.tmp'%zone,
38 | 'unpacked/%s/geo_transform.csv'%zone)
39 |
40 |
41 | def scan_all():
42 | zones = sorted(path[9:] for path in glob('spacenet/AOI_*'))
43 | for zone in tqdm(zones, desc="extract TIFF metadata"):
44 | scan_zone(zone)
45 |
46 | if __name__ == '__main__':
47 | scan_all()
48 |
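Each zone ends up with a geo_transform.csv mapping image keys to the raster shape plus the six GDAL geotransform coefficients. A sketch of consuming it with pandas (zone name illustrative):

    import pandas as pd

    meta = pd.read_csv('unpacked/AOI_2_Vegas_Roads_Train/geo_transform.csv', index_col='key')
    row = meta.iloc[0]
    # GDAL convention: lon(col, r) = lon + col*dx_lon + r*dy_lon, and likewise for lat
    print(row['lon'], row['dx_lon'], row['lat'], row['dx_lat'])
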
--------------------------------------------------------------------------------
/pfr-solution/code/predict.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Variable
3 | import numpy as np
4 | from torchvision import transforms
5 | import os
6 | from tqdm import tqdm
7 | import yaml
8 | from PIL import Image
9 | import itertools
10 |
11 | import rd
12 |
13 | from PIL import PILLOW_VERSION; assert PILLOW_VERSION=="4.3.0.post0"
14 |
15 | n_gpu = 1
16 |
17 | def save_prediction(model, model_root, model_desc, batch_size):
18 | model.train(False)
19 | folds = 'val test'.split()
20 | for fold in folds:
21 | iids = rd.dict_metadata_by_fold(rd.get_val_fold(model_root))[fold].index
22 | if len(iids) == 0:
23 | continue
24 | for iid in tqdm(iids, desc="predict %r on %r" % (model_root, fold), unit="image"):
25 | output_opt = dict(classes=3, stride=4, atom_size=8, size=40)
26 | coarsest_step_size = 32
27 | label_stride = output_opt['stride']
28 | label_size = output_opt['size']
29 | label_span = label_stride * label_size
30 | full_image_size = 1300
31 | n_classes = output_opt['classes']
32 | crop_size = 352
33 |
34 | increment_size = coarsest_step_size
35 | if increment_size % label_stride != 0:
36 | increment_size *= 3
37 | assert increment_size % label_stride == 0
38 | extra_margin = 96
39 | while 2 * (label_span-extra_margin) < full_image_size:
40 | label_size += increment_size // label_stride
41 | label_span = label_stride * label_size
42 | crop_size += increment_size
43 |
44 | img_extra = (crop_size - label_span)//2
45 | img_padded = np.array(Image.open(rd.iid_path(iid, 'rgb_aniso_jpg')).crop((-img_extra, -img_extra, full_image_size+img_extra, full_image_size+img_extra)))
46 | crop_step = full_image_size + 2*img_extra - crop_size
47 | n_blocks = 2
48 | img_padded = transforms.ToTensor()(img_padded)
49 | img_padded = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img_padded)
50 | img_padded = img_padded.cpu()
51 | crops = (img_padded[:,ij//n_blocks*crop_step:,ij%n_blocks*crop_step:][:,:crop_size,:crop_size] for ij in range(n_blocks**2))
52 | img_outputs = []
53 | for _,g in itertools.groupby(enumerate(crops), key=lambda iv: iv[0]//batch_size):
54 | batch = torch.stack([iv[1] for iv in g]).cuda()
55 | batch = Variable(batch)
56 | outputs = model(batch)
57 | if len(outputs.shape)==4:
58 | # batch x chan x size x size -> batch x size x size x chan
59 | outputs = outputs.permute(0, 2, 3, 1)
60 | else:
61 | assert len(outputs.shape)==2
62 | img_outputs.append(outputs.data.cpu().numpy())
63 | del batch
64 | del outputs
65 | img_outputs = np.concatenate(img_outputs).reshape(n_blocks*n_blocks, label_size, label_size, n_classes)
66 |
67 | rem = (label_span - crop_step)//2
68 | i1 = label_size - rem // label_stride
69 | i2 = label_size + i1 - (label_span + crop_step) // label_stride
70 | assert i2>=0
71 | img_outputs = np.concatenate([
72 | np.concatenate([
73 | img_outputs[0][:i1,:i1],
74 | img_outputs[1][:i1,i2:]], axis=1),
75 | np.concatenate([
76 | img_outputs[2][i2:,:i1],
77 | img_outputs[3][i2:,i2:]], axis=1)], axis=0)
78 | img_outputs = img_outputs[...,::-1] # BGR for classes 0,1,2
79 | img_outputs = np.exp(img_outputs)
80 | img_outputs /= img_outputs.sum(axis=-1)[...,None]
81 | out = Image.fromarray((255*img_outputs).astype('u1'))
82 | path = rd.iid_path(iid, 'rgb_pred_%s_png' % model_root)
83 | os.makedirs(os.path.dirname(path), exist_ok=True)
84 | out.save(path)
85 |
86 | if __name__=='__main__':
87 | import sys
88 | args = sys.argv[1:]
89 | if not args:
90 | sys.exit("no model specified")
91 |
92 | for yaml_path in args:
93 | with open(yaml_path) as f:
94 | model_desc = yaml.load(f)
95 |
96 | model_path = 'trained_models/' + os.path.basename(yaml_path[:-5]) + '.pth'
97 | model_root = os.path.basename(yaml_path)[:-5]
98 | batch_size = 2
99 |
100 | model = torch.load(model_path).cuda()
101 | model.device_ids = list(range(torch.cuda.device_count()))
102 | save_prediction(model, model_root, model_desc, batch_size)
103 |
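The exp/normalize step near the end of save_prediction is a per-pixel softmax over the three output classes. A standalone, numerically-stabilized equivalent:

    import numpy as np

    def softmax_last_axis(logits):
        # subtracting the per-pixel max is mathematically a no-op but
        # avoids the overflow a bare np.exp can hit on large logits
        z = logits - logits.max(axis=-1, keepdims=True)
        e = np.exp(z)
        return e / e.sum(axis=-1, keepdims=True)
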
--------------------------------------------------------------------------------
/pfr-solution/code/provision.sh:
--------------------------------------------------------------------------------
1 | set -eu
2 | python3 code/do_unpack.py
3 | python3 code/rd.py --provision "$@"
4 |
--------------------------------------------------------------------------------
/pfr-solution/code/pytorch_dpn/adaptive_avgmax_pool.py:
--------------------------------------------------------------------------------
1 | """ PyTorch selectable adaptive pooling
2 | Adaptive pooling with the ability to select the type of pooling from:
3 | * 'avg' - Average pooling
4 | * 'max' - Max pooling
5 | * 'avgmax' - Sum of average and max pooling re-scaled by 0.5
6 | * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim
7 |
8 | Both a functional and a nn.Module version of the pooling is provided.
9 |
10 | Author: Ross Wightman (rwightman)
11 | """
12 | import torch
13 | import torch.nn as nn
14 | import torch.nn.functional as F
15 |
16 |
17 | def pooling_factor(pool_type='avg'):
18 | return 2 if pool_type == 'avgmaxc' else 1
19 |
20 |
21 | def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
22 | """Selectable global pooling function with dynamic input kernel size
23 | """
24 | if pool_type == 'avgmaxc':
25 | x = torch.cat([
26 | F.avg_pool2d(
27 | x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
28 | F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
29 | ], dim=1)
30 | elif pool_type == 'avgmax':
31 | x_avg = F.avg_pool2d(
32 | x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
33 | x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
34 | x = 0.5 * (x_avg + x_max)
35 | elif pool_type == 'max':
36 | x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
37 | else:
38 | if pool_type != 'avg':
39 | print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
40 | x = F.avg_pool2d(
41 | x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
42 | return x
43 |
44 |
45 | class AdaptiveAvgMaxPool2d(torch.nn.Module):
46 | """Selectable global pooling layer with dynamic input kernel size
47 | """
48 | def __init__(self, output_size=1, pool_type='avg'):
49 | super(AdaptiveAvgMaxPool2d, self).__init__()
50 | self.output_size = output_size
51 | self.pool_type = pool_type
52 | if pool_type == 'avgmaxc' or pool_type == 'avgmax':
53 | self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])
54 | elif pool_type == 'max':
55 | self.pool = nn.AdaptiveMaxPool2d(output_size)
56 | else:
57 | if pool_type != 'avg':
58 | print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
59 | self.pool = nn.AdaptiveAvgPool2d(output_size)
60 |
61 | def forward(self, x):
62 | if self.pool_type == 'avgmaxc':
63 | x = torch.cat([p(x) for p in self.pool], dim=1)
64 | elif self.pool_type == 'avgmax':
65 | x = 0.5 * torch.sum(torch.stack([p(x) for p in self.pool]), 0).squeeze(dim=0)
66 | else:
67 | x = self.pool(x)
68 | return x
69 |
70 | def factor(self):
71 | return pooling_factor(self.pool_type)
72 |
73 | def __repr__(self):
74 | return self.__class__.__name__ + ' (' \
75 | + 'output_size=' + str(self.output_size) \
76 | + ', pool_type=' + self.pool_type + ')'
77 |
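A minimal usage sketch of this module (input sizes illustrative):

    import torch

    pool = AdaptiveAvgMaxPool2d(output_size=1, pool_type='avgmax')
    x = torch.randn(2, 64, 7, 7)
    y = pool(x)  # shape (2, 64, 1, 1): elementwise 0.5 * (global avg + global max)
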
--------------------------------------------------------------------------------
/pfr-solution/code/pytorch_dpn/dataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import torch.utils.data as data
6 |
7 | import os
8 | import re
9 | import torch
10 | from PIL import Image
11 |
12 | IMG_EXTENSIONS = ['.png', '.jpg', '.jpeg']
13 |
14 |
15 | def natural_key(string_):
16 | """See http://www.codinghorror.com/blog/archives/001018.html"""
17 | return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
18 |
19 |
20 | def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True):
21 | if class_to_idx is None:
22 | class_to_idx = dict()
23 | build_class_idx = True
24 | else:
25 | build_class_idx = False
26 | labels = []
27 | filenames = []
28 | for root, subdirs, files in os.walk(folder, topdown=False):
29 | rel_path = os.path.relpath(root, folder) if (root != folder) else ''
30 | label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_')
31 | if build_class_idx and not subdirs:
32 | class_to_idx[label] = None
33 | for f in files:
34 | base, ext = os.path.splitext(f)
35 | if ext.lower() in types:
36 | filenames.append(os.path.join(root, f))
37 | labels.append(label)
38 | if build_class_idx:
39 | classes = sorted(class_to_idx.keys(), key=natural_key)
40 | for idx, c in enumerate(classes):
41 | class_to_idx[c] = idx
42 | images_and_targets = zip(filenames, [class_to_idx[l] for l in labels])
43 | if sort:
44 | images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
45 | if build_class_idx:
46 | return images_and_targets, classes, class_to_idx
47 | else:
48 | return images_and_targets
49 |
50 |
51 | class Dataset(data.Dataset):
52 |
53 | def __init__(
54 | self,
55 | root,
56 | transform=None):
57 |
58 | imgs, _, _ = find_images_and_targets(root)
59 | if len(imgs) == 0:
60 | raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
61 | "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
62 | self.root = root
63 | self.imgs = imgs
64 | self.transform = transform
65 |
66 | def __getitem__(self, index):
67 | path, target = self.imgs[index]
68 | img = Image.open(path).convert('RGB')
69 | if self.transform is not None:
70 | img = self.transform(img)
71 | if target is None:
72 | target = torch.zeros(1).long()
73 | return img, target
74 |
75 | def __len__(self):
76 | return len(self.imgs)
77 |
78 | def set_transform(self, transform):
79 | self.transform = transform
80 |
81 | def filenames(self, indices=[], basename=False):
82 | if indices:
83 | if basename:
84 | return [os.path.basename(self.imgs[i][0]) for i in indices]
85 | else:
86 | return [self.imgs[i][0] for i in indices]
87 | else:
88 | if basename:
89 | return [os.path.basename(x[0]) for x in self.imgs]
90 | else:
91 | return [x[0] for x in self.imgs]
92 |
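A usage sketch, assuming the module is importable as shown (folder path illustrative); leaf directory names become the class labels:

    from torchvision import transforms
    from pytorch_dpn.dataset import Dataset  # import path assumed

    ds = Dataset('some_image_folder', transform=transforms.ToTensor())
    img, target = ds[0]  # transformed image tensor and its leaf-folder class index
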
--------------------------------------------------------------------------------
/pfr-solution/code/pytorch_dpn/model_factory.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import math
6 | from .dpn import dpn68, dpn68b, dpn92, dpn98, dpn131, dpn107
7 | from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152
8 | from torchvision.models.densenet import densenet121, densenet169, densenet161, densenet201
9 | from torchvision.models.inception import inception_v3
10 | import torchvision.transforms as transforms
11 | from PIL import Image
12 |
13 |
14 | def create_model(model_name, num_classes=1000, pretrained=False, **kwargs):
15 | if 'test_time_pool' in kwargs:
16 | test_time_pool = kwargs.pop('test_time_pool')
17 | else:
18 | test_time_pool = True
19 | if 'extra' in kwargs:
20 | extra = kwargs.pop('extra')
21 | else:
22 | extra = True
23 | if model_name == 'dpn68':
24 | model = dpn68(
25 | num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
26 | elif model_name == 'dpn68b':
27 | model = dpn68b(
28 | num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
29 | elif model_name == 'dpn92':
30 | model = dpn92(
31 | num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool, extra=extra)
32 | elif model_name == 'dpn98':
33 | model = dpn98(
34 | num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
35 | elif model_name == 'dpn131':
36 | model = dpn131(
37 | num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
38 | elif model_name == 'dpn107':
39 | model = dpn107(
40 | num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
41 | elif model_name == 'resnet18':
42 | model = resnet18(num_classes=num_classes, pretrained=pretrained, **kwargs)
43 | elif model_name == 'resnet34':
44 | model = resnet34(num_classes=num_classes, pretrained=pretrained, **kwargs)
45 | elif model_name == 'resnet50':
46 | model = resnet50(num_classes=num_classes, pretrained=pretrained, **kwargs)
47 | elif model_name == 'resnet101':
48 | model = resnet101(num_classes=num_classes, pretrained=pretrained, **kwargs)
49 | elif model_name == 'resnet152':
50 | model = resnet152(num_classes=num_classes, pretrained=pretrained, **kwargs)
51 | elif model_name == 'densenet121':
52 | model = densenet121(num_classes=num_classes, pretrained=pretrained, **kwargs)
53 | elif model_name == 'densenet161':
54 | model = densenet161(num_classes=num_classes, pretrained=pretrained, **kwargs)
55 | elif model_name == 'densenet169':
56 | model = densenet169(num_classes=num_classes, pretrained=pretrained, **kwargs)
57 | elif model_name == 'densenet201':
58 | model = densenet201(num_classes=num_classes, pretrained=pretrained, **kwargs)
59 | elif model_name == 'inception_v3':
60 | model = inception_v3(
61 | num_classes=num_classes, pretrained=pretrained, transform_input=False, **kwargs)
62 | else:
63 | assert False, "Unknown model architecture (%s)" % model_name
64 | return model
65 |
66 |
67 | class LeNormalize(object):
68 | """Normalize to -1..1 in Google Inception style
69 | """
70 | def __call__(self, tensor):
71 | for t in tensor:
72 | t.sub_(0.5).mul_(2.0)
73 | return tensor
74 |
75 |
76 | DEFAULT_CROP_PCT = 0.875
77 |
78 |
79 | def get_transforms_eval(model_name, img_size=224, crop_pct=None):
80 |     # the default crop is applied per-branch below so the dpn logic can detect an unset crop_pct
81 | if 'dpn' in model_name:
82 | if crop_pct is None:
83 | # Use default 87.5% crop for model's native img_size
84 | # but use 100% crop for larger than native as it
85 | # improves test time results across all models.
86 | if img_size == 224:
87 | scale_size = int(math.floor(img_size / DEFAULT_CROP_PCT))
88 | else:
89 | scale_size = img_size
90 | else:
91 | scale_size = int(math.floor(img_size / crop_pct))
92 | normalize = transforms.Normalize(
93 | mean=[124 / 255, 117 / 255, 104 / 255],
94 | std=[1 / (.0167 * 255)] * 3)
95 | elif 'inception' in model_name:
96 |         scale_size = int(math.floor(img_size / (crop_pct or DEFAULT_CROP_PCT)))
97 | normalize = LeNormalize()
98 | else:
99 |         scale_size = int(math.floor(img_size / (crop_pct or DEFAULT_CROP_PCT)))
100 | normalize = transforms.Normalize(
101 | mean=[0.485, 0.456, 0.406],
102 | std=[0.229, 0.224, 0.225])
103 |
104 | return transforms.Compose([
105 | transforms.Scale(scale_size, Image.BICUBIC),
106 | transforms.CenterCrop(img_size),
107 | transforms.ToTensor(),
108 | normalize])
109 |
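A usage sketch of the factory (argument values illustrative; pretrained weights not downloaded here):

    from pytorch_dpn.model_factory import create_model, get_transforms_eval  # import path assumed

    model = create_model('dpn92', num_classes=1000, pretrained=False)
    eval_tf = get_transforms_eval('dpn92', img_size=224)  # Scale(256) -> CenterCrop(224) -> normalize
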
--------------------------------------------------------------------------------
/pfr-solution/code/setup_directories.sh:
--------------------------------------------------------------------------------
1 | set -eu
2 |
3 | TEMP_PATH=/wdata
4 | rm -f spacenet/* 2>/dev/null || true
5 | rmdir spacenet 2>/dev/null || true
6 | mkdir spacenet
7 | for path in $@; do
8 | echo "Input directory: '$path'"
9 | ln -s "$path" spacenet/
10 | done
11 | rm -rf "$TEMP_PATH"/computed 2>/dev/null || true
12 | rm -f computed 2>/dev/null || true
13 | mkdir "$TEMP_PATH"/computed
14 | ln -s "$TEMP_PATH"/computed computed
15 | rm -rf unpacked 2>/dev/null || true
16 |
--------------------------------------------------------------------------------
/pfr-solution/code/vectorize.py:
--------------------------------------------------------------------------------
1 | import rd
2 | import sys
3 |
4 | args = sys.argv[1:]
5 | model_root, fold, out_path = args
6 | rd.make_submission(model_root, fold, out_path)
7 | print("done!")
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/download_models.sh:
--------------------------------------------------------------------------------
1 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model01.pth
2 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model02.pth
3 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model03.pth
4 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model04.pth
5 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model05.pth
6 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model06.pth
7 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model07.pth
8 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model08.pth
9 | wget https://s3.amazonaws.com/hello-tc/spacenet3-code/trained_models/model09.pth
10 |
--------------------------------------------------------------------------------
/pfr-solution/model/model01.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: null
2 | automatic_clipping: 1
3 | conv5_params: [12, 32]
4 | drop_margin: 0
5 | epoch_milestones: [6, 7, 8, 9, 10]
6 | loss_size_average: true
7 | training_set: x
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model02.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: null
2 | automatic_clipping: 1
3 | conv5_params: [12, 32]
4 | drop_margin: 0
5 | epoch_milestones: [6, 7, 8, 9, 10]
6 | loss_size_average: true
7 | training_set: y
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model03.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: null
2 | automatic_clipping: 1
3 | conv5_params: [12, 32]
4 | drop_margin: 0
5 | epoch_milestones: [6, 7, 8, 9, 10]
6 | loss_size_average: true
7 | training_set: z
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model04.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: null
2 | automatic_clipping: 1
3 | conv5_params: [12, 32]
4 | drop_margin: 0
5 | epoch_milestones: [6, 7, 8, 9, 10]
6 | loss_size_average: true
7 | training_set: w
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model05.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: null
2 | automatic_clipping: 1
3 | conv5_params: [16, 32]
4 | drop_margin: 0
5 | epoch_milestones: [6, 7, 8, 9]
6 | loss_size_average: true
7 | training_set: w
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model06.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: null
2 | automatic_clipping: 1
3 | conv5_params: [16, 32]
4 | drop_margin: 0
5 | epoch_milestones: [6, 7, 8, 9]
6 | loss_size_average: true
7 | training_set: w
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model07.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: 1800
2 | automatic_clipping: 2
3 | conv5_params: [12, 32]
4 | drop_margin: 10
5 | epoch_milestones: [6, 7, 8, 9]
6 | loss_size_average: false
7 | training_set: x
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model08.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: 1800
2 | automatic_clipping: 2
3 | conv5_params: [12, 32]
4 | drop_margin: 10
5 | epoch_milestones: [6, 7, 8, 9]
6 | loss_size_average: false
7 | training_set: y
8 |
--------------------------------------------------------------------------------
/pfr-solution/model/model09.yaml:
--------------------------------------------------------------------------------
1 | augment_crop_period: 1800
2 | automatic_clipping: 2
3 | conv5_params: [12, 32]
4 | drop_margin: 10
5 | epoch_milestones: [6, 7, 8, 9]
6 | loss_size_average: false
7 | training_set: z
8 |
--------------------------------------------------------------------------------
/pfr-solution/param/adjust_rgb_v1.csv:
--------------------------------------------------------------------------------
1 | ,a,a,a,b,b,b
2 | ,0,1,2,0,1,2
3 | Khartoum,866.4705882352941,773.8235294117648,443.8235294117647,281.1764705882353,405.80882352941177,303.30882352941177
4 | Paris,802.5,625.5,352.5,144.65,242.53,231.65
5 | Shanghai,605.2941176470588,594.7058823529412,397.05882352941177,165.07941176470587,315.92058823529413,264.95588235294116
6 | Vegas,1436.4705882352941,1574.1176470588236,963.5294117647059,143.09411764705882,293.22352941176473,258.90588235294115
7 |
--------------------------------------------------------------------------------
/pfr-solution/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eu
3 |
4 | output_name="${@:$#}"
5 | directories=${@:1:($#-1)}
6 |
7 | COMPONENT_MODELS="model01 model02 model03 model04 model05 model06 model07 model08 model09"
8 |
9 | bash code/setup_directories.sh $directories
10 | echo "Output path: '$output_name.txt'"
11 | time bash code/provision.sh
12 | for model in $COMPONENT_MODELS; do
13 | time python3 code/predict.py model/$model.yaml
14 | done
15 | time python3 code/vectorize.py "$COMPONENT_MODELS" test "$output_name".txt
16 | echo "Successfully generated '$output_name.txt'."
17 | bash code/cleanup_directories.sh
18 |
--------------------------------------------------------------------------------
/pfr-solution/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eu
3 |
4 | bash code/setup_directories.sh "$@"
5 | if [ -e trained_models ]; then
6 | echo "Removing all existing trained models."
7 | rm -f trained_models/*.pth
8 | fi
9 | time bash code/provision.sh
10 | for yaml in model/*.yaml; do
11 | time python3 code/train.py "$yaml"
12 | done
13 | echo "Training complete."
14 | bash code/cleanup_directories.sh
15 |
--------------------------------------------------------------------------------
/selim_sef-solution/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:8.0-cudnn6-devel
2 |
3 | MAINTAINER Selim Seferbekov
4 |
5 | ARG TENSORFLOW_VERSION=1.4.1
6 | ARG TENSORFLOW_ARCH=gpu
7 | ARG KERAS_VERSION=2.1.3
8 |
9 | RUN apt-get update && \
10 | apt-get install -y curl build-essential libpng12-dev libffi-dev \
11 | libboost-all-dev \
12 | libgflags-dev \
13 | libgoogle-glog-dev \
14 | libhdf5-serial-dev \
15 | libleveldb-dev \
16 | liblmdb-dev \
17 | libopencv-dev \
18 | libprotobuf-dev \
19 | libsnappy-dev \
20 | protobuf-compiler \
21 | git \
22 | && \
23 | apt-get clean && \
24 | rm -rf /var/tmp /tmp /var/lib/apt/lists/*
25 |
26 | RUN curl -sSL -o installer.sh https://repo.continuum.io/archive/Anaconda3-5.0.1-Linux-x86_64.sh && \
27 | bash /installer.sh -b -f && \
28 | rm /installer.sh
29 |
30 | ENV PATH "$PATH:/root/anaconda3/bin"
31 |
32 | RUN pip --no-cache-dir install \
33 | https://storage.googleapis.com/tensorflow/linux/${TENSORFLOW_ARCH}/tensorflow_${TENSORFLOW_ARCH}-${TENSORFLOW_VERSION}-cp36-cp36m-linux_x86_64.whl
34 |
35 | RUN pip install --no-cache-dir --no-dependencies keras==${KERAS_VERSION}
36 | RUN conda install tqdm
37 | RUN conda install -c conda-forge opencv
38 | RUN pip install git+https://github.com/yxdragon/sknw
39 | RUN pip install pygeoif
40 | RUN pip install shapely
41 | RUN pip install simplification
42 |
43 | WORKDIR /work
44 |
45 | COPY . /work/
46 |
47 |
48 | RUN chmod 777 train.sh
49 | RUN chmod 777 test.sh
50 |
51 |
--------------------------------------------------------------------------------
/selim_sef-solution/calculate_stats.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from skimage.external import tifffile
4 |
5 | from tqdm import tqdm
6 |
7 | from params import args
8 | from tools.mul_img_utils import stretch_8bit
9 |
10 | cities = ['AOI_2_Vegas', 'AOI_3_Paris', 'AOI_4_Shanghai', 'AOI_5_Khartoum', ]
11 |
12 |
13 | def calc_stats(img_dir):
14 | city_mean_value = {}
15 | for city in cities:
16 | city_mean = []
17 | city_mean_img = np.zeros((1300, 1300, 8))
18 | num_images = 0
19 | city_dir = os.path.join(img_dir, city + '_Roads_Train', 'MUL-PanSharpen')
20 | for f in tqdm(os.listdir(city_dir)):
21 | if f.endswith(".tif"):
22 | arr = tifffile.imread(os.path.join(city_dir, f))
23 |                 image = np.stack([arr[..., 4], arr[..., 2], arr[..., 1], arr[..., 0], arr[..., 3], arr[..., 5], arr[..., 6], arr[..., 7]], axis=-1)  # put R (4), G (2), B (1) first, then the remaining MUL bands
24 | image = stretch_8bit(image)
25 | if image is not None:
26 | city_mean_img += (image * 255.)
27 | num_images += 1
28 |
29 | for i in range(8):
30 | city_mean.append(np.mean(city_mean_img[..., i] / num_images))
31 | city_mean_value[city] = city_mean
32 |
33 | return city_mean_value
34 |
35 |
36 | if __name__ == '__main__':
37 | print(calc_stats(args.img_dir))
38 |
--------------------------------------------------------------------------------
/selim_sef-solution/datasets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/selim_sef-solution/datasets/__init__.py
--------------------------------------------------------------------------------
/selim_sef-solution/docker-build.sh:
--------------------------------------------------------------------------------
1 | nvidia-docker build -t selim_sef .
2 |
--------------------------------------------------------------------------------
/selim_sef-solution/docker-remove.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | docker images -q --filter "dangling=true" | xargs docker rmi
4 |
--------------------------------------------------------------------------------
/selim_sef-solution/docker-run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | nvidia-docker run -v /local_data/SpaceNet_Roads_Dataset:/data -v /local_data/SpaceNet_Roads_Dataset/results/selim_sef:/wdata --rm -ti --ipc=host selim_sef
4 |
--------------------------------------------------------------------------------
/selim_sef-solution/docker-stop.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | docker stop $(docker ps -a -q)
4 | docker rm $(docker ps -a -q)
--------------------------------------------------------------------------------
/selim_sef-solution/download_models.sh:
--------------------------------------------------------------------------------
1 | mkdir trained_models
2 | aws s3 sync s3://spacenet-dataset/SpaceNet_Roads_Competition/Pretrained_Models/04-selim_sef/ trained_models/
3 |
4 |
--------------------------------------------------------------------------------
/selim_sef-solution/generate_submission.py:
--------------------------------------------------------------------------------
1 | from multiprocessing.pool import Pool
2 |
3 | import cv2
4 | import numpy as np
5 | import os
6 |
7 | from params import args
8 | from tools.vectorize import to_line_strings
9 |
10 | folders = [
11 | 'all_masks/linknet_inception',
12 | 'all_masks/inception-unet',
13 | 'all_masks/clahe_inception-swish',
14 | 'all_masks/clahe_linknet_inception',
15 | 'all_masks/clahe_linknet_inception_lite',
16 | 'all_masks/clahe_linknet_resnet50'
17 | ]
18 |
19 |
20 | def predict(f):
21 | image_id = f.split('MUL-PanSharpen_')[1].split(".tif")[0]
22 | masks = []
23 | for folder in folders:
24 | masks.append(cv2.imread(os.path.join(folder, f + ".png")) / 255)
25 | mask = np.average(np.array(masks), axis=0)
26 | line_strings = to_line_strings(mask, threashold=0.25, sigma=0.5, dilation=1)
27 | result = ""
28 | if len(line_strings) > 0:
29 | for line_string in line_strings:
30 | result += '{image_id},"{line}"\n'.format(image_id=image_id, line=line_string)
31 | else:
32 | result += "{image_id},{line}\n".format(image_id=image_id, line="LINESTRING EMPTY")
33 |
34 | return result
35 |
36 |
37 | def multi_predict(X, predict):
38 | pool = Pool(4)
39 | results = pool.map(predict, X)
40 | pool.close()
41 | pool.join()
42 | return results
43 |
44 |
45 | f_submit = open(args.output_file + ".txt", "w")
46 |
47 | for city_dir in args.dirs_to_process:
48 | print("ensemble for dir ", city_dir)
49 |
50 |
51 | test_dir = os.path.join(city_dir, 'MUL-PanSharpen')
52 | files = sorted(os.listdir(test_dir))
53 | city_results = multi_predict(files, predict)
54 | for line in city_results:
55 | f_submit.write(line)
56 |
57 | f_submit.close()
58 |
--------------------------------------------------------------------------------
/selim_sef-solution/losses.py:
--------------------------------------------------------------------------------
1 | import keras.backend as K
2 | from keras.backend.tensorflow_backend import _to_tensor
3 | from keras.losses import mean_squared_error
4 | import numpy as np
5 |
6 |
7 | def dice_coef_clipped(y_true, y_pred, smooth=1.0):
8 | y_true_f = K.flatten(K.round(y_true))
9 | y_pred_f = K.flatten(K.round(y_pred))
10 | intersection = K.sum(y_true_f * y_pred_f)
11 | return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
12 |
13 |
14 | def dice_coef(y_true, y_pred, smooth=1.0):
15 | y_true_f = K.flatten(y_true)
16 | y_pred_f = K.flatten(y_pred)
17 | intersection = K.sum(y_true_f * y_pred_f)
18 | return K.mean((2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))
19 |
20 |
21 | def bootstrapped_crossentropy(y_true, y_pred, bootstrap_type='hard', alpha=0.95):
22 | target_tensor = y_true
23 | prediction_tensor = y_pred
24 | _epsilon = _to_tensor(K.epsilon(), prediction_tensor.dtype.base_dtype)
25 | prediction_tensor = K.tf.clip_by_value(prediction_tensor, _epsilon, 1 - _epsilon)
26 | prediction_tensor = K.tf.log(prediction_tensor / (1 - prediction_tensor))
27 |
28 | if bootstrap_type == 'soft':
29 | bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.sigmoid(prediction_tensor)
30 | else:
31 | bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.cast(
32 | K.tf.sigmoid(prediction_tensor) > 0.5, K.tf.float32)
33 | return K.mean(K.tf.nn.sigmoid_cross_entropy_with_logits(
34 | labels=bootstrap_target_tensor, logits=prediction_tensor))
35 |
36 |
37 | def ceneterline_loss(y, p):  # dice+bce between the eroded ("centerline") target and the prediction masked to it
38 | centerline = get_eroded(y)
39 | p = p * centerline
40 | return dice_coef_loss_bce(centerline, p, dice=0.5, bce=0.5, bootstrapping='soft', alpha=1)
41 |
42 | def get_eroded(y):  # erode the road mask so only pixels near the centerline remain
43 | structure = np.asarray(np.zeros((3, 3, 1)), dtype="float32")
44 | filter = K.tf.constant(structure, dtype="float32")
45 | erosion = K.tf.nn.erosion2d(y, strides=[1, 1, 1, 1], rates=[1, 5, 5, 1], kernel=filter, padding='SAME')
46 | return erosion
47 |
48 | def dice_coef_loss(y_true, y_pred):
49 | return 1 - dice_coef(y_true, y_pred)
50 |
51 |
52 | def dice_coef_loss_bce(y_true, y_pred, dice=0.5, bce=0.5, bootstrapping='hard', alpha=1.):
53 | return bootstrapped_crossentropy(y_true, y_pred, bootstrapping, alpha) * bce + dice_coef_loss(y_true, y_pred) * dice
54 |
55 |
56 | def binary_crossentropy(y, p):
57 | return K.mean(K.binary_crossentropy(y, p))
58 |
59 |
60 | def mae_vgg(y_true, y_pred):  # note: averages squared (not absolute) errors over the flattened feature maps
61 | y_true = K.permute_dimensions(y_true, (0, 3, 1, 2))
62 | y_pred = K.permute_dimensions(y_pred, (0, 3, 1, 2))
63 |
64 | y_true = K.reshape(y_true, (K.shape(y_true)[0], K.shape(y_true)[1], K.shape(y_true)[2] * K.shape(y_true)[3]))
65 | y_pred = K.reshape(y_pred, (K.shape(y_pred)[0], K.shape(y_pred)[1], K.shape(y_pred)[2] * K.shape(y_pred)[3]))
66 |
67 | return K.mean(mean_squared_error(y_true, y_pred))
68 |
69 |
70 | def make_loss(loss_name):
71 | if loss_name == 'bce_dice':
72 | def loss(y, p):
73 | return dice_coef_loss_bce(y, p, dice=0.5, bce=0.5, bootstrapping='soft', alpha=1)
74 |
75 | return loss
76 | if loss_name == 'bce':
77 | return binary_crossentropy
78 | else:
79 | ValueError("Unknown loss.")
80 |
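The 'soft' bootstrapping branch above blends the ground truth with the model's own sigmoid predictions before computing cross-entropy. A numpy sketch of just the target construction:

    import numpy as np

    def soft_bootstrap_target(y_true, logits, alpha=0.95):
        p = 1.0 / (1.0 + np.exp(-logits))      # sigmoid of the recovered logits
        return alpha * y_true + (1.0 - alpha) * p
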
--------------------------------------------------------------------------------
/selim_sef-solution/model_name_encoder.py:
--------------------------------------------------------------------------------
1 | # clahe
2 | # stretch
3 | # caffe
4 |
5 | def encode_params(clahe, mode, stretch):
6 | result = ""
7 | if clahe:
8 | result += "1"
9 | else:
10 | result += "0"
11 | if stretch:
12 | result += "1"
13 | else:
14 | result += "0"
15 |
16 | if mode == 'caffe':
17 | result += "1"
18 | else:
19 | result += "0"
20 | return result
21 |
22 |
23 | def decode_params(encoded):
24 | clahe = encoded[0] == "1"
25 | caffe_mode = encoded[2] == "1"
26 | if caffe_mode:
27 | mode = 'caffe'
28 | else:
29 | mode = 'tf'
30 | stretch = encoded[1] == "1"
31 | return clahe, mode, stretch
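A round trip of the three-character encoding (position 0 = clahe, 1 = stretch, 2 = caffe mode):

    code = encode_params(clahe=True, mode='caffe', stretch=False)  # -> "101"
    assert decode_params(code) == (True, 'caffe', False)
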
32 |
--------------------------------------------------------------------------------
/selim_sef-solution/params.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | parser = argparse.ArgumentParser()
4 | arg = parser.add_argument
5 | arg('--gpu', default="0")
6 | arg('--epochs', type=int, default=100)
7 | arg('--fold', default='0')
8 | arg('--n_folds', type=int, default=4)
9 | arg('--freeze_till_layer', default='input_1')
10 | arg('--preprocessing_function', default='tf')
11 | arg('--weights')
12 | arg('--city')
13 | arg('--input_size', type=int, default=1300)
14 | arg('--padded_size', type=int, default=1312)
15 | arg('--save_epochs')
16 | arg('--learning_rate', type=float, default=0.0003)
17 | arg('--crop_size', type=int, default=256)
18 | arg('--crops_per_image', type=int, default=1)
19 | arg('--batch_size', type=int, default=1)
20 | arg('--loss_function', default='bce_dice')
21 | arg('--clr')
22 | arg('--schedule')
23 | arg('--optimizer')
24 | arg('--num_workers', type=int, default=8)
25 | arg('--clahe', action='store_true')
26 | arg('--ohe_city', action='store_true')
27 | arg('--stretch_and_mean', action='store_true')
28 | arg('--network', default='inception_linknet')
29 | arg('--alias', default='full')
30 | arg('--steps_per_epoch', type=int, default=1000)
31 | arg('--seed', type=int, default=777)
32 | arg('--models_dir', default='trained_models')
33 | arg('--data_dirs', nargs='+')
34 |
35 | #prediction
36 | arg('--out_dir_name')
37 | arg('--city_id')
38 | arg('--ensembling_dirs')
39 | arg('--dirs_to_process', nargs='+')
40 | arg('--output_file')
41 | arg('--wdata_dir')
42 |
43 |
44 | args = parser.parse_args()
45 |
--------------------------------------------------------------------------------
/selim_sef-solution/preprocess_clahe.py:
--------------------------------------------------------------------------------
1 | import os
2 | from multiprocessing.pool import Pool
3 |
4 | import numpy as np
5 | from skimage.exposure import equalize_adapthist
6 | from skimage.external import tifffile
7 |
8 | from params import args
9 |
10 | wdata_dir = args.wdata_dir
11 |
12 | def transform(f):
13 | path = f
14 | city_dir_name = f.split("/")[-3]
15 | image = tifffile.imread(path)
16 | bands = []
17 | for band in range(8):
18 | bands.append(equalize_adapthist(image[..., band]) * 2047)
19 | img = np.array(np.stack(bands, axis=-1), dtype="uint16")
20 | clahe_city_dir = os.path.join(wdata_dir, city_dir_name)
21 | os.makedirs(clahe_city_dir, exist_ok=True)
22 | mul_dir = os.path.join(clahe_city_dir, 'CLAHE-MUL-PanSharpen')
23 | os.makedirs(mul_dir, exist_ok=True)
24 | tifffile.imsave(os.path.join(mul_dir, f.split("/")[-1]), img, planarconfig='contig')
25 |
26 |
27 | def multi_transform(files, transform):
28 | pool = Pool(8)
29 | results = pool.map(transform, files)
30 | pool.close()
31 | pool.join()
32 | return results
33 |
34 | for city_dir in args.dirs_to_process:
35 | print("preprocess data for dir ", city_dir)
36 | mul_dir = os.path.join(city_dir, 'MUL-PanSharpen')
37 | files = [os.path.join(mul_dir, f) for f in os.listdir(mul_dir)]
38 | multi_transform(files, transform)
39 |
--------------------------------------------------------------------------------
/selim_sef-solution/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | all_args=( $@ )
3 | arg_len=${#all_args[@]}
4 | out_file=${all_args[$arg_len-1]}
5 | city_dirs=${all_args[@]:0:$arg_len-1}
6 |
7 | echo "Output file: $out_file"
8 | echo "City dirs $out_file are: $city_dirs"
9 |
10 | python3 preprocess_clahe.py --wdata_dir /wdata --dirs_to_process $city_dirs
11 |
12 | python3 predict_all.py --gpu "0" --wdata_dir /wdata --dirs_to_process $city_dirs
13 |
14 | python3 generate_submission.py --output_file $out_file --dirs_to_process $city_dirs
--------------------------------------------------------------------------------
/selim_sef-solution/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpaceNetChallenge/RoadDetector/0a7391f546ab20c873dc6744920deef22c21ef3e/selim_sef-solution/tools/__init__.py
--------------------------------------------------------------------------------
/selim_sef-solution/tools/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 |
5 | def dice(im1, im2, empty_score=1.0):
6 | """
7 | Computes the Dice coefficient, a measure of set similarity.
8 | Parameters
9 | ----------
10 | im1 : array-like, bool
11 | Any array of arbitrary size. If not boolean, will be converted.
12 | im2 : array-like, bool
13 | Any other array of identical size. If not boolean, will be converted.
14 | Returns
15 | -------
16 | dice : float
17 | Dice coefficient as a float on range [0,1].
18 | Maximum similarity = 1
19 | No similarity = 0
20 | Both are empty (sum eq to zero) = empty_score
21 |
22 | Notes
23 | -----
24 | The order of inputs for `dice` is irrelevant. The result will be
25 | identical if `im1` and `im2` are switched.
26 | """
27 | im1 = np.asarray(im1).astype(np.bool)
28 | im2 = np.asarray(im2).astype(np.bool)
29 |
30 | if im1.shape != im2.shape:
31 | raise ValueError("Shape mismatch: im1 and im2 must have the same shape. {}, {}".format(im1.shape, im2.shape))
32 |
33 | im_sum = im1.sum() + im2.sum()
34 | if im_sum == 0:
35 | return empty_score
36 |
37 | # Compute Dice coefficient
38 | intersection = np.logical_and(im1, im2)
39 |
40 | return 2. * intersection.sum() / im_sum
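A worked example (import path as in this repo):

    import numpy as np
    from tools.metrics import dice

    a = np.array([[1, 1, 0],
                  [0, 1, 0]])
    b = np.array([[1, 0, 0],
                  [0, 1, 1]])
    dice(a, b)  # 2 * |a & b| / (|a| + |b|) = 2*2 / (3+3) ~ 0.667
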
--------------------------------------------------------------------------------
/selim_sef-solution/tools/mul_img_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def stretch_8bit(bands, lower_percent=0, higher_percent=100):  # despite the name, returns float32 scaled to [0, 1]
4 | out = np.zeros_like(bands).astype(np.float32)
5 | for i in range(bands.shape[-1]):
6 | a = 0
7 | b = 1
8 | band = bands[:, :, i].flatten()
9 | filtered = band[band > 0]
10 | if len(filtered) == 0:
11 | continue
12 | c = np.percentile(filtered, lower_percent)
13 | d = np.percentile(filtered, higher_percent)
14 | t = a + (bands[:, :, i] - c) * (b - a) / (d - c)
15 | t[t < a] = a
16 | t[t > b] = b
17 | out[:, :, i] = t
18 | return out.astype(np.float32)
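A usage sketch (percentile values illustrative; a 2/98 pair is a common choice for satellite imagery):

    import numpy as np
    from tools.mul_img_utils import stretch_8bit

    img = np.random.randint(1, 2048, size=(64, 64, 8)).astype(np.float32)
    scaled = stretch_8bit(img, lower_percent=2, higher_percent=98)  # float32 in [0, 1]
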
--------------------------------------------------------------------------------
/selim_sef-solution/tools/stats.py:
--------------------------------------------------------------------------------
1 | mean_bands = {
2 | 'AOI_2_Vegas': [66.203843139322203, 64.793622432296814, 63.816469071277851, 67.365390860065887, 68.80479517013778,
3 | 70.498218756846072, 67.712760288609388, 69.555316792809833],
4 | 'AOI_3_Paris': [38.639393682336831, 37.012744344218845, 38.301689254170853, 40.9502977658262, 38.395837950275691,
5 | 42.296092196170676, 54.33440729897616, 41.800257810114338],
6 | 'AOI_4_Shanghai': [47.610848144347287, 48.692754717031264, 49.079308552918832, 49.506723647359777,
7 | 48.668532262291414,
8 | 52.832670146898408, 53.044230720675131, 47.833598289901779],
9 | 'AOI_5_Khartoum': [58.699947916218441, 55.08216781020004, 52.674812276089149, 51.730563153507624,
10 | 59.679492363441504,
11 | 62.533004077249132, 58.810756464969373, 57.123224559781079]}
--------------------------------------------------------------------------------
/selim_sef-solution/tools/tiling.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def generate_tiles(img, tile_size=512):
5 | height = img.shape[0]
6 | width = img.shape[1]
7 | rows = 2 * height // tile_size - 1
8 | columns = 2 * width // tile_size - 1
9 | overlap = tile_size // 2
10 |
11 | images = np.zeros((rows * columns, tile_size, tile_size, img.shape[-1]), dtype="float32")
12 | i = 0
13 | for tile_y in range(int(rows)):
14 | for tile_x in range(int(columns)):
15 | x1 = tile_x * tile_size - tile_x * overlap
16 | x2 = x1 + tile_size
17 | y1 = tile_y * tile_size - tile_y * overlap
18 | y2 = y1 + tile_size
19 |             tile = img[y1:y2, x1:x2, :]
20 |             images[i, 0:tile.shape[0], 0:tile.shape[1]] = tile[:]
21 | i += 1
22 | return images
23 |
24 |
25 | def combine_tiles(predicted_tiles, tile_size=1024, height=2048, width=2048):
26 | img = np.zeros((height, width, 1), dtype="float32")
27 | rows = 2 * height // tile_size - 1
28 | columns = 2 * width // tile_size - 1
29 | overlap = tile_size // 2
30 |
31 | offset = overlap // 2
32 | for i in range(len(predicted_tiles)):
33 | tile_x = i % columns
34 | tile_y = i // columns
35 | start_x = tile_x * tile_size - tile_x * overlap
36 | start_y = tile_y * tile_size - tile_y * overlap
37 | offset_x_start = offset
38 | offset_y_start = offset
39 | offset_x_end = offset
40 | offset_y_end = offset
41 |
42 | if tile_x == 0:
43 | offset_x_start = 0
44 | if tile_x == columns - 1:
45 | offset_x_end = 0
46 | if tile_y == 0:
47 | offset_y_start = 0
48 | if tile_y == rows - 1:
49 | offset_y_end = 0
50 | tile = np.expand_dims(predicted_tiles[i][offset_y_start:tile_size - offset_y_end, offset_x_start:tile_size - offset_x_end, 0], -1)
51 | img[start_y + offset_y_start: start_y + tile_size - offset_y_end, start_x + offset_x_start: start_x + tile_size - offset_x_end, :] = tile
52 | return img
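A round trip of the two helpers above (sizes illustrative): a 2048x2048 image yields a 3x3 grid of half-overlapping tiles, and combine_tiles trims the overlap margins when stitching single-channel predictions back together:

    import numpy as np
    from tools.tiling import generate_tiles, combine_tiles

    img = np.random.rand(2048, 2048, 3).astype('float32')
    tiles = generate_tiles(img, tile_size=1024)   # (9, 1024, 1024, 3)
    preds = tiles[:, :, :, :1]                    # stand-in for per-tile model output
    full = combine_tiles(preds, tile_size=1024, height=2048, width=2048)
    assert full.shape == (2048, 2048, 1)
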
--------------------------------------------------------------------------------