├── 1. Generate_images_and_masks.ipynb
├── 2. Train.ipynb
├── 3. Inference.ipynb
├── 4. Geo-processing Tuto.ipynb
├── README.md
├── dataset.py
├── files
│   ├── Clip_final_hlaing_thar_yar11.json
│   ├── img.png
│   ├── label.png
│   └── preds.png
├── loss.py
├── models.py
├── utils
│   ├── geo_process.py
│   ├── image_utils.py
│   └── readme.md
└── weights
    ├── Dice_EPOCH150_Unet.h5
    ├── Epoch1000_Unet.h5
    └── readme.md
/2. Train.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "kernelspec": {
6 | "display_name": "Python 3",
7 | "language": "python",
8 | "name": "python3"
9 | },
10 | "language_info": {
11 | "codemirror_mode": {
12 | "name": "ipython",
13 | "version": 3
14 | },
15 | "file_extension": ".py",
16 | "mimetype": "text/x-python",
17 | "name": "python",
18 | "nbconvert_exporter": "python",
19 | "pygments_lexer": "ipython3",
20 | "version": "3.8.5"
21 | },
22 | "colab": {
23 | "name": "2. Train.ipynb",
24 | "provenance": [],
25 | "collapsed_sections": []
26 | },
27 | "accelerator": "GPU"
28 | },
29 | "cells": [
30 | {
31 | "cell_type": "markdown",
32 | "metadata": {
33 | "id": "prhghqjgFVup"
34 | },
35 | "source": [
36 | "## **Images & masks directory list**"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "metadata": {
42 | "collapsed": true,
43 | "id": "5BKaq-XqFdfZ"
44 | },
45 | "source": [
46 | "# make lists for image and mask directories\n",
47 | "import glob\n",
48 | "\n",
49 | "images_dir = '/outputs/dataset/images'\n",
50 | "masks_dir = '/outputs/dataset/masks'\n",
51 | "\n",
52 | "image_paths = sorted(glob.glob(f'{images_dir}/*'))\n",
53 | "mask_paths = sorted(glob.glob(f'{masks_dir}/*'))\n",
54 | "\n",
55 | "print(f'total images: {len(image_paths)}')\n",
56 | "print(f'total masks: {len(mask_paths)}')\n",
57 | "print(image_paths[:5])\n",
58 | "print(mask_paths[:5])"
59 | ],
60 | "execution_count": null,
61 | "outputs": []
62 | },
63 | {
64 | "cell_type": "markdown",
65 | "metadata": {
66 | "id": "fSBdFPbx51M_"
67 | },
68 | "source": [
69 | "## **Prepare Data**"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "metadata": {
75 | "id": "iLFQlVyX2DiR"
76 | },
77 | "source": [
78 | "import random\n",
79 | "\n",
80 | "# number of validation samples\n",
81 | "val_samples = 50\n",
82 | "\n",
83 | "random.Random(1337).shuffle(image_paths)\n",
84 | "random.Random(1337).shuffle(mask_paths)\n",
85 | "\n",
86 | "# Split our img paths into a training and a validation set\n",
87 | "train_image_paths = image_paths[:-val_samples]\n",
88 | "train_mask_paths = mask_paths[:-val_samples]\n",
89 | "\n",
90 | "val_image_paths = image_paths[-val_samples:]\n",
91 | "val_mask_paths = mask_paths[-val_samples:]"
92 | ],
93 | "execution_count": 2,
94 | "outputs": []
95 | },
96 | {
97 | "cell_type": "code",
98 | "metadata": {
99 | "id": "Q6QruQbz51NE"
100 | },
101 | "source": [
102 | "from dataset import BuildingDataset\n",
103 | "\n",
104 | "img_size = (512, 512)\n",
105 | "batch_size = 16\n",
106 | "\n",
107 | "# Instantiate data Sequences for each split\n",
108 | "train_gen = BuildingDataset(batch_size,\n",
109 | " img_size,\n",
110 | " train_image_paths,\n",
111 | " train_mask_paths)\n",
112 | "\n",
113 | "val_gen = BuildingDataset(batch_size,\n",
114 | " img_size,\n",
115 | " val_image_paths,\n",
116 | " val_mask_paths)"
117 | ],
118 | "execution_count": 3,
119 | "outputs": []
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {
124 | "id": "TOI32CLa51NF"
125 | },
126 | "source": [
127 | "## **Create and compile the model**"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "metadata": {
133 | "id": "p5Gm5zuv51NG"
134 | },
135 | "source": [
136 | "activation_function = 'sigmoid'\n",
137 | "num_classes = 1\n",
138 | "\n",
139 | "from models import UNet\n",
140 | "model = UNet(num_classes = num_classes,\n",
141 | " image_size = img_size[0], \n",
142 | " img_channels = 3,\n",
143 |     "             activation_fn = activation_function)\n",
144 | "\n",
145 | "# from models import DeepUNet\n",
146 | "# model = DeepUNet(num_classes=num_classes,\n",
147 | "# image_size=img_size[0],\n",
148 | "# img_channels=3,\n",
149 | "# activation_fn = activation_function,\n",
150 | "# n_filters_start = 32)\n",
151 | "\n",
152 | "from tensorflow.keras.optimizers import Adam\n",
153 | "from tensorflow.keras.metrics import Precision, Recall\n",
154 | "from loss import bce_dice_loss, dice_coef\n",
155 | "\n",
156 | "loss = bce_dice_loss # can use 'binary_crossentropy'\n",
157 | "optimizer = Adam()\n",
158 | "\n",
159 | "# compile model\n",
160 | "model.compile(optimizer = optimizer, \n",
161 | " loss = loss,\n",
162 | " metrics = [Precision(), \n",
163 | " Recall(), \n",
164 | " dice_coef])"
165 | ],
166 | "execution_count": 4,
167 | "outputs": []
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {
172 | "id": "ZzTFaP7c51NG"
173 | },
174 | "source": [
175 | "## **Train the model**"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "metadata": {
181 | "collapsed": true,
182 | "id": "elTPldOq2Imf"
183 | },
184 | "source": [
185 | "# Train the model, doing validation at the end of each epoch.\n",
186 | "epochs = 50\n",
187 | "\n",
188 | "model.fit(train_gen, \n",
189 | " epochs=epochs, \n",
190 | " validation_data=val_gen,\n",
191 | " verbose = 1)"
192 | ],
193 | "execution_count": null,
194 | "outputs": []
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "metadata": {
199 | "id": "6J9USi5351NI"
200 | },
201 | "source": [
202 | "## **Save model file**"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "metadata": {
208 | "id": "IgU20AGe51NI"
209 | },
210 | "source": [
211 | "import os\n",
212 | "\n",
213 | "# create folder to save weights\n",
214 | "weights_save_folder = 'outputs/weights'\n",
215 | "os.makedirs(weights_save_folder, exist_ok = True)\n",
216 | "\n",
217 | "model.save(f'{weights_save_folder}/my_model.h5')"
218 | ],
219 | "execution_count": null,
220 | "outputs": []
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "metadata": {
225 | "id": "MdyKAC1PKb7w"
226 | },
227 | "source": [
228 | "## **Inference on a single tile**"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "metadata": {
234 | "collapsed": true,
235 | "id": "qWbFtiPk2Uzc"
236 | },
237 | "source": [
238 | "from PIL import Image\n",
239 | "import numpy as np\n",
240 | "import matplotlib.pyplot as plt\n",
241 | "\n",
242 | "threshold = 0.95\n",
243 | "\n",
244 | "test_img_dir = 'test_image.png'\n",
245 | "test_mask_dir = 'test_mask.png'\n",
246 | "\n",
247 | "# read test image and mask\n",
248 | "test_image = Image.open(test_img_dir)\n",
249 | "test_mask = Image.open(test_mask_dir)\n",
250 | "\n",
251 | "# make predictions\n",
252 | "pred_img = np.expand_dims(test_image, axis = 0)\n",
253 | "test_preds = model.predict(pred_img)\n",
254 | "test_preds = np.squeeze(test_preds)\n",
255 | "test_preds = test_preds > threshold\n",
256 | "\n",
257 | "# plot the results\n",
258 | "fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n",
259 | "\n",
260 | "ax1.imshow(test_image)\n",
261 | "ax1.title.set_text('Image')\n",
262 | "\n",
263 | "ax2.imshow(test_preds)\n",
264 | "ax2.title.set_text('Predictions')\n",
265 | "\n",
266 | "ax3.imshow(test_mask)\n",
267 | "ax3.title.set_text('Ground Truth')"
268 | ],
269 | "execution_count": null,
270 | "outputs": []
271 | }
272 | ]
273 | }
--------------------------------------------------------------------------------
/3. Inference.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "id": "_xUTGZZ9WAwP"
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import numpy as np\n",
12 | "from PIL import Image\n",
13 | "import matplotlib.pyplot as plt\n",
14 | "\n",
15 | "import os\n",
16 | "import utils.image_utils as ps"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {
22 | "id": "5JOqKhrMWAwR"
23 | },
24 | "source": [
25 | "## Load test image"
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": null,
31 | "metadata": {
32 | "id": "GY6lFC7dWAwW"
33 | },
34 | "outputs": [],
35 | "source": [
36 | "image_dir = 'test_image.png'\n",
37 | "image = np.array(Image.open(image_dir))"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {
43 | "id": "vb8lXWzoWAwX"
44 | },
45 | "source": [
46 | "## Resize image if necessary"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {
53 | "colab": {
54 | "base_uri": "https://localhost:8080/",
55 | "height": 319
56 | },
57 | "id": "PRuGQONfWAwX",
58 | "outputId": "a725bfa5-aea9-46cf-c341-156360ae3280"
59 | },
60 | "outputs": [],
61 | "source": [
62 | "resize = True\n",
63 | "\n",
64 | "scale = 0.5\n",
65 | "\n",
66 | "if resize:\n",
67 | " image = ps.resize_image(image, percent = scale)\n",
68 | " \n",
69 | "plt.imshow(image)"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {
75 | "id": "FOUvJF8tWAwX"
76 | },
77 | "source": [
78 | "## Pad and crop image"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "metadata": {
85 | "colab": {
86 | "base_uri": "https://localhost:8080/"
87 | },
88 | "id": "noMDqLJIWAwX",
89 | "outputId": "2f94d598-8555-4a44-ef0c-776a9167ab12"
90 | },
91 | "outputs": [],
92 | "source": [
93 | "model_input_w = 512\n",
94 | "model_input_h = 512\n",
95 | "\n",
96 | "# pad & crop\n",
97 | "padded_image = ps.pad(image, model_input_w, model_input_h)\n",
98 | "cropped_tiles = ps.crop(padded_image, model_input_w, model_input_h, numpy_output=True)"
99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {
104 | "id": "WKFu684DWAwY"
105 | },
106 | "source": [
107 | "## Load model"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {
114 | "id": "rQcGoXrCWAwY"
115 | },
116 | "outputs": [],
117 | "source": [
118 | "from tensorflow.keras.models import load_model\n",
119 |     "from loss import bce_dice_loss, dice_coef\n",
120 | "\n",
121 | "model = load_model('outputs/weights/my_model.h5',\n",
122 |     "                   custom_objects={'bce_dice_loss': bce_dice_loss, 'dice_coef': dice_coef} # load the custom loss and metric from loss.py\n",
123 | " ) "
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "metadata": {
129 | "id": "BI0E2EfHWAwY"
130 | },
131 | "source": [
132 | "## Inference"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {
139 | "colab": {
140 | "base_uri": "https://localhost:8080/"
141 | },
142 | "id": "ApyqqtG0WAwY",
143 | "outputId": "d6c3ff83-b700-4aad-97c3-c4294eea210b",
144 | "scrolled": true
145 | },
146 | "outputs": [],
147 | "source": [
148 | "model_predictions = model.predict(cropped_tiles,verbose=1)\n",
149 | "\n",
150 | "preds = np.squeeze(model_predictions)\n",
151 | "\n",
152 | "threshold = 0.95\n",
153 | "preds = preds > threshold"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "metadata": {
159 | "id": "j40NqPx2WAwY"
160 | },
161 | "source": [
162 | "## Reconstruct final predicted image"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {
169 | "colab": {
170 | "base_uri": "https://localhost:8080/",
171 | "height": 286
172 | },
173 | "id": "n6XUjLJiWAwZ",
174 | "outputId": "10775ee7-1e4e-418b-a670-93e35192a248",
175 | "scrolled": true
176 | },
177 | "outputs": [],
178 | "source": [
179 | "# image height and width\n",
180 | "img_h, img_w = image.shape[0], image.shape[1]\n",
181 | "\n",
182 | "# reconstructs predicted mask tiles into one single mask image\n",
183 | "predicted_image = ps.reconstruct(preds, img_w, img_h, model_input_w, model_input_h, numpy_output = False)\n",
184 | "\n",
185 | "plt.imshow(predicted_image)"
186 | ]
187 | },
188 | {
189 | "cell_type": "markdown",
190 | "metadata": {
191 | "id": "L24wmmcTWAwZ"
192 | },
193 | "source": [
194 | "## Save image"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": null,
200 | "metadata": {
201 | "id": "SOyCuKAuWAwZ"
202 | },
203 | "outputs": [],
204 | "source": [
205 | "# folder for saving predicted mask image by model\n",
206 | "image_save_folder = 'outputs/inference outputs/png'\n",
207 | "\n",
208 | "# creates a new folder if it does not exist yet\n",
209 |     "os.makedirs(image_save_folder, exist_ok = True)\n",
210 | "\n",
211 | "# save predicted mask image in target folder\n",
212 | "predicted_image.save(f'{image_save_folder}/predicted_image.png')"
213 | ]
214 | }
215 | ],
216 | "metadata": {
217 | "colab": {
218 | "name": "Inferencev2.ipynb",
219 | "provenance": []
220 | },
221 | "kernelspec": {
222 | "display_name": "Python 3",
223 | "language": "python",
224 | "name": "python3"
225 | },
226 | "language_info": {
227 | "codemirror_mode": {
228 | "name": "ipython",
229 | "version": 3
230 | },
231 | "file_extension": ".py",
232 | "mimetype": "text/x-python",
233 | "name": "python",
234 | "nbconvert_exporter": "python",
235 | "pygments_lexer": "ipython3",
236 | "version": "3.8.5"
237 | }
238 | },
239 | "nbformat": 4,
240 | "nbformat_minor": 1
241 | }
242 |
--------------------------------------------------------------------------------
/4. Geo-processing Tuto.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 4,
6 | "id": "75bda633",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "import geo_process"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 10,
16 | "id": "e677c448",
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "from geo_process import geo_ref,to_shapefile"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 6,
26 | "id": "3c823b62",
27 | "metadata": {
28 | "scrolled": true
29 | },
30 | "outputs": [
31 | {
32 | "name": "stdout",
33 | "output_type": "stream",
34 | "text": [
35 | "Help on module geo_process:\n",
36 | "\n",
37 | "NAME\n",
38 | " geo_process - # coding: utf-8\n",
39 | "\n",
40 | "FUNCTIONS\n",
41 | " geo_ref(ref_image_dir, target_image_dir, output_image_dir)\n",
42 | " Georeferencing an image \n",
43 | " - \n",
44 | " \n",
45 | " arguments:\n",
46 | " ref_image_dir (str): original geotiff image directory// Example \"C:\\Desktop\\OriginalImage.tif\"\n",
47 | " target_image_dir (str): predicted image directory // Example \"C:\\Desktop\\PredictedImage.png\"\n",
48 | " output_image_dir(str) : georeferenced predicted image // Example \"C:\\Desktop\\PredictedImage_Georef.tif\"\n",
49 | " returns:\n",
50 | " georeferenced predicted image (numpy)\n",
51 | " \n",
52 | " to_shapefile(imageDir, outputShapefile)\n",
53 | " Arguments :\n",
54 | " imageDir(str) : input geotif image\n",
55 | " outputShapefile(str) : output shapefile \n",
56 | " Returns:\n",
57 | " null\n",
58 | "\n",
59 | "DATA\n",
60 | " __warningregistry__ = {'version': 19}\n",
61 | "\n",
62 | "FILE\n",
63 | " c:\\users\\toby\\desktop\\building-footprint-extraction-practical-project-toby\\building-footprint-extraction-practical-project-toby\\modules\\geo_process.py\n",
64 | "\n",
65 | "\n"
66 | ]
67 | }
68 | ],
69 | "source": [
70 | "help(geo_process)"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "id": "562c908b",
76 | "metadata": {},
77 | "source": [
78 | "# Declaring Path"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 7,
84 | "id": "32a221f9",
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "\n",
89 | "ref_img =r\"C:\\Users\\Toby\\Desktop\\Building-Footprint-Extraction-Practical-Project-toby\\Building-Footprint-Extraction-Practical-Project-toby\\dataset\\Annotation Data\\Labelme Output data\\Clip_final_hlaing_thar_yar1.tif\"\n",
90 | "target_img=r\"C:\\Users\\Toby\\Desktop\\Building-Footprint-Extraction-Practical-Project-toby\\Building-Footprint-Extraction-Practical-Project-toby\\dataset\\Annotation Data\\Labelme Output data\\label.png\"\n"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": 8,
96 | "id": "34eae9cf",
97 | "metadata": {},
98 | "outputs": [
99 | {
100 | "data": {
101 | "text/plain": [
102 | "array([[0, 0, 0, ..., 0, 0, 0],\n",
103 | " [0, 0, 0, ..., 0, 0, 0],\n",
104 | " [0, 0, 0, ..., 0, 0, 0],\n",
105 | " ...,\n",
106 | " [0, 0, 0, ..., 0, 0, 0],\n",
107 | " [0, 0, 0, ..., 0, 0, 0],\n",
108 | " [0, 0, 0, ..., 0, 0, 0]], dtype=uint8)"
109 | ]
110 | },
111 | "execution_count": 8,
112 | "metadata": {},
113 | "output_type": "execute_result"
114 | }
115 | ],
116 | "source": [
117 | "\n",
118 | "geo_ref(ref_img,target_img,'geo_referenced_img.tif')"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": 11,
124 | "id": "54b3a926",
125 | "metadata": {},
126 | "outputs": [],
127 | "source": [
128 | "\n",
129 | "#run the module \n",
130 | "to_shapefile('geo_referenced_img.tif','final_vector')"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": null,
136 | "id": "3ceb97fb",
137 | "metadata": {},
138 | "outputs": [],
139 | "source": []
140 | }
141 | ],
142 | "metadata": {
143 | "kernelspec": {
144 | "display_name": "Python 3",
145 | "language": "python",
146 | "name": "python3"
147 | },
148 | "language_info": {
149 | "codemirror_mode": {
150 | "name": "ipython",
151 | "version": 3
152 | },
153 | "file_extension": ".py",
154 | "mimetype": "text/x-python",
155 | "name": "python",
156 | "nbconvert_exporter": "python",
157 | "pygments_lexer": "ipython3",
158 | "version": "3.8.11"
159 | }
160 | },
161 | "nbformat": 4,
162 | "nbformat_minor": 5
163 | }
164 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Building Footprint Extraction from Satellite Images with Deep Learning
2 |
3 | # Train Image and Label Image
4 |
5 | ![train image](files/img.png) ![label image](files/label.png)
6 |
7 | # Predicted Image
8 |
9 | ![predicted image](files/preds.png)
10 |
11 |
12 |
13 | # Problem statement
14 | Building footprints are digitized and annotated from time to time for various use cases in the geoinformatics community. Digitizing over large areas, however, is labour-intensive work, and many GIS workflows are bottlenecked at this phase. With the help of deep learning this task becomes far easier: all that is required is to use a trained model efficiently, rather than spend time on labour-intensive, time-consuming manual digitizing.
15 | 
16 | We humbly publish this repo in the hope of giving a lift to Myanmar's geoinformatics and machine learning community. Since both contributors are still in their learning phase, we sincerely apologize for any mistakes or inconveniences.
17 |
18 | This project aims to help beginners in both geospatial technology and deep learning understand and work through a segmentation project on their own.
19 | There are two tracks:
20 | - one for those who want to use our model to extract building footprints for their own needs
21 | - one for those who would like to gain detailed insight into our project
22 |
23 | # For those who would like to use our model, please go through
24 | - 3. Inference.ipynb
25 | 
26 | # For those who would like the full details, please go through
27 | - 1. Generate_images_and_masks.ipynb
28 | - 2. Train.ipynb
29 | - 3. Inference.ipynb
30 | - 4. Geo-processing Tuto.ipynb
31 |
32 |
33 | # Utils
34 | - image_utils.py ( used for image preprocessing and postprocessing; see the sketch below )
35 | - geo_process.py ( used for georeferencing and shapefile conversion )
36 | - loss.py ( used to import the custom loss function into the model )
37 |
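These helpers are wired together in `3. Inference.ipynb`; the snippet below is a condensed sketch of that flow for quick reference. The input file name, the chosen weight file and the 0.95 threshold are placeholders, and `compile=False` is used so the custom loss does not need to be re-imported for inference.

```python
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model

import utils.image_utils as ps

tile_w, tile_h = 512, 512                                    # model input size used in training

image = np.array(Image.open('test_image.png'))               # placeholder input image

padded = ps.pad(image, tile_w, tile_h)                       # pad with black so both sides become multiples of 512
tiles = ps.crop(padded, tile_w, tile_h, numpy_output=True)   # stack of 512x512 tiles

# compile=False skips the custom loss/metric, which is fine for inference only
model = load_model('weights/Dice_EPOCH150_Unet.h5', compile=False)

preds = np.squeeze(model.predict(tiles))                     # one probability map per tile
preds = (preds > 0.95).astype(np.uint8) * 255                # binarize for a visible mask

# stitch the tiles back together and crop away the padded border
mask = ps.reconstruct(preds, image.shape[1], image.shape[0], tile_w, tile_h)
mask.save('predicted_mask.png')
```
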
38 | # Weights
39 |
40 | Pretrained weights can be found in the weights folder.
41 |
42 | # Datasets
43 |
44 | Datasets used in our training process can be found under this folder. Labelme annotation is used to produce the binary mask files.
45 |
46 |
47 | Binary mask images can be exported from the labelme JSON file using the following command:
48 | `labelme_json_to_dataset Clip_final_hlaing_thar_yar11.json -o Labelme_Output_data`
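
If there are several annotation files, a small loop like the one below (a sketch that assumes labelme is installed and that the JSON files sit in the current folder) runs the same command for each of them:

```python
import glob
import subprocess

# export every labelme JSON in the current folder to its own output folder
for json_file in glob.glob('*.json'):
    out_dir = json_file.replace('.json', '_output')
    subprocess.run(['labelme_json_to_dataset', json_file, '-o', out_dir], check=True)
```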
49 |
50 |
51 |
--------------------------------------------------------------------------------
/dataset.py:
--------------------------------------------------------------------------------
1 | from tensorflow import keras
2 | import numpy as np
3 | from tensorflow.keras.preprocessing.image import load_img
4 |
5 | class BuildingDataset(keras.utils.Sequence):
6 | """
7 | Sequence class to load & vectorize batches of data.
8 |
9 | Iterate over the data (as Numpy arrays).
10 |
11 | Reference - https://keras.io/examples/vision/oxford_pets_image_segmentation/#prepare-sequence-class-to-load-amp-vectorize-batches-of-data
12 | """
13 |
14 | def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
15 | self.batch_size = batch_size
16 | self.img_size = img_size
17 | self.input_img_paths = input_img_paths
18 | self.target_img_paths = target_img_paths
19 |
20 | def __len__(self):
21 | return len(self.target_img_paths) // self.batch_size
22 |
23 | def __getitem__(self, idx):
24 | """Returns tuple (input, target) correspond to batch #idx."""
25 | i = idx * self.batch_size
26 | batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
27 | batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
28 |
29 | # load images
30 | x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
31 | for j, path in enumerate(batch_input_img_paths):
32 | img = load_img(path, target_size=self.img_size)
33 | x[j] = img
34 |
35 |         # load masks (add a trailing channel axis so the target shape matches the model output)
36 |         y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
37 |         for j, path in enumerate(batch_target_img_paths):
38 |             img = load_img(path, target_size=self.img_size, color_mode="grayscale")
39 |             y[j] = np.expand_dims(img, 2)
40 |
41 | return x, y
--------------------------------------------------------------------------------
/files/img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/knwin/Building-Footprint-Extraction-From-Satellite-Images-With-Deep-Learning/ec99d96c7dfe9abc9c72c15c3fa392366f1caf30/files/img.png
--------------------------------------------------------------------------------
/files/label.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/knwin/Building-Footprint-Extraction-From-Satellite-Images-With-Deep-Learning/ec99d96c7dfe9abc9c72c15c3fa392366f1caf30/files/label.png
--------------------------------------------------------------------------------
/files/preds.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/knwin/Building-Footprint-Extraction-From-Satellite-Images-With-Deep-Learning/ec99d96c7dfe9abc9c72c15c3fa392366f1caf30/files/preds.png
--------------------------------------------------------------------------------
/loss.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import tensorflow.keras.backend as K
3 |
4 |
5 | def dice_coef(y_true, y_pred):
6 |
7 | # explicit cast
8 | y_true = tf.cast(y_true, tf.float32)
9 |
10 | y_true = K.flatten(y_true)
11 | y_pred = K.flatten(y_pred)
12 | intersection = K.sum(y_true * y_pred)
13 |
14 | return 2.0 * intersection / (K.sum(y_true) + K.sum(y_pred) + 1.)
15 |
16 |
17 | def dice_coef_loss(y_true, y_pred):
18 |
19 | # explicit cast
20 | y_true = tf.cast(y_true, tf.float32)
21 |
22 | return 1.0 - dice_coef(y_true, y_pred)
23 |
24 |
25 | def bce_dice_loss(y_true, y_pred):
26 |
27 | # explicit cast
28 | y_true = tf.cast(y_true, tf.float32)
29 |
30 | a = 0.5
31 | b = 1-a
32 | loss = a * K.binary_crossentropy(y_true, y_pred) + b * dice_coef_loss(y_true, y_pred)
33 |
34 | return loss
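
# --- quick numeric sanity check (illustrative sketch, not used elsewhere in this repo) ---
if __name__ == "__main__":
    # for a reasonably good prediction the dice coefficient is high and the combined loss low
    # (the +1 smoothing term in dice_coef noticeably lowers the value for tiny tensors like these)
    y_true = tf.constant([[1., 0., 1., 1.]])
    y_pred = tf.constant([[0.9, 0.1, 0.8, 0.7]])
    print("dice coefficient:", float(dice_coef(y_true, y_pred)))
    print("bce + dice loss :", float(tf.reduce_mean(bce_dice_loss(y_true, y_pred))))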
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
1 | from tensorflow.keras.models import Model
2 | from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
3 |
4 |
5 | def UNet(num_classes = 1,
6 | image_size = 512,
7 | img_channels = 3,
8 | activation_fn = 'sigmoid'
9 | ):
10 |
11 | inputs = Input((image_size, image_size, img_channels))
12 | s = Lambda(lambda x: x / 255)(inputs)
13 |
14 | #Contraction path
15 | c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
16 | c1 = Dropout(0.1)(c1)
17 | c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
18 | p1 = MaxPooling2D((2, 2))(c1)
19 |
20 | c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
21 | c2 = Dropout(0.1)(c2)
22 | c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
23 | p2 = MaxPooling2D((2, 2))(c2)
24 |
25 | c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
26 | c3 = Dropout(0.2)(c3)
27 | c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
28 | p3 = MaxPooling2D((2, 2))(c3)
29 |
30 | c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
31 | c4 = Dropout(0.2)(c4)
32 | c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
33 | p4 = MaxPooling2D(pool_size=(2, 2))(c4)
34 |
35 | c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
36 | c5 = Dropout(0.3)(c5)
37 | c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
38 |
39 | #Expansive path
40 | u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
41 | u6 = concatenate([u6, c4])
42 | c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
43 | c6 = Dropout(0.2)(c6)
44 | c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
45 |
46 | u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
47 | u7 = concatenate([u7, c3])
48 | c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
49 | c7 = Dropout(0.2)(c7)
50 | c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
51 |
52 | u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
53 | u8 = concatenate([u8, c2])
54 | c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
55 | c8 = Dropout(0.1)(c8)
56 | c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
57 |
58 | u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
59 | u9 = concatenate([u9, c1], axis=3)
60 | c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
61 | c9 = Dropout(0.1)(c9)
62 | c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
63 |
64 | outputs = Conv2D(num_classes, (1, 1), activation=activation_fn)(c9)
65 |
66 | model = Model(inputs=[inputs], outputs=[outputs])
67 |
68 | return model
69 |
70 |
71 | def DeepUNet(num_classes=1,
72 | image_size=512,
73 | img_channels=3,
74 | activation_fn = 'sigmoid',
75 | n_filters_start=32,
76 | growth_factor=2,
77 | upconv=True
78 | ):
79 | """
80 | Reference - https://github.com/reachsumit/deep-unet-for-satellite-image-segmentation/blob/master/unet_model_deeper.py
81 | """
82 |
83 | droprate=0.25
84 | n_filters = n_filters_start
85 | inputs = Input((image_size, image_size, img_channels))
86 |
87 | s = Lambda(lambda x: x / 255)(inputs)
88 |
89 | conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(s)
90 | conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv1)
91 | pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
92 | pool1 = Dropout(droprate)(pool1)
93 |
94 | n_filters *= growth_factor
95 | pool1 = BatchNormalization()(pool1)
96 | conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool1)
97 | conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv2)
98 | pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
99 | pool2 = Dropout(droprate)(pool2)
100 |
101 | n_filters *= growth_factor
102 | pool2 = BatchNormalization()(pool2)
103 | conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool2)
104 | conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv3)
105 | pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
106 | pool3 = Dropout(droprate)(pool3)
107 |
108 | n_filters *= growth_factor
109 | pool3 = BatchNormalization()(pool3)
110 | conv4_0 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool3)
111 | conv4_0 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv4_0)
112 | pool4_0 = MaxPooling2D(pool_size=(2, 2))(conv4_0)
113 | pool4_0 = Dropout(droprate)(pool4_0)
114 |
115 | n_filters *= growth_factor
116 | pool4_0 = BatchNormalization()(pool4_0)
117 | conv4_1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool4_0)
118 | conv4_1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv4_1)
119 | pool4_1 = MaxPooling2D(pool_size=(2, 2))(conv4_1)
120 | pool4_1 = Dropout(droprate)(pool4_1)
121 |
122 | n_filters *= growth_factor
123 | pool4_1 = BatchNormalization()(pool4_1)
124 | conv4_2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool4_1)
125 | conv4_2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv4_2)
126 | pool4_2 = MaxPooling2D(pool_size=(2, 2))(conv4_2)
127 | pool4_2 = Dropout(droprate)(pool4_2)
128 |
129 | n_filters *= growth_factor
130 | pool4_2 = BatchNormalization()(pool4_2)
131 | conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool4_2)
132 | conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv5)
133 | conv5 = Dropout(droprate)(conv5)
134 |
135 | n_filters //= growth_factor
136 | if upconv:
137 | up6 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv5), conv4_2])
138 | else:
139 | up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4_2])
140 | up6 = BatchNormalization()(up6)
141 | conv6 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up6)
142 | conv6 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv6)
143 | conv6 = Dropout(droprate)(conv6)
144 |
145 | n_filters //= growth_factor
146 | if upconv:
147 | up6_1 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv6), conv4_1])
148 | else:
149 | up6_1 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv4_1])
150 | up6_1 = BatchNormalization()(up6_1)
151 | conv6_1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up6_1)
152 | conv6_1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv6_1)
153 | conv6_1 = Dropout(droprate)(conv6_1)
154 |
155 | n_filters //= growth_factor
156 | if upconv:
157 | up6_2 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv6_1), conv4_0])
158 | else:
159 | up6_2 = concatenate([UpSampling2D(size=(2, 2))(conv6_1), conv4_0])
160 | up6_2 = BatchNormalization()(up6_2)
161 | conv6_2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up6_2)
162 | conv6_2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv6_2)
163 | conv6_2 = Dropout(droprate)(conv6_2)
164 |
165 | n_filters //= growth_factor
166 | if upconv:
167 | up7 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv6_2), conv3])
168 | else:
169 | up7 = concatenate([UpSampling2D(size=(2, 2))(conv6_2), conv3])
170 | up7 = BatchNormalization()(up7)
171 | conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up7)
172 | conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv7)
173 | conv7 = Dropout(droprate)(conv7)
174 |
175 | n_filters //= growth_factor
176 | if upconv:
177 | up8 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv7), conv2])
178 | else:
179 | up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
180 | up8 = BatchNormalization()(up8)
181 | conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up8)
182 | conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv8)
183 | conv8 = Dropout(droprate)(conv8)
184 |
185 | n_filters //= growth_factor
186 | if upconv:
187 | up9 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv8), conv1])
188 | else:
189 | up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
190 | up9 = BatchNormalization()(up9)
191 | conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up9)
192 | conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv9)
193 |
194 | conv10 = Conv2D(num_classes, (1, 1), activation=activation_fn)(conv9)
195 |
196 | model = Model(inputs=inputs, outputs=conv10)
197 |
198 | return model
--------------------------------------------------------------------------------
/utils/geo_process.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[ ]:
5 |
6 |
7 | #importing necessary modules
8 | import shutil
9 | from osgeo import gdal, ogr
10 | from osgeo import osr
11 | import sys
12 | import os
13 | import matplotlib.pyplot as plt
14 |
15 |
16 |
17 | def geo_ref(ref_image_dir,target_image_dir,output_image_dir):
18 | """
19 | Georeferencing an image
20 | -
21 |
22 | arguments:
23 | ref_image_dir (str): original geotiff image directory// Example "C:\Desktop\OriginalImage.tif"
24 | target_image_dir (str): predicted image directory // Example "C:\Desktop\PredictedImage.png"
25 | output_image_dir(str) : georeferenced predicted image // Example "C:\Desktop\PredictedImage_Georef.tif"
26 | returns:
27 | georeferenced predicted image (numpy)
28 | """
29 |
30 | tiff = gdal.Open(ref_image_dir) #ref image directory
31 | gt = tiff.GetGeoTransform()
32 |
33 | inputImage = target_image_dir #target image directory
34 | outputImage = output_image_dir #output image directory
35 |
36 | dataset = gdal.Open(inputImage)
37 | I = dataset.ReadAsArray(0,0,dataset.RasterXSize,dataset.RasterYSize)
38 |
39 |
40 | outdataset = gdal.GetDriverByName('GTiff')
41 | output_SRS = osr.SpatialReference()
42 | output_SRS.ImportFromEPSG(32647)
43 | outdataset = outdataset.Create(outputImage,dataset.RasterXSize,dataset.RasterYSize,1)
44 | outdataset.GetRasterBand(1).WriteArray(I)
45 |
46 |
47 |
48 | outdataset.SetProjection(output_SRS.ExportToWkt())
49 | outdataset.SetGeoTransform(gt)
50 | wkt = outdataset.GetProjection()
51 |     srs = gdal.WarpOptions(dstSRS='EPSG:32647')
52 |     gdal.Warp(outputImage, outdataset, options=srs)
53 | outdata = outdataset.ReadAsArray()
54 | outdataset = None
55 |
56 | return(outdata)
57 |
58 |
59 |
60 |
61 | def to_shapefile(imageDir,outputShapefile):
62 | """
63 | Arguments :
64 | imageDir(str) : input geotif image
65 | outputShapefile(str) : output shapefile
66 | Returns:
67 | null
68 |
69 | """
70 |
71 | #Vectorization
72 | sourceRaster = gdal.Open(imageDir)
73 | band = sourceRaster.GetRasterBand(1)
74 | bandArray = band.ReadAsArray(1)
75 | outShapefile = outputShapefile
76 | driver = ogr.GetDriverByName("ESRI Shapefile")
77 | outDatasource = driver.CreateDataSource(outShapefile+ ".shp")
78 | outLayer = outDatasource.CreateLayer("polygonized", srs=None)
79 | gdal.Polygonize( band, None, outLayer, -1, [], callback=None )
80 | outDatasource.Destroy()
81 | sourceRaster = None
82 |
--------------------------------------------------------------------------------
/utils/image_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import cv2
4 | import PIL
5 | from PIL import Image, ImageOps
6 | import os
7 |
8 |
9 | def pad(src_img, model_input_w, model_input_h):
10 | """
11 | Add extra black area to image to make it ready for cropping
12 |
13 | arguments:
14 | src_img (PIL (or) np array): image to be padded
15 | model_input_w (int): input image width for model
16 | model_input_h (int): input image height for model
17 |
18 | returns:
19 | PIL image: padded image
20 | """
21 |
22 | img_type = type(src_img)
23 |
24 | # change to numpy array
25 | if img_type == np.ndarray :
26 | img = src_img.copy()
27 | else:
28 | img = np.array(src_img)
29 |
30 | img_width = img.shape[1]
31 | img_height = img.shape[0]
32 |
33 | pad_width = int((np.ceil(img_width / model_input_w) * model_input_w) - img_width)
34 | pad_height = int((np.ceil(img_height / model_input_h) * model_input_h) - img_height)
35 |
36 | print('image width = ', img_width, ', image height = ', img_height)
37 | print('pad width = ', pad_width, ', pad height = ', pad_height)
38 |
39 | result_image = cv2.copyMakeBorder( img, 0, pad_height, 0, pad_width, cv2.BORDER_CONSTANT)
40 |
41 | # convert numpy array to PIL image format
42 | # check if result_image is binary mask
43 | if len(result_image.shape) == 2:
44 | result_image = Image.fromarray(result_image, 'L')
45 | else:
46 | result_image = Image.fromarray(result_image.astype('uint8'), 'RGB')
47 |
48 | return result_image
49 |
50 |
51 | def crop(src_img, model_input_w, model_input_h, numpy_output = False):
52 | """
53 | Crops an image into tiles of provided width and height
54 |
55 | arguments:
56 | src_img(PIL image): image to be cropped into tiles
57 | model_input_w(int): input image width for model
58 | model_input_h(int): input image height for model
59 | numpy_output(bool): output a list of numpy arrays or PIL images(default)
60 |
61 | returns:
62 | list: a list of cropped tiles in numpy array or PIL image format
63 | """
64 |
65 | img_width, img_height = src_img.size
66 |
67 | images = []
68 | for i in range(img_height//model_input_h):
69 | hori = []
70 | for j in range(img_width//model_input_w):
71 | box = (j*model_input_w, i*model_input_h, (j+1)*model_input_w, (i+1)*model_input_h)
72 | images.append(src_img.crop(box))
73 |
74 | if numpy_output:
75 | images = np.array([np.array(img) for img in images])
76 |
77 | return images
78 |
79 |
80 | def resize_image(image, percent = 0.7, interpolation = None):
81 | """
82 | Resizes an image
83 | - best interpolation methods for image resizing
84 | Enlarge: INTER_LINEAR or INTER_CUBIC
85 | Shrink: INTER_AREA
86 |
87 | arguments:
88 | image (numpy array): original image to be resized
89 | percent (float): percentage of the original image size(e.g. 0.5 means 50% of the original size)
90 | interpolation (cv2 interpolation): interpolation method for resizing
91 | returns:
92 | numpy array: new resized image
93 | """
94 |
95 | ori_height = image.shape[0]
96 | ori_width = image.shape[1]
97 |
98 | # calculate target image height and width
99 | new_height = int(ori_height * percent)
100 | new_width = int(ori_width * percent)
101 |
102 | print(f'original height= {ori_height}, original width = {ori_width}')
103 | print(f'new height = {new_height}, new width = {new_width}')
104 |
105 | # resize image
106 | if interpolation:
107 | new_img = cv2.resize(image, (new_width , new_height), interpolation = interpolation)[:,:,:]
108 | else:
109 | new_img = cv2.resize(image, (new_width , new_height))[:,:,:]
110 |
111 | return new_img
112 |
113 |
114 | def get_flipped_images(image, flip_horizontal = True, flip_vertical = True):
115 | """
116 | Takes an image as input. Flip it horizontally and/or vertically and returns a list of flipped images.
117 |
118 | arguments:
119 | image(PIL Image): image to be flipped
120 | flip_horizontal(bool): flip the image horizontally ?
121 | flip_vertical(bool): flip the image vertically ?
122 |
123 | returns:
124 | list: a list of flipped images
125 | """
126 |
127 | image_list = [image]
128 |
129 | # flip the image horizontally
130 | if flip_horizontal:
131 | image_list.append(image.transpose(Image.FLIP_LEFT_RIGHT))
132 |
133 | # flip the image vertically
134 | if flip_vertical:
135 | image_list += [i.transpose(Image.FLIP_TOP_BOTTOM) for i in image_list]
136 |
137 | return image_list
138 |
139 |
140 | def get_img_name(area_name, idx, num_digit, flip_idx, angle):
141 | """
142 | Creates image name(e.g. area_1_0002_f0_a0)
143 |
144 | arguments:
145 | area_name(str): area name of cropped tiles
146 | idx(int): index number of tile
147 | num_digit(int): total number of digits for tile index
148 | flip_idx(int): 0, 1, 2, 3
149 | 0 = original image(not flipped)
150 | 1 = horizontal flip
151 | 2 = vertical flip of 0
152 | 3 = vertical flip of 1
153 | angle(int): rotation angle of image
154 |
155 | returns:
156 | name(str): complete name of current tile
157 | """
158 | num_zeros = num_digit - len(str(idx))
159 | zeros = ''
160 | for i in range(num_zeros): zeros+='0'
161 | name = f'{area_name}_{zeros}{str(idx)}_f{flip_idx}_a{str(angle)}'
162 |
163 | return name
164 |
165 |
166 | def generate_images(cropped_tiles,
167 | save_dir,
168 | area_name = 'area_1',
169 | img_format = 'png',
170 | num_digit = 4,
171 | rotations = [0],
172 | flip_horizontal = False,
173 | flip_vertical = False):
174 | """
175 | Save cropped tiles in a given directory.
176 |
177 | arguments:
178 | cropped_tiles(list): a list of cropped tiles in PIL Image format
179 | save_dir(str): directory to save images
180 | area_name(str): area name of cropped tiles(e.g. area1, area_1, etc.)
181 | img_format(str): 'png', 'jpg', etc.
182 | num_digit(int): total number of digits for tile index(e.g. 5 means 00001, 6 means 000001)
183 | rotations(list): a list of rotation angles (e.g. [90, 180, 270]) to rotate images and save them
184 | flip_horizontal(bool): flip image horizontally ?
185 | flip_vertical(bool): flip image vertically ?
186 | """
187 |
188 | # make directory to save images
189 | os.makedirs(save_dir, exist_ok = True)
190 |
191 | # save all images from list of cropped tiles
192 | for index, tile in enumerate(cropped_tiles):
193 |
194 | images = [tile]
195 |
196 | # flip image if flip is set to True
197 | if flip_horizontal == True or flip_vertical == True:
198 | images = get_flipped_images(tile, flip_horizontal, flip_vertical)
199 |
200 | flip_idx = 0
201 |
202 | # for all tiles in images list (flipped or not)
203 | for tile in images:
204 | # rotate images and save
205 | for angle in rotations:
206 |
207 | # rotate image
208 | rotated_tile = tile.rotate(angle)
209 |
210 | # get image name format
211 | img_name = get_img_name(area_name, index, num_digit, flip_idx, angle)
212 |
213 | # save image
214 | save_image_dir = os.path.join(save_dir,'{}.{}'.format(img_name, img_format))
215 | rotated_tile.save(save_image_dir)
216 | print(save_image_dir)
217 |
218 | flip_idx += 1
219 |
220 |
221 | def reconstruct(tiles_list, img_width, img_height, model_input_w, model_input_h, numpy_output = False):
222 | """
223 | Reconstruct one single image from a list of tiles
224 |
225 | arguments:
226 | tiles_list (list): a list of tiles(numpy array or PIL Image)
227 | img_width (int): width of image before padding/cropping
228 | img_height (int): height of image before padding/cropping
229 | model_input_w (int): input tile width for model
230 | model_input_h (int): input tile height for model
231 |
232 | returns:
233 | PIL image or numpy array: reconstructed image
234 | """
235 |
236 | # if input is in numpy array format, convert to PIL first
237 | if type(tiles_list[0]) == np.ndarray:
238 |
239 | # check if image is black & white
240 | if tiles_list[0].ndim == 2:
241 | tiles_list = [Image.fromarray(i, 'L') for i in tiles_list]
242 |
243 | # if RGB image
244 | else:
245 | tiles_list = [Image.fromarray(i.astype('uint8'), 'RGB') for i in tiles_list]
246 |
247 | first_image = tiles_list[0]
248 |
249 | num_row = int(np.ceil(img_height / model_input_h))
250 | num_col = int(np.ceil(img_width / model_input_w))
251 |
252 | # create a blank sheet
253 | contact_sheet=PIL.Image.new(first_image.mode, (first_image.width * num_col,first_image.height * num_row))
254 | x, y = 0, 0
255 |
256 | # paste tiles in blank sheet
257 | for img in tiles_list:
258 |
259 | # paste a single tile in sheet
260 | contact_sheet.paste(img, (x, y) )
261 |
262 | # calculate next position
263 | if x+first_image.width == contact_sheet.width:
264 | x=0
265 | y=y+first_image.height
266 | else:
267 | x=x+first_image.width
268 |
269 | # remove extra padded area
270 | crop_box = (0, 0, img_width, img_height)
271 | new_img = contact_sheet.crop(crop_box)
272 |
273 | if numpy_output:
274 | new_img = np.array(new_img)
275 |
276 | return new_img
--------------------------------------------------------------------------------
/utils/readme.md:
--------------------------------------------------------------------------------
1 | # This is the utilities folder
2 |
--------------------------------------------------------------------------------
/weights/Dice_EPOCH150_Unet.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/knwin/Building-Footprint-Extraction-From-Satellite-Images-With-Deep-Learning/ec99d96c7dfe9abc9c72c15c3fa392366f1caf30/weights/Dice_EPOCH150_Unet.h5
--------------------------------------------------------------------------------
/weights/Epoch1000_Unet.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/knwin/Building-Footprint-Extraction-From-Satellite-Images-With-Deep-Learning/ec99d96c7dfe9abc9c72c15c3fa392366f1caf30/weights/Epoch1000_Unet.h5
--------------------------------------------------------------------------------
/weights/readme.md:
--------------------------------------------------------------------------------
1 | # These are the weights of our pre-trained models
2 |
--------------------------------------------------------------------------------