├── LICENSE ├── Notebooks ├── Combine_the_Images.ipynb ├── DenseUnet.ipynb ├── Resunet.ipynb ├── Unet.ipynb ├── UnetPlusPlus.ipynb ├── UnetPlusPlus_LR.ipynb └── VggUnet.ipynb ├── README.md ├── images └── image.png └── models ├── AttUNet.py ├── DeepLabV2.py ├── DeepLabV3+.py ├── Deeplabv3.py ├── DenseUnet.py ├── FCN8.py ├── GhostNet.py ├── HRNet.py ├── ICNet.py ├── MobileNetFCN8.py ├── MobileNetUnet.py ├── MobileNext.py ├── NestedUNet.py ├── PSPNet.py ├── R2AttUNet.py ├── R2UNet.py ├── ResUnet.py ├── SEUNet.py ├── Segnet.py ├── UNet_Xception_ResNetBlock.py ├── Unet.py ├── VggUnet.py └── scSEUnet.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Ashish Patel 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Notebooks/Combine_the_Images.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Combine the Images.ipynb", 7 | "provenance": [] 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | }, 13 | "widgets": { 14 | "application/vnd.jupyter.widget-state+json": { 15 | "c8c3674c0f124c0fa705625874ddb10c": { 16 | "model_module": "@jupyter-widgets/controls", 17 | "model_name": "HBoxModel", 18 | "state": { 19 | "_view_name": "HBoxView", 20 | "_dom_classes": [], 21 | "_model_name": "HBoxModel", 22 | "_view_module": "@jupyter-widgets/controls", 23 | "_model_module_version": "1.5.0", 24 | "_view_count": null, 25 | "_view_module_version": "1.5.0", 26 | "box_style": "", 27 | "layout": "IPY_MODEL_d213c19dd0ec45738a83f8c155ff66b6", 28 | "_model_module": "@jupyter-widgets/controls", 29 | "children": [ 30 | "IPY_MODEL_cb4ce569c58b4143a5e6660bbf87c932", 31 | "IPY_MODEL_32503afa11b94b5b99bc8698fee0ea0f" 32 | ] 33 | } 34 | }, 35 | "d213c19dd0ec45738a83f8c155ff66b6": { 36 | "model_module": "@jupyter-widgets/base", 37 | "model_name": "LayoutModel", 38 | "state": { 39 | "_view_name": "LayoutView", 40 | "grid_template_rows": null, 41 | "right": null, 42 | "justify_content": null, 43 | "_view_module": "@jupyter-widgets/base", 44 | "overflow": null, 45 | "_model_module_version": "1.2.0", 46 | "_view_count": null, 47 | "flex_flow": null, 48 | "width": null, 49 | "min_width": null, 50 | "border": null, 51 | "align_items": null, 52 | "bottom": null, 53 | "_model_module": "@jupyter-widgets/base", 54 | "top": null, 55 | "grid_column": null, 56 | "overflow_y": null, 57 | "overflow_x": null, 58 | "grid_auto_flow": null, 59 | "grid_area": null, 60 | "grid_template_columns": null, 61 | "flex": null, 62 | "_model_name": "LayoutModel", 63 | "justify_items": 
null, 64 | "grid_row": null, 65 | "max_height": null, 66 | "align_content": null, 67 | "visibility": null, 68 | "align_self": null, 69 | "height": null, 70 | "min_height": null, 71 | "padding": null, 72 | "grid_auto_rows": null, 73 | "grid_gap": null, 74 | "max_width": null, 75 | "order": null, 76 | "_view_module_version": "1.2.0", 77 | "grid_template_areas": null, 78 | "object_position": null, 79 | "object_fit": null, 80 | "grid_auto_columns": null, 81 | "margin": null, 82 | "display": null, 83 | "left": null 84 | } 85 | }, 86 | "cb4ce569c58b4143a5e6660bbf87c932": { 87 | "model_module": "@jupyter-widgets/controls", 88 | "model_name": "FloatProgressModel", 89 | "state": { 90 | "_view_name": "ProgressView", 91 | "style": "IPY_MODEL_83b8addb0b6340f7bc3c4cd1d16c641b", 92 | "_dom_classes": [], 93 | "description": "100%", 94 | "_model_name": "FloatProgressModel", 95 | "bar_style": "success", 96 | "max": 64, 97 | "_view_module": "@jupyter-widgets/controls", 98 | "_model_module_version": "1.5.0", 99 | "value": 64, 100 | "_view_count": null, 101 | "_view_module_version": "1.5.0", 102 | "orientation": "horizontal", 103 | "min": 0, 104 | "description_tooltip": null, 105 | "_model_module": "@jupyter-widgets/controls", 106 | "layout": "IPY_MODEL_aeb7d01ae7dd4637944f191d2783c46b" 107 | } 108 | }, 109 | "32503afa11b94b5b99bc8698fee0ea0f": { 110 | "model_module": "@jupyter-widgets/controls", 111 | "model_name": "HTMLModel", 112 | "state": { 113 | "_view_name": "HTMLView", 114 | "style": "IPY_MODEL_aa989ac267a247829f6a817a0ae0de3e", 115 | "_dom_classes": [], 116 | "description": "", 117 | "_model_name": "HTMLModel", 118 | "placeholder": "​", 119 | "_view_module": "@jupyter-widgets/controls", 120 | "_model_module_version": "1.5.0", 121 | "value": " 64/64 [00:23<00:00, 2.70it/s]", 122 | "_view_count": null, 123 | "_view_module_version": "1.5.0", 124 | "description_tooltip": null, 125 | "_model_module": "@jupyter-widgets/controls", 126 | "layout": 
"IPY_MODEL_8c4e12c544d441e4921933c72984b2b2" 127 | } 128 | }, 129 | "83b8addb0b6340f7bc3c4cd1d16c641b": { 130 | "model_module": "@jupyter-widgets/controls", 131 | "model_name": "ProgressStyleModel", 132 | "state": { 133 | "_view_name": "StyleView", 134 | "_model_name": "ProgressStyleModel", 135 | "description_width": "initial", 136 | "_view_module": "@jupyter-widgets/base", 137 | "_model_module_version": "1.5.0", 138 | "_view_count": null, 139 | "_view_module_version": "1.2.0", 140 | "bar_color": null, 141 | "_model_module": "@jupyter-widgets/controls" 142 | } 143 | }, 144 | "aeb7d01ae7dd4637944f191d2783c46b": { 145 | "model_module": "@jupyter-widgets/base", 146 | "model_name": "LayoutModel", 147 | "state": { 148 | "_view_name": "LayoutView", 149 | "grid_template_rows": null, 150 | "right": null, 151 | "justify_content": null, 152 | "_view_module": "@jupyter-widgets/base", 153 | "overflow": null, 154 | "_model_module_version": "1.2.0", 155 | "_view_count": null, 156 | "flex_flow": null, 157 | "width": null, 158 | "min_width": null, 159 | "border": null, 160 | "align_items": null, 161 | "bottom": null, 162 | "_model_module": "@jupyter-widgets/base", 163 | "top": null, 164 | "grid_column": null, 165 | "overflow_y": null, 166 | "overflow_x": null, 167 | "grid_auto_flow": null, 168 | "grid_area": null, 169 | "grid_template_columns": null, 170 | "flex": null, 171 | "_model_name": "LayoutModel", 172 | "justify_items": null, 173 | "grid_row": null, 174 | "max_height": null, 175 | "align_content": null, 176 | "visibility": null, 177 | "align_self": null, 178 | "height": null, 179 | "min_height": null, 180 | "padding": null, 181 | "grid_auto_rows": null, 182 | "grid_gap": null, 183 | "max_width": null, 184 | "order": null, 185 | "_view_module_version": "1.2.0", 186 | "grid_template_areas": null, 187 | "object_position": null, 188 | "object_fit": null, 189 | "grid_auto_columns": null, 190 | "margin": null, 191 | "display": null, 192 | "left": null 193 | } 194 | }, 195 | 
"aa989ac267a247829f6a817a0ae0de3e": { 196 | "model_module": "@jupyter-widgets/controls", 197 | "model_name": "DescriptionStyleModel", 198 | "state": { 199 | "_view_name": "StyleView", 200 | "_model_name": "DescriptionStyleModel", 201 | "description_width": "", 202 | "_view_module": "@jupyter-widgets/base", 203 | "_model_module_version": "1.5.0", 204 | "_view_count": null, 205 | "_view_module_version": "1.2.0", 206 | "_model_module": "@jupyter-widgets/controls" 207 | } 208 | }, 209 | "8c4e12c544d441e4921933c72984b2b2": { 210 | "model_module": "@jupyter-widgets/base", 211 | "model_name": "LayoutModel", 212 | "state": { 213 | "_view_name": "LayoutView", 214 | "grid_template_rows": null, 215 | "right": null, 216 | "justify_content": null, 217 | "_view_module": "@jupyter-widgets/base", 218 | "overflow": null, 219 | "_model_module_version": "1.2.0", 220 | "_view_count": null, 221 | "flex_flow": null, 222 | "width": null, 223 | "min_width": null, 224 | "border": null, 225 | "align_items": null, 226 | "bottom": null, 227 | "_model_module": "@jupyter-widgets/base", 228 | "top": null, 229 | "grid_column": null, 230 | "overflow_y": null, 231 | "overflow_x": null, 232 | "grid_auto_flow": null, 233 | "grid_area": null, 234 | "grid_template_columns": null, 235 | "flex": null, 236 | "_model_name": "LayoutModel", 237 | "justify_items": null, 238 | "grid_row": null, 239 | "max_height": null, 240 | "align_content": null, 241 | "visibility": null, 242 | "align_self": null, 243 | "height": null, 244 | "min_height": null, 245 | "padding": null, 246 | "grid_auto_rows": null, 247 | "grid_gap": null, 248 | "max_width": null, 249 | "order": null, 250 | "_view_module_version": "1.2.0", 251 | "grid_template_areas": null, 252 | "object_position": null, 253 | "object_fit": null, 254 | "grid_auto_columns": null, 255 | "margin": null, 256 | "display": null, 257 | "left": null 258 | } 259 | }, 260 | "f254f52e3bac4dca8c6072d42eecd5d4": { 261 | "model_module": "@jupyter-widgets/controls", 262 | 
"model_name": "HBoxModel", 263 | "state": { 264 | "_view_name": "HBoxView", 265 | "_dom_classes": [], 266 | "_model_name": "HBoxModel", 267 | "_view_module": "@jupyter-widgets/controls", 268 | "_model_module_version": "1.5.0", 269 | "_view_count": null, 270 | "_view_module_version": "1.5.0", 271 | "box_style": "", 272 | "layout": "IPY_MODEL_2b9d198308e84313a2863bf36ed50d62", 273 | "_model_module": "@jupyter-widgets/controls", 274 | "children": [ 275 | "IPY_MODEL_5da8a59317454449bb20734a316c7642", 276 | "IPY_MODEL_0f5081a132864e5a9cf860905525cae5" 277 | ] 278 | } 279 | }, 280 | "2b9d198308e84313a2863bf36ed50d62": { 281 | "model_module": "@jupyter-widgets/base", 282 | "model_name": "LayoutModel", 283 | "state": { 284 | "_view_name": "LayoutView", 285 | "grid_template_rows": null, 286 | "right": null, 287 | "justify_content": null, 288 | "_view_module": "@jupyter-widgets/base", 289 | "overflow": null, 290 | "_model_module_version": "1.2.0", 291 | "_view_count": null, 292 | "flex_flow": null, 293 | "width": null, 294 | "min_width": null, 295 | "border": null, 296 | "align_items": null, 297 | "bottom": null, 298 | "_model_module": "@jupyter-widgets/base", 299 | "top": null, 300 | "grid_column": null, 301 | "overflow_y": null, 302 | "overflow_x": null, 303 | "grid_auto_flow": null, 304 | "grid_area": null, 305 | "grid_template_columns": null, 306 | "flex": null, 307 | "_model_name": "LayoutModel", 308 | "justify_items": null, 309 | "grid_row": null, 310 | "max_height": null, 311 | "align_content": null, 312 | "visibility": null, 313 | "align_self": null, 314 | "height": null, 315 | "min_height": null, 316 | "padding": null, 317 | "grid_auto_rows": null, 318 | "grid_gap": null, 319 | "max_width": null, 320 | "order": null, 321 | "_view_module_version": "1.2.0", 322 | "grid_template_areas": null, 323 | "object_position": null, 324 | "object_fit": null, 325 | "grid_auto_columns": null, 326 | "margin": null, 327 | "display": null, 328 | "left": null 329 | } 330 | }, 331 | 
"5da8a59317454449bb20734a316c7642": { 332 | "model_module": "@jupyter-widgets/controls", 333 | "model_name": "FloatProgressModel", 334 | "state": { 335 | "_view_name": "ProgressView", 336 | "style": "IPY_MODEL_dc08a4f6ce0741c7b00f4960edca931c", 337 | "_dom_classes": [], 338 | "description": "100%", 339 | "_model_name": "FloatProgressModel", 340 | "bar_style": "success", 341 | "max": 61, 342 | "_view_module": "@jupyter-widgets/controls", 343 | "_model_module_version": "1.5.0", 344 | "value": 61, 345 | "_view_count": null, 346 | "_view_module_version": "1.5.0", 347 | "orientation": "horizontal", 348 | "min": 0, 349 | "description_tooltip": null, 350 | "_model_module": "@jupyter-widgets/controls", 351 | "layout": "IPY_MODEL_3af97648993b4333b326415f407a7032" 352 | } 353 | }, 354 | "0f5081a132864e5a9cf860905525cae5": { 355 | "model_module": "@jupyter-widgets/controls", 356 | "model_name": "HTMLModel", 357 | "state": { 358 | "_view_name": "HTMLView", 359 | "style": "IPY_MODEL_15f42a5c573842128eff45fb7ebac60c", 360 | "_dom_classes": [], 361 | "description": "", 362 | "_model_name": "HTMLModel", 363 | "placeholder": "​", 364 | "_view_module": "@jupyter-widgets/controls", 365 | "_model_module_version": "1.5.0", 366 | "value": " 61/61 [00:02<00:00, 29.34it/s]", 367 | "_view_count": null, 368 | "_view_module_version": "1.5.0", 369 | "description_tooltip": null, 370 | "_model_module": "@jupyter-widgets/controls", 371 | "layout": "IPY_MODEL_43e58d608b284788aaab234ea0fcbc40" 372 | } 373 | }, 374 | "dc08a4f6ce0741c7b00f4960edca931c": { 375 | "model_module": "@jupyter-widgets/controls", 376 | "model_name": "ProgressStyleModel", 377 | "state": { 378 | "_view_name": "StyleView", 379 | "_model_name": "ProgressStyleModel", 380 | "description_width": "initial", 381 | "_view_module": "@jupyter-widgets/base", 382 | "_model_module_version": "1.5.0", 383 | "_view_count": null, 384 | "_view_module_version": "1.2.0", 385 | "bar_color": null, 386 | "_model_module": "@jupyter-widgets/controls" 
387 | } 388 | }, 389 | "3af97648993b4333b326415f407a7032": { 390 | "model_module": "@jupyter-widgets/base", 391 | "model_name": "LayoutModel", 392 | "state": { 393 | "_view_name": "LayoutView", 394 | "grid_template_rows": null, 395 | "right": null, 396 | "justify_content": null, 397 | "_view_module": "@jupyter-widgets/base", 398 | "overflow": null, 399 | "_model_module_version": "1.2.0", 400 | "_view_count": null, 401 | "flex_flow": null, 402 | "width": null, 403 | "min_width": null, 404 | "border": null, 405 | "align_items": null, 406 | "bottom": null, 407 | "_model_module": "@jupyter-widgets/base", 408 | "top": null, 409 | "grid_column": null, 410 | "overflow_y": null, 411 | "overflow_x": null, 412 | "grid_auto_flow": null, 413 | "grid_area": null, 414 | "grid_template_columns": null, 415 | "flex": null, 416 | "_model_name": "LayoutModel", 417 | "justify_items": null, 418 | "grid_row": null, 419 | "max_height": null, 420 | "align_content": null, 421 | "visibility": null, 422 | "align_self": null, 423 | "height": null, 424 | "min_height": null, 425 | "padding": null, 426 | "grid_auto_rows": null, 427 | "grid_gap": null, 428 | "max_width": null, 429 | "order": null, 430 | "_view_module_version": "1.2.0", 431 | "grid_template_areas": null, 432 | "object_position": null, 433 | "object_fit": null, 434 | "grid_auto_columns": null, 435 | "margin": null, 436 | "display": null, 437 | "left": null 438 | } 439 | }, 440 | "15f42a5c573842128eff45fb7ebac60c": { 441 | "model_module": "@jupyter-widgets/controls", 442 | "model_name": "DescriptionStyleModel", 443 | "state": { 444 | "_view_name": "StyleView", 445 | "_model_name": "DescriptionStyleModel", 446 | "description_width": "", 447 | "_view_module": "@jupyter-widgets/base", 448 | "_model_module_version": "1.5.0", 449 | "_view_count": null, 450 | "_view_module_version": "1.2.0", 451 | "_model_module": "@jupyter-widgets/controls" 452 | } 453 | }, 454 | "43e58d608b284788aaab234ea0fcbc40": { 455 | "model_module": 
"@jupyter-widgets/base", 456 | "model_name": "LayoutModel", 457 | "state": { 458 | "_view_name": "LayoutView", 459 | "grid_template_rows": null, 460 | "right": null, 461 | "justify_content": null, 462 | "_view_module": "@jupyter-widgets/base", 463 | "overflow": null, 464 | "_model_module_version": "1.2.0", 465 | "_view_count": null, 466 | "flex_flow": null, 467 | "width": null, 468 | "min_width": null, 469 | "border": null, 470 | "align_items": null, 471 | "bottom": null, 472 | "_model_module": "@jupyter-widgets/base", 473 | "top": null, 474 | "grid_column": null, 475 | "overflow_y": null, 476 | "overflow_x": null, 477 | "grid_auto_flow": null, 478 | "grid_area": null, 479 | "grid_template_columns": null, 480 | "flex": null, 481 | "_model_name": "LayoutModel", 482 | "justify_items": null, 483 | "grid_row": null, 484 | "max_height": null, 485 | "align_content": null, 486 | "visibility": null, 487 | "align_self": null, 488 | "height": null, 489 | "min_height": null, 490 | "padding": null, 491 | "grid_auto_rows": null, 492 | "grid_gap": null, 493 | "max_width": null, 494 | "order": null, 495 | "_view_module_version": "1.2.0", 496 | "grid_template_areas": null, 497 | "object_position": null, 498 | "object_fit": null, 499 | "grid_auto_columns": null, 500 | "margin": null, 501 | "display": null, 502 | "left": null 503 | } 504 | }, 505 | "c6f2dc82e3974349b4db34ac81538a12": { 506 | "model_module": "@jupyter-widgets/controls", 507 | "model_name": "HBoxModel", 508 | "state": { 509 | "_view_name": "HBoxView", 510 | "_dom_classes": [], 511 | "_model_name": "HBoxModel", 512 | "_view_module": "@jupyter-widgets/controls", 513 | "_model_module_version": "1.5.0", 514 | "_view_count": null, 515 | "_view_module_version": "1.5.0", 516 | "box_style": "", 517 | "layout": "IPY_MODEL_ed648b306e794f089451a9ba57919c10", 518 | "_model_module": "@jupyter-widgets/controls", 519 | "children": [ 520 | "IPY_MODEL_7bb1aceee6cc49b69782fc62e0d927da", 521 | 
"IPY_MODEL_cfb0550c2fde4580bb9cfe94212aa93b" 522 | ] 523 | } 524 | }, 525 | "ed648b306e794f089451a9ba57919c10": { 526 | "model_module": "@jupyter-widgets/base", 527 | "model_name": "LayoutModel", 528 | "state": { 529 | "_view_name": "LayoutView", 530 | "grid_template_rows": null, 531 | "right": null, 532 | "justify_content": null, 533 | "_view_module": "@jupyter-widgets/base", 534 | "overflow": null, 535 | "_model_module_version": "1.2.0", 536 | "_view_count": null, 537 | "flex_flow": null, 538 | "width": null, 539 | "min_width": null, 540 | "border": null, 541 | "align_items": null, 542 | "bottom": null, 543 | "_model_module": "@jupyter-widgets/base", 544 | "top": null, 545 | "grid_column": null, 546 | "overflow_y": null, 547 | "overflow_x": null, 548 | "grid_auto_flow": null, 549 | "grid_area": null, 550 | "grid_template_columns": null, 551 | "flex": null, 552 | "_model_name": "LayoutModel", 553 | "justify_items": null, 554 | "grid_row": null, 555 | "max_height": null, 556 | "align_content": null, 557 | "visibility": null, 558 | "align_self": null, 559 | "height": null, 560 | "min_height": null, 561 | "padding": null, 562 | "grid_auto_rows": null, 563 | "grid_gap": null, 564 | "max_width": null, 565 | "order": null, 566 | "_view_module_version": "1.2.0", 567 | "grid_template_areas": null, 568 | "object_position": null, 569 | "object_fit": null, 570 | "grid_auto_columns": null, 571 | "margin": null, 572 | "display": null, 573 | "left": null 574 | } 575 | }, 576 | "7bb1aceee6cc49b69782fc62e0d927da": { 577 | "model_module": "@jupyter-widgets/controls", 578 | "model_name": "FloatProgressModel", 579 | "state": { 580 | "_view_name": "ProgressView", 581 | "style": "IPY_MODEL_5b825e82b335419184c0bd35bf28f19b", 582 | "_dom_classes": [], 583 | "description": "100%", 584 | "_model_name": "FloatProgressModel", 585 | "bar_style": "success", 586 | "max": 34, 587 | "_view_module": "@jupyter-widgets/controls", 588 | "_model_module_version": "1.5.0", 589 | "value": 34, 590 | 
"_view_count": null, 591 | "_view_module_version": "1.5.0", 592 | "orientation": "horizontal", 593 | "min": 0, 594 | "description_tooltip": null, 595 | "_model_module": "@jupyter-widgets/controls", 596 | "layout": "IPY_MODEL_01c3909d9114491280fd4d2bbd667af7" 597 | } 598 | }, 599 | "cfb0550c2fde4580bb9cfe94212aa93b": { 600 | "model_module": "@jupyter-widgets/controls", 601 | "model_name": "HTMLModel", 602 | "state": { 603 | "_view_name": "HTMLView", 604 | "style": "IPY_MODEL_38ee1f124ca1437c9d3039e1a48b23e9", 605 | "_dom_classes": [], 606 | "description": "", 607 | "_model_name": "HTMLModel", 608 | "placeholder": "​", 609 | "_view_module": "@jupyter-widgets/controls", 610 | "_model_module_version": "1.5.0", 611 | "value": " 34/34 [00:01<00:00, 20.05it/s]", 612 | "_view_count": null, 613 | "_view_module_version": "1.5.0", 614 | "description_tooltip": null, 615 | "_model_module": "@jupyter-widgets/controls", 616 | "layout": "IPY_MODEL_eff9e5f566004cd2974533cdd831c6b9" 617 | } 618 | }, 619 | "5b825e82b335419184c0bd35bf28f19b": { 620 | "model_module": "@jupyter-widgets/controls", 621 | "model_name": "ProgressStyleModel", 622 | "state": { 623 | "_view_name": "StyleView", 624 | "_model_name": "ProgressStyleModel", 625 | "description_width": "initial", 626 | "_view_module": "@jupyter-widgets/base", 627 | "_model_module_version": "1.5.0", 628 | "_view_count": null, 629 | "_view_module_version": "1.2.0", 630 | "bar_color": null, 631 | "_model_module": "@jupyter-widgets/controls" 632 | } 633 | }, 634 | "01c3909d9114491280fd4d2bbd667af7": { 635 | "model_module": "@jupyter-widgets/base", 636 | "model_name": "LayoutModel", 637 | "state": { 638 | "_view_name": "LayoutView", 639 | "grid_template_rows": null, 640 | "right": null, 641 | "justify_content": null, 642 | "_view_module": "@jupyter-widgets/base", 643 | "overflow": null, 644 | "_model_module_version": "1.2.0", 645 | "_view_count": null, 646 | "flex_flow": null, 647 | "width": null, 648 | "min_width": null, 649 | "border": 
null, 650 | "align_items": null, 651 | "bottom": null, 652 | "_model_module": "@jupyter-widgets/base", 653 | "top": null, 654 | "grid_column": null, 655 | "overflow_y": null, 656 | "overflow_x": null, 657 | "grid_auto_flow": null, 658 | "grid_area": null, 659 | "grid_template_columns": null, 660 | "flex": null, 661 | "_model_name": "LayoutModel", 662 | "justify_items": null, 663 | "grid_row": null, 664 | "max_height": null, 665 | "align_content": null, 666 | "visibility": null, 667 | "align_self": null, 668 | "height": null, 669 | "min_height": null, 670 | "padding": null, 671 | "grid_auto_rows": null, 672 | "grid_gap": null, 673 | "max_width": null, 674 | "order": null, 675 | "_view_module_version": "1.2.0", 676 | "grid_template_areas": null, 677 | "object_position": null, 678 | "object_fit": null, 679 | "grid_auto_columns": null, 680 | "margin": null, 681 | "display": null, 682 | "left": null 683 | } 684 | }, 685 | "38ee1f124ca1437c9d3039e1a48b23e9": { 686 | "model_module": "@jupyter-widgets/controls", 687 | "model_name": "DescriptionStyleModel", 688 | "state": { 689 | "_view_name": "StyleView", 690 | "_model_name": "DescriptionStyleModel", 691 | "description_width": "", 692 | "_view_module": "@jupyter-widgets/base", 693 | "_model_module_version": "1.5.0", 694 | "_view_count": null, 695 | "_view_module_version": "1.2.0", 696 | "_model_module": "@jupyter-widgets/controls" 697 | } 698 | }, 699 | "eff9e5f566004cd2974533cdd831c6b9": { 700 | "model_module": "@jupyter-widgets/base", 701 | "model_name": "LayoutModel", 702 | "state": { 703 | "_view_name": "LayoutView", 704 | "grid_template_rows": null, 705 | "right": null, 706 | "justify_content": null, 707 | "_view_module": "@jupyter-widgets/base", 708 | "overflow": null, 709 | "_model_module_version": "1.2.0", 710 | "_view_count": null, 711 | "flex_flow": null, 712 | "width": null, 713 | "min_width": null, 714 | "border": null, 715 | "align_items": null, 716 | "bottom": null, 717 | "_model_module": 
"@jupyter-widgets/base", 718 | "top": null, 719 | "grid_column": null, 720 | "overflow_y": null, 721 | "overflow_x": null, 722 | "grid_auto_flow": null, 723 | "grid_area": null, 724 | "grid_template_columns": null, 725 | "flex": null, 726 | "_model_name": "LayoutModel", 727 | "justify_items": null, 728 | "grid_row": null, 729 | "max_height": null, 730 | "align_content": null, 731 | "visibility": null, 732 | "align_self": null, 733 | "height": null, 734 | "min_height": null, 735 | "padding": null, 736 | "grid_auto_rows": null, 737 | "grid_gap": null, 738 | "max_width": null, 739 | "order": null, 740 | "_view_module_version": "1.2.0", 741 | "grid_template_areas": null, 742 | "object_position": null, 743 | "object_fit": null, 744 | "grid_auto_columns": null, 745 | "margin": null, 746 | "display": null, 747 | "left": null 748 | } 749 | }, 750 | "817fbe88190e400ba29117b1dd761591": { 751 | "model_module": "@jupyter-widgets/controls", 752 | "model_name": "HBoxModel", 753 | "state": { 754 | "_view_name": "HBoxView", 755 | "_dom_classes": [], 756 | "_model_name": "HBoxModel", 757 | "_view_module": "@jupyter-widgets/controls", 758 | "_model_module_version": "1.5.0", 759 | "_view_count": null, 760 | "_view_module_version": "1.5.0", 761 | "box_style": "", 762 | "layout": "IPY_MODEL_683415a93c9b488fb2626554f2eab577", 763 | "_model_module": "@jupyter-widgets/controls", 764 | "children": [ 765 | "IPY_MODEL_d9961cd98ca941f5bb34ea9b3c1e4a68", 766 | "IPY_MODEL_7b866b28bf924342a8348902a909bd28" 767 | ] 768 | } 769 | }, 770 | "683415a93c9b488fb2626554f2eab577": { 771 | "model_module": "@jupyter-widgets/base", 772 | "model_name": "LayoutModel", 773 | "state": { 774 | "_view_name": "LayoutView", 775 | "grid_template_rows": null, 776 | "right": null, 777 | "justify_content": null, 778 | "_view_module": "@jupyter-widgets/base", 779 | "overflow": null, 780 | "_model_module_version": "1.2.0", 781 | "_view_count": null, 782 | "flex_flow": null, 783 | "width": null, 784 | "min_width": null, 
785 | "border": null, 786 | "align_items": null, 787 | "bottom": null, 788 | "_model_module": "@jupyter-widgets/base", 789 | "top": null, 790 | "grid_column": null, 791 | "overflow_y": null, 792 | "overflow_x": null, 793 | "grid_auto_flow": null, 794 | "grid_area": null, 795 | "grid_template_columns": null, 796 | "flex": null, 797 | "_model_name": "LayoutModel", 798 | "justify_items": null, 799 | "grid_row": null, 800 | "max_height": null, 801 | "align_content": null, 802 | "visibility": null, 803 | "align_self": null, 804 | "height": null, 805 | "min_height": null, 806 | "padding": null, 807 | "grid_auto_rows": null, 808 | "grid_gap": null, 809 | "max_width": null, 810 | "order": null, 811 | "_view_module_version": "1.2.0", 812 | "grid_template_areas": null, 813 | "object_position": null, 814 | "object_fit": null, 815 | "grid_auto_columns": null, 816 | "margin": null, 817 | "display": null, 818 | "left": null 819 | } 820 | }, 821 | "d9961cd98ca941f5bb34ea9b3c1e4a68": { 822 | "model_module": "@jupyter-widgets/controls", 823 | "model_name": "FloatProgressModel", 824 | "state": { 825 | "_view_name": "ProgressView", 826 | "style": "IPY_MODEL_833ef428df384593a2e765f92d1c2e2c", 827 | "_dom_classes": [], 828 | "description": "100%", 829 | "_model_name": "FloatProgressModel", 830 | "bar_style": "success", 831 | "max": 55, 832 | "_view_module": "@jupyter-widgets/controls", 833 | "_model_module_version": "1.5.0", 834 | "value": 55, 835 | "_view_count": null, 836 | "_view_module_version": "1.5.0", 837 | "orientation": "horizontal", 838 | "min": 0, 839 | "description_tooltip": null, 840 | "_model_module": "@jupyter-widgets/controls", 841 | "layout": "IPY_MODEL_1dd837dd24644b3c99821d6bebaba435" 842 | } 843 | }, 844 | "7b866b28bf924342a8348902a909bd28": { 845 | "model_module": "@jupyter-widgets/controls", 846 | "model_name": "HTMLModel", 847 | "state": { 848 | "_view_name": "HTMLView", 849 | "style": "IPY_MODEL_e328254a5fc34548a3b9accf8aa63e73", 850 | "_dom_classes": [], 851 | 
"description": "", 852 | "_model_name": "HTMLModel", 853 | "placeholder": "​", 854 | "_view_module": "@jupyter-widgets/controls", 855 | "_model_module_version": "1.5.0", 856 | "value": " 55/55 [00:01<00:00, 41.64it/s]", 857 | "_view_count": null, 858 | "_view_module_version": "1.5.0", 859 | "description_tooltip": null, 860 | "_model_module": "@jupyter-widgets/controls", 861 | "layout": "IPY_MODEL_142ffb8992c041e6bd3d9773dc60c3e0" 862 | } 863 | }, 864 | "833ef428df384593a2e765f92d1c2e2c": { 865 | "model_module": "@jupyter-widgets/controls", 866 | "model_name": "ProgressStyleModel", 867 | "state": { 868 | "_view_name": "StyleView", 869 | "_model_name": "ProgressStyleModel", 870 | "description_width": "initial", 871 | "_view_module": "@jupyter-widgets/base", 872 | "_model_module_version": "1.5.0", 873 | "_view_count": null, 874 | "_view_module_version": "1.2.0", 875 | "bar_color": null, 876 | "_model_module": "@jupyter-widgets/controls" 877 | } 878 | }, 879 | "1dd837dd24644b3c99821d6bebaba435": { 880 | "model_module": "@jupyter-widgets/base", 881 | "model_name": "LayoutModel", 882 | "state": { 883 | "_view_name": "LayoutView", 884 | "grid_template_rows": null, 885 | "right": null, 886 | "justify_content": null, 887 | "_view_module": "@jupyter-widgets/base", 888 | "overflow": null, 889 | "_model_module_version": "1.2.0", 890 | "_view_count": null, 891 | "flex_flow": null, 892 | "width": null, 893 | "min_width": null, 894 | "border": null, 895 | "align_items": null, 896 | "bottom": null, 897 | "_model_module": "@jupyter-widgets/base", 898 | "top": null, 899 | "grid_column": null, 900 | "overflow_y": null, 901 | "overflow_x": null, 902 | "grid_auto_flow": null, 903 | "grid_area": null, 904 | "grid_template_columns": null, 905 | "flex": null, 906 | "_model_name": "LayoutModel", 907 | "justify_items": null, 908 | "grid_row": null, 909 | "max_height": null, 910 | "align_content": null, 911 | "visibility": null, 912 | "align_self": null, 913 | "height": null, 914 | 
"min_height": null, 915 | "padding": null, 916 | "grid_auto_rows": null, 917 | "grid_gap": null, 918 | "max_width": null, 919 | "order": null, 920 | "_view_module_version": "1.2.0", 921 | "grid_template_areas": null, 922 | "object_position": null, 923 | "object_fit": null, 924 | "grid_auto_columns": null, 925 | "margin": null, 926 | "display": null, 927 | "left": null 928 | } 929 | }, 930 | "e328254a5fc34548a3b9accf8aa63e73": { 931 | "model_module": "@jupyter-widgets/controls", 932 | "model_name": "DescriptionStyleModel", 933 | "state": { 934 | "_view_name": "StyleView", 935 | "_model_name": "DescriptionStyleModel", 936 | "description_width": "", 937 | "_view_module": "@jupyter-widgets/base", 938 | "_model_module_version": "1.5.0", 939 | "_view_count": null, 940 | "_view_module_version": "1.2.0", 941 | "_model_module": "@jupyter-widgets/controls" 942 | } 943 | }, 944 | "142ffb8992c041e6bd3d9773dc60c3e0": { 945 | "model_module": "@jupyter-widgets/base", 946 | "model_name": "LayoutModel", 947 | "state": { 948 | "_view_name": "LayoutView", 949 | "grid_template_rows": null, 950 | "right": null, 951 | "justify_content": null, 952 | "_view_module": "@jupyter-widgets/base", 953 | "overflow": null, 954 | "_model_module_version": "1.2.0", 955 | "_view_count": null, 956 | "flex_flow": null, 957 | "width": null, 958 | "min_width": null, 959 | "border": null, 960 | "align_items": null, 961 | "bottom": null, 962 | "_model_module": "@jupyter-widgets/base", 963 | "top": null, 964 | "grid_column": null, 965 | "overflow_y": null, 966 | "overflow_x": null, 967 | "grid_auto_flow": null, 968 | "grid_area": null, 969 | "grid_template_columns": null, 970 | "flex": null, 971 | "_model_name": "LayoutModel", 972 | "justify_items": null, 973 | "grid_row": null, 974 | "max_height": null, 975 | "align_content": null, 976 | "visibility": null, 977 | "align_self": null, 978 | "height": null, 979 | "min_height": null, 980 | "padding": null, 981 | "grid_auto_rows": null, 982 | "grid_gap": null, 
983 | "max_width": null, 984 | "order": null, 985 | "_view_module_version": "1.2.0", 986 | "grid_template_areas": null, 987 | "object_position": null, 988 | "object_fit": null, 989 | "grid_auto_columns": null, 990 | "margin": null, 991 | "display": null, 992 | "left": null 993 | } 994 | }, 995 | "8c76d372a8124cf59af79542e607ce13": { 996 | "model_module": "@jupyter-widgets/controls", 997 | "model_name": "HBoxModel", 998 | "state": { 999 | "_view_name": "HBoxView", 1000 | "_dom_classes": [], 1001 | "_model_name": "HBoxModel", 1002 | "_view_module": "@jupyter-widgets/controls", 1003 | "_model_module_version": "1.5.0", 1004 | "_view_count": null, 1005 | "_view_module_version": "1.5.0", 1006 | "box_style": "", 1007 | "layout": "IPY_MODEL_8411af94f2b84a479026bda52232b623", 1008 | "_model_module": "@jupyter-widgets/controls", 1009 | "children": [ 1010 | "IPY_MODEL_b8ef75fa15574ca6abe6bd4cd89d7c9a", 1011 | "IPY_MODEL_08994dee859e49468cc361ce0aaae31b" 1012 | ] 1013 | } 1014 | }, 1015 | "8411af94f2b84a479026bda52232b623": { 1016 | "model_module": "@jupyter-widgets/base", 1017 | "model_name": "LayoutModel", 1018 | "state": { 1019 | "_view_name": "LayoutView", 1020 | "grid_template_rows": null, 1021 | "right": null, 1022 | "justify_content": null, 1023 | "_view_module": "@jupyter-widgets/base", 1024 | "overflow": null, 1025 | "_model_module_version": "1.2.0", 1026 | "_view_count": null, 1027 | "flex_flow": null, 1028 | "width": null, 1029 | "min_width": null, 1030 | "border": null, 1031 | "align_items": null, 1032 | "bottom": null, 1033 | "_model_module": "@jupyter-widgets/base", 1034 | "top": null, 1035 | "grid_column": null, 1036 | "overflow_y": null, 1037 | "overflow_x": null, 1038 | "grid_auto_flow": null, 1039 | "grid_area": null, 1040 | "grid_template_columns": null, 1041 | "flex": null, 1042 | "_model_name": "LayoutModel", 1043 | "justify_items": null, 1044 | "grid_row": null, 1045 | "max_height": null, 1046 | "align_content": null, 1047 | "visibility": null, 1048 | 
"align_self": null, 1049 | "height": null, 1050 | "min_height": null, 1051 | "padding": null, 1052 | "grid_auto_rows": null, 1053 | "grid_gap": null, 1054 | "max_width": null, 1055 | "order": null, 1056 | "_view_module_version": "1.2.0", 1057 | "grid_template_areas": null, 1058 | "object_position": null, 1059 | "object_fit": null, 1060 | "grid_auto_columns": null, 1061 | "margin": null, 1062 | "display": null, 1063 | "left": null 1064 | } 1065 | }, 1066 | "b8ef75fa15574ca6abe6bd4cd89d7c9a": { 1067 | "model_module": "@jupyter-widgets/controls", 1068 | "model_name": "FloatProgressModel", 1069 | "state": { 1070 | "_view_name": "ProgressView", 1071 | "style": "IPY_MODEL_562d0e227d6d49c38da0e5f7434358bf", 1072 | "_dom_classes": [], 1073 | "description": "100%", 1074 | "_model_name": "FloatProgressModel", 1075 | "bar_style": "success", 1076 | "max": 64, 1077 | "_view_module": "@jupyter-widgets/controls", 1078 | "_model_module_version": "1.5.0", 1079 | "value": 64, 1080 | "_view_count": null, 1081 | "_view_module_version": "1.5.0", 1082 | "orientation": "horizontal", 1083 | "min": 0, 1084 | "description_tooltip": null, 1085 | "_model_module": "@jupyter-widgets/controls", 1086 | "layout": "IPY_MODEL_3e4a5de37bcb427abad7b98d334b6077" 1087 | } 1088 | }, 1089 | "08994dee859e49468cc361ce0aaae31b": { 1090 | "model_module": "@jupyter-widgets/controls", 1091 | "model_name": "HTMLModel", 1092 | "state": { 1093 | "_view_name": "HTMLView", 1094 | "style": "IPY_MODEL_e770d160e8cc4165b505f9603e7f96c9", 1095 | "_dom_classes": [], 1096 | "description": "", 1097 | "_model_name": "HTMLModel", 1098 | "placeholder": "​", 1099 | "_view_module": "@jupyter-widgets/controls", 1100 | "_model_module_version": "1.5.0", 1101 | "value": " 64/64 [00:00<00:00, 70.15it/s]", 1102 | "_view_count": null, 1103 | "_view_module_version": "1.5.0", 1104 | "description_tooltip": null, 1105 | "_model_module": "@jupyter-widgets/controls", 1106 | "layout": "IPY_MODEL_39b05f1c363b479d912a1d49fcdad017" 1107 | } 1108 
| }, 1109 | "562d0e227d6d49c38da0e5f7434358bf": { 1110 | "model_module": "@jupyter-widgets/controls", 1111 | "model_name": "ProgressStyleModel", 1112 | "state": { 1113 | "_view_name": "StyleView", 1114 | "_model_name": "ProgressStyleModel", 1115 | "description_width": "initial", 1116 | "_view_module": "@jupyter-widgets/base", 1117 | "_model_module_version": "1.5.0", 1118 | "_view_count": null, 1119 | "_view_module_version": "1.2.0", 1120 | "bar_color": null, 1121 | "_model_module": "@jupyter-widgets/controls" 1122 | } 1123 | }, 1124 | "3e4a5de37bcb427abad7b98d334b6077": { 1125 | "model_module": "@jupyter-widgets/base", 1126 | "model_name": "LayoutModel", 1127 | "state": { 1128 | "_view_name": "LayoutView", 1129 | "grid_template_rows": null, 1130 | "right": null, 1131 | "justify_content": null, 1132 | "_view_module": "@jupyter-widgets/base", 1133 | "overflow": null, 1134 | "_model_module_version": "1.2.0", 1135 | "_view_count": null, 1136 | "flex_flow": null, 1137 | "width": null, 1138 | "min_width": null, 1139 | "border": null, 1140 | "align_items": null, 1141 | "bottom": null, 1142 | "_model_module": "@jupyter-widgets/base", 1143 | "top": null, 1144 | "grid_column": null, 1145 | "overflow_y": null, 1146 | "overflow_x": null, 1147 | "grid_auto_flow": null, 1148 | "grid_area": null, 1149 | "grid_template_columns": null, 1150 | "flex": null, 1151 | "_model_name": "LayoutModel", 1152 | "justify_items": null, 1153 | "grid_row": null, 1154 | "max_height": null, 1155 | "align_content": null, 1156 | "visibility": null, 1157 | "align_self": null, 1158 | "height": null, 1159 | "min_height": null, 1160 | "padding": null, 1161 | "grid_auto_rows": null, 1162 | "grid_gap": null, 1163 | "max_width": null, 1164 | "order": null, 1165 | "_view_module_version": "1.2.0", 1166 | "grid_template_areas": null, 1167 | "object_position": null, 1168 | "object_fit": null, 1169 | "grid_auto_columns": null, 1170 | "margin": null, 1171 | "display": null, 1172 | "left": null 1173 | } 1174 | }, 
1175 | "e770d160e8cc4165b505f9603e7f96c9": { 1176 | "model_module": "@jupyter-widgets/controls", 1177 | "model_name": "DescriptionStyleModel", 1178 | "state": { 1179 | "_view_name": "StyleView", 1180 | "_model_name": "DescriptionStyleModel", 1181 | "description_width": "", 1182 | "_view_module": "@jupyter-widgets/base", 1183 | "_model_module_version": "1.5.0", 1184 | "_view_count": null, 1185 | "_view_module_version": "1.2.0", 1186 | "_model_module": "@jupyter-widgets/controls" 1187 | } 1188 | }, 1189 | "39b05f1c363b479d912a1d49fcdad017": { 1190 | "model_module": "@jupyter-widgets/base", 1191 | "model_name": "LayoutModel", 1192 | "state": { 1193 | "_view_name": "LayoutView", 1194 | "grid_template_rows": null, 1195 | "right": null, 1196 | "justify_content": null, 1197 | "_view_module": "@jupyter-widgets/base", 1198 | "overflow": null, 1199 | "_model_module_version": "1.2.0", 1200 | "_view_count": null, 1201 | "flex_flow": null, 1202 | "width": null, 1203 | "min_width": null, 1204 | "border": null, 1205 | "align_items": null, 1206 | "bottom": null, 1207 | "_model_module": "@jupyter-widgets/base", 1208 | "top": null, 1209 | "grid_column": null, 1210 | "overflow_y": null, 1211 | "overflow_x": null, 1212 | "grid_auto_flow": null, 1213 | "grid_area": null, 1214 | "grid_template_columns": null, 1215 | "flex": null, 1216 | "_model_name": "LayoutModel", 1217 | "justify_items": null, 1218 | "grid_row": null, 1219 | "max_height": null, 1220 | "align_content": null, 1221 | "visibility": null, 1222 | "align_self": null, 1223 | "height": null, 1224 | "min_height": null, 1225 | "padding": null, 1226 | "grid_auto_rows": null, 1227 | "grid_gap": null, 1228 | "max_width": null, 1229 | "order": null, 1230 | "_view_module_version": "1.2.0", 1231 | "grid_template_areas": null, 1232 | "object_position": null, 1233 | "object_fit": null, 1234 | "grid_auto_columns": null, 1235 | "margin": null, 1236 | "display": null, 1237 | "left": null 1238 | } 1239 | }, 1240 | 
"31ccb54c53274c8caebbace0d395d961": { 1241 | "model_module": "@jupyter-widgets/controls", 1242 | "model_name": "HBoxModel", 1243 | "state": { 1244 | "_view_name": "HBoxView", 1245 | "_dom_classes": [], 1246 | "_model_name": "HBoxModel", 1247 | "_view_module": "@jupyter-widgets/controls", 1248 | "_model_module_version": "1.5.0", 1249 | "_view_count": null, 1250 | "_view_module_version": "1.5.0", 1251 | "box_style": "", 1252 | "layout": "IPY_MODEL_b3a1040ceaa94b38ae7609919e52126e", 1253 | "_model_module": "@jupyter-widgets/controls", 1254 | "children": [ 1255 | "IPY_MODEL_13b79b324f164372a5bc1d516f31168b", 1256 | "IPY_MODEL_57a3c88f08ee454c80c1fb4d9c141b5a" 1257 | ] 1258 | } 1259 | }, 1260 | "b3a1040ceaa94b38ae7609919e52126e": { 1261 | "model_module": "@jupyter-widgets/base", 1262 | "model_name": "LayoutModel", 1263 | "state": { 1264 | "_view_name": "LayoutView", 1265 | "grid_template_rows": null, 1266 | "right": null, 1267 | "justify_content": null, 1268 | "_view_module": "@jupyter-widgets/base", 1269 | "overflow": null, 1270 | "_model_module_version": "1.2.0", 1271 | "_view_count": null, 1272 | "flex_flow": null, 1273 | "width": null, 1274 | "min_width": null, 1275 | "border": null, 1276 | "align_items": null, 1277 | "bottom": null, 1278 | "_model_module": "@jupyter-widgets/base", 1279 | "top": null, 1280 | "grid_column": null, 1281 | "overflow_y": null, 1282 | "overflow_x": null, 1283 | "grid_auto_flow": null, 1284 | "grid_area": null, 1285 | "grid_template_columns": null, 1286 | "flex": null, 1287 | "_model_name": "LayoutModel", 1288 | "justify_items": null, 1289 | "grid_row": null, 1290 | "max_height": null, 1291 | "align_content": null, 1292 | "visibility": null, 1293 | "align_self": null, 1294 | "height": null, 1295 | "min_height": null, 1296 | "padding": null, 1297 | "grid_auto_rows": null, 1298 | "grid_gap": null, 1299 | "max_width": null, 1300 | "order": null, 1301 | "_view_module_version": "1.2.0", 1302 | "grid_template_areas": null, 1303 | 
"object_position": null, 1304 | "object_fit": null, 1305 | "grid_auto_columns": null, 1306 | "margin": null, 1307 | "display": null, 1308 | "left": null 1309 | } 1310 | }, 1311 | "13b79b324f164372a5bc1d516f31168b": { 1312 | "model_module": "@jupyter-widgets/controls", 1313 | "model_name": "FloatProgressModel", 1314 | "state": { 1315 | "_view_name": "ProgressView", 1316 | "style": "IPY_MODEL_e7a53161b42646bca93a11a9185d05fe", 1317 | "_dom_classes": [], 1318 | "description": "100%", 1319 | "_model_name": "FloatProgressModel", 1320 | "bar_style": "success", 1321 | "max": 61, 1322 | "_view_module": "@jupyter-widgets/controls", 1323 | "_model_module_version": "1.5.0", 1324 | "value": 61, 1325 | "_view_count": null, 1326 | "_view_module_version": "1.5.0", 1327 | "orientation": "horizontal", 1328 | "min": 0, 1329 | "description_tooltip": null, 1330 | "_model_module": "@jupyter-widgets/controls", 1331 | "layout": "IPY_MODEL_3773b6344c884959bf2f3628ce8f9944" 1332 | } 1333 | }, 1334 | "57a3c88f08ee454c80c1fb4d9c141b5a": { 1335 | "model_module": "@jupyter-widgets/controls", 1336 | "model_name": "HTMLModel", 1337 | "state": { 1338 | "_view_name": "HTMLView", 1339 | "style": "IPY_MODEL_e236032e2b6e4642a2fce6c26578a34c", 1340 | "_dom_classes": [], 1341 | "description": "", 1342 | "_model_name": "HTMLModel", 1343 | "placeholder": "​", 1344 | "_view_module": "@jupyter-widgets/controls", 1345 | "_model_module_version": "1.5.0", 1346 | "value": " 61/61 [00:00<00:00, 814.92it/s]", 1347 | "_view_count": null, 1348 | "_view_module_version": "1.5.0", 1349 | "description_tooltip": null, 1350 | "_model_module": "@jupyter-widgets/controls", 1351 | "layout": "IPY_MODEL_cae6130e1ea34100948ab766b01c9e9e" 1352 | } 1353 | }, 1354 | "e7a53161b42646bca93a11a9185d05fe": { 1355 | "model_module": "@jupyter-widgets/controls", 1356 | "model_name": "ProgressStyleModel", 1357 | "state": { 1358 | "_view_name": "StyleView", 1359 | "_model_name": "ProgressStyleModel", 1360 | "description_width": "initial", 
1361 | "_view_module": "@jupyter-widgets/base", 1362 | "_model_module_version": "1.5.0", 1363 | "_view_count": null, 1364 | "_view_module_version": "1.2.0", 1365 | "bar_color": null, 1366 | "_model_module": "@jupyter-widgets/controls" 1367 | } 1368 | }, 1369 | "3773b6344c884959bf2f3628ce8f9944": { 1370 | "model_module": "@jupyter-widgets/base", 1371 | "model_name": "LayoutModel", 1372 | "state": { 1373 | "_view_name": "LayoutView", 1374 | "grid_template_rows": null, 1375 | "right": null, 1376 | "justify_content": null, 1377 | "_view_module": "@jupyter-widgets/base", 1378 | "overflow": null, 1379 | "_model_module_version": "1.2.0", 1380 | "_view_count": null, 1381 | "flex_flow": null, 1382 | "width": null, 1383 | "min_width": null, 1384 | "border": null, 1385 | "align_items": null, 1386 | "bottom": null, 1387 | "_model_module": "@jupyter-widgets/base", 1388 | "top": null, 1389 | "grid_column": null, 1390 | "overflow_y": null, 1391 | "overflow_x": null, 1392 | "grid_auto_flow": null, 1393 | "grid_area": null, 1394 | "grid_template_columns": null, 1395 | "flex": null, 1396 | "_model_name": "LayoutModel", 1397 | "justify_items": null, 1398 | "grid_row": null, 1399 | "max_height": null, 1400 | "align_content": null, 1401 | "visibility": null, 1402 | "align_self": null, 1403 | "height": null, 1404 | "min_height": null, 1405 | "padding": null, 1406 | "grid_auto_rows": null, 1407 | "grid_gap": null, 1408 | "max_width": null, 1409 | "order": null, 1410 | "_view_module_version": "1.2.0", 1411 | "grid_template_areas": null, 1412 | "object_position": null, 1413 | "object_fit": null, 1414 | "grid_auto_columns": null, 1415 | "margin": null, 1416 | "display": null, 1417 | "left": null 1418 | } 1419 | }, 1420 | "e236032e2b6e4642a2fce6c26578a34c": { 1421 | "model_module": "@jupyter-widgets/controls", 1422 | "model_name": "DescriptionStyleModel", 1423 | "state": { 1424 | "_view_name": "StyleView", 1425 | "_model_name": "DescriptionStyleModel", 1426 | "description_width": "", 1427 | 
"_view_module": "@jupyter-widgets/base", 1428 | "_model_module_version": "1.5.0", 1429 | "_view_count": null, 1430 | "_view_module_version": "1.2.0", 1431 | "_model_module": "@jupyter-widgets/controls" 1432 | } 1433 | }, 1434 | "cae6130e1ea34100948ab766b01c9e9e": { 1435 | "model_module": "@jupyter-widgets/base", 1436 | "model_name": "LayoutModel", 1437 | "state": { 1438 | "_view_name": "LayoutView", 1439 | "grid_template_rows": null, 1440 | "right": null, 1441 | "justify_content": null, 1442 | "_view_module": "@jupyter-widgets/base", 1443 | "overflow": null, 1444 | "_model_module_version": "1.2.0", 1445 | "_view_count": null, 1446 | "flex_flow": null, 1447 | "width": null, 1448 | "min_width": null, 1449 | "border": null, 1450 | "align_items": null, 1451 | "bottom": null, 1452 | "_model_module": "@jupyter-widgets/base", 1453 | "top": null, 1454 | "grid_column": null, 1455 | "overflow_y": null, 1456 | "overflow_x": null, 1457 | "grid_auto_flow": null, 1458 | "grid_area": null, 1459 | "grid_template_columns": null, 1460 | "flex": null, 1461 | "_model_name": "LayoutModel", 1462 | "justify_items": null, 1463 | "grid_row": null, 1464 | "max_height": null, 1465 | "align_content": null, 1466 | "visibility": null, 1467 | "align_self": null, 1468 | "height": null, 1469 | "min_height": null, 1470 | "padding": null, 1471 | "grid_auto_rows": null, 1472 | "grid_gap": null, 1473 | "max_width": null, 1474 | "order": null, 1475 | "_view_module_version": "1.2.0", 1476 | "grid_template_areas": null, 1477 | "object_position": null, 1478 | "object_fit": null, 1479 | "grid_auto_columns": null, 1480 | "margin": null, 1481 | "display": null, 1482 | "left": null 1483 | } 1484 | }, 1485 | "6b2dfb71403040fd9c5d7330cfb8dcf1": { 1486 | "model_module": "@jupyter-widgets/controls", 1487 | "model_name": "HBoxModel", 1488 | "state": { 1489 | "_view_name": "HBoxView", 1490 | "_dom_classes": [], 1491 | "_model_name": "HBoxModel", 1492 | "_view_module": "@jupyter-widgets/controls", 1493 | 
"_model_module_version": "1.5.0", 1494 | "_view_count": null, 1495 | "_view_module_version": "1.5.0", 1496 | "box_style": "", 1497 | "layout": "IPY_MODEL_7017828f8b2546a1a764babd73b5dfe5", 1498 | "_model_module": "@jupyter-widgets/controls", 1499 | "children": [ 1500 | "IPY_MODEL_465f85d340e448f7bbd26086942ca570", 1501 | "IPY_MODEL_7b51d558769947a2a87238aee9d5fb22" 1502 | ] 1503 | } 1504 | }, 1505 | "7017828f8b2546a1a764babd73b5dfe5": { 1506 | "model_module": "@jupyter-widgets/base", 1507 | "model_name": "LayoutModel", 1508 | "state": { 1509 | "_view_name": "LayoutView", 1510 | "grid_template_rows": null, 1511 | "right": null, 1512 | "justify_content": null, 1513 | "_view_module": "@jupyter-widgets/base", 1514 | "overflow": null, 1515 | "_model_module_version": "1.2.0", 1516 | "_view_count": null, 1517 | "flex_flow": null, 1518 | "width": null, 1519 | "min_width": null, 1520 | "border": null, 1521 | "align_items": null, 1522 | "bottom": null, 1523 | "_model_module": "@jupyter-widgets/base", 1524 | "top": null, 1525 | "grid_column": null, 1526 | "overflow_y": null, 1527 | "overflow_x": null, 1528 | "grid_auto_flow": null, 1529 | "grid_area": null, 1530 | "grid_template_columns": null, 1531 | "flex": null, 1532 | "_model_name": "LayoutModel", 1533 | "justify_items": null, 1534 | "grid_row": null, 1535 | "max_height": null, 1536 | "align_content": null, 1537 | "visibility": null, 1538 | "align_self": null, 1539 | "height": null, 1540 | "min_height": null, 1541 | "padding": null, 1542 | "grid_auto_rows": null, 1543 | "grid_gap": null, 1544 | "max_width": null, 1545 | "order": null, 1546 | "_view_module_version": "1.2.0", 1547 | "grid_template_areas": null, 1548 | "object_position": null, 1549 | "object_fit": null, 1550 | "grid_auto_columns": null, 1551 | "margin": null, 1552 | "display": null, 1553 | "left": null 1554 | } 1555 | }, 1556 | "465f85d340e448f7bbd26086942ca570": { 1557 | "model_module": "@jupyter-widgets/controls", 1558 | "model_name": "FloatProgressModel", 
1559 | "state": { 1560 | "_view_name": "ProgressView", 1561 | "style": "IPY_MODEL_ec524a1a5b68414a9c28978877d6dde2", 1562 | "_dom_classes": [], 1563 | "description": "100%", 1564 | "_model_name": "FloatProgressModel", 1565 | "bar_style": "success", 1566 | "max": 34, 1567 | "_view_module": "@jupyter-widgets/controls", 1568 | "_model_module_version": "1.5.0", 1569 | "value": 34, 1570 | "_view_count": null, 1571 | "_view_module_version": "1.5.0", 1572 | "orientation": "horizontal", 1573 | "min": 0, 1574 | "description_tooltip": null, 1575 | "_model_module": "@jupyter-widgets/controls", 1576 | "layout": "IPY_MODEL_a8a072789a5b442a9d0e6fe9a3b2180f" 1577 | } 1578 | }, 1579 | "7b51d558769947a2a87238aee9d5fb22": { 1580 | "model_module": "@jupyter-widgets/controls", 1581 | "model_name": "HTMLModel", 1582 | "state": { 1583 | "_view_name": "HTMLView", 1584 | "style": "IPY_MODEL_871c6e0d29f84ec1ba484ebe2f79d862", 1585 | "_dom_classes": [], 1586 | "description": "", 1587 | "_model_name": "HTMLModel", 1588 | "placeholder": "​", 1589 | "_view_module": "@jupyter-widgets/controls", 1590 | "_model_module_version": "1.5.0", 1591 | "value": " 34/34 [00:00<00:00, 429.89it/s]", 1592 | "_view_count": null, 1593 | "_view_module_version": "1.5.0", 1594 | "description_tooltip": null, 1595 | "_model_module": "@jupyter-widgets/controls", 1596 | "layout": "IPY_MODEL_e2152b0975aa40c98680b5d13625dcb2" 1597 | } 1598 | }, 1599 | "ec524a1a5b68414a9c28978877d6dde2": { 1600 | "model_module": "@jupyter-widgets/controls", 1601 | "model_name": "ProgressStyleModel", 1602 | "state": { 1603 | "_view_name": "StyleView", 1604 | "_model_name": "ProgressStyleModel", 1605 | "description_width": "initial", 1606 | "_view_module": "@jupyter-widgets/base", 1607 | "_model_module_version": "1.5.0", 1608 | "_view_count": null, 1609 | "_view_module_version": "1.2.0", 1610 | "bar_color": null, 1611 | "_model_module": "@jupyter-widgets/controls" 1612 | } 1613 | }, 1614 | "a8a072789a5b442a9d0e6fe9a3b2180f": { 1615 | 
"model_module": "@jupyter-widgets/base", 1616 | "model_name": "LayoutModel", 1617 | "state": { 1618 | "_view_name": "LayoutView", 1619 | "grid_template_rows": null, 1620 | "right": null, 1621 | "justify_content": null, 1622 | "_view_module": "@jupyter-widgets/base", 1623 | "overflow": null, 1624 | "_model_module_version": "1.2.0", 1625 | "_view_count": null, 1626 | "flex_flow": null, 1627 | "width": null, 1628 | "min_width": null, 1629 | "border": null, 1630 | "align_items": null, 1631 | "bottom": null, 1632 | "_model_module": "@jupyter-widgets/base", 1633 | "top": null, 1634 | "grid_column": null, 1635 | "overflow_y": null, 1636 | "overflow_x": null, 1637 | "grid_auto_flow": null, 1638 | "grid_area": null, 1639 | "grid_template_columns": null, 1640 | "flex": null, 1641 | "_model_name": "LayoutModel", 1642 | "justify_items": null, 1643 | "grid_row": null, 1644 | "max_height": null, 1645 | "align_content": null, 1646 | "visibility": null, 1647 | "align_self": null, 1648 | "height": null, 1649 | "min_height": null, 1650 | "padding": null, 1651 | "grid_auto_rows": null, 1652 | "grid_gap": null, 1653 | "max_width": null, 1654 | "order": null, 1655 | "_view_module_version": "1.2.0", 1656 | "grid_template_areas": null, 1657 | "object_position": null, 1658 | "object_fit": null, 1659 | "grid_auto_columns": null, 1660 | "margin": null, 1661 | "display": null, 1662 | "left": null 1663 | } 1664 | }, 1665 | "871c6e0d29f84ec1ba484ebe2f79d862": { 1666 | "model_module": "@jupyter-widgets/controls", 1667 | "model_name": "DescriptionStyleModel", 1668 | "state": { 1669 | "_view_name": "StyleView", 1670 | "_model_name": "DescriptionStyleModel", 1671 | "description_width": "", 1672 | "_view_module": "@jupyter-widgets/base", 1673 | "_model_module_version": "1.5.0", 1674 | "_view_count": null, 1675 | "_view_module_version": "1.2.0", 1676 | "_model_module": "@jupyter-widgets/controls" 1677 | } 1678 | }, 1679 | "e2152b0975aa40c98680b5d13625dcb2": { 1680 | "model_module": 
"@jupyter-widgets/base", 1681 | "model_name": "LayoutModel", 1682 | "state": { 1683 | "_view_name": "LayoutView", 1684 | "grid_template_rows": null, 1685 | "right": null, 1686 | "justify_content": null, 1687 | "_view_module": "@jupyter-widgets/base", 1688 | "overflow": null, 1689 | "_model_module_version": "1.2.0", 1690 | "_view_count": null, 1691 | "flex_flow": null, 1692 | "width": null, 1693 | "min_width": null, 1694 | "border": null, 1695 | "align_items": null, 1696 | "bottom": null, 1697 | "_model_module": "@jupyter-widgets/base", 1698 | "top": null, 1699 | "grid_column": null, 1700 | "overflow_y": null, 1701 | "overflow_x": null, 1702 | "grid_auto_flow": null, 1703 | "grid_area": null, 1704 | "grid_template_columns": null, 1705 | "flex": null, 1706 | "_model_name": "LayoutModel", 1707 | "justify_items": null, 1708 | "grid_row": null, 1709 | "max_height": null, 1710 | "align_content": null, 1711 | "visibility": null, 1712 | "align_self": null, 1713 | "height": null, 1714 | "min_height": null, 1715 | "padding": null, 1716 | "grid_auto_rows": null, 1717 | "grid_gap": null, 1718 | "max_width": null, 1719 | "order": null, 1720 | "_view_module_version": "1.2.0", 1721 | "grid_template_areas": null, 1722 | "object_position": null, 1723 | "object_fit": null, 1724 | "grid_auto_columns": null, 1725 | "margin": null, 1726 | "display": null, 1727 | "left": null 1728 | } 1729 | }, 1730 | "dc4d2ffc5a464b698c1785b25cd11ab1": { 1731 | "model_module": "@jupyter-widgets/controls", 1732 | "model_name": "HBoxModel", 1733 | "state": { 1734 | "_view_name": "HBoxView", 1735 | "_dom_classes": [], 1736 | "_model_name": "HBoxModel", 1737 | "_view_module": "@jupyter-widgets/controls", 1738 | "_model_module_version": "1.5.0", 1739 | "_view_count": null, 1740 | "_view_module_version": "1.5.0", 1741 | "box_style": "", 1742 | "layout": "IPY_MODEL_54e83774805a46819168dba2310e6b3f", 1743 | "_model_module": "@jupyter-widgets/controls", 1744 | "children": [ 1745 | 
"IPY_MODEL_8737929329f04c8bac02cdbba2972459", 1746 | "IPY_MODEL_6291797e13844ec1b0c57d6924aa4abe" 1747 | ] 1748 | } 1749 | }, 1750 | "54e83774805a46819168dba2310e6b3f": { 1751 | "model_module": "@jupyter-widgets/base", 1752 | "model_name": "LayoutModel", 1753 | "state": { 1754 | "_view_name": "LayoutView", 1755 | "grid_template_rows": null, 1756 | "right": null, 1757 | "justify_content": null, 1758 | "_view_module": "@jupyter-widgets/base", 1759 | "overflow": null, 1760 | "_model_module_version": "1.2.0", 1761 | "_view_count": null, 1762 | "flex_flow": null, 1763 | "width": null, 1764 | "min_width": null, 1765 | "border": null, 1766 | "align_items": null, 1767 | "bottom": null, 1768 | "_model_module": "@jupyter-widgets/base", 1769 | "top": null, 1770 | "grid_column": null, 1771 | "overflow_y": null, 1772 | "overflow_x": null, 1773 | "grid_auto_flow": null, 1774 | "grid_area": null, 1775 | "grid_template_columns": null, 1776 | "flex": null, 1777 | "_model_name": "LayoutModel", 1778 | "justify_items": null, 1779 | "grid_row": null, 1780 | "max_height": null, 1781 | "align_content": null, 1782 | "visibility": null, 1783 | "align_self": null, 1784 | "height": null, 1785 | "min_height": null, 1786 | "padding": null, 1787 | "grid_auto_rows": null, 1788 | "grid_gap": null, 1789 | "max_width": null, 1790 | "order": null, 1791 | "_view_module_version": "1.2.0", 1792 | "grid_template_areas": null, 1793 | "object_position": null, 1794 | "object_fit": null, 1795 | "grid_auto_columns": null, 1796 | "margin": null, 1797 | "display": null, 1798 | "left": null 1799 | } 1800 | }, 1801 | "8737929329f04c8bac02cdbba2972459": { 1802 | "model_module": "@jupyter-widgets/controls", 1803 | "model_name": "FloatProgressModel", 1804 | "state": { 1805 | "_view_name": "ProgressView", 1806 | "style": "IPY_MODEL_f77a17ba2a914bbe8a788d8bf9c80f9f", 1807 | "_dom_classes": [], 1808 | "description": "100%", 1809 | "_model_name": "FloatProgressModel", 1810 | "bar_style": "success", 1811 | "max": 55, 
1812 | "_view_module": "@jupyter-widgets/controls", 1813 | "_model_module_version": "1.5.0", 1814 | "value": 55, 1815 | "_view_count": null, 1816 | "_view_module_version": "1.5.0", 1817 | "orientation": "horizontal", 1818 | "min": 0, 1819 | "description_tooltip": null, 1820 | "_model_module": "@jupyter-widgets/controls", 1821 | "layout": "IPY_MODEL_7eb9274999a84942a6ba5e3a996c07e2" 1822 | } 1823 | }, 1824 | "6291797e13844ec1b0c57d6924aa4abe": { 1825 | "model_module": "@jupyter-widgets/controls", 1826 | "model_name": "HTMLModel", 1827 | "state": { 1828 | "_view_name": "HTMLView", 1829 | "style": "IPY_MODEL_9005bad34134478daf474e5f4169e342", 1830 | "_dom_classes": [], 1831 | "description": "", 1832 | "_model_name": "HTMLModel", 1833 | "placeholder": "​", 1834 | "_view_module": "@jupyter-widgets/controls", 1835 | "_model_module_version": "1.5.0", 1836 | "value": " 55/55 [00:00<00:00, 740.07it/s]", 1837 | "_view_count": null, 1838 | "_view_module_version": "1.5.0", 1839 | "description_tooltip": null, 1840 | "_model_module": "@jupyter-widgets/controls", 1841 | "layout": "IPY_MODEL_978a0538ffa6403fbff119bdbff7031a" 1842 | } 1843 | }, 1844 | "f77a17ba2a914bbe8a788d8bf9c80f9f": { 1845 | "model_module": "@jupyter-widgets/controls", 1846 | "model_name": "ProgressStyleModel", 1847 | "state": { 1848 | "_view_name": "StyleView", 1849 | "_model_name": "ProgressStyleModel", 1850 | "description_width": "initial", 1851 | "_view_module": "@jupyter-widgets/base", 1852 | "_model_module_version": "1.5.0", 1853 | "_view_count": null, 1854 | "_view_module_version": "1.2.0", 1855 | "bar_color": null, 1856 | "_model_module": "@jupyter-widgets/controls" 1857 | } 1858 | }, 1859 | "7eb9274999a84942a6ba5e3a996c07e2": { 1860 | "model_module": "@jupyter-widgets/base", 1861 | "model_name": "LayoutModel", 1862 | "state": { 1863 | "_view_name": "LayoutView", 1864 | "grid_template_rows": null, 1865 | "right": null, 1866 | "justify_content": null, 1867 | "_view_module": "@jupyter-widgets/base", 1868 
| "overflow": null, 1869 | "_model_module_version": "1.2.0", 1870 | "_view_count": null, 1871 | "flex_flow": null, 1872 | "width": null, 1873 | "min_width": null, 1874 | "border": null, 1875 | "align_items": null, 1876 | "bottom": null, 1877 | "_model_module": "@jupyter-widgets/base", 1878 | "top": null, 1879 | "grid_column": null, 1880 | "overflow_y": null, 1881 | "overflow_x": null, 1882 | "grid_auto_flow": null, 1883 | "grid_area": null, 1884 | "grid_template_columns": null, 1885 | "flex": null, 1886 | "_model_name": "LayoutModel", 1887 | "justify_items": null, 1888 | "grid_row": null, 1889 | "max_height": null, 1890 | "align_content": null, 1891 | "visibility": null, 1892 | "align_self": null, 1893 | "height": null, 1894 | "min_height": null, 1895 | "padding": null, 1896 | "grid_auto_rows": null, 1897 | "grid_gap": null, 1898 | "max_width": null, 1899 | "order": null, 1900 | "_view_module_version": "1.2.0", 1901 | "grid_template_areas": null, 1902 | "object_position": null, 1903 | "object_fit": null, 1904 | "grid_auto_columns": null, 1905 | "margin": null, 1906 | "display": null, 1907 | "left": null 1908 | } 1909 | }, 1910 | "9005bad34134478daf474e5f4169e342": { 1911 | "model_module": "@jupyter-widgets/controls", 1912 | "model_name": "DescriptionStyleModel", 1913 | "state": { 1914 | "_view_name": "StyleView", 1915 | "_model_name": "DescriptionStyleModel", 1916 | "description_width": "", 1917 | "_view_module": "@jupyter-widgets/base", 1918 | "_model_module_version": "1.5.0", 1919 | "_view_count": null, 1920 | "_view_module_version": "1.2.0", 1921 | "_model_module": "@jupyter-widgets/controls" 1922 | } 1923 | }, 1924 | "978a0538ffa6403fbff119bdbff7031a": { 1925 | "model_module": "@jupyter-widgets/base", 1926 | "model_name": "LayoutModel", 1927 | "state": { 1928 | "_view_name": "LayoutView", 1929 | "grid_template_rows": null, 1930 | "right": null, 1931 | "justify_content": null, 1932 | "_view_module": "@jupyter-widgets/base", 1933 | "overflow": null, 1934 | 
"_model_module_version": "1.2.0", 1935 | "_view_count": null, 1936 | "flex_flow": null, 1937 | "width": null, 1938 | "min_width": null, 1939 | "border": null, 1940 | "align_items": null, 1941 | "bottom": null, 1942 | "_model_module": "@jupyter-widgets/base", 1943 | "top": null, 1944 | "grid_column": null, 1945 | "overflow_y": null, 1946 | "overflow_x": null, 1947 | "grid_auto_flow": null, 1948 | "grid_area": null, 1949 | "grid_template_columns": null, 1950 | "flex": null, 1951 | "_model_name": "LayoutModel", 1952 | "justify_items": null, 1953 | "grid_row": null, 1954 | "max_height": null, 1955 | "align_content": null, 1956 | "visibility": null, 1957 | "align_self": null, 1958 | "height": null, 1959 | "min_height": null, 1960 | "padding": null, 1961 | "grid_auto_rows": null, 1962 | "grid_gap": null, 1963 | "max_width": null, 1964 | "order": null, 1965 | "_view_module_version": "1.2.0", 1966 | "grid_template_areas": null, 1967 | "object_position": null, 1968 | "object_fit": null, 1969 | "grid_auto_columns": null, 1970 | "margin": null, 1971 | "display": null, 1972 | "left": null 1973 | } 1974 | }, 1975 | "828a3728072240838f92e825c249f825": { 1976 | "model_module": "@jupyter-widgets/controls", 1977 | "model_name": "HBoxModel", 1978 | "state": { 1979 | "_view_name": "HBoxView", 1980 | "_dom_classes": [], 1981 | "_model_name": "HBoxModel", 1982 | "_view_module": "@jupyter-widgets/controls", 1983 | "_model_module_version": "1.5.0", 1984 | "_view_count": null, 1985 | "_view_module_version": "1.5.0", 1986 | "box_style": "", 1987 | "layout": "IPY_MODEL_e4a915e72364487e884681152f8912a0", 1988 | "_model_module": "@jupyter-widgets/controls", 1989 | "children": [ 1990 | "IPY_MODEL_b2063d614a854ff59c4a38c7cae18bf3", 1991 | "IPY_MODEL_2275807e7b9e4e629c57cf54dcd22b9b" 1992 | ] 1993 | } 1994 | }, 1995 | "e4a915e72364487e884681152f8912a0": { 1996 | "model_module": "@jupyter-widgets/base", 1997 | "model_name": "LayoutModel", 1998 | "state": { 1999 | "_view_name": "LayoutView", 2000 
| "grid_template_rows": null, 2001 | "right": null, 2002 | "justify_content": null, 2003 | "_view_module": "@jupyter-widgets/base", 2004 | "overflow": null, 2005 | "_model_module_version": "1.2.0", 2006 | "_view_count": null, 2007 | "flex_flow": null, 2008 | "width": null, 2009 | "min_width": null, 2010 | "border": null, 2011 | "align_items": null, 2012 | "bottom": null, 2013 | "_model_module": "@jupyter-widgets/base", 2014 | "top": null, 2015 | "grid_column": null, 2016 | "overflow_y": null, 2017 | "overflow_x": null, 2018 | "grid_auto_flow": null, 2019 | "grid_area": null, 2020 | "grid_template_columns": null, 2021 | "flex": null, 2022 | "_model_name": "LayoutModel", 2023 | "justify_items": null, 2024 | "grid_row": null, 2025 | "max_height": null, 2026 | "align_content": null, 2027 | "visibility": null, 2028 | "align_self": null, 2029 | "height": null, 2030 | "min_height": null, 2031 | "padding": null, 2032 | "grid_auto_rows": null, 2033 | "grid_gap": null, 2034 | "max_width": null, 2035 | "order": null, 2036 | "_view_module_version": "1.2.0", 2037 | "grid_template_areas": null, 2038 | "object_position": null, 2039 | "object_fit": null, 2040 | "grid_auto_columns": null, 2041 | "margin": null, 2042 | "display": null, 2043 | "left": null 2044 | } 2045 | }, 2046 | "b2063d614a854ff59c4a38c7cae18bf3": { 2047 | "model_module": "@jupyter-widgets/controls", 2048 | "model_name": "FloatProgressModel", 2049 | "state": { 2050 | "_view_name": "ProgressView", 2051 | "style": "IPY_MODEL_338629d584ae4454b598fe5c303287d3", 2052 | "_dom_classes": [], 2053 | "description": "100%", 2054 | "_model_name": "FloatProgressModel", 2055 | "bar_style": "success", 2056 | "max": 64, 2057 | "_view_module": "@jupyter-widgets/controls", 2058 | "_model_module_version": "1.5.0", 2059 | "value": 64, 2060 | "_view_count": null, 2061 | "_view_module_version": "1.5.0", 2062 | "orientation": "horizontal", 2063 | "min": 0, 2064 | "description_tooltip": null, 2065 | "_model_module": 
"@jupyter-widgets/controls", 2066 | "layout": "IPY_MODEL_99b46af8269b45e7b1329f858ea1f3a5" 2067 | } 2068 | }, 2069 | "2275807e7b9e4e629c57cf54dcd22b9b": { 2070 | "model_module": "@jupyter-widgets/controls", 2071 | "model_name": "HTMLModel", 2072 | "state": { 2073 | "_view_name": "HTMLView", 2074 | "style": "IPY_MODEL_3e5184ff5154430bb51cc3c375d75bf4", 2075 | "_dom_classes": [], 2076 | "description": "", 2077 | "_model_name": "HTMLModel", 2078 | "placeholder": "​", 2079 | "_view_module": "@jupyter-widgets/controls", 2080 | "_model_module_version": "1.5.0", 2081 | "value": " 64/64 [00:00<00:00, 787.87it/s]", 2082 | "_view_count": null, 2083 | "_view_module_version": "1.5.0", 2084 | "description_tooltip": null, 2085 | "_model_module": "@jupyter-widgets/controls", 2086 | "layout": "IPY_MODEL_f6c5a73d767e4b978c61bab1eea29e24" 2087 | } 2088 | }, 2089 | "338629d584ae4454b598fe5c303287d3": { 2090 | "model_module": "@jupyter-widgets/controls", 2091 | "model_name": "ProgressStyleModel", 2092 | "state": { 2093 | "_view_name": "StyleView", 2094 | "_model_name": "ProgressStyleModel", 2095 | "description_width": "initial", 2096 | "_view_module": "@jupyter-widgets/base", 2097 | "_model_module_version": "1.5.0", 2098 | "_view_count": null, 2099 | "_view_module_version": "1.2.0", 2100 | "bar_color": null, 2101 | "_model_module": "@jupyter-widgets/controls" 2102 | } 2103 | }, 2104 | "99b46af8269b45e7b1329f858ea1f3a5": { 2105 | "model_module": "@jupyter-widgets/base", 2106 | "model_name": "LayoutModel", 2107 | "state": { 2108 | "_view_name": "LayoutView", 2109 | "grid_template_rows": null, 2110 | "right": null, 2111 | "justify_content": null, 2112 | "_view_module": "@jupyter-widgets/base", 2113 | "overflow": null, 2114 | "_model_module_version": "1.2.0", 2115 | "_view_count": null, 2116 | "flex_flow": null, 2117 | "width": null, 2118 | "min_width": null, 2119 | "border": null, 2120 | "align_items": null, 2121 | "bottom": null, 2122 | "_model_module": "@jupyter-widgets/base", 2123 | 
"top": null, 2124 | "grid_column": null, 2125 | "overflow_y": null, 2126 | "overflow_x": null, 2127 | "grid_auto_flow": null, 2128 | "grid_area": null, 2129 | "grid_template_columns": null, 2130 | "flex": null, 2131 | "_model_name": "LayoutModel", 2132 | "justify_items": null, 2133 | "grid_row": null, 2134 | "max_height": null, 2135 | "align_content": null, 2136 | "visibility": null, 2137 | "align_self": null, 2138 | "height": null, 2139 | "min_height": null, 2140 | "padding": null, 2141 | "grid_auto_rows": null, 2142 | "grid_gap": null, 2143 | "max_width": null, 2144 | "order": null, 2145 | "_view_module_version": "1.2.0", 2146 | "grid_template_areas": null, 2147 | "object_position": null, 2148 | "object_fit": null, 2149 | "grid_auto_columns": null, 2150 | "margin": null, 2151 | "display": null, 2152 | "left": null 2153 | } 2154 | }, 2155 | "3e5184ff5154430bb51cc3c375d75bf4": { 2156 | "model_module": "@jupyter-widgets/controls", 2157 | "model_name": "DescriptionStyleModel", 2158 | "state": { 2159 | "_view_name": "StyleView", 2160 | "_model_name": "DescriptionStyleModel", 2161 | "description_width": "", 2162 | "_view_module": "@jupyter-widgets/base", 2163 | "_model_module_version": "1.5.0", 2164 | "_view_count": null, 2165 | "_view_module_version": "1.2.0", 2166 | "_model_module": "@jupyter-widgets/controls" 2167 | } 2168 | }, 2169 | "f6c5a73d767e4b978c61bab1eea29e24": { 2170 | "model_module": "@jupyter-widgets/base", 2171 | "model_name": "LayoutModel", 2172 | "state": { 2173 | "_view_name": "LayoutView", 2174 | "grid_template_rows": null, 2175 | "right": null, 2176 | "justify_content": null, 2177 | "_view_module": "@jupyter-widgets/base", 2178 | "overflow": null, 2179 | "_model_module_version": "1.2.0", 2180 | "_view_count": null, 2181 | "flex_flow": null, 2182 | "width": null, 2183 | "min_width": null, 2184 | "border": null, 2185 | "align_items": null, 2186 | "bottom": null, 2187 | "_model_module": "@jupyter-widgets/base", 2188 | "top": null, 2189 | 
"grid_column": null, 2190 | "overflow_y": null, 2191 | "overflow_x": null, 2192 | "grid_auto_flow": null, 2193 | "grid_area": null, 2194 | "grid_template_columns": null, 2195 | "flex": null, 2196 | "_model_name": "LayoutModel", 2197 | "justify_items": null, 2198 | "grid_row": null, 2199 | "max_height": null, 2200 | "align_content": null, 2201 | "visibility": null, 2202 | "align_self": null, 2203 | "height": null, 2204 | "min_height": null, 2205 | "padding": null, 2206 | "grid_auto_rows": null, 2207 | "grid_gap": null, 2208 | "max_width": null, 2209 | "order": null, 2210 | "_view_module_version": "1.2.0", 2211 | "grid_template_areas": null, 2212 | "object_position": null, 2213 | "object_fit": null, 2214 | "grid_auto_columns": null, 2215 | "margin": null, 2216 | "display": null, 2217 | "left": null 2218 | } 2219 | } 2220 | } 2221 | } 2222 | }, 2223 | "cells": [ 2224 | { 2225 | "cell_type": "code", 2226 | "metadata": { 2227 | "id": "tT-shxJZgqSH" 2228 | }, 2229 | "source": [ 2230 | "# !pip install unrar\n", 2231 | "# !unrar x '/content/drive/MyDrive/Fatepura_satellite_dataset_800_altitude/800_dataset_with_semantic_segmentation.rar'\n", 2232 | "# !unrar x '/content/drive/MyDrive/1000_dataset_with_semantic_segmentation.rar'\n", 2233 | "# !unrar x '/content/drive/MyDrive/600_dataset_with_semantic_segmentation.rar'\n", 2234 | "# !unrar x '/content/drive/MyDrive/dataset_with_semantic_segmentation.rar'" 2235 | ], 2236 | "execution_count": 25, 2237 | "outputs": [] 2238 | }, 2239 | { 2240 | "cell_type": "code", 2241 | "metadata": { 2242 | "id": "_m8n-0qChoy-" 2243 | }, 2244 | "source": [ 2245 | "!mkdir dataset\n", 2246 | "!mkdir dataset/train_images\n", 2247 | "!mkdir dataset/train_segmentation" 2248 | ], 2249 | "execution_count": 2, 2250 | "outputs": [] 2251 | }, 2252 | { 2253 | "cell_type": "code", 2254 | "metadata": { 2255 | "id": "2bkRCIlBnePj" 2256 | }, 2257 | "source": [ 2258 | "from glob import glob\n", 2259 | "import os\n", 2260 | "from tqdm import notebook" 2261 | ], 
2262 | "execution_count": 3, 2263 | "outputs": [] 2264 | }, 2265 | { 2266 | "cell_type": "code", 2267 | "metadata": { 2268 | "id": "x41Ttazvo_Qd" 2269 | }, 2270 | "source": [ 2271 | "train_paths = [\n", 2272 | " '/content/800_dataset_with_semantic_segmentation/train_images/*',\n", 2273 | " '/content/1000_dataset_with_semantic_segmentation/train_images/*', \n", 2274 | " '/content/600_dataset_with_semantic_segmentation/train_images/*', \n", 2275 | " '/content/dataset_with_semantic_segmentation/train_images/*', \n", 2276 | "]\n", 2277 | "segment_paths = [\n", 2278 | " '/content/800_dataset_with_semantic_segmentation/train_segmentation/*',\n", 2279 | " '/content/1000_dataset_with_semantic_segmentation/train_segmentation/*', \n", 2280 | " '/content/600_dataset_with_semantic_segmentation/train_segmentation/*', \n", 2281 | " '/content/dataset_with_semantic_segmentation/train_segmentation/*', \n", 2282 | "]\n" 2283 | ], 2284 | "execution_count": 18, 2285 | "outputs": [] 2286 | }, 2287 | { 2288 | "cell_type": "code", 2289 | "metadata": { 2290 | "colab": { 2291 | "base_uri": "https://localhost:8080/" 2292 | }, 2293 | "id": "IpzW9v-sp_Na", 2294 | "outputId": "7d3e2034-0572-4a07-a4b6-09a7c922128d" 2295 | }, 2296 | "source": [ 2297 | "# Remove the Images from the folder\n", 2298 | "# !rm /content/dataset/train_images/*\n", 2299 | "# !rm /content/dataset/train_segmentation/*" 2300 | ], 2301 | "execution_count": 23, 2302 | "outputs": [ 2303 | { 2304 | "output_type": "stream", 2305 | "text": [ 2306 | "rm: cannot remove '/content/dataset/train_segmentation/*': No such file or directory\n" 2307 | ], 2308 | "name": "stdout" 2309 | } 2310 | ] 2311 | }, 2312 | { 2313 | "cell_type": "code", 2314 | "metadata": { 2315 | "colab": { 2316 | "base_uri": "https://localhost:8080/", 2317 | "height": 66, 2318 | "referenced_widgets": [ 2319 | "c8c3674c0f124c0fa705625874ddb10c", 2320 | "d213c19dd0ec45738a83f8c155ff66b6", 2321 | "cb4ce569c58b4143a5e6660bbf87c932", 2322 | 
"32503afa11b94b5b99bc8698fee0ea0f", 2323 | "83b8addb0b6340f7bc3c4cd1d16c641b", 2324 | "aeb7d01ae7dd4637944f191d2783c46b", 2325 | "aa989ac267a247829f6a817a0ae0de3e", 2326 | "8c4e12c544d441e4921933c72984b2b2" 2327 | ] 2328 | }, 2329 | "id": "WY2WoC8pzyb8", 2330 | "outputId": "10b397f1-af28-4f7e-9e24-e9bcb5d9ec90" 2331 | }, 2332 | "source": [ 2333 | "import os\n", 2334 | "# Function to rename multiple files\n", 2335 | "def main():\n", 2336 | " i = 0\n", 2337 | " path=\"/content/dataset_with_semantic_segmentation/train_segmentation/\"\n", 2338 | " # dest = \"/content/dataset/train_images/\"\n", 2339 | " for filename in notebook.tqdm(os.listdir(path)):\n", 2340 | " my_dest = \"normal_segment\"+str(i) + \".png\"\n", 2341 | " my_source = path + filename\n", 2342 | " my_dest = path + my_dest\n", 2343 | " # rename() function will\n", 2344 | " # rename all the files\n", 2345 | " os.rename(my_source, my_dest)\n", 2346 | " i += 1\n", 2347 | "# Driver Code\n", 2348 | "if __name__ == '__main__':\n", 2349 | " # Calling main() function\n", 2350 | " main()" 2351 | ], 2352 | "execution_count": 17, 2353 | "outputs": [ 2354 | { 2355 | "output_type": "display_data", 2356 | "data": { 2357 | "application/vnd.jupyter.widget-view+json": { 2358 | "model_id": "c8c3674c0f124c0fa705625874ddb10c", 2359 | "version_minor": 0, 2360 | "version_major": 2 2361 | }, 2362 | "text/plain": [ 2363 | "HBox(children=(FloatProgress(value=0.0, max=64.0), HTML(value='')))" 2364 | ] 2365 | }, 2366 | "metadata": { 2367 | "tags": [] 2368 | } 2369 | }, 2370 | { 2371 | "output_type": "stream", 2372 | "text": [ 2373 | "\n" 2374 | ], 2375 | "name": "stdout" 2376 | } 2377 | ] 2378 | }, 2379 | { 2380 | "cell_type": "code", 2381 | "metadata": { 2382 | "colab": { 2383 | "base_uri": "https://localhost:8080/" 2384 | }, 2385 | "id": "8m5Ky0oK0Ocg", 2386 | "outputId": "95d1cce5-6d5f-46ff-b195-a8952540c412" 2387 | }, 2388 | "source": [ 2389 | "len(os.listdir('/content/dataset/train_images/'))" 2390 | ], 2391 | 
"execution_count": 22, 2392 | "outputs": [ 2393 | { 2394 | "output_type": "execute_result", 2395 | "data": { 2396 | "text/plain": [ 2397 | "214" 2398 | ] 2399 | }, 2400 | "metadata": { 2401 | "tags": [] 2402 | }, 2403 | "execution_count": 22 2404 | } 2405 | ] 2406 | }, 2407 | { 2408 | "cell_type": "code", 2409 | "metadata": { 2410 | "colab": { 2411 | "base_uri": "https://localhost:8080/", 2412 | "height": 443, 2413 | "referenced_widgets": [ 2414 | "f254f52e3bac4dca8c6072d42eecd5d4", 2415 | "2b9d198308e84313a2863bf36ed50d62", 2416 | "5da8a59317454449bb20734a316c7642", 2417 | "0f5081a132864e5a9cf860905525cae5", 2418 | "dc08a4f6ce0741c7b00f4960edca931c", 2419 | "3af97648993b4333b326415f407a7032", 2420 | "15f42a5c573842128eff45fb7ebac60c", 2421 | "43e58d608b284788aaab234ea0fcbc40", 2422 | "c6f2dc82e3974349b4db34ac81538a12", 2423 | "ed648b306e794f089451a9ba57919c10", 2424 | "7bb1aceee6cc49b69782fc62e0d927da", 2425 | "cfb0550c2fde4580bb9cfe94212aa93b", 2426 | "5b825e82b335419184c0bd35bf28f19b", 2427 | "01c3909d9114491280fd4d2bbd667af7", 2428 | "38ee1f124ca1437c9d3039e1a48b23e9", 2429 | "eff9e5f566004cd2974533cdd831c6b9", 2430 | "817fbe88190e400ba29117b1dd761591", 2431 | "683415a93c9b488fb2626554f2eab577", 2432 | "d9961cd98ca941f5bb34ea9b3c1e4a68", 2433 | "7b866b28bf924342a8348902a909bd28", 2434 | "833ef428df384593a2e765f92d1c2e2c", 2435 | "1dd837dd24644b3c99821d6bebaba435", 2436 | "e328254a5fc34548a3b9accf8aa63e73", 2437 | "142ffb8992c041e6bd3d9773dc60c3e0", 2438 | "8c76d372a8124cf59af79542e607ce13", 2439 | "8411af94f2b84a479026bda52232b623", 2440 | "b8ef75fa15574ca6abe6bd4cd89d7c9a", 2441 | "08994dee859e49468cc361ce0aaae31b", 2442 | "562d0e227d6d49c38da0e5f7434358bf", 2443 | "3e4a5de37bcb427abad7b98d334b6077", 2444 | "e770d160e8cc4165b505f9603e7f96c9", 2445 | "39b05f1c363b479d912a1d49fcdad017", 2446 | "31ccb54c53274c8caebbace0d395d961", 2447 | "b3a1040ceaa94b38ae7609919e52126e", 2448 | "13b79b324f164372a5bc1d516f31168b", 2449 | "57a3c88f08ee454c80c1fb4d9c141b5a", 2450 | 
"e7a53161b42646bca93a11a9185d05fe", 2451 | "3773b6344c884959bf2f3628ce8f9944", 2452 | "e236032e2b6e4642a2fce6c26578a34c", 2453 | "cae6130e1ea34100948ab766b01c9e9e", 2454 | "6b2dfb71403040fd9c5d7330cfb8dcf1", 2455 | "7017828f8b2546a1a764babd73b5dfe5", 2456 | "465f85d340e448f7bbd26086942ca570", 2457 | "7b51d558769947a2a87238aee9d5fb22", 2458 | "ec524a1a5b68414a9c28978877d6dde2", 2459 | "a8a072789a5b442a9d0e6fe9a3b2180f", 2460 | "871c6e0d29f84ec1ba484ebe2f79d862", 2461 | "e2152b0975aa40c98680b5d13625dcb2", 2462 | "dc4d2ffc5a464b698c1785b25cd11ab1", 2463 | "54e83774805a46819168dba2310e6b3f", 2464 | "8737929329f04c8bac02cdbba2972459", 2465 | "6291797e13844ec1b0c57d6924aa4abe", 2466 | "f77a17ba2a914bbe8a788d8bf9c80f9f", 2467 | "7eb9274999a84942a6ba5e3a996c07e2", 2468 | "9005bad34134478daf474e5f4169e342", 2469 | "978a0538ffa6403fbff119bdbff7031a", 2470 | "828a3728072240838f92e825c249f825", 2471 | "e4a915e72364487e884681152f8912a0", 2472 | "b2063d614a854ff59c4a38c7cae18bf3", 2473 | "2275807e7b9e4e629c57cf54dcd22b9b", 2474 | "338629d584ae4454b598fe5c303287d3", 2475 | "99b46af8269b45e7b1329f858ea1f3a5", 2476 | "3e5184ff5154430bb51cc3c375d75bf4", 2477 | "f6c5a73d767e4b978c61bab1eea29e24" 2478 | ] 2479 | }, 2480 | "id": "2fQbqkoO0ZCz", 2481 | "outputId": "8b67eb9f-e6f9-491d-856a-25935e727b62" 2482 | }, 2483 | "source": [ 2484 | "import shutil\n", 2485 | "for path in train_paths:\n", 2486 | " for img in notebook.tqdm(glob(path)):\n", 2487 | " shutil.copy(img, '/content/dataset/train_images/')\n", 2488 | "print(f\"Train Images : {len(os.listdir('/content/dataset/train_images/'))}\")\n", 2489 | "for path in segment_paths:\n", 2490 | " for img in notebook.tqdm(glob(path)):\n", 2491 | " shutil.copy(img, '/content/dataset/train_segmentation/')\n", 2492 | "print(f\"Segmentation Images : {len(os.listdir('/content/dataset/train_segmentation/'))}\")" 2493 | ], 2494 | "execution_count": 24, 2495 | "outputs": [ 2496 | { 2497 | "output_type": "display_data", 2498 | "data": { 2499 | 
"application/vnd.jupyter.widget-view+json": { 2500 | "model_id": "f254f52e3bac4dca8c6072d42eecd5d4", 2501 | "version_minor": 0, 2502 | "version_major": 2 2503 | }, 2504 | "text/plain": [ 2505 | "HBox(children=(FloatProgress(value=0.0, max=61.0), HTML(value='')))" 2506 | ] 2507 | }, 2508 | "metadata": { 2509 | "tags": [] 2510 | } 2511 | }, 2512 | { 2513 | "output_type": "stream", 2514 | "text": [ 2515 | "\n" 2516 | ], 2517 | "name": "stdout" 2518 | }, 2519 | { 2520 | "output_type": "display_data", 2521 | "data": { 2522 | "application/vnd.jupyter.widget-view+json": { 2523 | "model_id": "c6f2dc82e3974349b4db34ac81538a12", 2524 | "version_minor": 0, 2525 | "version_major": 2 2526 | }, 2527 | "text/plain": [ 2528 | "HBox(children=(FloatProgress(value=0.0, max=34.0), HTML(value='')))" 2529 | ] 2530 | }, 2531 | "metadata": { 2532 | "tags": [] 2533 | } 2534 | }, 2535 | { 2536 | "output_type": "stream", 2537 | "text": [ 2538 | "\n" 2539 | ], 2540 | "name": "stdout" 2541 | }, 2542 | { 2543 | "output_type": "display_data", 2544 | "data": { 2545 | "application/vnd.jupyter.widget-view+json": { 2546 | "model_id": "817fbe88190e400ba29117b1dd761591", 2547 | "version_minor": 0, 2548 | "version_major": 2 2549 | }, 2550 | "text/plain": [ 2551 | "HBox(children=(FloatProgress(value=0.0, max=55.0), HTML(value='')))" 2552 | ] 2553 | }, 2554 | "metadata": { 2555 | "tags": [] 2556 | } 2557 | }, 2558 | { 2559 | "output_type": "stream", 2560 | "text": [ 2561 | "\n" 2562 | ], 2563 | "name": "stdout" 2564 | }, 2565 | { 2566 | "output_type": "display_data", 2567 | "data": { 2568 | "application/vnd.jupyter.widget-view+json": { 2569 | "model_id": "8c76d372a8124cf59af79542e607ce13", 2570 | "version_minor": 0, 2571 | "version_major": 2 2572 | }, 2573 | "text/plain": [ 2574 | "HBox(children=(FloatProgress(value=0.0, max=64.0), HTML(value='')))" 2575 | ] 2576 | }, 2577 | "metadata": { 2578 | "tags": [] 2579 | } 2580 | }, 2581 | { 2582 | "output_type": "stream", 2583 | "text": [ 2584 | "\n", 2585 | 
"Train Images : 214\n" 2586 | ], 2587 | "name": "stdout" 2588 | }, 2589 | { 2590 | "output_type": "display_data", 2591 | "data": { 2592 | "application/vnd.jupyter.widget-view+json": { 2593 | "model_id": "31ccb54c53274c8caebbace0d395d961", 2594 | "version_minor": 0, 2595 | "version_major": 2 2596 | }, 2597 | "text/plain": [ 2598 | "HBox(children=(FloatProgress(value=0.0, max=61.0), HTML(value='')))" 2599 | ] 2600 | }, 2601 | "metadata": { 2602 | "tags": [] 2603 | } 2604 | }, 2605 | { 2606 | "output_type": "stream", 2607 | "text": [ 2608 | "\n" 2609 | ], 2610 | "name": "stdout" 2611 | }, 2612 | { 2613 | "output_type": "display_data", 2614 | "data": { 2615 | "application/vnd.jupyter.widget-view+json": { 2616 | "model_id": "6b2dfb71403040fd9c5d7330cfb8dcf1", 2617 | "version_minor": 0, 2618 | "version_major": 2 2619 | }, 2620 | "text/plain": [ 2621 | "HBox(children=(FloatProgress(value=0.0, max=34.0), HTML(value='')))" 2622 | ] 2623 | }, 2624 | "metadata": { 2625 | "tags": [] 2626 | } 2627 | }, 2628 | { 2629 | "output_type": "stream", 2630 | "text": [ 2631 | "\n" 2632 | ], 2633 | "name": "stdout" 2634 | }, 2635 | { 2636 | "output_type": "display_data", 2637 | "data": { 2638 | "application/vnd.jupyter.widget-view+json": { 2639 | "model_id": "dc4d2ffc5a464b698c1785b25cd11ab1", 2640 | "version_minor": 0, 2641 | "version_major": 2 2642 | }, 2643 | "text/plain": [ 2644 | "HBox(children=(FloatProgress(value=0.0, max=55.0), HTML(value='')))" 2645 | ] 2646 | }, 2647 | "metadata": { 2648 | "tags": [] 2649 | } 2650 | }, 2651 | { 2652 | "output_type": "stream", 2653 | "text": [ 2654 | "\n" 2655 | ], 2656 | "name": "stdout" 2657 | }, 2658 | { 2659 | "output_type": "display_data", 2660 | "data": { 2661 | "application/vnd.jupyter.widget-view+json": { 2662 | "model_id": "828a3728072240838f92e825c249f825", 2663 | "version_minor": 0, 2664 | "version_major": 2 2665 | }, 2666 | "text/plain": [ 2667 | "HBox(children=(FloatProgress(value=0.0, max=64.0), HTML(value='')))" 2668 | ] 2669 | }, 
2670 | "metadata": { 2671 | "tags": [] 2672 | } 2673 | }, 2674 | { 2675 | "output_type": "stream", 2676 | "text": [ 2677 | "\n", 2678 | "Segmentation Images : 214\n" 2679 | ], 2680 | "name": "stdout" 2681 | } 2682 | ] 2683 | }, 2684 | { 2685 | "cell_type": "code", 2686 | "metadata": { 2687 | "id": "ed7BeiF63xJr" 2688 | }, 2689 | "source": [ 2690 | "" 2691 | ], 2692 | "execution_count": null, 2693 | "outputs": [] 2694 | } 2695 | ] 2696 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Satellight Image Segmentation with UNet and Its Variants 2 | 3 | ![](https://github.com/ashishpatel26/Semantic-Segmentation-Unet-Tensorflow-keras/blob/main/images/image.png) 4 | 5 | This repository contains the Collection of different Unet Variant such as **VggUnet, ResUnet, DenseUnet, Unet. AttUnet, MobileNetUnet, NestedUNet, R2AttUNet, R2UNet, SEUnet, scSEUnet, Unet_Xception_ResNetBlock** 6 | 7 | --- 8 | 9 | **UNet Variant** 10 | 11 | | Sr No | Topic | Notebook | 12 | | ----- | ------------------------------------------------------------ | ------------------------------------------------------------ | 13 | | 1. | **Unet Architecture for Satellight Image Segmentation** ![](https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/u-net-architecture.png) | [Github](https://nbviewer.jupyter.org/github/ashishpatel26/Semantic-Segmentation-Unet-Tensorflow-keras/blob/main/Notebooks/Unet.ipynb) | 14 | | 2. | **VGGUnet Architecture for Satellight Image Segmentation** ![](https://neurohive.io/wp-content/uploads/2018/11/vgg16-1-e1542731207177.png) | [Github](https://nbviewer.jupyter.org/github/ashishpatel26/Semantic-Segmentation-Unet-Tensorflow-keras/blob/main/Notebooks/VggUnet.ipynb) | 15 | | 3. 
| **ResUnet Architecture for Satellight Image Segmentation** ![](https://miro.medium.com/max/5998/1*eKrh8FqJL3jodebYlielNg.png) | [Github](https://nbviewer.jupyter.org/github/ashishpatel26/Semantic-Segmentation-Unet-Tensorflow-keras/blob/main/Notebooks/Resunet.ipynb) | 16 | | 4. | **DenseUnet Architecture for Satellight Image Segmentation** ![](https://lh3.googleusercontent.com/proxy/dEolVIZqWdVh7UgxyQkLS18uf942vgeZ_2Ls1pJeJjHC188qigU0l-AoReNRRoOSjsA1UccwrNeLGW1pznV6kiAw-ZdS5pVX3ulOzo1x3RW32hIZ5unHhq1nT8MK3G0rqV8V4XPdDK-RewYGEtMOFnU4qSqVqN85uuWS) | [Github](https://nbviewer.jupyter.org/github/ashishpatel26/Semantic-Segmentation-Unet-Tensorflow-keras/blob/main/Notebooks/DenseUnet.ipynb) | 17 | | 5. | **UnetPlusPlus Architecture for Satellight Image Segmentation** ![](https://neurohive.io/wp-content/uploads/2019/12/Screenshot_4-570x251.png) | [Github](https://nbviewer.jupyter.org/github/ashishpatel26/Semantic-Segmentation-Unet-Tensorflow-keras/blob/main/Notebooks/UnetPlusPlus.ipynb) | 18 | 19 | --- 20 | 21 | **Thank you** 22 | 23 | --- 24 | 25 | -------------------------------------------------------------------------------- /images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/satellite-Image-Semantic-Segmentation-Unet-Tensorflow-keras/64b1dfd97fdc846b92bf2fba55e9c3382857ded5/images/image.png -------------------------------------------------------------------------------- /models/AttUNet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | import tensorflow as tf 3 | from keras.models import * 4 | from keras.layers import * 5 | 6 | def conv_block(input, filters): 7 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(input) 8 | out = BatchNormalization()(out) 9 | out = Activation('relu')(out) 10 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out) 11 | out = 
BatchNormalization()(out) 12 | out = Activation('relu')(out) 13 | return out 14 | 15 | def up_conv(input, filters): 16 | out = UpSampling2D()(input) 17 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out) 18 | out = BatchNormalization()(out) 19 | out = Activation('relu')(out) 20 | return out 21 | 22 | def Attention_block(input1, input2, filters): 23 | g1 = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input1) 24 | g1 = BatchNormalization()(g1) 25 | x1 = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input2) 26 | x1 = BatchNormalization()(x1) 27 | psi = Activation('relu')(add([g1, x1])) 28 | psi = Conv2D(filters, kernel_size=1, strides=1, padding='same')(psi) 29 | psi = BatchNormalization()(psi) 30 | psi = Activation('sigmoid')(psi) 31 | out = multiply([input2, psi]) 32 | return out 33 | 34 | 35 | def AttUNet(nClasses, input_height=224, input_width=224): 36 | 37 | inputs = Input(shape=(input_height, input_width, 3)) 38 | n1 = 32 39 | filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] 40 | 41 | e1 = conv_block(inputs, filters[0]) 42 | 43 | e2 = MaxPooling2D(strides=2)(e1) 44 | e2 = conv_block(e2, filters[1]) 45 | 46 | e3 = MaxPooling2D(strides=2)(e2) 47 | e3 = conv_block(e3, filters[2]) 48 | 49 | e4 = MaxPooling2D(strides=2)(e3) 50 | e4 = conv_block(e4, filters[3]) 51 | 52 | e5 = MaxPooling2D(strides=2)(e4) 53 | e5 = conv_block(e5, filters[4]) 54 | 55 | d5 = up_conv(e5, filters[3]) 56 | x4 = Attention_block(d5, e4, filters[3]) 57 | d5 = Concatenate()([x4, d5]) 58 | d5 = conv_block(d5, filters[3]) 59 | 60 | d4 = up_conv(d5, filters[2]) 61 | x3 = Attention_block(d4, e3, filters[2]) 62 | d4 = Concatenate()([x3, d4]) 63 | d4 = conv_block(d4, filters[2]) 64 | 65 | d3 = up_conv(d4, filters[1]) 66 | x2 = Attention_block(d3, e2, filters[1]) 67 | d3 = Concatenate()([x2, d3]) 68 | d3 = conv_block(d3, filters[1]) 69 | 70 | d2 = up_conv(d3, filters[0]) 71 | x1 = Attention_block(d2, e1, filters[0]) 72 | d2 = Concatenate()([x1, d2]) 73 | 
d2 = conv_block(d2, filters[0]) 74 | 75 | o = Conv2D(nClasses, (3, 3), padding='same')(d2) 76 | 77 | outputHeight = Model(inputs, o).output_shape[1] 78 | outputWidth = Model(inputs, o).output_shape[2] 79 | 80 | out = (Reshape((outputHeight * outputWidth, nClasses)))(o) 81 | out = Activation('softmax')(out) 82 | 83 | model = Model(input=inputs, output=out) 84 | model.outputHeight = outputHeight 85 | model.outputWidth = outputWidth 86 | 87 | return model -------------------------------------------------------------------------------- /models/DeepLabV2.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import tensorflow as tf 3 | from keras.layers import * 4 | from keras.models import * 5 | from keras.optimizers import * 6 | 7 | class BilinearUpsampling(Layer): 8 | def __init__(self, upsampling, **kwargs): 9 | self.upsampling = upsampling 10 | super(BilinearUpsampling, self).__init__(**kwargs) 11 | 12 | def build(self, input_shape): 13 | super(BilinearUpsampling, self).build(input_shape) 14 | 15 | def call(self, x, mask=None): 16 | new_size = [x.shape[1] * self.upsampling, x.shape[2] * self.upsampling] 17 | output = tf.image.resize_images(x, new_size) 18 | return output 19 | 20 | def DeepLabV2(nClasses, input_height=224, input_width=224): 21 | inputs = Input(shape=(input_height, input_width, 3)) 22 | 23 | # Block 1 24 | x = ZeroPadding2D(padding=(1, 1))(inputs) 25 | x = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='conv1_1')(x) 26 | x = ZeroPadding2D(padding=(1, 1))(x) 27 | x = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='conv1_2')(x) 28 | x = ZeroPadding2D(padding=(1, 1))(x) 29 | x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) 30 | 31 | # Block 2 32 | x = ZeroPadding2D(padding=(1, 1))(x) 33 | x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', name='conv2_1')(x) 34 | x = ZeroPadding2D(padding=(1, 1))(x) 35 | x = Conv2D(filters=128, kernel_size=(3, 3), 
activation='relu', name='conv2_2')(x) 36 | x = ZeroPadding2D(padding=(1, 1))(x) 37 | x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) 38 | 39 | # Block 3 40 | x = ZeroPadding2D(padding=(1, 1))(x) 41 | x = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', name='conv3_1')(x) 42 | x = ZeroPadding2D(padding=(1, 1))(x) 43 | x = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', name='conv3_2')(x) 44 | x = ZeroPadding2D(padding=(1, 1))(x) 45 | x = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', name='conv3_3')(x) 46 | x = ZeroPadding2D(padding=(1, 1))(x) 47 | x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) 48 | 49 | # Block 4 50 | x = ZeroPadding2D(padding=(1, 1))(x) 51 | x = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', name='conv4_1')(x) 52 | x = ZeroPadding2D(padding=(1, 1))(x) 53 | x = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', name='conv4_2')(x) 54 | x = ZeroPadding2D(padding=(1, 1))(x) 55 | x = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', name='conv4_3')(x) 56 | x = ZeroPadding2D(padding=(1, 1))(x) 57 | x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(x) 58 | 59 | # Block 5 60 | x = ZeroPadding2D(padding=(2, 2))(x) 61 | x = Conv2D(filters=512, kernel_size=(3, 3), dilation_rate=(2, 2), activation='relu', name='conv5_1')(x) 62 | x = ZeroPadding2D(padding=(2, 2))(x) 63 | x = Conv2D(filters=512, kernel_size=(3, 3), dilation_rate=(2, 2), activation='relu', name='conv5_2')(x) 64 | x = ZeroPadding2D(padding=(2, 2))(x) 65 | x = Conv2D(filters=512, kernel_size=(3, 3), dilation_rate=(2, 2), activation='relu', name='conv5_3')(x) 66 | x = ZeroPadding2D(padding=(1, 1))(x) 67 | p5 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(x) 68 | 69 | # branching for Atrous Spatial Pyramid Pooling - Until here -14 layers 70 | # hole = 6 71 | b1 = ZeroPadding2D(padding=(6, 6))(p5) 72 | b1 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(6, 6), activation='relu', name='fc6_1')(b1) 73 | b1 = 
Dropout(0.5)(b1) 74 | b1 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_1')(b1) 75 | b1 = Dropout(0.5)(b1) 76 | b1 = Conv2D(filters=nClasses, kernel_size=(1, 1), activation='relu', name='fc8_1')(b1) 77 | 78 | # hole = 12 79 | b2 = ZeroPadding2D(padding=(12, 12))(p5) 80 | b2 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(12, 12), activation='relu', name='fc6_2')(b2) 81 | b2 = Dropout(0.5)(b2) 82 | b2 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_2')(b2) 83 | b2 = Dropout(0.5)(b2) 84 | b2 = Conv2D(filters=nClasses, kernel_size=(1, 1), activation='relu', name='fc8_2')(b2) 85 | 86 | # hole = 18 87 | b3 = ZeroPadding2D(padding=(18, 18))(p5) 88 | b3 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(18, 18), activation='relu', name='fc6_3')(b3) 89 | b3 = Dropout(0.5)(b3) 90 | b3 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_3')(b3) 91 | b3 = Dropout(0.5)(b3) 92 | b3 = Conv2D(filters=nClasses, kernel_size=(1, 1), activation='relu', name='fc8_3')(b3) 93 | 94 | # hole = 24 95 | b4 = ZeroPadding2D(padding=(24, 24))(p5) 96 | b4 = Conv2D(filters=1024, kernel_size=(3, 3), dilation_rate=(24, 24), activation='relu', name='fc6_4')(b4) 97 | b4 = Dropout(0.5)(b4) 98 | b4 = Conv2D(filters=1024, kernel_size=(1, 1), activation='relu', name='fc7_4')(b4) 99 | b4 = Dropout(0.5)(b4) 100 | b4 = Conv2D(filters=nClasses, kernel_size=(1, 1), activation='relu', name='fc8_4')(b4) 101 | 102 | s = Add()([b1, b2, b3, b4]) 103 | logits = BilinearUpsampling(upsampling=8)(s) 104 | 105 | outputHeight = Model(inputs, logits).output_shape[1] 106 | outputWidth = Model(inputs, logits).output_shape[2] 107 | 108 | out = (Reshape((outputHeight * outputWidth, nClasses)))(logits) 109 | out = Activation('softmax')(out) 110 | 111 | model = Model(input=inputs, output=out) 112 | model.outputHeight = outputHeight 113 | model.outputWidth = outputWidth 114 | 115 | return model 116 | 
-------------------------------------------------------------------------------- /models/DeepLabV3+.py: -------------------------------------------------------------------------------- 1 | from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, BatchNormalization, Input, DepthwiseConv2D, add,Dropout, AveragePooling2D, Concatenate, Layer, InputSpec 2 | from tensorflow.keras.models import Model 3 | from tensorflow.python.keras.utils import conv_utils 4 | import tensorflow as tf 5 | from keras.models import * 6 | from keras.layers import * 7 | from keras.utils import conv_utils 8 | 9 | from keras.utils.data_utils import get_file 10 | from keras.backend.common import normalize_data_format 11 | 12 | 13 | 14 | class BilinearUpsampling(Layer): 15 | 16 | def __init__(self, upsampling=(2, 2), data_format=None, **kwargs): 17 | super(BilinearUpsampling, self).__init__(**kwargs) 18 | self.data_format = normalize_data_format(data_format) 19 | self.upsampling = conv_utils.normalize_tuple(upsampling, 2, 'size') 20 | self.input_spec = InputSpec(ndim=4) 21 | 22 | def compute_output_shape(self, input_shape): 23 | height = self.upsampling[0] * \ 24 | input_shape[1] if input_shape[1] is not None else None 25 | width = self.upsampling[1] * \ 26 | input_shape[2] if input_shape[2] is not None else None 27 | return (input_shape[0], height, width, input_shape[3]) 28 | 29 | def call(self, inputs): 30 | # .tf 31 | return tf.image.resize_bilinear(inputs, (int(inputs.shape[1] * self.upsampling[0]), 32 | int(inputs.shape[2] * self.upsampling[1]))) 33 | 34 | def get_config(self): 35 | 36 | config = {'size': self.upsampling, 'data_format': self.data_format} 37 | base_config = super(BilinearUpsampling, self).get_config() 38 | return dict(list(base_config.items()) + list(config.items())) 39 | 40 | def xception_downsample_block(x, channels, top_relu=False): 41 | ##separable conv1 42 | if top_relu: 43 | x = Activation("relu")(x) 44 | x = DepthwiseConv2D((3, 3), padding="same", 
use_bias=False)(x) 45 | x = BatchNormalization()(x) 46 | x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x) 47 | x = BatchNormalization()(x) 48 | x = Activation("relu")(x) 49 | 50 | ##separable conv2 51 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 52 | x = BatchNormalization()(x) 53 | x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x) 54 | x = BatchNormalization()(x) 55 | x = Activation("relu")(x) 56 | 57 | ##separable conv3 58 | x = DepthwiseConv2D((3, 3), strides=(2, 2), padding="same", use_bias=False)(x) 59 | x = BatchNormalization()(x) 60 | x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x) 61 | x = BatchNormalization()(x) 62 | return x 63 | 64 | 65 | def res_xception_downsample_block(x, channels): 66 | res = Conv2D(channels, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x) 67 | res = BatchNormalization()(res) 68 | x = xception_downsample_block(x, channels) 69 | x = add([x, res]) 70 | return x 71 | 72 | 73 | def xception_block(x, channels): 74 | ##separable conv1 75 | x = Activation("relu")(x) 76 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 77 | x = BatchNormalization()(x) 78 | x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x) 79 | x = BatchNormalization()(x) 80 | 81 | ##separable conv2 82 | x = Activation("relu")(x) 83 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 84 | x = BatchNormalization()(x) 85 | x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x) 86 | x = BatchNormalization()(x) 87 | 88 | ##separable conv3 89 | x = Activation("relu")(x) 90 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 91 | x = BatchNormalization()(x) 92 | x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x) 93 | x = BatchNormalization()(x) 94 | return x 95 | 96 | 97 | def res_xception_block(x, channels): 98 | res = x 99 | x = xception_block(x, channels) 100 | x = add([x, res]) 101 | return x 102 | 103 | 104 | def aspp(x, 
input_shape, out_stride): 105 | b0 = Conv2D(256, (1, 1), padding="same", use_bias=False)(x) 106 | b0 = BatchNormalization()(b0) 107 | b0 = Activation("relu")(b0) 108 | 109 | b1 = DepthwiseConv2D((3, 3), dilation_rate=(6, 6), padding="same", use_bias=False)(x) 110 | b1 = BatchNormalization()(b1) 111 | b1 = Activation("relu")(b1) 112 | b1 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b1) 113 | b1 = BatchNormalization()(b1) 114 | b1 = Activation("relu")(b1) 115 | 116 | b2 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x) 117 | b2 = BatchNormalization()(b2) 118 | b2 = Activation("relu")(b2) 119 | b2 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b2) 120 | b2 = BatchNormalization()(b2) 121 | b2 = Activation("relu")(b2) 122 | 123 | b3 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x) 124 | b3 = BatchNormalization()(b3) 125 | b3 = Activation("relu")(b3) 126 | b3 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b3) 127 | b3 = BatchNormalization()(b3) 128 | b3 = Activation("relu")(b3) 129 | 130 | out_shape = int(input_shape[0] / out_stride) 131 | b4 = AveragePooling2D(pool_size=(out_shape, out_shape))(x) 132 | b4 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b4) 133 | b4 = BatchNormalization()(b4) 134 | b4 = Activation("relu")(b4) 135 | b4 = BilinearUpsampling((out_shape, out_shape))(b4) 136 | 137 | x = Concatenate()([b4, b0, b1, b2, b3]) 138 | return x 139 | 140 | 141 | def DeeplabV3_plus(nClasses=21, input_height=512, input_width=512, out_stride=16): 142 | img_input = Input(shape=(input_height, input_width, 3)) 143 | x = Conv2D(32, (3, 3), strides=(2, 2), padding="same", use_bias=False)(img_input) 144 | x = BatchNormalization()(x) 145 | x = Activation("relu")(x) 146 | x = Conv2D(64, (3, 3), padding="same", use_bias=False)(x) 147 | x = BatchNormalization()(x) 148 | x = Activation("relu")(x) 149 | 150 | x = res_xception_downsample_block(x, 128) 151 | 152 | res = Conv2D(256, (1, 
1), strides=(2, 2), padding="same", use_bias=False)(x) 153 | res = BatchNormalization()(res) 154 | x = Activation("relu")(x) 155 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 156 | x = BatchNormalization()(x) 157 | x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x) 158 | x = BatchNormalization()(x) 159 | x = Activation("relu")(x) 160 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 161 | x = BatchNormalization()(x) 162 | x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x) 163 | skip = BatchNormalization()(x) 164 | x = Activation("relu")(skip) 165 | x = DepthwiseConv2D((3, 3), strides=(2, 2), padding="same", use_bias=False)(x) 166 | x = BatchNormalization()(x) 167 | x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x) 168 | x = BatchNormalization()(x) 169 | x = add([x, res]) 170 | 171 | x = xception_downsample_block(x, 728, top_relu=True) 172 | 173 | for i in range(16): 174 | x = res_xception_block(x, 728) 175 | 176 | res = Conv2D(1024, (1, 1), padding="same", use_bias=False)(x) 177 | res = BatchNormalization()(res) 178 | x = Activation("relu")(x) 179 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 180 | x = BatchNormalization()(x) 181 | x = Conv2D(728, (1, 1), padding="same", use_bias=False)(x) 182 | x = BatchNormalization()(x) 183 | x = Activation("relu")(x) 184 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 185 | x = BatchNormalization()(x) 186 | x = Conv2D(1024, (1, 1), padding="same", use_bias=False)(x) 187 | x = BatchNormalization()(x) 188 | x = Activation("relu")(x) 189 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 190 | x = BatchNormalization()(x) 191 | x = Conv2D(1024, (1, 1), padding="same", use_bias=False)(x) 192 | x = BatchNormalization()(x) 193 | x = add([x, res]) 194 | 195 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 196 | x = BatchNormalization()(x) 197 | x = Conv2D(1536, (1, 1), padding="same", use_bias=False)(x) 198 | x = 
BatchNormalization()(x) 199 | x = Activation("relu")(x) 200 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 201 | x = BatchNormalization()(x) 202 | x = Conv2D(1536, (1, 1), padding="same", use_bias=False)(x) 203 | x = BatchNormalization()(x) 204 | x = Activation("relu")(x) 205 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 206 | x = BatchNormalization()(x) 207 | x = Conv2D(2048, (1, 1), padding="same", use_bias=False)(x) 208 | x = BatchNormalization()(x) 209 | x = Activation("relu")(x) 210 | 211 | # aspp 212 | x = aspp(x, (input_height, input_width, 3), out_stride) 213 | x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x) 214 | x = BatchNormalization()(x) 215 | x = Activation("relu")(x) 216 | x = Dropout(0.9)(x) 217 | 218 | ##decoder 219 | x = BilinearUpsampling((4, 4))(x) 220 | dec_skip = Conv2D(48, (1, 1), padding="same", use_bias=False)(skip) 221 | dec_skip = BatchNormalization()(dec_skip) 222 | dec_skip = Activation("relu")(dec_skip) 223 | x = Concatenate()([x, dec_skip]) 224 | 225 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 226 | x = BatchNormalization()(x) 227 | x = Activation("relu")(x) 228 | x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x) 229 | x = BatchNormalization()(x) 230 | x = Activation("relu")(x) 231 | 232 | x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x) 233 | x = BatchNormalization()(x) 234 | x = Activation("relu")(x) 235 | x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x) 236 | x = BatchNormalization()(x) 237 | x = Activation("relu")(x) 238 | 239 | x = Conv2D(nClasses, (1, 1), padding="same")(x) 240 | x = BilinearUpsampling((4, 4))(x) 241 | outputHeight = Model(img_input, x).output_shape[1] 242 | outputWidth = Model(img_input, x).output_shape[2] 243 | x = (Reshape((outputHeight * outputWidth, nClasses)))(x) 244 | x = Activation('softmax')(x) 245 | model = Model(input=img_input, output=x) 246 | model.outputHeight = outputHeight 247 | model.outputWidth 
= outputWidth 248 | return model 249 | 250 | 251 | -------------------------------------------------------------------------------- /models/Deeplabv3.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/satellite-Image-Semantic-Segmentation-Unet-Tensorflow-keras/64b1dfd97fdc846b92bf2fba55e9c3382857ded5/models/Deeplabv3.py -------------------------------------------------------------------------------- /models/DenseUnet.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras.layers import * 3 | from tensorflow.keras.optimizers import * 4 | from tensorflow.keras.models import * 5 | 6 | def DenseBlock(channels,inputs): 7 | 8 | conv1_1 = Conv2D(channels, (1, 1),activation=None, padding='same')(inputs) 9 | conv1_1=BatchActivate(conv1_1) 10 | conv1_2 = Conv2D(channels//4, (3, 3), activation=None, padding='same')(conv1_1) 11 | conv1_2 = BatchActivate(conv1_2) 12 | 13 | conv2=concatenate([inputs,conv1_2]) 14 | conv2_1 = Conv2D(channels, (1, 1), activation=None, padding='same')(conv2) 15 | conv2_1 = BatchActivate(conv2_1) 16 | conv2_2 = Conv2D(channels // 4, (3, 3), activation=None, padding='same')(conv2_1) 17 | conv2_2 = BatchActivate(conv2_2) 18 | 19 | conv3 = concatenate([inputs, conv1_2,conv2_2]) 20 | conv3_1 = Conv2D(channels, (1, 1), activation=None, padding='same')(conv3) 21 | conv3_1 = BatchActivate(conv3_1) 22 | conv3_2 = Conv2D(channels // 4, (3, 3), activation=None, padding='same')(conv3_1) 23 | conv3_2 = BatchActivate(conv3_2) 24 | 25 | conv4 = concatenate([inputs, conv1_2, conv2_2,conv3_2]) 26 | conv4_1 = Conv2D(channels, (1, 1), activation=None, padding='same')(conv4) 27 | conv4_1 = BatchActivate(conv4_1) 28 | conv4_2 = Conv2D(channels // 4, (3, 3), activation=None, padding='same')(conv4_1) 29 | conv4_2 = BatchActivate(conv4_2) 30 | result=concatenate([inputs,conv1_2, conv2_2,conv3_2,conv4_2]) 31 | 
return result 32 | 33 | def BatchActivate(x): 34 | x = BatchNormalization()(x) 35 | x = Activation('relu')(x) 36 | return x 37 | 38 | 39 | def DenseUNet(nClasses , input_height=304, input_width=304): 40 | 41 | filters=16 42 | keep_prob=0.9 43 | block_size=7 44 | 45 | inputs = Input(shape=(input_height, input_width, 3)) 46 | 47 | conv1 = Conv2D(filters * 1, (3, 3), activation=None, padding="same")(inputs) 48 | conv1 = BatchActivate(conv1) 49 | conv1 = DenseBlock(filters * 1, conv1) 50 | pool1 = MaxPooling2D((2, 2))(conv1) 51 | 52 | conv2 = DenseBlock(filters * 2, pool1) 53 | pool2 = MaxPooling2D((2, 2))(conv2) 54 | 55 | conv3 = DenseBlock(filters * 4, pool2) 56 | pool3 = MaxPooling2D((2, 2))(conv3) 57 | 58 | convm = DenseBlock(filters * 8, pool3) 59 | 60 | deconv3 = Conv2DTranspose(filters * 4, (3, 3), strides=(2, 2), padding="same")(convm) 61 | uconv3 = concatenate([deconv3, conv3]) 62 | uconv3 = Conv2D(filters * 4, (1, 1), activation=None, padding="same")(uconv3) 63 | uconv3 = BatchActivate(uconv3) 64 | uconv3 = DenseBlock(filters * 4, uconv3) 65 | 66 | 67 | deconv2 = Conv2DTranspose(filters * 2, (3, 3), strides=(2, 2), padding="same")(uconv3) 68 | uconv2 = concatenate([deconv2, conv2]) 69 | uconv2 = Conv2D(filters * 2, (1, 1), activation=None, padding="same")(uconv2) 70 | uconv2 = BatchActivate(uconv2) 71 | uconv2 = DenseBlock(filters * 2, uconv2) 72 | 73 | deconv1 = Conv2DTranspose(filters * 1, (3, 3), strides=(2, 2), padding="same")(uconv2) 74 | uconv1 = concatenate([deconv1, conv1]) 75 | uconv1 = Conv2D(filters * 1, (1, 1), activation=None, padding="same")(uconv1) 76 | uconv1 = BatchActivate(uconv1) 77 | uconv1 = DenseBlock(filters * 1, uconv1) 78 | 79 | outputs = Conv2D(nClasses, (1, 1), padding="same", activation=None)(uconv1) 80 | outputs = Activation('sigmoid')(outputs) 81 | 82 | denseunet = Model(inputs=inputs, outputs=outputs) 83 | 84 | denseunet.compile(optimizer=Adam(lr=1e-4), 85 | loss='binary_crossentropy', 86 | metrics=['accuracy', 87 | 
tf.keras.metrics.MeanIoU(num_classes=2), 88 | tf.keras.metrics.AUC(), 89 | tf.keras.metrics.Precision(top_k=5), 90 | tf.keras.metrics.Recall(top_k=5), 91 | ]) 92 | 93 | return denseunet 94 | 95 | if __name__ == '__main__': 96 | model = DenseUNet(nClasses = 1) 97 | model.summary() -------------------------------------------------------------------------------- /models/FCN8.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | 3 | from keras.applications import vgg16 4 | from keras.models import Model, Sequential 5 | from keras.layers import Conv2D, Conv2DTranspose, Input, Cropping2D, add, Dropout, Reshape, Activation 6 | 7 | 8 | def FCN8_helper(nClasses, input_height=224, input_width=224): 9 | 10 | assert input_height % 32 == 0 11 | assert input_width % 32 == 0 12 | 13 | img_input = Input(shape=(input_height, input_width, 3)) 14 | 15 | model = vgg16.VGG16( 16 | include_top=False, 17 | weights='vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', input_tensor=img_input, 18 | pooling=None, 19 | classes=1000) 20 | assert isinstance(model, Model) 21 | 22 | o = Conv2D( 23 | filters=4096, 24 | kernel_size=( 25 | 7, 26 | 7), 27 | padding="same", 28 | activation="relu", 29 | name="fc6")( 30 | model.output) 31 | o = Dropout(rate=0.5)(o) 32 | o = Conv2D( 33 | filters=4096, 34 | kernel_size=( 35 | 1, 36 | 1), 37 | padding="same", 38 | activation="relu", 39 | name="fc7")(o) 40 | o = Dropout(rate=0.5)(o) 41 | 42 | o = Conv2D(filters=nClasses, kernel_size=(1, 1), padding="same", activation="relu", kernel_initializer="he_normal", 43 | name="score_fr")(o) 44 | 45 | o = Conv2DTranspose(filters=nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, 46 | name="score2")(o) 47 | 48 | fcn8 = Model(inputs=img_input, outputs=o) 49 | # mymodel.summary() 50 | return fcn8 51 | 52 | 53 | 54 | def FCN8(nClasses, input_height=224, input_width=224): 55 | 56 | 57 | fcn8 = FCN8_helper(nClasses, input_height=224, 
input_width=224) 58 | 59 | 60 | 61 | # Conv to be applied on Pool4 62 | skip_con1 = Conv2D(nClasses, kernel_size=(1, 1), padding="same", activation=None, kernel_initializer="he_normal", 63 | name="score_pool4")(fcn8.get_layer("block4_pool").output) 64 | Summed = add(inputs=[skip_con1, fcn8.output]) 65 | 66 | x = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, 67 | name="score4")(Summed) 68 | 69 | ### 70 | skip_con2 = Conv2D(nClasses, kernel_size=(1, 1), padding="same", activation=None, kernel_initializer="he_normal", 71 | name="score_pool3")(fcn8.get_layer("block3_pool").output) 72 | Summed2 = add(inputs=[skip_con2, x]) 73 | 74 | ##### 75 | Up = Conv2DTranspose(nClasses, kernel_size=(8, 8), strides=(8, 8), 76 | padding="valid", activation=None, name="upsample")(Summed2) 77 | 78 | 79 | 80 | o_shape = Model(inputs=fcn8.input, outputs=Up).output_shape 81 | 82 | 83 | 84 | outputHeight = o_shape[1] 85 | 86 | outputWidth = o_shape[2] 87 | 88 | Up = Reshape((-1, nClasses))(Up) 89 | Up = Activation("softmax")(Up) 90 | 91 | model = Model(inputs=fcn8.input, outputs=Up) 92 | 93 | model.outputWidth = outputWidth 94 | model.outputHeight = outputHeight 95 | 96 | 97 | return model 98 | 99 | 100 | 101 | 102 | -------------------------------------------------------------------------------- /models/GhostNet.py: -------------------------------------------------------------------------------- 1 | """ 2 | GhostNet https://arxiv.org/abs/1911.11907 3 | """ 4 | from keras.layers import * 5 | import math 6 | 7 | 8 | def _make_divisible(v, divisor, min_value=None): 9 | """ 10 | This function is taken from the original tf repo. 
11 | It ensures that all layers have a channel number that is divisible by 8 12 | It can be seen here: 13 | https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py 14 | """ 15 | if min_value is None: 16 | min_value = divisor 17 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) 18 | # Make sure that round down does not go down by more than 10%. 19 | if new_v < 0.9 * v: 20 | new_v += divisor 21 | return new_v 22 | 23 | 24 | def ConvBnLayer(x, oup, kernel_size, stride, padding='valid'): 25 | y = Conv2D(filters=oup, kernel_size=kernel_size, strides=stride, padding=padding)(x) 26 | y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y) 27 | return y 28 | 29 | 30 | def SELayer(x, reduction=4): 31 | batch, _, __, channel = x.shape 32 | y = GlobalAveragePooling2D()(x) 33 | y = Dense(units=channel // reduction, activation='relu')(y) 34 | y = Dense(units=channel, activation='sigmoid')(y) 35 | y = Reshape([1, 1, channel])(y) 36 | se_tensor = Multiply()([x, y]) 37 | return se_tensor 38 | 39 | 40 | def DepthWiseConv(x, kernel_size=3, stride=1, depth_multiplier=1, padding='same', relu=False): 41 | y = DepthwiseConv2D(kernel_size=kernel_size // 2, depth_multiplier=depth_multiplier, 42 | strides=stride, padding=padding)(x) 43 | y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y) 44 | if relu: 45 | y = Activation('relu')(y) 46 | return y 47 | 48 | 49 | def GhostModule(x, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True): 50 | init_channels = math.ceil(oup / ratio) 51 | new_channels = init_channels * (ratio - 1) 52 | 53 | multiplier = new_channels // init_channels 54 | 55 | primary_tensor = ConvBnLayer(x, init_channels, kernel_size=kernel_size, stride=stride, padding='same') 56 | if relu: 57 | primary_tensor = Activation('relu')(primary_tensor) 58 | 59 | cheap_tensor = DepthWiseConv(primary_tensor, kernel_size=dw_size, 60 | depth_multiplier=multiplier, padding='same', stride=1) 61 | if relu: 62 | cheap_tensor = 
Activation('relu')(cheap_tensor) 63 | 64 | out = Concatenate()([primary_tensor, cheap_tensor]) 65 | # 使用Lambda进行切分 66 | return Lambda(lambda x: x[:, :, :, :oup])(out) 67 | 68 | 69 | def GhostBottleneck(x, hidden_dim, oup, kernel_size, stride, use_se): 70 | assert stride in [1, 2] 71 | inp = x.shape[-1] 72 | if stride == 1 and inp == oup: 73 | shortcut = x 74 | else: 75 | shortcut = DepthWiseConv(x, kernel_size=3, stride=stride, relu=False) 76 | shortcut = ConvBnLayer(shortcut, oup, 1, 1, padding='same') 77 | 78 | x = GhostModule(x, hidden_dim, kernel_size=1, relu=True) 79 | if stride == 2: 80 | x = DepthWiseConv(x, kernel_size, stride, relu=False) 81 | if use_se: 82 | x = SELayer(x) 83 | x = GhostModule(x, oup, kernel_size=1, relu=False) 84 | return Add()([x, shortcut]) 85 | 86 | 87 | def GhostNet(x, num_classes=1000, width_mult=1.): 88 | cfgs = [ 89 | # k, t, c, SE, s 90 | [3, 16, 16, 0, 1], 91 | [3, 48, 24, 0, 2], 92 | [3, 72, 24, 0, 1], 93 | [5, 72, 40, 1, 2], 94 | [5, 120, 40, 1, 1], 95 | [3, 240, 80, 0, 2], 96 | [3, 200, 80, 0, 1], 97 | [3, 184, 80, 0, 1], 98 | [3, 184, 80, 0, 1], 99 | [3, 480, 112, 1, 1], 100 | [3, 672, 112, 1, 1], 101 | [5, 672, 160, 1, 2], 102 | [5, 960, 160, 0, 1], 103 | [5, 960, 160, 1, 1], 104 | [5, 960, 160, 0, 1], 105 | [5, 960, 160, 1, 1] 106 | ] 107 | 108 | output_channel = _make_divisible(16 * width_mult, 4) 109 | 110 | x = ConvBnLayer(x, output_channel, 3, 2, padding='same') 111 | for k, exp_size, c, use_se, s in cfgs: 112 | output_channel = _make_divisible(c * width_mult, 4) 113 | hidden_channel = _make_divisible(exp_size * width_mult, 4) 114 | x = GhostBottleneck(x, hidden_channel, output_channel, k, s, use_se) 115 | 116 | output_channel = _make_divisible(exp_size * width_mult, 4) 117 | x = ConvBnLayer(x, output_channel, kernel_size=1, stride=1, padding='same') 118 | x = Activation('relu')(x) 119 | x = GlobalAveragePooling2D()(x) 120 | 121 | output_channel = 1280 122 | x = Dense(output_channel)(x) 123 | x = 
BatchNormalization(epsilon=1e-5, momentum=0.1)(x) 124 | x = Activation('relu')(x) 125 | x = Dropout(0.2)(x) 126 | x = Dense(num_classes)(x) 127 | return x 128 | -------------------------------------------------------------------------------- /models/HRNet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | from keras.models import * 3 | from keras.layers import * 4 | import keras.backend as K 5 | from keras.initializers import RandomNormal 6 | import tensorflow as tf 7 | 8 | def conv(x, outsize, kernel_size, strides_=1, padding_='same', activation=None): 9 | return Conv2D(outsize, kernel_size, strides=strides_, padding=padding_, kernel_initializer=RandomNormal( 10 | stddev=0.001), use_bias=False, activation=activation)(x) 11 | 12 | 13 | def Bottleneck(x, size, downsampe=False): 14 | residual = x 15 | 16 | out = conv(x, size, 1, padding_='valid') 17 | out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out) 18 | out = Activation('relu')(out) 19 | 20 | out = conv(out, size, 3) 21 | out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out) 22 | out = Activation('relu')(out) 23 | 24 | out = conv(out, size * 4, 1, padding_='valid') 25 | out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out) 26 | 27 | if downsampe: 28 | residual = conv(x, size * 4, 1, padding_='valid') 29 | residual = BatchNormalization(epsilon=1e-5, momentum=0.1)(residual) 30 | 31 | out = Add()([out, residual]) 32 | out = Activation('relu')(out) 33 | 34 | return out 35 | 36 | 37 | def BasicBlock(x, size, downsampe=False): 38 | residual = x 39 | 40 | out = conv(x, size, 3) 41 | out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out) 42 | out = Activation('relu')(out) 43 | 44 | out = conv(out, size, 3) 45 | out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out) 46 | 47 | if downsampe: 48 | residual = conv(x, size, 1, padding_='valid') 49 | residual = BatchNormalization(epsilon=1e-5, momentum=0.1)(residual) 50 | 51 | out = Add()([out, 
residual]) 52 | out = Activation('relu')(out) 53 | 54 | return out 55 | 56 | 57 | def layer1(x): 58 | x = Bottleneck(x, 64, downsampe=True) 59 | x = Bottleneck(x, 64) 60 | x = Bottleneck(x, 64) 61 | x = Bottleneck(x, 64) 62 | 63 | return x 64 | 65 | 66 | def transition_layer(x, in_channels, out_channels): 67 | num_in = len(in_channels) 68 | num_out = len(out_channels) 69 | out = [] 70 | 71 | for i in range(num_out): 72 | if i < num_in: 73 | if in_channels[i] != out_channels[i]: 74 | residual = conv(x[i], out_channels[i], 3) 75 | residual = BatchNormalization( 76 | epsilon=1e-5, momentum=0.1)(residual) 77 | residual = Activation('relu')(residual) 78 | out.append(residual) 79 | else: 80 | out.append(x[i]) 81 | else: 82 | residual = conv(x[-1], out_channels[i], 3, strides_=2) 83 | residual = BatchNormalization(epsilon=1e-5, momentum=0.1)(residual) 84 | residual = Activation('relu')(residual) 85 | out.append(residual) 86 | 87 | return out 88 | 89 | 90 | def branches(x, block_num, channels): 91 | out = [] 92 | for i in range(len(channels)): 93 | residual = x[i] 94 | for j in range(block_num): 95 | residual = BasicBlock(residual, channels[i]) 96 | out.append(residual) 97 | return out 98 | 99 | 100 | def fuse_layers(x, channels, multi_scale_output=True): 101 | out = [] 102 | 103 | for i in range(len(channels) if multi_scale_output else 1): 104 | residual = x[i] 105 | for j in range(len(channels)): 106 | if j > i: 107 | y = conv(x[j], channels[i], 1, padding_='valid') 108 | y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y) 109 | y = UpSampling2D(size=2 ** (j - i))(y) 110 | residual = Add()([residual, y]) 111 | elif j < i: 112 | y = x[j] 113 | for k in range(i - j): 114 | if k == i - j - 1: 115 | y = conv(y, channels[i], 3, strides_=2) 116 | y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y) 117 | else: 118 | y = conv(y, channels[j], 3, strides_=2) 119 | y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y) 120 | y = Activation('relu')(y) 121 | residual = 
Add()([residual, y]) 122 | 123 | residual = Activation('relu')(residual) 124 | out.append(residual) 125 | 126 | return out 127 | 128 | 129 | def HighResolutionModule(x, channels, multi_scale_output=True): 130 | residual = branches(x, 4, channels) 131 | out = fuse_layers(residual, channels, 132 | multi_scale_output=multi_scale_output) 133 | return out 134 | 135 | 136 | def stage(x, num_modules, channels, multi_scale_output=True): 137 | out = x 138 | for i in range(num_modules): 139 | if i == num_modules - 1 and multi_scale_output == False: 140 | out = HighResolutionModule(out, channels, multi_scale_output=False) 141 | else: 142 | out = HighResolutionModule(out, channels) 143 | 144 | return out 145 | 146 | def HRNet(nClasses, input_height=224, input_width=224): 147 | channels_2 = [32, 64] 148 | channels_3 = [32, 64, 128] 149 | channels_4 = [32, 64, 128, 256] 150 | num_modules_2 = 1 151 | num_modules_3 = 4 152 | num_modules_4 = 3 153 | 154 | inputs = Input(shape=(input_height, input_width, 3)) 155 | 156 | x = conv(inputs, 64, 3, strides_=2) 157 | x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x) 158 | x = conv(x, 64, 3, strides_=2) 159 | x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x) 160 | x = Activation('relu')(x) 161 | 162 | la1 = layer1(x) 163 | tr1 = transition_layer([la1], [256], channels_2) 164 | st2 = stage(tr1, num_modules_2, channels_2) 165 | tr2 = transition_layer(st2, channels_2, channels_3) 166 | st3 = stage(tr2, num_modules_3, channels_3) 167 | tr3 = transition_layer(st3, channels_3, channels_4) 168 | st4 = stage(tr3, num_modules_4, channels_4, multi_scale_output=False) 169 | up1 = UpSampling2D()(st4[0]) 170 | up1 = conv(up1, 32, 3) 171 | up1 = BatchNormalization(epsilon=1e-5, momentum=0.1)(up1) 172 | up1 = Activation('relu')(up1) 173 | up2 = UpSampling2D()(up1) 174 | up2 = conv(up2, 32, 3) 175 | up2 = BatchNormalization(epsilon=1e-5, momentum=0.1)(up2) 176 | up2 = Activation('relu')(up2) 177 | 178 | final = conv(up2, nClasses, 1, 
padding_='valid') 179 | 180 | outputHeight = Model(inputs, final).output_shape[1] 181 | outputWidth = Model(inputs, final).output_shape[2] 182 | 183 | out = (Reshape((outputHeight * outputWidth, nClasses)))(final) 184 | out = Activation('softmax')(out) 185 | 186 | model = Model(input=inputs, output=out) 187 | model.outputHeight = outputHeight 188 | model.outputWidth = outputWidth 189 | 190 | return model 191 | -------------------------------------------------------------------------------- /models/ICNet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | from keras.models import * 3 | from keras.layers import * 4 | import keras.backend as K 5 | import tensorflow as tf 6 | 7 | def ICNet(nClasses, input_height=224, input_width=224): 8 | inputs = Input(shape=(input_height, input_width, 3)) 9 | 10 | # (1/2) 11 | y = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='data_sub2')(inputs) 12 | y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_1_3x3_s2')(y) 13 | y = BatchNormalization(name='conv1_1_3x3_s2_bn')(y) 14 | y = Conv2D(32, 3, padding='same', activation='relu', name='conv1_2_3x3')(y) 15 | y = BatchNormalization(name='conv1_2_3x3_s2_bn')(y) 16 | y = Conv2D(64, 3, padding='same', activation='relu', name='conv1_3_3x3')(y) 17 | y = BatchNormalization(name='conv1_3_3x3_bn')(y) 18 | y_ = MaxPooling2D(pool_size=3, strides=2, name='pool1_3x3_s2')(y) 19 | 20 | y = Conv2D(128, 1, name='conv2_1_1x1_proj')(y_) 21 | y = BatchNormalization(name='conv2_1_1x1_proj_bn')(y) 22 | y_ = Conv2D(32, 1, activation='relu', name='conv2_1_1x1_reduce')(y_) 23 | y_ = BatchNormalization(name='conv2_1_1x1_reduce_bn')(y_) 24 | y_ = ZeroPadding2D(name='padding1')(y_) 25 | y_ = Conv2D(32, 3, activation='relu', name='conv2_1_3x3')(y_) 26 | y_ = BatchNormalization(name='conv2_1_3x3_bn')(y_) 27 | y_ = Conv2D(128, 1, name='conv2_1_1x1_increase')(y_) 28 | y_ = 
BatchNormalization(name='conv2_1_1x1_increase_bn')(y_) 29 | y = Add(name='conv2_1')([y,y_]) 30 | y_ = Activation('relu', name='conv2_1/relu')(y) 31 | 32 | y = Conv2D(32, 1, activation='relu', name='conv2_2_1x1_reduce')(y_) 33 | y = BatchNormalization(name='conv2_2_1x1_reduce_bn')(y) 34 | y = ZeroPadding2D(name='padding2')(y) 35 | y = Conv2D(32, 3, activation='relu', name='conv2_2_3x3')(y) 36 | y = BatchNormalization(name='conv2_2_3x3_bn')(y) 37 | y = Conv2D(128, 1, name='conv2_2_1x1_increase')(y) 38 | y = BatchNormalization(name='conv2_2_1x1_increase_bn')(y) 39 | y = Add(name='conv2_2')([y,y_]) 40 | y_ = Activation('relu', name='conv2_2/relu')(y) 41 | 42 | y = Conv2D(32, 1, activation='relu', name='conv2_3_1x1_reduce')(y_) 43 | y = BatchNormalization(name='conv2_3_1x1_reduce_bn')(y) 44 | y = ZeroPadding2D(name='padding3')(y) 45 | y = Conv2D(32, 3, activation='relu', name='conv2_3_3x3')(y) 46 | y = BatchNormalization(name='conv2_3_3x3_bn')(y) 47 | y = Conv2D(128, 1, name='conv2_3_1x1_increase')(y) 48 | y = BatchNormalization(name='conv2_3_1x1_increase_bn')(y) 49 | y = Add(name='conv2_3')([y,y_]) 50 | y_ = Activation('relu', name='conv2_3/relu')(y) 51 | 52 | y = Conv2D(256, 1, strides=2, name='conv3_1_1x1_proj')(y_) 53 | y = BatchNormalization(name='conv3_1_1x1_proj_bn')(y) 54 | y_ = Conv2D(64, 1, strides=2, activation='relu', name='conv3_1_1x1_reduce')(y_) 55 | y_ = BatchNormalization(name='conv3_1_1x1_reduce_bn')(y_) 56 | y_ = ZeroPadding2D(name='padding4')(y_) 57 | y_ = Conv2D(64, 3, activation='relu', name='conv3_1_3x3')(y_) 58 | y_ = BatchNormalization(name='conv3_1_3x3_bn')(y_) 59 | y_ = Conv2D(256, 1, name='conv3_1_1x1_increase')(y_) 60 | y_ = BatchNormalization(name='conv3_1_1x1_increase_bn')(y_) 61 | y = Add(name='conv3_1')([y,y_]) 62 | z = Activation('relu', name='conv3_1/relu')(y) 63 | 64 | # (1/4) 65 | y_ = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='conv3_1_sub4')(z) 66 | y = Conv2D(64, 1, 
activation='relu', name='conv3_2_1x1_reduce')(y_) 67 | y = BatchNormalization(name='conv3_2_1x1_reduce_bn')(y) 68 | y = ZeroPadding2D(name='padding5')(y) 69 | y = Conv2D(64, 3, activation='relu', name='conv3_2_3x3')(y) 70 | y = BatchNormalization(name='conv3_2_3x3_bn')(y) 71 | y = Conv2D(256, 1, name='conv3_2_1x1_increase')(y) 72 | y = BatchNormalization(name='conv3_2_1x1_increase_bn')(y) 73 | y = Add(name='conv3_2')([y,y_]) 74 | y_ = Activation('relu', name='conv3_2/relu')(y) 75 | 76 | y = Conv2D(64, 1, activation='relu', name='conv3_3_1x1_reduce')(y_) 77 | y = BatchNormalization(name='conv3_3_1x1_reduce_bn')(y) 78 | y = ZeroPadding2D(name='padding6')(y) 79 | y = Conv2D(64, 3, activation='relu', name='conv3_3_3x3')(y) 80 | y = BatchNormalization(name='conv3_3_3x3_bn')(y) 81 | y = Conv2D(256, 1, name='conv3_3_1x1_increase')(y) 82 | y = BatchNormalization(name='conv3_3_1x1_increase_bn')(y) 83 | y = Add(name='conv3_3')([y,y_]) 84 | y_ = Activation('relu', name='conv3_3/relu')(y) 85 | 86 | y = Conv2D(64, 1, activation='relu', name='conv3_4_1x1_reduce')(y_) 87 | y = BatchNormalization(name='conv3_4_1x1_reduce_bn')(y) 88 | y = ZeroPadding2D(name='padding7')(y) 89 | y = Conv2D(64, 3, activation='relu', name='conv3_4_3x3')(y) 90 | y = BatchNormalization(name='conv3_4_3x3_bn')(y) 91 | y = Conv2D(256, 1, name='conv3_4_1x1_increase')(y) 92 | y = BatchNormalization(name='conv3_4_1x1_increase_bn')(y) 93 | y = Add(name='conv3_4')([y,y_]) 94 | y_ = Activation('relu', name='conv3_4/relu')(y) 95 | 96 | y = Conv2D(512, 1, name='conv4_1_1x1_proj')(y_) 97 | y = BatchNormalization(name='conv4_1_1x1_proj_bn')(y) 98 | y_ = Conv2D(128, 1, activation='relu', name='conv4_1_1x1_reduce')(y_) 99 | y_ = BatchNormalization(name='conv4_1_1x1_reduce_bn')(y_) 100 | y_ = ZeroPadding2D(padding=2, name='padding8')(y_) 101 | y_ = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_1_3x3')(y_) 102 | y_ = BatchNormalization(name='conv4_1_3x3_bn')(y_) 103 | y_ = Conv2D(512, 1, 
name='conv4_1_1x1_increase')(y_) 104 | y_ = BatchNormalization(name='conv4_1_1x1_increase_bn')(y_) 105 | y = Add(name='conv4_1')([y,y_]) 106 | y_ = Activation('relu', name='conv4_1/relu')(y) 107 | 108 | y = Conv2D(128, 1, activation='relu', name='conv4_2_1x1_reduce')(y_) 109 | y = BatchNormalization(name='conv4_2_1x1_reduce_bn')(y) 110 | y = ZeroPadding2D(padding=2, name='padding9')(y) 111 | y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_2_3x3')(y) 112 | y = BatchNormalization(name='conv4_2_3x3_bn')(y) 113 | y = Conv2D(512, 1, name='conv4_2_1x1_increase')(y) 114 | y = BatchNormalization(name='conv4_2_1x1_increase_bn')(y) 115 | y = Add(name='conv4_2')([y,y_]) 116 | y_ = Activation('relu', name='conv4_2/relu')(y) 117 | 118 | y = Conv2D(128, 1, activation='relu', name='conv4_3_1x1_reduce')(y_) 119 | y = BatchNormalization(name='conv4_3_1x1_reduce_bn')(y) 120 | y = ZeroPadding2D(padding=2, name='padding10')(y) 121 | y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_3_3x3')(y) 122 | y = BatchNormalization(name='conv4_3_3x3_bn')(y) 123 | y = Conv2D(512, 1, name='conv4_3_1x1_increase')(y) 124 | y = BatchNormalization(name='conv4_3_1x1_increase_bn')(y) 125 | y = Add(name='conv4_3')([y,y_]) 126 | y_ = Activation('relu', name='conv4_3/relu')(y) 127 | 128 | y = Conv2D(128, 1, activation='relu', name='conv4_4_1x1_reduce')(y_) 129 | y = BatchNormalization(name='conv4_4_1x1_reduce_bn')(y) 130 | y = ZeroPadding2D(padding=2, name='padding11')(y) 131 | y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_4_3x3')(y) 132 | y = BatchNormalization(name='conv4_4_3x3_bn')(y) 133 | y = Conv2D(512, 1, name='conv4_4_1x1_increase')(y) 134 | y = BatchNormalization(name='conv4_4_1x1_increase_bn')(y) 135 | y = Add(name='conv4_4')([y,y_]) 136 | y_ = Activation('relu', name='conv4_4/relu')(y) 137 | 138 | y = Conv2D(128, 1, activation='relu', name='conv4_5_1x1_reduce')(y_) 139 | y = BatchNormalization(name='conv4_5_1x1_reduce_bn')(y) 140 | y = 
ZeroPadding2D(padding=2, name='padding12')(y) 141 | y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_5_3x3')(y) 142 | y = BatchNormalization(name='conv4_5_3x3_bn')(y) 143 | y = Conv2D(512, 1, name='conv4_5_1x1_increase')(y) 144 | y = BatchNormalization(name='conv4_5_1x1_increase_bn')(y) 145 | y = Add(name='conv4_5')([y,y_]) 146 | y_ = Activation('relu', name='conv4_5/relu')(y) 147 | 148 | y = Conv2D(128, 1, activation='relu', name='conv4_6_1x1_reduce')(y_) 149 | y = BatchNormalization(name='conv4_6_1x1_reduce_bn')(y) 150 | y = ZeroPadding2D(padding=2, name='padding13')(y) 151 | y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_6_3x3')(y) 152 | y = BatchNormalization(name='conv4_6_3x3_bn')(y) 153 | y = Conv2D(512, 1, name='conv4_6_1x1_increase')(y) 154 | y = BatchNormalization(name='conv4_6_1x1_increase_bn')(y) 155 | y = Add(name='conv4_6')([y,y_]) 156 | y = Activation('relu', name='conv4_6/relu')(y) 157 | 158 | y_ = Conv2D(1024, 1, name='conv5_1_1x1_proj')(y) 159 | y_ = BatchNormalization(name='conv5_1_1x1_proj_bn')(y_) 160 | y = Conv2D(256, 1, activation='relu', name='conv5_1_1x1_reduce')(y) 161 | y = BatchNormalization(name='conv5_1_1x1_reduce_bn')(y) 162 | y = ZeroPadding2D(padding=4, name='padding14')(y) 163 | y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_1_3x3')(y) 164 | y = BatchNormalization(name='conv5_1_3x3_bn')(y) 165 | y = Conv2D(1024, 1, name='conv5_1_1x1_increase')(y) 166 | y = BatchNormalization(name='conv5_1_1x1_increase_bn')(y) 167 | y = Add(name='conv5_1')([y,y_]) 168 | y_ = Activation('relu', name='conv5_1/relu')(y) 169 | 170 | y = Conv2D(256, 1, activation='relu', name='conv5_2_1x1_reduce')(y_) 171 | y = BatchNormalization(name='conv5_2_1x1_reduce_bn')(y) 172 | y = ZeroPadding2D(padding=4, name='padding15')(y) 173 | y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_2_3x3')(y) 174 | y = BatchNormalization(name='conv5_2_3x3_bn')(y) 175 | y = Conv2D(1024, 1, 
name='conv5_2_1x1_increase')(y) 176 | y = BatchNormalization(name='conv5_2_1x1_increase_bn')(y) 177 | y = Add(name='conv5_2')([y,y_]) 178 | y_ = Activation('relu', name='conv5_2/relu')(y) 179 | 180 | y = Conv2D(256, 1, activation='relu', name='conv5_3_1x1_reduce')(y_) 181 | y = BatchNormalization(name='conv5_3_1x1_reduce_bn')(y) 182 | y = ZeroPadding2D(padding=4, name='padding16')(y) 183 | y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_3_3x3')(y) 184 | y = BatchNormalization(name='conv5_3_3x3_bn')(y) 185 | y = Conv2D(1024, 1, name='conv5_3_1x1_increase')(y) 186 | y = BatchNormalization(name='conv5_3_1x1_increase_bn')(y) 187 | y = Add(name='conv5_3')([y,y_]) 188 | y = Activation('relu', name='conv5_3/relu')(y) 189 | 190 | h, w = y.shape[1:3].as_list() 191 | pool1 = AveragePooling2D(pool_size=(h,w), strides=(h,w), name='conv5_3_pool1')(y) 192 | pool1 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool1_interp')(pool1) 193 | pool2 = AveragePooling2D(pool_size=(h/2,w/2), strides=(h//2,w//2), name='conv5_3_pool2')(y) 194 | pool2 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool2_interp')(pool2) 195 | pool3 = AveragePooling2D(pool_size=(h/3,w/3), strides=(h//3,w//3), name='conv5_3_pool3')(y) 196 | pool3 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool3_interp')(pool3) 197 | pool6 = AveragePooling2D(pool_size=(h/4,w/4), strides=(h//4,w//4), name='conv5_3_pool6')(y) 198 | pool6 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool6_interp')(pool6) 199 | 200 | y = Add(name='conv5_3_sum')([y, pool1, pool2, pool3, pool6]) 201 | y = Conv2D(256, 1, activation='relu', name='conv5_4_k1')(y) 202 | y = BatchNormalization(name='conv5_4_k1_bn')(y) 203 | aux_1 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='conv5_4_interp')(y) 204 | y = ZeroPadding2D(padding=2, name='padding17')(aux_1) 205 | y = 
Conv2D(128, 3, dilation_rate=2, name='conv_sub4')(y) 206 | y = BatchNormalization(name='conv_sub4_bn')(y) 207 | y_ = Conv2D(128, 1, name='conv3_1_sub2_proj')(z) 208 | y_ = BatchNormalization(name='conv3_1_sub2_proj_bn')(y_) 209 | y = Add(name='sub24_sum')([y,y_]) 210 | y = Activation('relu', name='sub24_sum/relu')(y) 211 | 212 | aux_2 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub24_sum_interp')(y) 213 | y = ZeroPadding2D(padding=2, name='padding18')(aux_2) 214 | y_ = Conv2D(128, 3, dilation_rate=2, name='conv_sub2')(y) 215 | y_ = BatchNormalization(name='conv_sub2_bn')(y_) 216 | 217 | # (1) 218 | y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_sub1')(inputs) 219 | y = BatchNormalization(name='conv1_sub1_bn')(y) 220 | y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv2_sub1')(y) 221 | y = BatchNormalization(name='conv2_sub1_bn')(y) 222 | y = Conv2D(64, 3, strides=2, padding='same', activation='relu', name='conv3_sub1')(y) 223 | y = BatchNormalization(name='conv3_sub1_bn')(y) 224 | y = Conv2D(128, 1, name='conv3_sub1_proj')(y) 225 | y = BatchNormalization(name='conv3_sub1_proj_bn')(y) 226 | 227 | y = Add(name='sub12_sum')([y,y_]) 228 | y = Activation('relu', name='sub12_sum/relu')(y) 229 | y = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub12_sum_interp')(y) 230 | 231 | o = Conv2D(nClasses, 1, name='conv6_cls')(y) 232 | 233 | 234 | o_shape = Model(inputs, o).output_shape 235 | 236 | outputHeight = o_shape[1] 237 | outputWidth = o_shape[2] 238 | 239 | o = (Reshape((outputHeight*outputWidth, nClasses)))(o) 240 | o = (Activation('softmax'))(o) 241 | model = Model(inputs, o) 242 | model.outputWidth = outputWidth 243 | model.outputHeight = outputHeight 244 | 245 | return model 246 | -------------------------------------------------------------------------------- /models/MobileNetFCN8.py: 
-------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | from keras.models import * 3 | from keras.layers import * 4 | import keras 5 | import keras.backend as K 6 | 7 | def relu6(x): 8 | return K.relu(x, max_value=6) 9 | 10 | # Width Multiplier: Thinner Models 11 | def conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)): 12 | filters = int(filters * alpha) 13 | x = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs) 14 | x = Conv2D(filters, kernel, padding='valid', use_bias=False, strides=strides, name='conv1')(x) 15 | x = BatchNormalization(axis=3, name='conv1_bn')(x) 16 | return Activation(relu6, name='conv1_relu')(x) 17 | 18 | def depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), block_id=1): 19 | pointwise_conv_filters = int(pointwise_conv_filters * alpha) 20 | x = ZeroPadding2D((1, 1), name='conv_pad_%d' % block_id)(inputs) 21 | x = DepthwiseConv2D((3, 3), padding='valid', depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name='conv_dw_%d' % block_id)(x) 22 | x = BatchNormalization(axis=3, name='conv_dw_%d_bn' % block_id)(x) 23 | x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x) 24 | x = Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%d' % block_id)(x) 25 | x = BatchNormalization(axis=3, name='conv_pw_%d_bn' % block_id)(x) 26 | return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x) 27 | 28 | 29 | def MobileNetFCN8 (nClasses, optimizer=None, input_width=512, input_height=512, pretrained='imagenet'): 30 | input_size = (input_height, input_width, 3) 31 | img_input = Input(input_size) 32 | alpha = 1.0 33 | depth_multiplier = 1 34 | x = conv_block(img_input, 16, alpha, strides=(2, 2)) 35 | x = depthwise_conv_block(x, 16, alpha, depth_multiplier, block_id=1) 36 | f1 = x 37 | x = depthwise_conv_block(x, 32, alpha, depth_multiplier, strides=(2, 2), block_id=2) 38 | x = 
depthwise_conv_block(x, 32, alpha, depth_multiplier, block_id=3) 39 | f2 = x 40 | x = depthwise_conv_block(x, 64, alpha, depth_multiplier, strides=(2, 2), block_id=4) 41 | x = depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=5) 42 | f3 = x 43 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=6) 44 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=7) 45 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=8) 46 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=9) 47 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=10) 48 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=11) 49 | f4 = x 50 | x = depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=12) 51 | x = depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=13) 52 | f5 = x 53 | 54 | o = f5 55 | 56 | o = (Conv2D(256, (7, 7), activation='relu', padding='same'))(o) 57 | o = BatchNormalization()(o) 58 | 59 | 60 | o = (Conv2D(nClasses, (1, 1)))(o) 61 | # W = (N - 1) * S - 2P + F = 6 * 2 - 0 + 2 = 14 62 | o = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid")(o) 63 | # 14 x 14 64 | 65 | o2 = f4 66 | o2 = (Conv2D(nClasses, (1, 1)))(o2) 67 | 68 | # (14 x 14) (14 x 14) 69 | 70 | o = Add()([o, o2]) 71 | # W = (N - 1) * S - 2P + F = 13 * 2 - 0 + 2 = 28 72 | o = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid")(o) 73 | o2 = f3 74 | o2 = (Conv2D(nClasses, (1, 1)))(o2) 75 | # (28 x 28) (28 x 28) 76 | o = Add()([o2, o]) 77 | 78 | # 224 x 224 79 | # W = (N - 1) * S - 2P + F = 27 * 8 + 8 = 224 80 | o = Conv2DTranspose(nClasses , kernel_size=(8,8), strides=(8,8), padding="valid")(o) 81 | 82 | o_shape = Model(img_input, o).output_shape 83 | 84 | outputHeight = o_shape[1] 85 | outputWidth = o_shape[2] 86 | 87 | o = (Reshape((outputHeight*outputWidth, nClasses)))(o) 88 | o = (Activation('softmax'))(o) 
89 | model = Model(img_input, o) 90 | model.outputWidth = outputWidth 91 | model.outputHeight = outputHeight 92 | 93 | return model 94 | -------------------------------------------------------------------------------- /models/MobileNetUnet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | from keras.models import * 3 | from keras.layers import * 4 | import keras.backend as K 5 | 6 | def relu6(x): 7 | return K.relu(x, max_value=6) 8 | 9 | # Width Multiplier: Thinner Models 10 | def conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)): 11 | filters = int(filters * alpha) 12 | x = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs) 13 | x = Conv2D(filters, kernel, padding='valid', use_bias=False, strides=strides, name='conv1')(x) 14 | x = BatchNormalization(axis=3, name='conv1_bn')(x) 15 | return Activation(relu6, name='conv1_relu')(x) 16 | 17 | def depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), block_id=1): 18 | pointwise_conv_filters = int(pointwise_conv_filters * alpha) 19 | x = ZeroPadding2D((1, 1), name='conv_pad_%d' % block_id)(inputs) 20 | x = DepthwiseConv2D((3, 3), padding='valid', depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name='conv_dw_%d' % block_id)(x) 21 | x = BatchNormalization(axis=3, name='conv_dw_%d_bn' % block_id)(x) 22 | x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x) 23 | x = Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%d' % block_id)(x) 24 | x = BatchNormalization(axis=3, name='conv_pw_%d_bn' % block_id)(x) 25 | return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x) 26 | 27 | def MobileNetUnet (nClasses, input_width=224, input_height=224): 28 | input_size = (input_height, input_width, 3) 29 | inputs = Input(input_size) 30 | alpha = 1.0 31 | depth_multiplier = 1 32 | x = conv_block(inputs, 16, alpha, strides=(2, 2)) 33 | x = 
depthwise_conv_block(x, 16, alpha, depth_multiplier, block_id=1) 34 | f1 = x 35 | x = depthwise_conv_block(x, 32, alpha, depth_multiplier, strides=(2, 2), block_id=2) 36 | x = depthwise_conv_block(x, 32, alpha, depth_multiplier, block_id=3) 37 | f2 = x 38 | x = depthwise_conv_block(x, 64, alpha, depth_multiplier, strides=(2, 2), block_id=4) 39 | x = depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=5) 40 | f3 = x 41 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=6) 42 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=7) 43 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=8) 44 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=9) 45 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=10) 46 | x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=11) 47 | f4 = x 48 | #x = depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12) 49 | #x = depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13) 50 | #f5 = x 51 | 52 | o = f4 53 | 54 | o = Conv2D(128, (3, 3), activation='relu', padding='same')(o) 55 | o = BatchNormalization()(o) 56 | # decode 57 | o = (UpSampling2D((2, 2)))(o) 58 | o = (concatenate([o, f3], axis=-1)) 59 | o = (Conv2D(64, (3, 3), padding='same'))(o) 60 | o = (BatchNormalization())(o) 61 | 62 | o = (UpSampling2D((2, 2)))(o) 63 | o = (concatenate([o, f2], axis=-1)) 64 | o = (Conv2D(32, (3, 3), padding='same'))(o) 65 | o = (BatchNormalization())(o) 66 | 67 | o = (UpSampling2D((2, 2)))(o) 68 | o = (concatenate([o, f1], axis=-1)) 69 | 70 | o = (Conv2D(16, (3, 3), padding='same'))(o) 71 | o = (BatchNormalization())(o) 72 | 73 | o = Conv2D(nClasses, (3, 3), padding='same')(o) 74 | 75 | outputHeight = Model(inputs, o).output_shape[1] 76 | outputWidth = Model(inputs, o).output_shape[2] 77 | o = (Reshape((outputHeight*outputWidth, nClasses)))(o) 78 | o = Activation('softmax')(o) 79 | 80 | 
model = Model(inputs, o) 81 | model.outputWidth = outputWidth 82 | model.outputHeight = outputHeight 83 | 84 | return model 85 | -------------------------------------------------------------------------------- /models/MobileNext.py: -------------------------------------------------------------------------------- 1 | """ 2 | MobileNext模型 3 | 4 | https://arxiv.org/abs/2007.02269 5 | 6 | 知乎版解析:https://zhuanlan.zhihu.com/p/157878449 7 | """ 8 | 9 | from keras.layers import * 10 | 11 | 12 | def _make_divisible(v, divisor, min_value=None): 13 | """ 14 | This function is taken from the original tf repo. 15 | It ensures that all layers have a channel number that is divisible by 8 16 | It can be seen here: 17 | https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py 18 | :param v: 19 | :param divisor: 20 | :param min_value: 21 | :return: 22 | """ 23 | if min_value is None: 24 | min_value = divisor 25 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) 26 | # Make sure that round down does not go down by more than 10%. 
27 | if new_v < 0.9 * v: 28 | new_v += divisor 29 | return new_v 30 | 31 | 32 | def ConvBNReLU(x, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=False): 33 | """ 34 | Conv + BN + RELU6 35 | Args: 36 | x: 输入张量 37 | out_planes: 输出通道数 38 | kernel_size: 卷积核大小 39 | stride: 步长 40 | groups: 分组,当=1则为普通卷积,其他情况则是depthwise 41 | norm_layer: 若为False则不做BN,为True则过一层BN 42 | 43 | Returns: 44 | 45 | """ 46 | if groups == 1: 47 | x = Conv2D(filters=out_planes, kernel_size=kernel_size, strides=stride, padding='same')(x) 48 | else: 49 | x = DepthwiseConv2D(kernel_size=kernel_size, strides=stride, padding='same')(x) 50 | 51 | if norm_layer: 52 | x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x) 53 | # relu 6 , 限制最大输出在6 54 | x = ReLU(max_value=6)(x) 55 | return x 56 | 57 | 58 | def CutOutLayer(input_tensor, oup): 59 | """ 60 | 在通道维上截取到 oup 的张量 61 | Args: 62 | input_tensor: 输入张量 63 | oup: 输出通道数 64 | 65 | Returns: 66 | 67 | """ 68 | return Lambda(lambda x: x[:, :, :, :oup])(input_tensor) 69 | 70 | 71 | def CutInLayer(input_tensor, oup): 72 | """ 73 | 在通道维上截取从 oup开始到后面的张量 74 | 75 | Args: 76 | input_tensor: 输入张量 77 | oup: 输出通道数 78 | Returns: 79 | 80 | """ 81 | return Lambda(lambda x: x[:, :, :, oup:])(input_tensor) 82 | 83 | 84 | def SandGlass(x, oup, stride, expand_ratio, identity_tensor_multiplier=1.0, norm_layer=False): 85 | """ 86 | SandGlass 模块 87 | Args: 88 | x: 输入张量 89 | oup: 输出通道数 90 | stride: 步长 91 | expand_ratio: 扩张系数 92 | identity_tensor_multiplier: 区间在0-1的浮点数,用于部分通道残差连接, 93 | 默认为1,即原始残差连接 94 | norm_layer: 若为False则不做BN,为True则过一层BN 95 | 96 | Returns: 97 | 98 | """ 99 | assert stride in [1, 2] 100 | # 残差连接 101 | residual = x 102 | 103 | inp = x.shape[-1] 104 | use_identity = False if identity_tensor_multiplier == 1.0 else True 105 | identity_tensor_channels = int(round(inp * identity_tensor_multiplier)) 106 | 107 | hidden_dim = int(round(inp / expand_ratio)) 108 | use_res_connect = stride == 1 and inp == oup 109 | 110 | # depthwise 111 | x = ConvBNReLU(x, inp, 
kernel_size=3, stride=1, groups=inp, norm_layer=norm_layer) 112 | if expand_ratio != 1: 113 | x = Conv2D(hidden_dim, kernel_size=1, strides=1, padding='same')(x) 114 | x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x) 115 | # pointwise 116 | x = ConvBNReLU(x, oup, kernel_size=1, stride=1, norm_layer=norm_layer) 117 | # depthwise linear 118 | x = DepthwiseConv2D(kernel_size=3, strides=stride, padding='same')(x) 119 | if norm_layer: 120 | x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x) 121 | 122 | if use_res_connect: 123 | if use_identity: 124 | # 在0:identity_tensor_channels上做残差连接 125 | identity_tensor = Add()([CutOutLayer(x, identity_tensor_channels), 126 | CutOutLayer(residual, identity_tensor_channels)]) 127 | # 进行拼接 128 | out = Concatenate()([identity_tensor, CutInLayer(x, identity_tensor_channels)]) 129 | else: 130 | # 直接残差 131 | out = x + residual 132 | return out 133 | else: 134 | # 直接返回 135 | return x 136 | 137 | 138 | def MobileNext(x, num_classes=1000, 139 | width_mult=1.0, 140 | identity_tensor_multiplier=1.0, 141 | sand_glass_setting=None, 142 | round_nearest=8, 143 | norm_layer=True): 144 | """ 145 | MobileNext网络主体 146 | Args: 147 | x: 输入张量 148 | num_classes: 最后输出分类数目 149 | width_mult: 宽度扩张 150 | identity_tensor_multiplier: 参见SandGlass模块 151 | sand_glass_setting: 用于设置SandGlass模块的各个参数,若为None则采取论文的设置 152 | round_nearest: 153 | norm_layer: 154 | 155 | 使用方法,这里以Input层作为演示 156 | 157 | input_shape = (224, 224, 3) 158 | 159 | inputs = Input(shape=input_shape, name="inputs") 160 | 161 | y = MobileNext(inputs, 1000, width_mult=1.0, identity_tensor_multiplier=0.5) 162 | 163 | Returns: (batch, num_classes)的张量 164 | 165 | """ 166 | input_channel = 32 167 | last_channel = 1280 168 | lc = last_channel 169 | if sand_glass_setting is None: 170 | sand_glass_setting = [ 171 | # t, c, b, s 172 | [2, 96, 1, 2], 173 | [6, 144, 1, 1], 174 | [6, 192, 3, 2], 175 | [6, 288, 3, 2], 176 | [6, 384, 4, 1], 177 | [6, 576, 4, 2], 178 | [6, 960, 2, 1], 179 | [6, lc, 1, 1], 180 
| ] 181 | # only check the first element, assuming user knows t,c,n,s are required 182 | if len(sand_glass_setting) == 0 or len(sand_glass_setting[0]) != 4: 183 | raise ValueError("sand_glass_setting should be non-empty " 184 | "or a 4-element list, got {}".format(sand_glass_setting)) 185 | 186 | # first layer 187 | input_channel = _make_divisible(input_channel * width_mult, round_nearest) 188 | x = ConvBNReLU(x, input_channel, stride=2, norm_layer=norm_layer) 189 | 190 | for t, c, b, s in sand_glass_setting: 191 | output_channel = _make_divisible(c * width_mult, round_nearest) 192 | for i in range(b): 193 | stride = s if i == 0 else 1 194 | x = SandGlass(x, output_channel, stride, expand_ratio=t, 195 | identity_tensor_multiplier=identity_tensor_multiplier, norm_layer=True) 196 | x = GlobalAveragePooling2D()(x) 197 | x = Dense(units=num_classes)(x) 198 | return x 199 | -------------------------------------------------------------------------------- /models/NestedUNet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | import tensorflow as tf 3 | import keras 4 | from keras.models import * 5 | from keras.layers import * 6 | #函数式模块实现 7 | def conv_block_nested(x, mid_ch, out_ch, kernel_size=3, padding='same'): 8 | x = Conv2D(mid_ch, kernel_size=3, padding='same')(x) 9 | x = BatchNormalization()(x) 10 | x = Activation('relu')(x) 11 | 12 | x = Conv2D(out_ch, kernel_size=3, padding='same')(x) 13 | x = BatchNormalization()(x) 14 | x = Activation('relu')(x) 15 | return x 16 | 17 | def NestedUNet(nClasses, input_height=224, input_width=224): 18 | 19 | inputs = Input(shape=(input_height, input_width, 3)) 20 | t = 2 21 | n1 = 32 22 | filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] 23 | 24 | x0_0 = conv_block_nested(inputs, filters[0], filters[0]) 25 | 26 | x1_0 = conv_block_nested(MaxPooling2D(strides=2)(x0_0), filters[1], filters[1]) 27 | x0_1 = conv_block_nested(Concatenate()([x0_0, UpSampling2D()(x1_0)]), filters[0], 
filters[0]) 28 | 29 | x2_0 = conv_block_nested(MaxPooling2D(strides=2)(x1_0), filters[2], filters[2]) 30 | x1_1 = conv_block_nested(Concatenate()([x1_0, UpSampling2D()(x2_0)]), filters[1], filters[1]) 31 | x0_2 = conv_block_nested(Concatenate()([x0_0, x0_1, UpSampling2D()(x1_1)]), filters[0], filters[0]) 32 | 33 | x3_0 = conv_block_nested(MaxPooling2D(strides=2)(x2_0), filters[3], filters[3]) 34 | x2_1 = conv_block_nested(Concatenate()([x2_0, UpSampling2D()(x3_0)]), filters[2], filters[2]) 35 | x1_2 = conv_block_nested(Concatenate()([x1_0, x1_1, UpSampling2D()(x2_1)]), filters[1], filters[1]) 36 | x0_3 = conv_block_nested(Concatenate()([x0_0, x0_1, x0_2, UpSampling2D()(x1_2)]), filters[0], filters[0]) 37 | 38 | x4_0 = conv_block_nested(MaxPooling2D(strides=2)(x3_0), filters[4], filters[4]) 39 | x3_1 = conv_block_nested(Concatenate()([x3_0, UpSampling2D()(x4_0)]), filters[3], filters[3]) 40 | x2_2 = conv_block_nested(Concatenate()([x2_0, x2_1, UpSampling2D()(x3_1)]), filters[2], filters[2]) 41 | x1_3 = conv_block_nested(Concatenate()([x1_0, x1_1, x1_2, UpSampling2D()(x2_2)]), filters[1], filters[1]) 42 | x0_4 = conv_block_nested(Concatenate()([x0_0, x0_1, x0_2, x0_3, UpSampling2D()(x1_3)]), filters[0], filters[0]) 43 | 44 | o = Conv2D(nClasses, (3, 3), padding='same')(x0_4) 45 | 46 | outputHeight = Model(inputs, o).output_shape[1] 47 | outputWidth = Model(inputs, o).output_shape[2] 48 | 49 | out = (Reshape((outputHeight * outputWidth, nClasses)))(o) 50 | out = Activation('softmax')(out) 51 | 52 | model = Model(input=inputs, output=out) 53 | model.outputHeight = outputHeight 54 | model.outputWidth = outputWidth 55 | 56 | return model 57 | 58 | -------------------------------------------------------------------------------- /models/PSPNet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | from keras.models import * 3 | from keras.layers import * 4 | import keras.backend as K 5 | import tensorflow as tf 6 | 7 | 8 
| def pool_block(inp, pool_factor): 9 | h = K.int_shape(inp)[1] 10 | w = K.int_shape(inp)[2] 11 | pool_size = strides = [int(np.round( float(h) / pool_factor)), int(np.round( float(w)/ pool_factor))] 12 | x = AveragePooling2D(pool_size, strides=strides, padding='same')(inp) 13 | x = Conv2D(256, (1, 1), padding='same', activation='relu')(x) 14 | x = BatchNormalization()(x) 15 | x = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*strides[0], int(x.shape[2])*strides[1])))(x) 16 | x = Conv2D(256, (1, 1), padding='same', activation='relu')(x) 17 | return x 18 | 19 | def PSPNet(nClasses, input_width=384, input_height=384): 20 | assert input_height % 192 == 0 21 | assert input_width % 192 == 0 22 | inputs = Input(shape=(input_height, input_width, 3)) 23 | 24 | x = (Conv2D(64, (3, 3), activation='relu', padding='same'))(inputs) 25 | x = (BatchNormalization())(x) 26 | x = (MaxPooling2D((2, 2)))(x) 27 | f1 = x 28 | # 192 x 192 29 | 30 | x = (Conv2D(128, (3, 3), activation='relu', padding='same'))(x) 31 | x = (BatchNormalization())(x) 32 | x = (MaxPooling2D((2, 2)))(x) 33 | f2 = x 34 | # 96 x 96 35 | x = (Conv2D(256, (3, 3), activation='relu', padding='same'))(x) 36 | x = (BatchNormalization())(x) 37 | x = (MaxPooling2D((2, 2)))(x) 38 | f3 = x 39 | # 48 x 48 40 | x = (Conv2D(256, (3, 3), activation='relu', padding='same'))(x) 41 | x = (BatchNormalization())(x) 42 | x = (MaxPooling2D((2, 2)))(x) 43 | f4 = x 44 | 45 | # 24 x 24 46 | o = f4 47 | pool_factors = [1, 2, 3, 6] 48 | pool_outs = [o] 49 | for p in pool_factors: 50 | pooled = pool_block(o, p) 51 | pool_outs.append(pooled) 52 | 53 | o = Concatenate(axis=3)(pool_outs) 54 | o = Conv2D(256, (3, 3), activation='relu', padding='same')(o) 55 | o = BatchNormalization()(o) 56 | 57 | o = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*8, int(x.shape[2])*8)))(x) 58 | 59 | o = Conv2D(nClasses, (1, 1), padding='same')(o) 60 | o_shape = Model(inputs, o).output_shape 61 | outputHeight = o_shape[1] 
62 | outputWidth = o_shape[2] 63 | print(outputHeight) 64 | print(outputWidth) 65 | o = (Reshape((outputHeight*outputWidth, nClasses)))(o) 66 | o = (Activation('softmax'))(o) 67 | model = Model(inputs, o) 68 | model.outputWidth = outputWidth 69 | model.outputHeight = outputHeight 70 | 71 | return model 72 | 73 | -------------------------------------------------------------------------------- /models/R2AttUNet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | import tensorflow as tf 3 | import keras 4 | from keras.models import * 5 | from keras.layers import * 6 | 7 | def conv_block(input, filters): 8 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(input) 9 | out = BatchNormalization()(out) 10 | out = Activation('relu')(out) 11 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out) 12 | out = BatchNormalization()(out) 13 | out = Activation('relu')(out) 14 | return out 15 | 16 | def up_conv(input, filters): 17 | out = UpSampling2D()(input) 18 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out) 19 | out = BatchNormalization()(out) 20 | out = Activation('relu')(out) 21 | return out 22 | 23 | def Attention_block(input1, input2, filters): 24 | g1 = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input1) 25 | g1 = BatchNormalization()(g1) 26 | x1 = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input2) 27 | x1 = BatchNormalization()(x1) 28 | psi = Activation('relu')(add([g1, x1])) 29 | psi = Conv2D(filters, kernel_size=1, strides=1, padding='same')(psi) 30 | psi = BatchNormalization()(psi) 31 | psi = Activation('sigmoid')(psi) 32 | out = multiply([input2, psi]) 33 | return out 34 | 35 | def Recurrent_block(input, channel, t=2): 36 | for i in range(t): 37 | if i == 0: 38 | x = Conv2D(channel, kernel_size=(3, 3), strides=1, padding='same')(input) 39 | x = BatchNormalization()(x) 40 | x = Activation('relu')(x) 41 | out = 
Conv2D(channel, kernel_size=(3, 3), strides=1, padding='same')(add([x, x])) 42 | out = BatchNormalization()(out) 43 | out = Activation('relu')(out) 44 | return out 45 | 46 | def RRCNN_block(input, channel, t=2): 47 | x1 = Conv2D(channel, kernel_size=(1, 1), strides=1, padding='same')(input) 48 | x2 = Recurrent_block(x1, channel, t=t) 49 | x2 = Recurrent_block(x2, channel, t=t) 50 | out = add([x1, x2]) 51 | return out 52 | 53 | 54 | 55 | def R2AttUNet(nClasses, input_height=224, input_width=224): 56 | # """ 57 | #Residual Recuurent Block with attention Unet 58 | #Implementation : https://github.com/LeeJunHyun/Image_Segmentation 59 | #""" 60 | inputs = Input(shape=(input_height, input_width, 3)) 61 | t = 2 62 | n1 = 32 63 | filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] 64 | 65 | e1 = RRCNN_block(inputs, filters[0], t=t) 66 | 67 | e2 = MaxPooling2D(strides=2)(e1) 68 | e2 = RRCNN_block(e2, filters[1], t=t) 69 | 70 | e3 = MaxPooling2D(strides=2)(e2) 71 | e3 = RRCNN_block(e3, filters[2], t=t) 72 | 73 | e4 = MaxPooling2D(strides=2)(e3) 74 | e4 = RRCNN_block(e4, filters[3], t=t) 75 | 76 | e5 = MaxPooling2D(strides=2)(e4) 77 | e5 = RRCNN_block(e5, filters[4], t=t) 78 | 79 | d5 = up_conv(e5, filters[3]) 80 | x4 = Attention_block(d5, e4, filters[3]) 81 | d5 = Concatenate()([x4, d5]) 82 | d5 = conv_block(d5, filters[3]) 83 | 84 | d4 = up_conv(d5, filters[2]) 85 | x3 = Attention_block(d4, e3, filters[2]) 86 | d4 = Concatenate()([x3, d4]) 87 | d4 = conv_block(d4, filters[2]) 88 | 89 | d3 = up_conv(d4, filters[1]) 90 | x2 = Attention_block(d3, e2, filters[1]) 91 | d3 = Concatenate()([x2, d3]) 92 | d3 = conv_block(d3, filters[1]) 93 | 94 | d2 = up_conv(d3, filters[0]) 95 | x1 = Attention_block(d2, e1, filters[0]) 96 | d2 = Concatenate()([x1, d2]) 97 | d2 = conv_block(d2, filters[0]) 98 | 99 | o = Conv2D(nClasses, (3, 3), padding='same')(d2) 100 | 101 | outputHeight = Model(inputs, o).output_shape[1] 102 | outputWidth = Model(inputs, o).output_shape[2] 103 | 104 | out = 
(Reshape((outputHeight * outputWidth, nClasses)))(o) 105 | out = Activation('softmax')(out) 106 | 107 | model = Model(input=inputs, output=out) 108 | model.outputHeight = outputHeight 109 | model.outputWidth = outputWidth 110 | 111 | return model 112 | -------------------------------------------------------------------------------- /models/R2UNet.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | import tensorflow as tf 3 | import keras 4 | from keras.models import * 5 | from keras.layers import * 6 | 7 | def conv_block(input, filters): 8 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(input) 9 | out = BatchNormalization()(out) 10 | out = Activation('relu')(out) 11 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out) 12 | out = BatchNormalization()(out) 13 | out = Activation('relu')(out) 14 | return out 15 | 16 | def up_conv(input, filters): 17 | out = UpSampling2D()(input) 18 | out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out) 19 | out = BatchNormalization()(out) 20 | out = Activation('relu')(out) 21 | return out 22 | 23 | def Recurrent_block(input, channel, t=2): 24 | for i in range(t): 25 | if i == 0: 26 | x = Conv2D(channel, kernel_size=(3, 3), strides=1, padding='same')(input) 27 | x = BatchNormalization()(x) 28 | x = Activation('relu')(x) 29 | out = Conv2D(channel, kernel_size=(3, 3), strides=1, padding='same')(add([x, x])) 30 | out = BatchNormalization()(out) 31 | out = Activation('relu')(out) 32 | return out 33 | 34 | def RRCNN_block(input, channel, t=2): 35 | x1 = Conv2D(channel, kernel_size=(1, 1), strides=1, padding='same')(input) 36 | x2 = Recurrent_block(x1, channel, t=t) 37 | x2 = Recurrent_block(x2, channel, t=t) 38 | out = add([x1, x2]) 39 | return out 40 | 41 | def R2UNet(nClasses, input_height=224, input_width=224): 42 | """ 43 | R2U-Unet implementation 44 | Paper: https://arxiv.org/abs/1802.06955 45 | """ 46 | inputs = 
Input(shape=(input_height, input_width, 3)) 47 | t = 2 48 | n1 = 32 49 | filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] 50 | 51 | e1 = RRCNN_block(inputs, filters[0], t=t) 52 | 53 | e2 = MaxPooling2D(strides=2)(e1) 54 | e2 = RRCNN_block(e2, filters[1], t=t) 55 | 56 | e3 = MaxPooling2D(strides=2)(e2) 57 | e3 = RRCNN_block(e3, filters[2], t=t) 58 | 59 | e4 = MaxPooling2D(strides=2)(e3) 60 | e4 = RRCNN_block(e4, filters[3], t=t) 61 | 62 | e5 = MaxPooling2D(strides=2)(e4) 63 | e5 = RRCNN_block(e5, filters[4], t=t) 64 | 65 | d5 = up_conv(e5, filters[3]) 66 | d5 = Concatenate()([e4, d5]) 67 | d5 = RRCNN_block(d5, filters[3], t=t) 68 | 69 | d4 = up_conv(d5, filters[2]) 70 | d4 = Concatenate()([e3, d4]) 71 | d4 = RRCNN_block(d4, filters[2], t=t) 72 | 73 | d3 = up_conv(d4, filters[1]) 74 | d3 = Concatenate()([e2, d3]) 75 | d3 = RRCNN_block(d3, filters[1], t=t) 76 | 77 | d2 = up_conv(d3, filters[0]) 78 | d2 = Concatenate()([e1, d2]) 79 | d2 = RRCNN_block(d2, filters[0], t=t) 80 | 81 | 82 | o = Conv2D(nClasses, (3, 3), padding='same')(d2) 83 | 84 | outputHeight = Model(inputs, o).output_shape[1] 85 | outputWidth = Model(inputs, o).output_shape[2] 86 | 87 | out = (Reshape((outputHeight * outputWidth, nClasses)))(o) 88 | out = Activation('softmax')(out) 89 | 90 | model = Model(input=inputs, output=out) 91 | model.outputHeight = outputHeight 92 | model.outputWidth = outputWidth 93 | 94 | return model -------------------------------------------------------------------------------- /models/ResUnet.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras.layers import * 3 | from tensorflow.keras.optimizers import * 4 | from tensorflow.keras.models import * 5 | 6 | def batch_Norm_Activation(x, BN=False): ## To Turn off Batch Normalization, Change BN to False > 7 | if BN == True: 8 | x = BatchNormalization()(x) 9 | x = Activation("relu")(x) 10 | else: 11 | x= Activation("relu")(x) 12 | return x 13 | 14 
| 15 | def ResUnet(image_size): 16 | inputs = Input(shape=(image_size, image_size, 3)) 17 | filters = 16 18 | 19 | # Encoder 20 | conv = Conv2D(filters*1, kernel_size= (3,3), padding= 'same', strides= (1,1))(inputs) 21 | conv = batch_Norm_Activation(conv) 22 | conv = Conv2D(filters*1, kernel_size= (3,3), padding= 'same', strides= (1,1))(conv) 23 | shortcut = Conv2D(filters*1, kernel_size=(1,1), padding='same', strides=(1,1))(inputs) 24 | shortcut = batch_Norm_Activation(shortcut) 25 | output1 = add([conv, shortcut]) 26 | 27 | res1 = batch_Norm_Activation(output1) 28 | res1 = Conv2D(filters*2, kernel_size= (3,3), padding= 'same', strides= (2,2))(res1) 29 | res1 = batch_Norm_Activation(res1) 30 | res1 = Conv2D(filters*2, kernel_size= (3,3), padding= 'same', strides= (1,1))(res1) 31 | shortcut1 = Conv2D(filters*2, kernel_size= (3,3), padding='same', strides=(2,2))(output1) 32 | shortcut1 = batch_Norm_Activation(shortcut1) 33 | output2 = add([shortcut1, res1]) 34 | 35 | res2 = batch_Norm_Activation(output2) 36 | res2 = Conv2D(filters*4, kernel_size= (3,3), padding= 'same', strides= (2,2))(res2) 37 | res2 = batch_Norm_Activation(res2) 38 | res2 = Conv2D(filters*4, kernel_size= (3,3), padding= 'same', strides= (1,1))(res2) 39 | shortcut2 = Conv2D(filters*4, kernel_size= (3,3), padding='same', strides=(2,2))(output2) 40 | shortcut2 = batch_Norm_Activation(shortcut2) 41 | output3 = add([shortcut2, res2]) 42 | 43 | res3 = batch_Norm_Activation(output3) 44 | res3 = Conv2D(filters*8, kernel_size= (3,3), padding= 'same', strides= (2,2))(res3) 45 | res3 = batch_Norm_Activation(res3) 46 | res3 = Conv2D(filters*8, kernel_size= (3,3), padding= 'same', strides= (1,1))(res3) 47 | shortcut3 = Conv2D(filters*8, kernel_size= (3,3), padding='same', strides=(2,2))(output3) 48 | shortcut3 = batch_Norm_Activation(shortcut3) 49 | output4 = add([shortcut3, res3]) 50 | 51 | res4 = batch_Norm_Activation(output4) 52 | res4 = Conv2D(filters*16, kernel_size= (3,3), padding= 'same', strides= 
(2,2))(res4) 53 | res4 = batch_Norm_Activation(res4) 54 | res4 = Conv2D(filters*16, kernel_size= (3,3), padding= 'same', strides= (1,1))(res4) 55 | shortcut4 = Conv2D(filters*16, kernel_size= (3,3), padding='same', strides=(2,2))(output4) 56 | shortcut4 = batch_Norm_Activation(shortcut4) 57 | output5 = add([shortcut4, res4]) 58 | 59 | #bridge 60 | conv = batch_Norm_Activation(output5) 61 | conv = Conv2D(filters*16, kernel_size= (3,3), padding= 'same', strides= (1,1))(conv) 62 | conv = batch_Norm_Activation(conv) 63 | conv = Conv2D(filters*16, kernel_size= (3,3), padding= 'same', strides= (1,1))(conv) 64 | 65 | #decoder 66 | 67 | uconv1 = UpSampling2D((2,2))(conv) 68 | uconv1 = concatenate([uconv1, output4]) 69 | 70 | uconv11 = batch_Norm_Activation(uconv1) 71 | uconv11 = Conv2D(filters*16, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv11) 72 | uconv11 = batch_Norm_Activation(uconv11) 73 | uconv11 = Conv2D(filters*16, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv11) 74 | shortcut5 = Conv2D(filters*16, kernel_size= (3,3), padding='same', strides=(1,1))(uconv1) 75 | shortcut5 = batch_Norm_Activation(shortcut5) 76 | output6 = add([uconv11,shortcut5]) 77 | 78 | uconv2 = UpSampling2D((2,2))(output6) 79 | uconv2 = concatenate([uconv2, output3]) 80 | 81 | uconv22 = batch_Norm_Activation(uconv2) 82 | uconv22 = Conv2D(filters*8, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv22) 83 | uconv22 = batch_Norm_Activation(uconv22) 84 | uconv22 = Conv2D(filters*8, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv22) 85 | shortcut6 = Conv2D(filters*8, kernel_size= (3,3), padding='same', strides=(1,1))(uconv2) 86 | shortcut6 = batch_Norm_Activation(shortcut6) 87 | output7 = add([uconv22,shortcut6]) 88 | 89 | uconv3 = UpSampling2D((2,2))(output7) 90 | uconv3 = concatenate([uconv3, output2]) 91 | 92 | uconv33 = batch_Norm_Activation(uconv3) 93 | uconv33 = Conv2D(filters*4, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv33) 94 | uconv33 
= batch_Norm_Activation(uconv33) 95 | uconv33 = Conv2D(filters*4, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv33) 96 | shortcut7 = Conv2D(filters*4, kernel_size= (3,3), padding='same', strides=(1,1))(uconv3) 97 | shortcut7 = batch_Norm_Activation(shortcut7) 98 | output8 = add([uconv33,shortcut7]) 99 | 100 | uconv4 = UpSampling2D((2,2))(output8) 101 | uconv4 = concatenate([uconv4, output1]) 102 | 103 | uconv44 = batch_Norm_Activation(uconv4) 104 | uconv44 = Conv2D(filters*2, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv44) 105 | uconv44 = batch_Norm_Activation(uconv44) 106 | uconv44 = Conv2D(filters*2, kernel_size= (3,3), padding= 'same', strides=(1,1))(uconv44) 107 | shortcut8 = Conv2D(filters*2, kernel_size= (3,3), padding='same', strides=(1,1))(uconv4) 108 | shortcut8 = batch_Norm_Activation(shortcut8) 109 | output9 = add([uconv44,shortcut8]) 110 | 111 | output_layer = Conv2D(nClasses, (3, 3), padding="same", activation="sigmoid")(output9) 112 | model = Model(inputs, output_layer) 113 | 114 | return model 115 | 116 | if __name__ == '__main__': 117 | model = ResUnet(image_size = 304) 118 | metrics = ["accuracy", 119 | tf.keras.metrics.AUC(), 120 | tf.keras.metrics.SensitivityAtSpecificity(0.5), 121 | tf.keras.metrics.SpecificityAtSensitivity(0.5)] 122 | model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=metrics) 123 | model.summary() -------------------------------------------------------------------------------- /models/SEUNet.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from keras.layers import * 3 | from keras.models import * 4 | 5 | 6 | def SEModule(input, ratio, out_dim): 7 | # bs, c, h, w 8 | x = GlobalAveragePooling2D()(input) 9 | excitation = Dense(units=out_dim // ratio)(x) 10 | excitation = Activation('relu')(excitation) 11 | excitation = Dense(units=out_dim)(excitation) 12 | excitation = Activation('sigmoid')(excitation) 13 | excitation = Reshape((1, 
def SEModule(input, ratio, out_dim):
    """Squeeze-and-Excitation block: channel-wise feature recalibration.

    Global-average-pools the spatial dimensions ("squeeze"), learns
    per-channel gates through a two-layer bottleneck MLP ("excitation"),
    and rescales the input feature map channel by channel.

    Args:
        input: 4-D feature map tensor of shape (batch, h, w, out_dim).
        ratio: reduction ratio for the bottleneck Dense layer.
        out_dim: channel count of `input` (must match its last axis).

    Returns:
        Tensor with the same shape as `input`, channel-reweighted.
    """
    squeeze = GlobalAveragePooling2D()(input)            # (batch, out_dim)
    excitation = Dense(units=out_dim // ratio)(squeeze)
    excitation = Activation('relu')(excitation)
    excitation = Dense(units=out_dim)(excitation)
    excitation = Activation('sigmoid')(excitation)       # gates in (0, 1)
    excitation = Reshape((1, 1, out_dim))(excitation)    # broadcast over h, w
    return multiply([input, excitation])


def SEUnet(nClasses, input_height=224, input_width=224):
    """U-Net with an SE module appended to every conv stage.

    Encoder: 5 double-conv stages (16..256 filters) with 2x max-pooling.
    Decoder: 4 upsample+skip stages mirroring the encoder. The head is a
    3x3 conv to `nClasses` maps, flattened to (H*W, nClasses) softmax,
    matching the other models in this package.

    Args:
        nClasses: number of segmentation classes.
        input_height, input_width: spatial size of the RGB input.

    Returns:
        Keras Model with extra attributes `outputHeight`/`outputWidth`.
    """
    def double_conv(x, filters):
        # Two 3x3 conv + BN stages (the standard U-Net conv block).
        for _ in range(2):
            x = Conv2D(filters, 3, activation='relu', padding='same',
                       kernel_initializer='he_normal')(x)
            x = BatchNormalization()(x)
        return x

    def up_concat(x, skip, filters):
        # 2x upsample -> 2x2 conv + BN -> concat the encoder skip.
        x = Conv2D(filters, 2, activation='relu', padding='same',
                   kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
        x = BatchNormalization()(x)
        return concatenate([skip, x], axis=3)

    inputs = Input(shape=(input_height, input_width, 3))

    # Encoder (SE after each stage; ratios follow the original design).
    conv1 = SEModule(double_conv(inputs, 16), 4, 16)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = SEModule(double_conv(pool1, 32), 8, 32)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = SEModule(double_conv(pool2, 64), 8, 64)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = SEModule(double_conv(pool3, 128), 16, 128)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = SEModule(double_conv(pool4, 256), 16, 256)

    # Decoder.
    conv6 = SEModule(double_conv(up_concat(conv5, conv4, 128), 128), 16, 128)
    conv7 = SEModule(double_conv(up_concat(conv6, conv3, 64), 64), 8, 64)
    conv8 = SEModule(double_conv(up_concat(conv7, conv2, 32), 32), 4, 32)
    conv9 = SEModule(double_conv(up_concat(conv8, conv1, 16), 16), 2, 16)

    conv10 = Conv2D(nClasses, (3, 3), padding='same')(conv9)
    conv10 = BatchNormalization()(conv10)

    # Probe the output spatial size once (the original built this throwaway
    # Model twice).
    outputHeight, outputWidth = Model(inputs, conv10).output_shape[1:3]

    out = Reshape((outputHeight * outputWidth, nClasses))(conv10)
    out = Activation('softmax')(out)

    # FIX: `input=`/`output=` kwargs were removed from Keras; the modern
    # constructor takes `inputs=`/`outputs=`.
    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def Segnet(nClasses, input_height=224, input_width=224):
    """SegNet-style encoder-decoder for semantic segmentation.

    Four 2x-downsampling double-conv stages (32..256 filters) followed by
    four 2x-upsampling double-conv stages. Unlike U-Net there are
    deliberately no encoder->decoder skip connections. The head is a 1x1
    conv to `nClasses` maps, flattened to (H*W, nClasses) softmax to match
    the other models in this package.

    Args:
        nClasses: number of segmentation classes.
        input_height, input_width: spatial size of the RGB input.

    Returns:
        Keras Model with extra attributes `outputHeight`/`outputWidth`.
    """
    def conv_pair(x, filters):
        # Two 3x3 conv + BN + ReLU stages.
        for _ in range(2):
            x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
            x = BatchNormalization()(x)
        return x

    inputs = Input(shape=(input_height, input_width, 3))

    # Encoder: 224 -> 14 spatial resolution.
    x = conv_pair(inputs, 32)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = conv_pair(x, 64)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = conv_pair(x, 128)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = conv_pair(x, 256)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Decoder: 14 -> 224, mirroring the encoder filter counts.
    for filters in (256, 128, 64, 32):
        x = UpSampling2D(size=(2, 2))(x)
        x = conv_pair(x, filters)

    # Probe the output spatial size once (the original built this throwaway
    # Model twice).
    outputHeight, outputWidth = Model(inputs, x).output_shape[1:3]

    logits = Conv2D(nClasses, (1, 1), padding='same')(x)
    logits = Reshape((outputHeight * outputWidth, nClasses))(logits)
    out = Activation('softmax')(logits)

    model = Model(inputs, out)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    return model
def convolution_block(x, filters, size, strides=(1, 1), padding='same', activation=True):
    """Conv2D + BatchNorm, with an optional trailing LeakyReLU.

    Args:
        x: input tensor.
        filters: number of conv filters.
        size: conv kernel size (tuple or int).
        strides, padding: forwarded to Conv2D.
        activation: when truthy, append a LeakyReLU(0.1).
    """
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    if activation:  # FIX: idiomatic truth test instead of `== True`
        x = LeakyReLU(alpha=0.1)(x)
    return x


def residual_block(blockInput, num_filters=16):
    """Pre-activation residual block: LReLU+BN, two 3x3 convs, identity add.

    Note the identity path is also batch-normalized before the Add, so the
    shortcut is not a pure identity.
    """
    x = LeakyReLU(alpha=0.1)(blockInput)
    x = BatchNormalization()(x)
    blockInput = BatchNormalization()(blockInput)
    x = convolution_block(x, num_filters, (3, 3))
    x = convolution_block(x, num_filters, (3, 3), activation=False)
    return Add()([x, blockInput])


def Unet_Xception_ResNetBlock(nClasses, input_height=224, input_width=224):
    """U-Net decoder over a randomly initialized Xception encoder.

    Encoder features are tapped from fixed Xception layer indices; the
    decoder upsamples with transposed convs and residual blocks, ending in
    a (H*W, nClasses) softmax like the other models in this package.

    Args:
        nClasses: number of segmentation classes.
        input_height, input_width: spatial size of the RGB input.

    Returns:
        Keras Model with extra attributes `outputHeight`/`outputWidth`.
    """
    backbone = Xception(input_shape=(input_height, input_width, 3),
                        weights=None, include_top=False)
    inputs = backbone.input

    # NOTE(review): tap indices 121/31/21/11 are tied to the specific Keras
    # Xception implementation — verify if the keras version changes.
    conv4 = backbone.layers[121].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.1)(pool4)

    # Middle (bottleneck).
    convm = Conv2D(16 * 32, (3, 3), activation=None, padding="same")(pool4)
    convm = residual_block(convm, 16 * 32)
    convm = residual_block(convm, 16 * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # 8 -> 16
    deconv4 = Conv2DTranspose(16 * 16, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.1)(uconv4)
    uconv4 = Conv2D(16 * 16, (3, 3), activation=None, padding="same")(uconv4)
    uconv4 = residual_block(uconv4, 16 * 16)
    uconv4 = residual_block(uconv4, 16 * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    # 16 -> 32
    deconv3 = Conv2DTranspose(16 * 8, (3, 3), strides=(2, 2), padding="same")(uconv4)
    conv3 = backbone.layers[31].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.1)(uconv3)
    uconv3 = Conv2D(16 * 8, (3, 3), activation=None, padding="same")(uconv3)
    uconv3 = residual_block(uconv3, 16 * 8)
    uconv3 = residual_block(uconv3, 16 * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    # 32 -> 64. NOTE(review): the ZeroPadding2D offsets below align
    # Xception's odd feature-map sizes with the decoder for 224x224 input —
    # confirm for other input sizes.
    deconv2 = Conv2DTranspose(16 * 4, (3, 3), strides=(2, 2), padding="same")(uconv3)
    conv2 = backbone.layers[21].output
    conv2 = ZeroPadding2D(((1, 0), (1, 0)))(conv2)
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(16 * 4, (3, 3), activation=None, padding="same")(uconv2)
    uconv2 = residual_block(uconv2, 16 * 4)
    uconv2 = residual_block(uconv2, 16 * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    # 64 -> 128
    deconv1 = Conv2DTranspose(16 * 2, (3, 3), strides=(2, 2), padding="same")(uconv2)
    conv1 = backbone.layers[11].output
    conv1 = ZeroPadding2D(((3, 0), (3, 0)))(conv1)
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(16 * 2, (3, 3), activation=None, padding="same")(uconv1)
    uconv1 = residual_block(uconv1, 16 * 2)
    uconv1 = residual_block(uconv1, 16 * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # 128 -> 256 (no skip at full resolution).
    uconv0 = Conv2DTranspose(16 * 1, (3, 3), strides=(2, 2), padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(16 * 1, (3, 3), activation=None, padding="same")(uconv0)
    uconv0 = residual_block(uconv0, 16 * 1)
    uconv0 = residual_block(uconv0, 16 * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)
    uconv0 = Dropout(0.1 / 2)(uconv0)

    out = Conv2D(nClasses, (3, 3), padding='same')(uconv0)
    out = BatchNormalization()(out)

    # Probe the output spatial size once (the original built this throwaway
    # Model twice).
    outputHeight, outputWidth = Model(inputs, out).output_shape[1:3]

    out = Reshape((outputHeight * outputWidth, nClasses))(out)
    out = Activation('softmax')(out)

    # FIX: `input=`/`output=` kwargs were removed from Keras; the modern
    # constructor takes `inputs=`/`outputs=`.
    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    """Two stacked Conv2D layers, each optionally batch-normalized and
    ReLU-activated.

    Args:
        input_tensor: input feature map.
        n_filters: number of filters for both convs.
        kernel_size: square kernel edge length.
        batchnorm: insert BatchNormalization after each conv when True.

    Returns:
        Output tensor of the second conv stage.
    """
    # First layer.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
               kernel_initializer='he_normal', padding='same')(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Second layer. FIX: this conv previously consumed `input_tensor` again,
    # silently discarding the first layer's output; it must consume `x`.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
               kernel_initializer='he_normal', padding='same')(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)

    return x


def Unet(input_img, n_filters=16, dropout=0.1, batchnorm=True):
    """Standard U-Net for binary segmentation.

    Args:
        input_img: Keras Input tensor of shape (h, w, 3).
        n_filters: base filter count; doubled at each contracting stage.
        dropout: dropout rate applied after each pooling/concat.
        batchnorm: forwarded to `conv2d_block`.

    Returns:
        Keras Model producing a 1-channel sigmoid map at input resolution.
    """
    # Contracting path: each stage doubles filters and halves resolution.
    c1 = conv2d_block(input_img, n_filters * 1, kernel_size=3, batchnorm=batchnorm)
    p1 = Dropout(dropout)(MaxPooling2D((2, 2))(c1))

    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    p2 = Dropout(dropout)(MaxPooling2D((2, 2))(c2))

    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    p3 = Dropout(dropout)(MaxPooling2D((2, 2))(c3))

    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p4 = Dropout(dropout)(MaxPooling2D((2, 2))(c4))

    # Bottleneck.
    c5 = conv2d_block(p4, n_filters * 16, kernel_size=3, batchnorm=batchnorm)

    # Expansive path: transposed conv, concat skip, dropout, double conv.
    u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c5)
    u6 = Dropout(dropout)(concatenate([u6, c4]))
    c6 = conv2d_block(u6, n_filters * 8, kernel_size=3, batchnorm=batchnorm)

    u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = Dropout(dropout)(concatenate([u7, c3]))
    c7 = conv2d_block(u7, n_filters * 4, kernel_size=3, batchnorm=batchnorm)

    u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = Dropout(dropout)(concatenate([u8, c2]))
    c8 = conv2d_block(u8, n_filters * 2, kernel_size=3, batchnorm=batchnorm)

    u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = Dropout(dropout)(concatenate([u9, c1]))
    c9 = conv2d_block(u9, n_filters * 1, kernel_size=3, batchnorm=batchnorm)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model


if __name__ == '__main__':
    # FIX: `h` and `w` were undefined, and the call referenced `get_unet`,
    # which does not exist in this module — the function is named `Unet`.
    h = w = 256
    input_img = Input((h, w, 3), name='img')
    model = Unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)
    metrics = ["accuracy",
               tf.keras.metrics.AUC(),
               tf.keras.metrics.SensitivityAtSpecificity(0.5),
               tf.keras.metrics.SpecificityAtSensitivity(0.5)]
    model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=metrics)
    model.summary()
def VGGUnet(image_size, vgg_weight_path=None):
    """U-Net with a VGG16-shaped encoder (BatchNorm added after each conv).

    The encoder conv layers carry the canonical VGG16 layer names so
    pretrained weights can be loaded with `by_name=True`. The decoder
    upsamples with transposed convs and concatenates the matching encoder
    block output. Output is a 1-channel sigmoid map at input resolution.

    Args:
        image_size: square spatial size of the RGB input.
        vgg_weight_path: optional path to VGG16 weights loaded by name.

    Returns:
        Keras Model mapping (image_size, image_size, 3) -> sigmoid mask.
    """
    def conv_bn_relu(x, filters, name=None):
        # Conv -> BN -> ReLU. `name` on the conv matches the VGG16 layer
        # names so pretrained weights can be loaded by name.
        x = Conv2D(filters, (3, 3), padding='same', name=name)(x)
        x = BatchNormalization()(x)
        return Activation('relu')(x)

    def up_merge(x, skip, filters):
        # 2x transposed-conv upsample + BN + ReLU, concat the encoder skip,
        # then two conv blocks at the same filter count.
        x = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = concatenate([x, skip])
        x = conv_bn_relu(x, filters)
        return conv_bn_relu(x, filters)

    inputs = Input((image_size, image_size, 3))

    # Encoder: VGG16 blocks 1-5.
    x = conv_bn_relu(inputs, 64, 'block1_conv1')
    block_1_out = conv_bn_relu(x, 64, 'block1_conv2')
    x = MaxPooling2D()(block_1_out)

    x = conv_bn_relu(x, 128, 'block2_conv1')
    block_2_out = conv_bn_relu(x, 128, 'block2_conv2')
    x = MaxPooling2D()(block_2_out)

    x = conv_bn_relu(x, 256, 'block3_conv1')
    x = conv_bn_relu(x, 256, 'block3_conv2')
    block_3_out = conv_bn_relu(x, 256, 'block3_conv3')
    x = MaxPooling2D()(block_3_out)

    x = conv_bn_relu(x, 512, 'block4_conv1')
    x = conv_bn_relu(x, 512, 'block4_conv2')
    block_4_out = conv_bn_relu(x, 512, 'block4_conv3')
    x = MaxPooling2D()(block_4_out)

    x = conv_bn_relu(x, 512, 'block5_conv1')
    x = conv_bn_relu(x, 512, 'block5_conv2')
    x = conv_bn_relu(x, 512, 'block5_conv3')

    # Final pooling exists only so the probe model below covers all VGG
    # layers when loading pretrained weights; the decoder starts from `x`.
    for_pretrained_weight = MaxPooling2D()(x)

    if vgg_weight_path is not None:
        vgg16 = Model(inputs, for_pretrained_weight)
        vgg16.load_weights(vgg_weight_path, by_name=True)

    # Decoder: UP 1-4, merging encoder blocks 4..1.
    x = up_merge(x, block_4_out, 512)
    x = up_merge(x, block_3_out, 256)
    x = up_merge(x, block_2_out, 128)
    x = up_merge(x, block_1_out, 64)

    # Binary segmentation head.
    x = Conv2D(1, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    outputs = Activation('sigmoid')(x)

    model = Model(inputs=inputs, outputs=outputs)
    return model
def cse_block(prevlayer, prefix):
    """Channel SE: spatial squeeze (global mean over H, W) + channel excitation.

    A two-layer bottleneck MLP produces per-channel sigmoid gates that
    rescale `prevlayer` channel-wise.
    """
    # Mean over the spatial axes -> one scalar per channel.
    mean = Lambda(lambda xin: K.mean(xin, axis=[1, 2]))(prevlayer)
    lin1 = Dense(K.int_shape(prevlayer)[3] // 2,
                 name=prefix + 'cse_lin1', activation='relu')(mean)
    lin2 = Dense(K.int_shape(prevlayer)[3],
                 name=prefix + 'cse_lin2', activation='sigmoid')(lin1)
    return Multiply()([prevlayer, lin2])


def sse_block(prevlayer, prefix):
    """Spatial SE: channel squeeze (1x1 conv to one channel) + spatial excitation.

    FIX: the 1x1 conv must output a single channel (the spatial gate map)
    per the csSE paper, arXiv:1803.02579. The original used
    `K.int_shape(prevlayer)[3]` filters — flagged by its own
    "Bug? Should be 1 here?" comment — which made this a plain per-channel
    1x1 conv gate instead of a shared spatial excitation.
    """
    conv = Conv2D(1, (1, 1), padding="same", kernel_initializer="he_normal",
                  activation='sigmoid', strides=(1, 1),
                  name=prefix + "_conv")(prevlayer)
    return Multiply(name=prefix + "_mul")([prevlayer, conv])


def csse_block(x, prefix):
    """Concurrent spatial and channel Squeeze & Excitation (csSE).

    Implementation of https://arxiv.org/abs/1803.02579: the channel-SE and
    spatial-SE recalibrations are computed in parallel and summed.

    Usage: ``x = csse_block(x, prefix='csse_block_{}'.format(i))``
    """
    cse = cse_block(x, prefix)
    sse = sse_block(x, prefix)
    return Add(name=prefix + "_csse_mul")([cse, sse])


def scSEUnet(nClasses, input_height=224, input_width=224):
    """U-Net with csSE blocks on the three finest decoder stages.

    Encoder: 5 double-conv stages (16..256 filters) with 2x max-pooling.
    Decoder: 4 upsample+skip stages; csSE recalibration is applied after
    the 64-, 32- and 16-filter decoder stages (not after the 128-filter
    one, matching the original layout). The head flattens a per-pixel
    softmax to (H*W, nClasses) like the other models in this package.

    Args:
        nClasses: number of segmentation classes.
        input_height, input_width: spatial size of the RGB input.

    Returns:
        Keras Model with extra attributes `outputHeight`/`outputWidth`.
    """
    def double_conv(x, filters):
        # Two 3x3 conv + BN stages (the standard U-Net conv block).
        for _ in range(2):
            x = Conv2D(filters, 3, activation='relu', padding='same',
                       kernel_initializer='he_normal')(x)
            x = BatchNormalization()(x)
        return x

    def up_concat(x, skip, filters):
        # 2x upsample -> 2x2 conv + BN -> concat the encoder skip.
        x = Conv2D(filters, 2, activation='relu', padding='same',
                   kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
        x = BatchNormalization()(x)
        return concatenate([skip, x], axis=3)

    inputs = Input(shape=(input_height, input_width, 3))

    # Encoder.
    conv1 = double_conv(inputs, 16)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = double_conv(pool1, 32)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = double_conv(pool2, 64)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = double_conv(pool3, 128)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = double_conv(pool4, 256)

    # Decoder (csSE on the last three stages only).
    conv6 = double_conv(up_concat(conv5, conv4, 128), 128)
    conv7 = csse_block(double_conv(up_concat(conv6, conv3, 64), 64), prefix="conv7")
    conv8 = csse_block(double_conv(up_concat(conv7, conv2, 32), 32), prefix="conv8")
    conv9 = csse_block(double_conv(up_concat(conv8, conv1, 16), 16), prefix="conv9")

    conv10 = Conv2D(nClasses, (3, 3), padding='same')(conv9)
    conv10 = BatchNormalization()(conv10)

    # Probe the output spatial size once (the original built this throwaway
    # Model twice).
    outputHeight, outputWidth = Model(inputs, conv10).output_shape[1:3]

    out = Reshape((outputHeight * outputWidth, nClasses))(conv10)
    out = Activation('softmax')(out)

    # FIX: `input=`/`output=` kwargs were removed from Keras; the modern
    # constructor takes `inputs=`/`outputs=`.
    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model