├── Binary Segmentation ├── GT │ ├── image001.png │ ├── image002.png │ ├── image003.png │ ├── image004.png │ ├── image005.png │ ├── image006.png │ ├── image007.png │ ├── image008.png │ ├── image009.png │ ├── image010.png │ ├── image011.png │ ├── image012.png │ ├── image013.png │ ├── image014.png │ ├── image015.png │ ├── image016.png │ ├── image017.png │ ├── image018.png │ ├── image019.png │ ├── image020.png │ ├── image021.png │ ├── image022.png │ ├── image023.png │ ├── image024.png │ ├── image025.png │ ├── image026.png │ └── image027.png └── pred │ ├── image028.png │ ├── image029.png │ ├── image030.png │ ├── image031.png │ ├── image032.png │ ├── image033.png │ ├── image034.png │ ├── image035.png │ ├── image036.png │ ├── image037.png │ ├── image038.png │ ├── image039.png │ ├── image040.png │ ├── image041.png │ ├── image042.png │ ├── image043.png │ ├── image044.png │ ├── image045.png │ ├── image046.png │ ├── image047.png │ ├── image048.png │ ├── image049.png │ ├── image050.png │ ├── image051.png │ ├── image052.png │ ├── image053.png │ ├── image054.png │ ├── image055.png │ ├── image056.png │ ├── image057.png │ ├── image058.png │ ├── image059.png │ ├── image060.png │ ├── image061.png │ ├── image062.png │ ├── image063.png │ ├── image064.png │ ├── image065.png │ ├── image066.png │ ├── image067.png │ ├── image068.png │ ├── image069.png │ ├── image070.png │ ├── image071.png │ ├── image072.png │ ├── image073.png │ ├── image074.png │ ├── image075.png │ ├── image076.png │ ├── image077.png │ ├── image078.png │ ├── image079.png │ ├── image080.png │ ├── image081.png │ ├── image082.png │ ├── image083.png │ ├── image084.png │ ├── image085.png │ ├── image086.png │ ├── image087.png │ ├── image088.png │ └── image089.png ├── Google Colab Code └── code │ ├── Step 2_ U-net_multi-class_DB1.ipynb │ ├── Step_1_One_Hot_encoded_GT_creation_3_classes.ipynb │ ├── U-net_binary_DB1.ipynb │ ├── model_depth_3.py │ └── model_depth_4.py ├── LICENSE ├── Multi-class Segmentation ├── GT │ ├── 
image001.png │ ├── image002.png │ ├── image003.png │ ├── image004.png │ ├── image005.png │ ├── image006.png │ ├── image007.png │ ├── image008.png │ ├── image009.png │ ├── image010.png │ ├── image011.png │ ├── image012.png │ ├── image013.png │ ├── image014.png │ ├── image015.png │ ├── image016.png │ ├── image017.png │ ├── image018.png │ ├── image019.png │ ├── image020.png │ ├── image021.png │ ├── image022.png │ ├── image023.png │ ├── image024.png │ ├── image025.png │ ├── image026.png │ └── image027.png └── pred │ ├── image028.png │ ├── image029.png │ ├── image030.png │ ├── image031.png │ ├── image032.png │ ├── image033.png │ ├── image034.png │ ├── image035.png │ ├── image036.png │ ├── image037.png │ ├── image038.png │ ├── image039.png │ ├── image040.png │ ├── image041.png │ ├── image042.png │ ├── image043.png │ ├── image044.png │ ├── image045.png │ ├── image046.png │ ├── image047.png │ ├── image048.png │ ├── image049.png │ ├── image050.png │ ├── image051.png │ ├── image052.png │ ├── image053.png │ ├── image054.png │ ├── image055.png │ ├── image056.png │ ├── image057.png │ ├── image058.png │ ├── image059.png │ ├── image060.png │ ├── image061.png │ ├── image062.png │ ├── image063.png │ ├── image064.png │ ├── image065.png │ ├── image066.png │ ├── image067.png │ ├── image068.png │ ├── image069.png │ ├── image070.png │ ├── image071.png │ ├── image072.png │ ├── image073.png │ ├── image074.png │ ├── image075.png │ ├── image076.png │ ├── image077.png │ ├── image078.png │ ├── image079.png │ ├── image080.png │ ├── image081.png │ ├── image082.png │ ├── image083.png │ ├── image084.png │ ├── image085.png │ ├── image086.png │ ├── image087.png │ ├── image088.png │ └── image089.png ├── README.md ├── code ├── One-Hot-encoded_GT_creation_3_classes.ipynb ├── U-net_binary_DB1.ipynb ├── U-net_multi-class_DB1.ipynb ├── model_depth_3.py └── model_depth_4.py └── images ├── Result.png └── tensorboard.png /Binary Segmentation/GT/image001.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image001.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image002.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image003.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image004.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image005.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image006.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image006.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image007.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image008.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image009.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image010.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary 
Segmentation/GT/image011.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image012.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image013.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image014.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image015.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image016.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image017.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image017.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image018.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image019.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image020.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image021.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image022.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image022.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image023.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image024.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image025.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/GT/image026.png -------------------------------------------------------------------------------- /Binary Segmentation/GT/image027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary 
Segmentation/GT/image027.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image028.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image028.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image029.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image030.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image030.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image031.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image031.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image032.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image032.png -------------------------------------------------------------------------------- /Binary 
Segmentation/pred/image033.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image033.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image034.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image034.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image035.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image035.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image036.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image036.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image037.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image037.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image038.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image038.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image039.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image039.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image040.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image040.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image041.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image041.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image042.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image042.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image043.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary 
Segmentation/pred/image043.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image044.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image045.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image045.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image046.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image046.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image047.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image047.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image048.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image048.png -------------------------------------------------------------------------------- /Binary 
Segmentation/pred/image049.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image049.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image050.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image051.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image052.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image052.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image053.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image053.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image054.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image054.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image055.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image055.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image056.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image056.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image057.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image057.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image058.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image058.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary 
Segmentation/pred/image059.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image060.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image060.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image061.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image061.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image062.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image062.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image063.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image063.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image064.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image064.png -------------------------------------------------------------------------------- /Binary 
Segmentation/pred/image065.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image065.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image066.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image066.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image067.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image067.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image068.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image068.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image069.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image069.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image070.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image070.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image071.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image071.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image072.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image072.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image073.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image073.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image074.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image074.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image075.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary 
Segmentation/pred/image075.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image076.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image076.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image077.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image077.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image078.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image078.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image079.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image079.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image080.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image080.png -------------------------------------------------------------------------------- /Binary 
Segmentation/pred/image081.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image081.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image082.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image082.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image083.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image083.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image084.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image084.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image085.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image085.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image086.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image086.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image087.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image087.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image088.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image088.png -------------------------------------------------------------------------------- /Binary Segmentation/pred/image089.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Binary Segmentation/pred/image089.png -------------------------------------------------------------------------------- /Google Colab Code/code/Step 2_ U-net_multi-class_DB1.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.8.5"},"colab":{"name":"Step 2: 
U-net_multi-class_DB1.ipynb","provenance":[],"collapsed_sections":[]},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"n7yLKm_-kuii"},"source":["# This code implements segmentation of pathological regions from retinal images using a U-net model with depth 4 and tensorflow 2.x versions.\n","\n","## This code implements multi-class classification\n","## This model is adapted from the original codebase in https://github.com/HZCTony/U-net-with-multiple-classification"]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"RStzkXQwkuip","outputId":"3ace6ee3-8ac1-421e-e2a1-fee02d9f05b8"},"source":["# First lets connect the Gdrive that contains the data\n","from google.colab import drive\n","drive.mount('/content/drive')"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.activity.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fexperimentsandconfigs%20https%3a%2f%2fwww.googleapis.com%2fauth%2fphotos.native&response_type=code\n","\n","Enter your authorization code:\n","4/1AY0e-g68gf7Dv8gUt9_ojyE0wuMV2zGvQ4XI_znIiJqAl9uwvIffqygT4vo\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"c7xAEMCtlApo"},"source":["import os\n","# The path below should point to the directory containing this notebook and the associated utility files\n","# Change it if necessary\n","os.chdir('/content/drive/MyDrive/U-net 
Multi-class/code/')\n","!ls"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hsjiInH8kuiq"},"source":["# A. Lets start by stepwise defining all libraries and functions needed to generate the model and pre-process the data"]},{"cell_type":"code","metadata":{"id":"CKpTzQkOkuiq"},"source":["#Step 1: Load libraries for the U-net Model\n","import numpy as np \n","import os\n","import skimage.io as io\n","import skimage.transform as trans\n","import numpy as np\n","from tensorflow.keras.models import *\n","from tensorflow.keras.layers import *\n","from tensorflow.keras.optimizers import *\n","from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler\n","from tensorflow.keras import backend as keras\n","#from tensorflow import keras\n","import tensorflow as tf"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"1AzDVayVkuiq"},"source":["#Step 2: Import the U-net model\n","from model_depth_4 import *\n","img_size=(512,512)"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"cbKhpLx5kuir"},"source":["n_class=3\n","#Create Groundtruth with 5 planes:[Red Lesions(0), Bright Lesions(1), background (2) ]"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"apyaqeIDkuir"},"source":["#Step 3:Define functions for pre-processing data\n","from tensorflow.keras.preprocessing.image import ImageDataGenerator\n","import skimage.io as io\n","import skimage.transform as trans\n","import matplotlib.pyplot as plt\n","import scipy.misc as sc\n","\n","\n","def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = \"grayscale\",\n"," mask_color_mode = \"rgb\",image_save_prefix = \"image\",mask_save_prefix = \"mask\",\n"," flag_multi_class = True,n_class = n_class,save_to_dir = None,target_size = img_size,seed = 1):\n"," '''\n"," can generate image and mask at the same time\n"," use the same seed for image_datagen and mask_datagen to 
ensure the transformation for image and mask is the same\n"," if you want to visualize the results of generator, set save_to_dir = \"your path\"\n"," '''\n"," image_datagen = ImageDataGenerator(**aug_dict)\n"," mask_datagen = ImageDataGenerator(**aug_dict)\n"," image_generator = image_datagen.flow_from_directory(\n"," train_path,\n"," classes = [image_folder],\n"," color_mode = image_color_mode,\n"," target_size = target_size,\n"," batch_size = batch_size,\n"," save_to_dir = save_to_dir,\n"," save_prefix = image_save_prefix,\n"," class_mode=None,\n"," seed = seed)\n"," mask_generator = mask_datagen.flow_from_directory(\n"," train_path,\n"," classes = [mask_folder],\n"," color_mode = mask_color_mode,\n"," target_size = target_size,\n"," batch_size = batch_size,\n"," save_to_dir = save_to_dir,\n"," save_prefix = mask_save_prefix,\n"," class_mode=None,\n"," seed = seed)\n"," train_generator = zip(image_generator, mask_generator)\n"," for (img,mask) in train_generator:\n"," yield (img,mask)\n"," \n","\n","def testGenerator(test_path,target_size = img_size,flag_multi_class = True,as_gray = True):\n"," files=sorted(os.listdir(test_path))\n"," num_image=len(files)\n"," for i in range(num_image):\n"," img = io.imread(os.path.join(test_path,files[i]),as_gray = True)\n"," print(files[i])\n"," img = trans.resize(img,target_size)\n"," #img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img\n"," img = np.reshape(img,(1,)+img.shape)\n"," yield img"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"E0ykPbkYkuir"},"source":["#Step 4: Define function to save the test images\n","### draw imgs in labelVisualize and save results in saveResult\n","def saveResult(img_path,save_path,npyfile):\n"," files=os.listdir(img_path)\n"," \n"," for i,item in enumerate(npyfile):\n"," img=item\n"," for k in range(3):\n"," img[:,:,k]=img[:,:,k]/np.ptp(img[:,:,k])\n"," \n"," img[:,:,1]=(img[:,:,1]>0.5).astype(int) #This threshold of 0.05 can be changed to any 
number in range [0,1]\n"," img[:,:,0]=(img[:,:,0]>0.5).astype(int)\n"," \n"," io.imsave(os.path.join(save_path, files[i]),img)"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"6_R2XCFukuis"},"source":["def SaveResultwImage(img_path,save_path,npyfile,target_size=img_size,flag_multi_class = True,num_class = 2):\n"," files=os.listdir(img_path)\n"," \n"," \n"," for i,item in enumerate(npyfile):\n"," img=item\n"," img[img>0.5]=1\n"," img[img<=0.5]=0\n"," img[:,:,2]=0\n"," \n"," I = io.imread(os.path.join(img_path,files[i]), as_gray=True)\n"," I = trans.resize(I,target_size)\n"," img[:,:,0]=np.true_divide((I+img[:,:,0]),2)\n"," img[:,:,1]=np.true_divide((I+img[:,:,1]),2)\n"," img[:,:,2]=np.true_divide((I+img[:,:,2]),2)\n"," io.imsave(os.path.join(save_path, files[i]),img) "],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"OpRBa_aWkuis"},"source":["#Step 5: Define functions to evaluate the output\n","import sklearn.metrics as sm\n","\n","def get_confusion_matrix_elements(groundtruth_list, predicted_list):\n"," \"\"\"returns confusion matrix elements i.e TN, FP, FN, TP as floats\n","\tSee example code for helper function definitions\n"," \"\"\"\n"," tn, fp, fn, tp = sm.confusion_matrix(groundtruth_list, predicted_list,labels=[0,1]).ravel()\n"," tn, fp, fn, tp = np.float64(tn), np.float64(fp), np.float64(fn), np.float64(tp)\n","\n"," return tn, fp, fn, tp\n","\n","def get_prec_rec_IoU_accuracy(groundtruth_list, predicted_list):\n"," \"\"\"returns precision, recall, IoU and accuracy metrics\n","\t\"\"\"\n"," tn, fp, fn, tp = get_confusion_matrix_elements(groundtruth_list, predicted_list)\n"," \n"," total = tp + fp + fn + tn\n"," accuracy = (tp + tn) / total\n"," prec=tp/(tp+fp)\n"," rec=tp/(tp+fn)\n"," IoU=tp/(tp+fp+fn)\n"," \n"," return prec,rec,IoU,accuracy\n","\n","def get_f1_score(groundtruth_list, predicted_list):\n"," \"\"\"Return f1 score covering edge cases\"\"\"\n","\n"," tn, fp, fn, tp = 
get_confusion_matrix_elements(groundtruth_list, predicted_list)\n"," \n"," f1_score = (2 * tp) / ((2 * tp) + fp + fn)\n","\n"," return f1_score\n","\n","def get_validation_metrics(groundtruth,predicted):\n"," \"\"\"Return all output metrics. Input is binary images\"\"\"\n"," \n"," u,v=np.shape(groundtruth)\n"," groundtruth_list=np.reshape(groundtruth,(u*v,))\n"," predicted_list=np.reshape(predicted,(u*v,))\n"," prec,rec,IoU,acc=get_prec_rec_IoU_accuracy(groundtruth_list, predicted_list)\n"," f1_score=get_f1_score(groundtruth_list, predicted_list)\n"," # print(\"Precision=\",prec, \"Recall=\",rec, \"IoU=\",IoU, \"acc=\",acc, \"F1=\",f1_score)\n"," return prec,rec,IoU,acc,f1_score\n","\n","def evalResult(gth_path,npyfile,target_size=img_size,flag_multi_class = False,num_class = 3):\n"," files=sorted(os.listdir(gth_path))\n"," print(files)\n"," prec=0\n"," rec=0\n"," acc=0\n"," IoU=0\n"," f1_score=0\n"," for i,item in enumerate(npyfile):\n"," img = item[:,:,0]\n"," gth = io.imread(os.path.join(gth_path,files[i]))\n"," gth = trans.resize(gth,target_size)\n"," img1=np.array(((img - np.min(img))/np.ptp(img))>0.1).astype(float)\n"," gth1=np.array(((gth - np.min(gth))/np.ptp(gth))>0.1).astype(float)\n"," p,r,I,a,f=get_validation_metrics(gth1,img1)\n"," prec=prec+p\n"," rec=rec+r\n"," acc=acc+a\n"," IoU=IoU+I\n"," f1_score=f1_score+f\n"," print(\"Precision=\",prec/(i+1), \"Recall=\",rec/(i+1), \"IoU=\",IoU/(i+1), \"acc=\",acc/(i+1), \"F1=\",f1_score/(i+1)) "],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"036WjKyjkuit"},"source":["# All definitions are now done! Lets start using the functions now...\n","# B. 
Call to image data generator, model initialization, followed by model fitting."]},{"cell_type":"code","metadata":{"id":"mfzRaSRqkuit"},"source":["#Step 1: Call to image data generator in keras\n","\n","os.chdir('/content/drive/MyDrive/U-net Multi-class/diaretdb1_v_1_1/resources/')\n","data_gen_args = dict(rotation_range=0.3,\n"," rescale=1./255,\n"," width_shift_range=0.2,\n"," height_shift_range=0.2,\n"," shear_range=0.1,\n"," zoom_range=[0.7,1],\n"," horizontal_flip=True,\n"," vertical_flip=True,\n"," fill_mode='nearest')\n","PATH='./train/'"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"PebFECN7kuiu"},"source":["if not os.path.exists(PATH+'aug'):\n"," os.makedirs(PATH+'aug')\n"," \n","if not os.path.exists(PATH+'pred'):\n"," os.makedirs(PATH+'pred') \n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"wVuo91-zkuiu"},"source":["data_gen = trainGenerator(3,PATH,'images','GT',data_gen_args, save_to_dir = None)"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"iaKBXrQykuiu"},"source":["for e in range(5):\n"," print('Epoch', e)\n"," batches = 0\n"," for x_batch, y_batch in data_gen:\n"," print(np.max(x_batch))\n"," for i in range(0, 2):\n"," plt.subplot(330+1 + i)\n"," plt.imshow(y_batch[i], cmap=plt.get_cmap('gray'))\n"," \n","\n"," plt.show()\n"," \n"," break"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"bKM5tKtWkuiv"},"source":["#Step 2: Initialize the model. 
Train from scratch!\n","model = unet()\n","model.summary()"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"0FBNXhCOkuiv"},"source":["#Step 3: Initialize Tensorboard to monitor changes in Model Loss \n","import datetime\n","%load_ext tensorboard\n","log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n","tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"scrolled":true,"id":"vQYuJXXOkuiw"},"source":["#Visualize on tensorboard (move this above)\n","%tensorboard --logdir logs/fit"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"scrolled":true,"id":"QRcEH7kdkuiw"},"source":["#Step 4: Fit the u-net model\n","model_checkpoint = tf.keras.callbacks.ModelCheckpoint('unet_DB1_multi.hdf5', monitor='loss',verbose=0)\n","model.fit(data_gen,steps_per_epoch=20,epochs=30,verbose=1, callbacks=[model_checkpoint, tensorboard_callback])"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"bPQAhlUYkuiw"},"source":["# Final trained model is saved as unet_DB1.hdf5\n","# C. 
Run the trained model on test images and save the outputs, and evaluate pixel-level segmentation performance "]},{"cell_type":"code","metadata":{"scrolled":true,"id":"M0U1YHs_kuix"},"source":["#Step 1: Run model on test images and save the images\n","#number of test images\n","n_i=len(os.listdir('./test/images/'))\n","#Call test generator\n","test_gen = testGenerator('./test/images/')\n","#Return model outcome for each test image\n","results = model.predict_generator(test_gen,n_i,verbose=1)\n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"l8A2E6Npkuix"},"source":["if not os.path.exists('./test/pred'):\n"," os.makedirs('./test/pred/')\n","\n","SaveResultwImage('./test',PATH+'pred/',results)"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"hAOlU7Rjkuix"},"source":["print(np.sum(results))"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"g9Ww4As1kuiy"},"source":["plt.imshow(results[0][:,:,1])"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"I18t61Tpkuiy"},"source":["plt.imshow(results[1])"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"hVBSXxCqkuiy"},"source":["\n","\n","\n","\n"],"execution_count":null,"outputs":[]}]} -------------------------------------------------------------------------------- /Google Colab Code/code/Step_1_One_Hot_encoded_GT_creation_3_classes.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "kernelspec": { 6 | "display_name": "Python 3", 7 | "language": "python", 8 | "name": "python3" 9 | }, 10 | "language_info": { 11 | "codemirror_mode": { 12 | "name": "ipython", 13 | "version": 3 14 | }, 15 | "file_extension": ".py", 16 | "mimetype": "text/x-python", 17 | "name": "python", 18 | "nbconvert_exporter": "python", 19 | "pygments_lexer": "ipython3", 20 | "version": "3.8.5" 21 | }, 22 | "colab": { 23 
| "name": "Step 1: One-Hot-encoded_GT_creation_3_classes.ipynb", 24 | "provenance": [], 25 | "collapsed_sections": [] 26 | } 27 | }, 28 | "cells": [ 29 | { 30 | "cell_type": "markdown", 31 | "metadata": { 32 | "id": "7K2fAAqYdCuX" 33 | }, 34 | "source": [ 35 | "# This code prepares the multi-class image data" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "metadata": { 41 | "colab": { 42 | "base_uri": "https://localhost:8080/" 43 | }, 44 | "id": "jzlDrfu6eCoG", 45 | "outputId": "2a2f244a-d163-40d1-86ea-5e666dc4cc62" 46 | }, 47 | "source": [ 48 | "# First lets connect the Gdrive that contains the data\n", 49 | "from google.colab import drive\n", 50 | "drive.mount('/content/drive')" 51 | ], 52 | "execution_count": 1, 53 | "outputs": [ 54 | { 55 | "output_type": "stream", 56 | "text": [ 57 | "Mounted at /content/drive\n" 58 | ], 59 | "name": "stdout" 60 | } 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "metadata": { 66 | "colab": { 67 | "base_uri": "https://localhost:8080/" 68 | }, 69 | "id": "sYe2G9qeeYNU", 70 | "outputId": "556bdf5e-660d-42da-a3e7-a314c808267e" 71 | }, 72 | "source": [ 73 | "import os\n", 74 | "# The path below should point to the directory containing this notebook and the associated utility files\n", 75 | "# To replicate this step, create a folder in your Google drive named \"U-net Multi-class\" and place the code and diaretdb1 data set under this folder. \n", 76 | "os.chdir('/content/drive/MyDrive/U-net Multi-class/diaretdb1_v_1_1/resources/')\n", 77 | "!ls" 78 | ], 79 | "execution_count": 2, 80 | "outputs": [ 81 | { 82 | "output_type": "stream", 83 | "text": [ 84 | "example_evalresults images\t toolkit\t unet_DB1_multi.hdf5\n", 85 | "html\t\t testdatasets traindatasets\n" 86 | ], 87 | "name": "stdout" 88 | } 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": { 94 | "id": "magBj7blguAP" 95 | }, 96 | "source": [ 97 | "# Lets automatically partition the data into train and test folders. 
Image number 1-27 in train and remaining in test" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "metadata": { 103 | "id": "Ik28Z1vJgtZ7" 104 | }, 105 | "source": [ 106 | "\n", 107 | "if not os.path.exists('./train/'):\n", 108 | " os.makedirs('./train/')\n", 109 | "if not os.path.exists('./train/images'):\n", 110 | " os.makedirs('./train/images')\n", 111 | "if not os.path.exists('./train/GT/'):\n", 112 | " os.makedirs('./train/GT/')\n", 113 | "\n", 114 | "\n", 115 | "\n", 116 | "if not os.path.exists('./test/'):\n", 117 | " os.makedirs('./test/')\n", 118 | "if not os.path.exists('./test/images'):\n", 119 | " os.makedirs('./test/images')\n" 120 | ], 121 | "execution_count": 3, 122 | "outputs": [] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "metadata": { 127 | "id": "PyGhREukdCud" 128 | }, 129 | "source": [ 130 | "import numpy as np\n", 131 | "import os\n", 132 | "import skimage.io as io\n", 133 | "import skimage.transform as trans" 134 | ], 135 | "execution_count": 4, 136 | "outputs": [] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "metadata": { 141 | "id": "diFEPKtidCue" 142 | }, 143 | "source": [ 144 | "def prepare_multi_class_GT(GT_PATH, class_names, savepath, target_size=(512,512), n_class=3):\n", 145 | "\n", 146 | " f_names = os.listdir(GT_PATH+class_names[0])\n", 147 | " for files in f_names:\n", 148 | " GT_im=np.zeros(np.concatenate((target_size,n_class),axis=None)) #This creates a zero array of size (512,512,3)\n", 149 | " FG=np.zeros(target_size)\n", 150 | " \n", 151 | " for idx,cn in enumerate(class_names):\n", 152 | " lab=io.imread(GT_PATH+cn+files, as_gray=True)\n", 153 | " lab = trans.resize(lab,target_size)\n", 154 | " if(np.max(lab)>1):\n", 155 | " lab=lab/255\n", 156 | " lab[lab>=0.1]=1 #threshold at 0.1. 
Change this value based on your requirement\n", 157 | " lab[lab<0.1]=0\n", 158 | " \n", 159 | " if (idx<2): #Bright Lesions\n", 160 | " GT_im[:,:,1]=GT_im[:,:,1]+lab\n", 161 | " else:#Red Lesions\n", 162 | " GT_im[:,:,0]=GT_im[:,:,0]+lab\n", 163 | " if(np.sum(GT_im[:,:,0])>0):\n", 164 | " GT_im[:,:,0]=GT_im[:,:,0]/np.ptp(GT_im[:,:,0])\n", 165 | " if(np.sum(GT_im[:,:,1])>0):\n", 166 | " GT_im[:,:,1]=GT_im[:,:,1]/np.ptp(GT_im[:,:,1])\n", 167 | " FG=(GT_im[:,:,0]+GT_im[:,:,1]>0).astype(int)\n", 168 | " GT_im[:,:,2]=1-FG\n", 169 | " io.imsave(savepath+files,GT_im)\n", 170 | " " 171 | ], 172 | "execution_count": 5, 173 | "outputs": [] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "metadata": { 178 | "colab": { 179 | "base_uri": "https://localhost:8080/" 180 | }, 181 | "id": "NgPHGVCadCuf", 182 | "outputId": "7ab11074-f9e3-49af-9a6c-b8bb80efd381" 183 | }, 184 | "source": [ 185 | "\n", 186 | "GT_PATH='./images/ddb1_groundtruth/'\n", 187 | "class_names=['hemorrhages/','redsmalldots/','hardexudates/', 'softexudates/',]\n", 188 | "\n", 189 | "\n", 190 | "if not os.path.exists('./images/GT/'):\n", 191 | " os.makedirs('./images/GT/')\n", 192 | "\n", 193 | "prepare_multi_class_GT(GT_PATH,class_names, './images/GT/')" 194 | ], 195 | "execution_count": 6, 196 | "outputs": [ 197 | { 198 | "output_type": "stream", 199 | "text": [ 200 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 201 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 202 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 203 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 204 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image051.png is a low contrast image\n", 205 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 206 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 207 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 208 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 209 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image057.png is a low contrast image\n", 210 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 211 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image072.png is a low contrast image\n", 212 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 213 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image069.png is a low contrast image\n", 214 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 215 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image050.png is a low contrast image\n", 216 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 217 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 218 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image056.png is a low contrast image\n", 219 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 220 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image028.png is a low contrast image\n", 221 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 222 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image049.png is a low contrast image\n", 223 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 224 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 225 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 226 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 227 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 228 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image071.png is a low contrast image\n", 229 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 230 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 231 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 232 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image047.png is a low contrast image\n", 233 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 234 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 235 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 236 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 237 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image042.png is a low contrast image\n", 238 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 239 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image060.png is a low contrast image\n", 240 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 241 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image034.png is a low contrast image\n", 242 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 243 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 244 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image062.png is a low contrast image\n", 245 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 246 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 247 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image040.png is a low contrast image\n", 248 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 249 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image048.png is a low contrast image\n", 250 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 251 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image032.png is a low contrast image\n", 252 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 253 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 254 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 255 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 256 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 257 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 258 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 259 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 260 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 261 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image046.png is a low contrast image\n", 262 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 263 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 264 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 265 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image045.png is a low contrast image\n", 266 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 267 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 268 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image061.png is a low contrast image\n", 269 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 270 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image070.png is a low contrast image\n", 271 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 272 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image030.png is a low contrast image\n", 273 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 274 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image073.png is a low contrast image\n", 275 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 276 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 277 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image058.png is a low contrast image\n", 278 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 279 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image074.png is a low contrast image\n", 280 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 281 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image033.png is a low contrast image\n", 282 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 283 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 284 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 285 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 286 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 287 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 288 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image068.png is a low contrast image\n", 289 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 290 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 291 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image037.png is a low contrast image\n", 292 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 293 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 294 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 295 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image059.png is a low contrast image\n", 296 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 297 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 298 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image075.png is a low contrast image\n", 299 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 300 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 301 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image039.png is a low contrast image\n", 302 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 303 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image031.png is a low contrast image\n", 304 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 305 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 306 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image036.png is a low contrast image\n", 307 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 308 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image076.png is a low contrast image\n", 309 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 310 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image080.png is a low contrast image\n", 311 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 312 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image089.png is a low contrast image\n", 313 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 314 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image083.png is a low contrast image\n", 315 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 316 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image086.png is a low contrast image\n", 317 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 318 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image078.png is a low contrast image\n", 319 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 320 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 321 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image082.png is a low contrast image\n", 322 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 323 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image088.png is a low contrast image\n", 324 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 325 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image077.png is a low contrast image\n", 326 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 327 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image081.png is a low contrast image\n", 328 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 329 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image079.png is a low contrast image\n", 330 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 331 | "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:26: UserWarning: ./images/GT/image087.png is a low contrast image\n", 332 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 333 | "WARNING:root:Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n" 334 | ], 335 | "name": "stderr" 336 | } 337 | ] 338 | }, 339 | { 340 | "cell_type": "markdown", 341 | "metadata": { 342 | "id": "L1SEqVRjdCuh" 343 | }, 344 | "source": [ 345 | "# Now your data is ready. 
Partition it to 'train' and 'test' folders using the code below" 346 | ] 347 | }, 348 | { 349 | "cell_type": "code", 350 | "metadata": { 351 | "id": "pwPIs144h64e" 352 | }, 353 | "source": [ 354 | "import shutil\n", 355 | "for i in range(1,90):\n", 356 | " if (i<10):\n", 357 | " shutil.copy('./images/ddb1_fundusimages/image00'+str(i)+'.png', './train/images/')\n", 358 | " shutil.copy('./images/GT/image00'+str(i)+'.png', './train/GT/')\n", 359 | " elif (i>=10 and i<28):\n", 360 | " shutil.copy('./images/ddb1_fundusimages/image0'+str(i)+'.png', './train/images/')\n", 361 | " shutil.copy('./images/GT/image0'+str(i)+'.png', './train/GT/')\n", 362 | " else:\n", 363 | " shutil.copy('./images/ddb1_fundusimages/image0'+str(i)+'.png', './test/images/')\n", 364 | "\n" 365 | ], 366 | "execution_count": null, 367 | "outputs": [] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "metadata": { 372 | "id": "05qyCIu8j5VI" 373 | }, 374 | "source": [ 375 | "" 376 | ], 377 | "execution_count": null, 378 | "outputs": [] 379 | } 380 | ] 381 | } -------------------------------------------------------------------------------- /Google Colab Code/code/model_depth_3.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import skimage.io as io 4 | import skimage.transform as trans 5 | import numpy as np 6 | from tensorflow.keras.models import * 7 | from tensorflow.keras.layers import * 8 | from tensorflow.keras.optimizers import * 9 | from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler 10 | from tensorflow.keras import backend as K 11 | import tensorflow as tf 12 | 13 | 14 | def dice_coef(y_true, y_pred, smooth=1): 15 | intersection = K.sum(y_true * y_pred, axis=[1,2,3]) 16 | union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3]) 17 | return K.mean( (2. 
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Dropout,
                                     MaxPooling2D, UpSampling2D, concatenate)
from tensorflow.keras.optimizers import Adam


def dice_coef(y_true, y_pred, smooth=1):
    """Soft Dice coefficient averaged over the batch.

    Sums over the spatial and channel axes (1, 2, 3); ``smooth`` keeps the
    ratio finite when both masks are empty.
    """
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)


def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient so it can be minimized as a training loss."""
    return -dice_coef(y_true, y_pred)


def _conv_bn(x, filters):
    """Two 3x3 Conv(ReLU, he_normal) layers, each followed by batch norm."""
    for _ in range(2):
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(x)
        x = BatchNormalization()(x)
    return x


def _up_merge(x, skip, filters):
    """Decoder step: upsample 2x, reduce with a 2x2 conv, concatenate the
    encoder skip tensor, then apply two 3x3 convs (no batch norm on the
    decoder side, matching the original network)."""
    up = Conv2D(filters, 2, activation='relu', padding='same',
                kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
    x = concatenate([skip, up], axis=3)
    for _ in range(2):
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(x)
    return x


def unet(pretrained_weights=None, input_size=(512, 512, 1), n_class=3):
    """Build and compile a depth-3 U-Net for semantic segmentation.

    Args:
        pretrained_weights: optional path to a Keras weights file to load
            into the freshly built model.
        input_size: (H, W, C) input shape; H and W must be divisible by 8
            because of the three 2x2 poolings.
        n_class: number of softmax output channels.

    Returns:
        A compiled ``tf.keras.Model`` mapping (H, W, C) -> (H, W, n_class).
    """
    inputs = tf.keras.Input(shape=input_size)

    # Encoder path; the conv outputs are kept as skip connections.
    conv1 = _conv_bn(inputs, 32)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _conv_bn(pool1, 64)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _conv_bn(pool2, 128)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    # Bottleneck: two 256-filter blocks, each regularized with dropout
    # (no pooling between them, matching the original architecture).
    drop4 = Dropout(0.5)(_conv_bn(pool3, 256))
    drop5 = Dropout(0.5)(_conv_bn(drop4, 256))

    # Decoder path mirroring the encoder via the skip tensors.
    conv6 = _up_merge(drop5, conv3, 128)
    conv7 = _up_merge(conv6, conv2, 64)
    conv8 = _up_merge(conv7, conv1, 32)

    # Per-pixel class probabilities.
    outputs = Conv2D(n_class, 1, activation='softmax')(conv8)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    # Fix: `lr` is the deprecated (now removed) Adam argument; the supported
    # name is `learning_rate`.
    # NOTE(review): binary_crossentropy with a softmax multi-class head is
    # kept from the original; categorical_crossentropy (or dice_coef_loss
    # below) is the usual pairing — confirm before changing training behavior.
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='binary_crossentropy', metrics=['accuracy'])
    # model.compile(optimizer=Adam(learning_rate=1e-4),
    #               loss=dice_coef_loss, metrics=[dice_coef])

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Dropout,
                                     MaxPooling2D, UpSampling2D, concatenate)
from tensorflow.keras.optimizers import Adam


def dice_coef(y_true, y_pred, smooth=1):
    """Soft Dice coefficient averaged over the batch.

    Sums over the spatial and channel axes (1, 2, 3); ``smooth`` keeps the
    ratio finite when both masks are empty.
    """
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)


def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient so it can be minimized as a training loss."""
    return -dice_coef(y_true, y_pred)


def _conv_bn(x, filters, dilation=1):
    """Two 3x3 Conv(ReLU, he_normal) layers with the given dilation rate,
    each followed by batch norm."""
    for _ in range(2):
        x = Conv2D(filters, 3, activation='relu', dilation_rate=dilation,
                   padding='same', kernel_initializer='he_normal')(x)
        x = BatchNormalization()(x)
    return x


def _up_merge(x, skip, filters):
    """Decoder step: upsample 2x, reduce with a 2x2 conv, concatenate the
    encoder skip tensor, then apply two 3x3 convs (no batch norm on the
    decoder side, matching the original network)."""
    up = Conv2D(filters, 2, activation='relu', padding='same',
                kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
    x = concatenate([skip, up], axis=3)
    for _ in range(2):
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(x)
    return x


def unet(pretrained_weights=None, input_size=(512, 512, 1), n_class=3):
    """Build and compile a depth-4 U-Net for semantic segmentation.

    Args:
        pretrained_weights: optional path to a Keras weights file to load
            into the freshly built model.
        input_size: (H, W, C) input shape; H and W must be divisible by 16
            because of the four 2x2 poolings.
        n_class: number of softmax output channels.

    Returns:
        A compiled ``tf.keras.Model`` mapping (H, W, C) -> (H, W, n_class).
    """
    inputs = tf.keras.Input(shape=input_size)

    # Encoder path; the first two blocks use dilation rate 2 for a wider
    # receptive field, as in the original.
    conv1 = _conv_bn(inputs, 64, dilation=2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _conv_bn(pool1, 128, dilation=2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _conv_bn(pool2, 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    # NOTE(review): training=True keeps dropout active at inference time as
    # well (Monte-Carlo dropout) — kept from the original; confirm intended.
    drop4 = Dropout(0.5)(_conv_bn(pool3, 512), training=True)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    drop5 = Dropout(0.5)(_conv_bn(pool4, 1024), training=True)

    # Decoder path mirroring the encoder via the skip tensors.
    conv6 = _up_merge(drop5, drop4, 512)
    conv7 = _up_merge(conv6, conv3, 256)
    conv8 = _up_merge(conv7, conv2, 128)
    conv9 = _up_merge(conv8, conv1, 64)

    # Per-pixel class probabilities.
    outputs = Conv2D(n_class, (1, 1), activation='softmax')(conv9)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    # Fix: `lr` is the deprecated (now removed) Adam argument; the supported
    # name is `learning_rate`.
    # NOTE(review): binary_crossentropy with a softmax multi-class head is
    # kept from the original; categorical_crossentropy (or dice_coef_loss
    # below) is the usual pairing — confirm before changing training behavior.
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='binary_crossentropy', metrics=['accuracy'])
    # model.compile(optimizer=Adam(learning_rate=1e-5),
    #               loss=dice_coef_loss, metrics=[dice_coef])

    # Fix: the original called `keras.models.load_model(pretrained_weights)`,
    # but `keras` was never imported (NameError at runtime), and reloading a
    # full model would discard the graph built above. Load weights into this
    # model instead, consistent with model_depth_3.
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image001.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image002.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image003.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image004.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image005.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image005.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image006.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image007.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image008.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image009.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image010.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image010.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image011.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image012.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image013.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image014.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image015.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image015.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image016.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image017.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image018.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image019.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image020.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image020.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image021.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image022.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image022.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image023.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image024.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image025.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image025.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image026.png -------------------------------------------------------------------------------- /Multi-class Segmentation/GT/image027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/GT/image027.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image028.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image028.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image029.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image030.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image030.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image031.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image031.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image032.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image032.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image033.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image033.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image034.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image034.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image035.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image035.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image036.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image036.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image037.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image037.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image038.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image038.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image039.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image039.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image040.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image040.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image041.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image041.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image042.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image042.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image043.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image043.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image044.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image045.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image045.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image046.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image046.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image047.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image047.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image048.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image048.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image049.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image049.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image050.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image050.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image051.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image052.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image052.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image053.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image053.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image054.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image054.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image055.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image055.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image056.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image056.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image057.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image057.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image058.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image058.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image059.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image060.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image060.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image061.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image061.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image062.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image062.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image063.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image063.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image064.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image064.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image065.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image065.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image066.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image066.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image067.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image067.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image068.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image068.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image069.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image069.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image070.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image070.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image071.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image071.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image072.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image072.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image073.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image073.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image074.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image074.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image075.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image075.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image076.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image076.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image077.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image077.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image078.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image078.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image079.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image079.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image080.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image080.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image081.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image081.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image082.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image082.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image083.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image083.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image084.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image084.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image085.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image085.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image086.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image086.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image087.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image087.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image088.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image088.png -------------------------------------------------------------------------------- /Multi-class Segmentation/pred/image089.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/Multi-class Segmentation/pred/image089.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # U-net-for-Multi-class-semantic-segmentation 2 | 3 | This example demonstrates the use of U-net model for pathology 
segmentation on retinal images. This supports binary and multi-class segmentation. 4 | 5 | # Google Colab support 6 | 7 | The Google colab folder contains code to help replicate the process for the DIARETDB1 data set. The folder structures are automatically created using the Step 1 code and the U-net is implemented using the Step 2 code. 8 | 9 | # This jupyter notebook presents all requirements needed to achieve pixel-level semantic segmentation using images. 10 | 11 | # Step 1: Package requirements 12 | * Tensorflow>=2.0 13 | * numpy 14 | * skimage.io 15 | * skimage.transform 16 | 17 | # Step 2: Train and Test Data 18 | Download the DIARETDB1 data set page: 19 | * Downliad the annotated data at http://www2.it.lut.fi/project/imageret/diaretdb1/ 20 | * We will use the images in folder: ddb1fundusimages, and annotations in folder: ddb1groundtruth 21 | * Create tho folders train and test, such that train has images 1-27 and test has all remaining images. 22 | * Create the following folder structure in your local folder for multi-class segmentation. : 23 | ``` 24 | ./resources/ 25 | │ 26 | └───train 27 | | └───images 28 | | └───GT 29 | | └───hardexudates 30 | | └───softexudates 31 | | └───hamorrhages 32 | | └───redsmalldots 33 | │ 34 | └───test 35 | ├──images 36 | 37 | ``` 38 | For binary segmentation, only the hamorrhages folder is retained under GT. 39 | # Step 3: Get the code 40 | Download the code in the code folder. 41 | * The U-net_binary.ipynb performs binary classification for hamorrhages. 42 | * The One-Hot-encoded_GT_creation_3_classes.ipynb creates the multi-class classification groundtruth 43 | * The U-net_multi-class_DB1.ipynb implements the multi-class U-net implementation. 44 | 45 | # Step 4: Results: 46 | * The U-net with Depth 4 is trained using 27 images, loss function of negative dice coefficient, Adam optimizer and augmented significnatly using the keras imagedata generator. 
47 | * The tensorboard graphs are as follows: 48 | ![Tensorboard losses after 80 epochs](images/tensorboard.png) 49 | 50 | # How to avoid possible errors? 51 | The key component of this U-net framework is that input is an image and output is also an image. 52 | To use the code in this repo AS IS, you HAVE TO unpack the data set as suggested in Step 2. 53 | * One way to detect if your Path is incorrectly set is, you will get the message: "Found 0 images beloning to 1 classes". This means the images and GT are not detected. 54 | * If images are not detected, this will lead to a "peek" error while model.fit command is run. 55 | * Finally, for tensorboard, if no visualization is created, check the 'logs' folder. If a recored exists then rerun the tensorboard command. This should get things started. 56 | 57 | ## The segmentation perfromances on test images are: 58 | 59 | * Sample examples are: 60 | Predicted output: ![Predicted blood vessels](images/Result.png) 61 | * Other outputs under Multi-class Segmentation/pred/ 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /code/One-Hot-encoded_GT_creation_3_classes.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# This code prepares the multi-class image data" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import os\n", 18 | "import skimage.io as io\n", 19 | "import skimage.transform as trans" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": 2, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "def prepare_multi_class_GT(GT_PATH, class_names, savepath, target_size=(512,512), n_class=3):\n", 29 | "\n", 30 | " f_names = os.listdir(GT_PATH+class_names[0])\n", 31 | " for files 
in f_names:\n", 32 | " GT_im=np.zeros(np.concatenate((target_size,n_class),axis=None)) #This creates a zero array of size (512,512,3)\n", 33 | " FG=np.zeros(target_size)\n", 34 | " \n", 35 | " for idx,cn in enumerate(class_names):\n", 36 | " lab=io.imread(GT_PATH+cn+files, as_gray=True)\n", 37 | " lab = trans.resize(lab,target_size)\n", 38 | " if(np.max(lab)>1):\n", 39 | " lab=lab/255\n", 40 | " lab[lab>=0.1]=1 #threshold at 0.1. Change this value based on your requirement\n", 41 | " lab[lab<0.1]=0\n", 42 | " \n", 43 | " if (idx<2): #Bright Lesions\n", 44 | " GT_im[:,:,1]=GT_im[:,:,1]+lab\n", 45 | " else:#Red Lesions\n", 46 | " GT_im[:,:,0]=GT_im[:,:,0]+lab\n", 47 | " if(np.sum(GT_im[:,:,0])>0):\n", 48 | " GT_im[:,:,0]=GT_im[:,:,0]/np.ptp(GT_im[:,:,0])\n", 49 | " if(np.sum(GT_im[:,:,1])>0):\n", 50 | " GT_im[:,:,1]=GT_im[:,:,1]/np.ptp(GT_im[:,:,1])\n", 51 | " FG=(GT_im[:,:,0]+GT_im[:,:,1]>0).astype(int)\n", 52 | " GT_im[:,:,2]=1-FG\n", 53 | " io.imsave(savepath+files,GT_im)\n", 54 | " " 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 3, 60 | "metadata": {}, 61 | "outputs": [ 62 | { 63 | "name": "stderr", 64 | "output_type": "stream", 65 | "text": [ 66 | ":26: UserWarning: ./GT/image089.png is a low contrast image\n", 67 | " io.imsave(savepath+files,GT_im)\n", 68 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 69 | ":26: UserWarning: ./GT/image088.png is a low contrast image\n", 70 | " io.imsave(savepath+files,GT_im)\n", 71 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 72 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 73 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 74 | "Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 75 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 76 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 77 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 78 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 79 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 80 | ":26: UserWarning: ./GT/image046.png is a low contrast image\n", 81 | " io.imsave(savepath+files,GT_im)\n", 82 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 83 | ":26: UserWarning: ./GT/image057.png is a low contrast image\n", 84 | " io.imsave(savepath+files,GT_im)\n", 85 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 86 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 87 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 88 | ":26: UserWarning: ./GT/image082.png is a low contrast image\n", 89 | " io.imsave(savepath+files,GT_im)\n", 90 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 91 | ":26: UserWarning: ./GT/image086.png is a low contrast image\n", 92 | " io.imsave(savepath+files,GT_im)\n", 93 | "Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 94 | ":26: UserWarning: ./GT/image059.png is a low contrast image\n", 95 | " io.imsave(savepath+files,GT_im)\n", 96 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 97 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 98 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 99 | ":26: UserWarning: ./GT/image033.png is a low contrast image\n", 100 | " io.imsave(savepath+files,GT_im)\n", 101 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 102 | ":26: UserWarning: ./GT/image081.png is a low contrast image\n", 103 | " io.imsave(savepath+files,GT_im)\n", 104 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 105 | ":26: UserWarning: ./GT/image049.png is a low contrast image\n", 106 | " io.imsave(savepath+files,GT_im)\n", 107 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 108 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 109 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 110 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 111 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 112 | ":26: UserWarning: ./GT/image034.png is a low contrast image\n", 113 | " io.imsave(savepath+files,GT_im)\n", 114 | "Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 115 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 116 | ":26: UserWarning: ./GT/image037.png is a low contrast image\n", 117 | " io.imsave(savepath+files,GT_im)\n", 118 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 119 | ":26: UserWarning: ./GT/image070.png is a low contrast image\n", 120 | " io.imsave(savepath+files,GT_im)\n", 121 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 122 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 123 | ":26: UserWarning: ./GT/image036.png is a low contrast image\n", 124 | " io.imsave(savepath+files,GT_im)\n", 125 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 126 | ":26: UserWarning: ./GT/image048.png is a low contrast image\n", 127 | " io.imsave(savepath+files,GT_im)\n", 128 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 129 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 130 | ":26: UserWarning: ./GT/image050.png is a low contrast image\n", 131 | " io.imsave(savepath+files,GT_im)\n", 132 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 133 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 134 | "Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 135 | ":26: UserWarning: ./GT/image031.png is a low contrast image\n", 136 | " io.imsave(savepath+files,GT_im)\n", 137 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 138 | ":26: UserWarning: ./GT/image079.png is a low contrast image\n", 139 | " io.imsave(savepath+files,GT_im)\n", 140 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 141 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 142 | ":26: UserWarning: ./GT/image073.png is a low contrast image\n", 143 | " io.imsave(savepath+files,GT_im)\n", 144 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 145 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 146 | ":26: UserWarning: ./GT/image060.png is a low contrast image\n", 147 | " io.imsave(savepath+files,GT_im)\n", 148 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 149 | ":26: UserWarning: ./GT/image040.png is a low contrast image\n", 150 | " io.imsave(savepath+files,GT_im)\n", 151 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 152 | ":26: UserWarning: ./GT/image045.png is a low contrast image\n", 153 | " io.imsave(savepath+files,GT_im)\n", 154 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 155 | ":26: UserWarning: ./GT/image030.png is a low contrast image\n", 156 | " io.imsave(savepath+files,GT_im)\n", 157 | "Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n" 158 | ] 159 | }, 160 | { 161 | "name": "stderr", 162 | "output_type": "stream", 163 | "text": [ 164 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 165 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 166 | ":26: UserWarning: ./GT/image071.png is a low contrast image\n", 167 | " io.imsave(savepath+files,GT_im)\n", 168 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 169 | ":26: UserWarning: ./GT/image032.png is a low contrast image\n", 170 | " io.imsave(savepath+files,GT_im)\n", 171 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 172 | ":26: UserWarning: ./GT/image074.png is a low contrast image\n", 173 | " io.imsave(savepath+files,GT_im)\n", 174 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 175 | ":26: UserWarning: ./GT/image072.png is a low contrast image\n", 176 | " io.imsave(savepath+files,GT_im)\n", 177 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 178 | ":26: UserWarning: ./GT/image062.png is a low contrast image\n", 179 | " io.imsave(savepath+files,GT_im)\n", 180 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 181 | ":26: UserWarning: ./GT/image058.png is a low contrast image\n", 182 | " io.imsave(savepath+files,GT_im)\n", 183 | "Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 184 | ":26: UserWarning: ./GT/image056.png is a low contrast image\n", 185 | " io.imsave(savepath+files,GT_im)\n", 186 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 187 | ":26: UserWarning: ./GT/image068.png is a low contrast image\n", 188 | " io.imsave(savepath+files,GT_im)\n", 189 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 190 | ":26: UserWarning: ./GT/image051.png is a low contrast image\n", 191 | " io.imsave(savepath+files,GT_im)\n", 192 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 193 | ":26: UserWarning: ./GT/image083.png is a low contrast image\n", 194 | " io.imsave(savepath+files,GT_im)\n", 195 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 196 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 197 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 198 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 199 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 200 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 201 | ":26: UserWarning: ./GT/image077.png is a low contrast image\n", 202 | " io.imsave(savepath+files,GT_im)\n", 203 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 204 | "Lossy conversion from float64 to uint8. 
Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 205 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 206 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 207 | ":26: UserWarning: ./GT/image039.png is a low contrast image\n", 208 | " io.imsave(savepath+files,GT_im)\n", 209 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 210 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 211 | ":26: UserWarning: ./GT/image080.png is a low contrast image\n", 212 | " io.imsave(savepath+files,GT_im)\n", 213 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 214 | ":26: UserWarning: ./GT/image076.png is a low contrast image\n", 215 | " io.imsave(savepath+files,GT_im)\n", 216 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 217 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 218 | ":26: UserWarning: ./GT/image078.png is a low contrast image\n", 219 | " io.imsave(savepath+files,GT_im)\n", 220 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 221 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 222 | ":26: UserWarning: ./GT/image047.png is a low contrast image\n", 223 | " io.imsave(savepath+files,GT_im)\n", 224 | "Lossy conversion from float64 to uint8. Range [0, 1]. 
Convert image to uint8 prior to saving to suppress this warning.\n", 225 | ":26: UserWarning: ./GT/image087.png is a low contrast image\n", 226 | " io.imsave(savepath+files,GT_im)\n", 227 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 228 | ":26: UserWarning: ./GT/image061.png is a low contrast image\n", 229 | " io.imsave(savepath+files,GT_im)\n", 230 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 231 | ":26: UserWarning: ./GT/image075.png is a low contrast image\n", 232 | " io.imsave(savepath+files,GT_im)\n", 233 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 234 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 235 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 236 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 237 | ":26: UserWarning: ./GT/image028.png is a low contrast image\n", 238 | " io.imsave(savepath+files,GT_im)\n", 239 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 240 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 241 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 242 | ":26: UserWarning: ./GT/image042.png is a low contrast image\n", 243 | " io.imsave(savepath+files,GT_im)\n", 244 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 245 | "Lossy conversion from float64 to uint8. 
Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 246 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 247 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n", 248 | ":26: UserWarning: ./GT/image069.png is a low contrast image\n", 249 | " io.imsave(savepath+files,GT_im)\n", 250 | "Lossy conversion from float64 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.\n" 251 | ] 252 | } 253 | ], 254 | "source": [ 255 | "GT_PATH='./ddb1_groundtruth/'\n", 256 | "class_names=['hemorrhages/','redsmalldots/','hardexudates/', 'softexudates/',]\n", 257 | "\n", 258 | "prepare_multi_class_GT(GT_PATH,class_names, './GT/')" 259 | ] 260 | }, 261 | { 262 | "cell_type": "markdown", 263 | "metadata": {}, 264 | "source": [ 265 | "# Now your data is ready. Partition it to train and test.." 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": null, 271 | "metadata": {}, 272 | "outputs": [], 273 | "source": [ 274 | "RL = [255,0,0]\n", 275 | "BL = [0,255,0]\n", 276 | "Unlabelled = [0,0,0]\n", 277 | "COLOR_DICT = np.array([ RL,BL, Unlabelled])\n", 278 | "class_name = [ 'RL','BL', 'None'] # You must define by yourself\n", 279 | "\n", 280 | "\n", 281 | "def labelVisualize(num_class,color_dict,img):\n", 282 | " img = img[:,:,0] if len(img.shape) == 3 else img\n", 283 | " img_out = np.zeros(img.shape + (3,))\n", 284 | " for i in range(num_class):\n", 285 | " img_out[img == i] = color_dict[i]\n", 286 | " \n", 287 | " return img_out\n", 288 | "\n", 289 | "\n", 290 | "def saveResult(img_path,save_path,npyfile,flag_multi_class = True,num_class = 2):\n", 291 | " files=os.listdir(img_path)\n", 292 | " #print(len(img_path))\n", 293 | " #print(len(npyfile))\n", 294 | " \n", 295 | " for i,item in enumerate(npyfile):\n", 296 | " img = 
labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]\n", 297 | " #img1=np.array(((img - np.min(img))/np.ptp(img))>0.6).astype(float)\n", 298 | " img[img>0.1]=1\n", 299 | " img[img<=0.1]=0\n", 300 | " io.imsave(os.path.join(save_path, files[i]+'_predict.png'),img)" 301 | ] 302 | } 303 | ], 304 | "metadata": { 305 | "kernelspec": { 306 | "display_name": "Python 3", 307 | "language": "python", 308 | "name": "python3" 309 | }, 310 | "language_info": { 311 | "codemirror_mode": { 312 | "name": "ipython", 313 | "version": 3 314 | }, 315 | "file_extension": ".py", 316 | "mimetype": "text/x-python", 317 | "name": "python", 318 | "nbconvert_exporter": "python", 319 | "pygments_lexer": "ipython3", 320 | "version": "3.8.5" 321 | } 322 | }, 323 | "nbformat": 4, 324 | "nbformat_minor": 4 325 | } 326 | -------------------------------------------------------------------------------- /code/model_depth_3.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import skimage.io as io 4 | import skimage.transform as trans 5 | import numpy as np 6 | from tensorflow.keras.models import * 7 | from tensorflow.keras.layers import * 8 | from tensorflow.keras.optimizers import * 9 | from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler 10 | from tensorflow.keras import backend as K 11 | import tensorflow as tf 12 | 13 | 14 | def dice_coef(y_true, y_pred, smooth=1): 15 | intersection = K.sum(y_true * y_pred, axis=[1,2,3]) 16 | union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3]) 17 | return K.mean( (2. 
* intersection + smooth) / (union + smooth), axis=0) 18 | 19 | 20 | 21 | def dice_coef_loss(y_true, y_pred): 22 | return -dice_coef(y_true, y_pred) 23 | 24 | 25 | 26 | 27 | def unet(pretrained_weights = None,input_size=(512,512,1), n_class=3): 28 | inputs = tf.keras.Input(shape=input_size) 29 | conv1 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs) 30 | conv1 = BatchNormalization()(conv1) 31 | conv1 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1) 32 | conv1 = BatchNormalization()(conv1) 33 | pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) 34 | conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1) 35 | conv2 = BatchNormalization()(conv2) 36 | conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2) 37 | conv2 = BatchNormalization()(conv2) 38 | pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) 39 | conv3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2) 40 | conv3 = BatchNormalization()(conv3) 41 | conv3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3) 42 | conv3 = BatchNormalization()(conv3) 43 | pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) 44 | 45 | conv4 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3) 46 | conv4 = BatchNormalization()(conv4) 47 | conv4 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4) 48 | conv4 = BatchNormalization()(conv4) 49 | drop4 = Dropout(0.5)(conv4) 50 | 51 | 52 | conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(drop4) 53 | conv5 = BatchNormalization()(conv5) 54 | conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5) 55 | conv5 = BatchNormalization()(conv5) 56 | drop5 = 
Dropout(0.5)(conv5) 57 | 58 | 59 | up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5)) 60 | merge6 = concatenate([conv3,up6], axis = 3) 61 | conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6) 62 | conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6) 63 | 64 | 65 | up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6)) 66 | merge7 = concatenate([conv2,up7], axis = 3) 67 | conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7) 68 | conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7) 69 | 70 | 71 | up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7)) 72 | merge8 = concatenate([conv1,up8], axis = 3) 73 | conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8) 74 | conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8) 75 | conv9 = Conv2D(n_class, 1, activation = 'softmax')(conv8) 76 | 77 | model = tf.keras.Model(inputs = inputs, outputs = conv9) 78 | 79 | model.compile(optimizer = Adam(lr = 0.0001), loss = ['binary_crossentropy'], metrics = ['accuracy']) 80 | #model.compile(optimizer = Adam(lr = 0.0001), loss = [dice_coef_loss], metrics = [dice_coef]) 81 | if(pretrained_weights): 82 | model.load_weights(pretrained_weights) 83 | 84 | return model 85 | -------------------------------------------------------------------------------- /code/model_depth_4.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import skimage.io as io 4 | import skimage.transform as trans 5 | import numpy as np 6 
| from tensorflow.keras.models import * 7 | from tensorflow.keras.layers import * 8 | from tensorflow.keras.optimizers import * 9 | from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler 10 | from tensorflow.keras import backend as K 11 | import tensorflow as tf 12 | 13 | 14 | def dice_coef(y_true, y_pred, smooth=1): 15 | intersection = K.sum(y_true * y_pred, axis=[1,2,3]) 16 | union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3]) 17 | return K.mean( (2. * intersection + smooth) / (union + smooth), axis=0) 18 | 19 | 20 | 21 | def dice_coef_loss(y_true, y_pred): 22 | return -dice_coef(y_true, y_pred) 23 | 24 | 25 | 26 | def unet(pretrained_weights = None,input_size=(256,256,1), n_class=3): 27 | 28 | inputs = tf.keras.Input(shape=input_size) 29 | conv1 = Conv2D(64, 3, activation = 'relu', dilation_rate=2,padding = 'same', kernel_initializer = 'he_normal')(inputs) 30 | conv1 = BatchNormalization()(conv1) 31 | conv1 = Conv2D(64, 3, activation = 'relu', dilation_rate=2,padding = 'same', kernel_initializer = 'he_normal')(conv1) 32 | conv1 = BatchNormalization()(conv1) 33 | pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) 34 | conv2 = Conv2D(128, 3, activation = 'relu', dilation_rate=2,padding = 'same', kernel_initializer = 'he_normal')(pool1) 35 | conv2 = BatchNormalization()(conv2) 36 | conv2 = Conv2D(128, 3, activation = 'relu', dilation_rate=2, padding = 'same', kernel_initializer = 'he_normal')(conv2) 37 | conv2 = BatchNormalization()(conv2) 38 | pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) 39 | conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2) 40 | conv3 = BatchNormalization()(conv3) 41 | conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3) 42 | conv3 = BatchNormalization()(conv3) 43 | pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) 44 | conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 
'he_normal')(pool3) 45 | conv4 = BatchNormalization()(conv4) 46 | conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4) 47 | conv4 = BatchNormalization()(conv4) 48 | drop4 = Dropout(0.5)(conv4, training=True) 49 | pool4 = MaxPooling2D(pool_size=(2, 2))(drop4) 50 | 51 | conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4) 52 | conv5 = BatchNormalization()(conv5) 53 | conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5) 54 | conv5 = BatchNormalization()(conv5) 55 | drop5 = Dropout(0.5)(conv5, training=True) 56 | 57 | up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5)) 58 | merge6 = concatenate([drop4,up6], axis = 3) 59 | conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6) 60 | conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6) 61 | 62 | 63 | up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6)) 64 | merge7 = concatenate([conv3,up7], axis = 3) 65 | conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7) 66 | conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7) 67 | 68 | 69 | up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7)) 70 | merge8 = concatenate([conv2,up8], axis = 3) 71 | conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8) 72 | conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8) 73 | 74 | 75 | up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 
'he_normal')(UpSampling2D(size = (2,2))(conv8)) 76 | merge9 = concatenate([conv1,up9], axis = 3) 77 | conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9) 78 | conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9) 79 | 80 | conv10 = Conv2D(n_class, (1,1), activation = 'softmax')(conv9) 81 | 82 | 83 | model = tf.keras.Model(inputs = inputs, outputs = conv10) 84 | 85 | model.compile(optimizer = Adam(lr = 0.0001), loss = ['binary_crossentropy'], metrics = ['accuracy']) 86 | #model.compile(optimizer = Adam(lr = 0.0001), loss = [dice_coef_loss], metrics = [dice_coef]) 87 | 88 | 89 | if(pretrained_weights): 90 | model=keras.models.load_model(pretrained_weights) 91 | 92 | return model 93 | -------------------------------------------------------------------------------- /images/Result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/images/Result.png -------------------------------------------------------------------------------- /images/tensorboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sohiniroych/U-net-for-Multi-class-semantic-segmentation/9387a6080a93c9da7cb8b51f00b56a34fed434c0/images/tensorboard.png --------------------------------------------------------------------------------