├── U-Net
│   ├── Acc.png
│   ├── Loss.png
│   ├── output
│   │   ├── im02001.jpg
│   │   ├── im02002.jpg
│   │   ├── image (3).jpg
│   │   ├── image (13).jpg
│   │   └── image (14).jpg
│   ├── __pycache__
│   │   ├── data.cpython-38.pyc
│   │   └── model.cpython-38.pyc
│   ├── dataset
│   │   ├── train
│   │   │   ├── image
│   │   │   │   ├── im00001.jpg
│   │   │   │   ├── im00002.jpg
│   │   │   │   ├── im00003.jpg
│   │   │   │   ├── im00004.jpg
│   │   │   │   ├── im00005.jpg
│   │   │   │   ├── im00006.jpg
│   │   │   │   ├── im00007.jpg
│   │   │   │   ├── im00008.jpg
│   │   │   │   ├── im00009.jpg
│   │   │   │   └── im00010.jpg
│   │   │   └── mask
│   │   │       ├── im00001_gt.png
│   │   │       ├── im00002_gt.png
│   │   │       ├── im00003_gt.png
│   │   │       ├── im00004_gt.png
│   │   │       ├── im00005_gt.png
│   │   │       ├── im00006_gt.png
│   │   │       ├── im00007_gt.png
│   │   │       ├── im00008_gt.png
│   │   │       ├── im00009_gt.png
│   │   │       └── im00010_gt.png
│   │   └── validation
│   │       ├── image
│   │       │   ├── im00001.jpg
│   │       │   ├── im00002.jpg
│   │       │   ├── im00003.jpg
│   │       │   ├── im00004.jpg
│   │       │   ├── im00005.jpg
│   │       │   ├── im00006.jpg
│   │       │   ├── im00007.jpg
│   │       │   ├── im00008.jpg
│   │       │   ├── im00009.jpg
│   │       │   └── im00010.jpg
│   │       └── mask
│   │           ├── im00001_gt.png
│   │           ├── im00002_gt.png
│   │           ├── im00003_gt.png
│   │           ├── im00004_gt.png
│   │           ├── im00005_gt.png
│   │           ├── im00006_gt.png
│   │           ├── im00007_gt.png
│   │           ├── im00008_gt.png
│   │           ├── im00009_gt.png
│   │           └── im00010_gt.png
│   ├── test.py
│   ├── train.py
│   ├── model.py
│   └── data.py
├── FCN
│   ├── training loss.npy
│   ├── output
│   │   ├── im02001.jpg
│   │   ├── im02002.jpg
│   │   ├── image (13).jpg
│   │   ├── image (14).jpg
│   │   └── image (3).jpg
│   ├── validation loss.npy
│   ├── __pycache__
│   │   ├── FCN.cpython-38.pyc
│   │   ├── onehot.cpython-38.pyc
│   │   ├── data_train.cpython-38.pyc
│   │   └── data_val.cpython-38.pyc
│   ├── dataset
│   │   ├── train
│   │   │   ├── image
│   │   │   │   ├── im00001.jpg
│   │   │   │   ├── im00002.jpg
│   │   │   │   ├── im00003.jpg
│   │   │   │   ├── im00004.jpg
│   │   │   │   ├── im00005.jpg
│   │   │   │   ├── im00006.jpg
│   │   │   │   ├── im00007.jpg
│   │   │   │   ├── im00008.jpg
│   │   │   │   ├── im00009.jpg
│   │   │   │   └── im00010.jpg
│   │   │   └── mask
│   │   │       ├── im00001_gt.png
│   │   │       ├── im00002_gt.png
│   │   │       ├── im00003_gt.png
│   │   │       ├── im00004_gt.png
│   │   │       ├── im00005_gt.png
│   │   │       ├── im00006_gt.png
│   │   │       ├── im00007_gt.png
│   │   │       ├── im00008_gt.png
│   │   │       ├── im00009_gt.png
│   │   │       └── im00010_gt.png
│   │   └── validation
│   │       ├── image
│   │       │   ├── im00001.jpg
│   │       │   ├── im00002.jpg
│   │       │   ├── im00003.jpg
│   │       │   ├── im00004.jpg
│   │       │   ├── im00005.jpg
│   │       │   ├── im00006.jpg
│   │       │   ├── im00007.jpg
│   │       │   ├── im00008.jpg
│   │       │   ├── im00009.jpg
│   │       │   └── im00010.jpg
│   │       └── mask
│   │           ├── im00001_gt.png
│   │           ├── im00002_gt.png
│   │           ├── im00003_gt.png
│   │           ├── im00004_gt.png
│   │           ├── im00005_gt.png
│   │           ├── im00006_gt.png
│   │           ├── im00007_gt.png
│   │           ├── im00008_gt.png
│   │           ├── im00009_gt.png
│   │           └── im00010_gt.png
│   ├── onehot.py
│   ├── inference.py
│   ├── data_val.py
│   ├── data_train.py
│   ├── prediction.py
│   └── FCN.py
├── examples
│   ├── pipeline.png
│   └── results examples 1.png
├── test_images
│   ├── im02001.jpg
│   ├── im02002.jpg
│   ├── image (13).jpg
│   ├── image (14).jpg
│   └── image (3).jpg
├── color augmentation
│   ├── color_augmentation.png
│   └── HSV_converter.py
└── README.md
--------------------------------------------------------------------------------
/FCN/onehot.py:
--------------------------------------------------------------------------------
import numpy as np


def onehot(data, n):
    """One-hot encode an integer label map `data` into shape data.shape + (n,)."""
    buf = np.zeros(data.shape + (n,))
    # Flat index of the target channel for every pixel: pixel_index * n + class_id.
    nmsk = np.arange(data.size) * n + data.ravel()
    buf.ravel()[nmsk] = 1
    return buf
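
To make the encoding concrete, here is a small sanity check of what `onehot` produces on a toy 2x2 binary mask. This snippet is illustrative only and is not part of the repository:

```python
import numpy as np
from onehot import onehot

mask = np.array([[0, 1],
                 [1, 0]])      # 0 = background, 1 = skin
encoded = onehot(mask, 2)      # shape (2, 2, 2): one channel per class
print(encoded[0, 0])           # [1. 0.] -> background pixel
print(encoded[0, 1])           # [0. 1.] -> skin pixel
```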
--------------------------------------------------------------------------------
/FCN/inference.py:
--------------------------------------------------------------------------------
import numpy as np
import torch

# FCNs and VGGNet must be importable so torch.load can unpickle the saved model.
from FCN import FCNs
from FCN import VGGNet


# The input to the model is an N x 3 x 160 x 160 tensor, where N is the number of samples.
x = np.random.rand(1, 3, 160, 160)
x = torch.FloatTensor(x)
model = torch.load('checkpoints/****.pt')
model = model.cpu()
model.eval()  # disable dropout/batch-norm updates for inference
# y is the output of shape N x 2 x 160 x 160, where 2 is the number of classes:
# [1 0] for background, [0 1] for skin.
y = model(x)
--------------------------------------------------------------------------------
/U-Net/test.py:
--------------------------------------------------------------------------------
from model import *
from data import *
import os
import time

import cv2
import keras
import numpy as np
from tqdm import tqdm


time_start = time.time()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# ----------Set up your test folder and output folder----------
test_file = 'C:/Users/xuhan/Desktop/ColorSpace_github/test_images'  # your test images
model = keras.models.load_model('checkpoints/pretrained.hdf5')      # load your model
output_file = './output'                                            # your output folder
test_names = os.listdir(test_file)
test_size = len(test_names)
testGene = testGenerator(test_file, test_size)

results = model.predict(
    testGene, batch_size=None, verbose=0, steps=test_size, callbacks=None,
    max_queue_size=10, workers=1, use_multiprocessing=False)

if not os.path.exists(output_file):
    os.makedirs(output_file)

saveResult(output_file, test_names, results)

image_names = os.listdir(test_file)

# Resize each prediction back to the source resolution and binarize it.
print('Binary-Size converter working')
for image_name in tqdm(image_names):
    image = cv2.imread(os.path.join(test_file, image_name))
    prediction = cv2.imread(os.path.join(output_file, image_name), 0)
    size = (image.shape[1], image.shape[0])
    prediction = cv2.resize(prediction, size)
    ret, prediction = cv2.threshold(prediction, 127, 255, cv2.THRESH_BINARY)
    cv2.imwrite(os.path.join(output_file, image_name), prediction)
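
The post-processing loop above maps the network's soft prediction back to the original image size and then binarizes it at the midpoint of the 8-bit range. As a quick illustration of the thresholding step on synthetic data (the array values here are made up):

```python
import numpy as np
import cv2

soft = np.array([[10, 120],
                 [130, 250]], dtype=np.uint8)   # fake soft prediction
_, hard = cv2.threshold(soft, 127, 255, cv2.THRESH_BINARY)
print(hard)  # [[  0   0]
             #  [255 255]] -> pixels above 127 become skin (255)
```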
--------------------------------------------------------------------------------
/FCN/data_val.py:
--------------------------------------------------------------------------------
import os

import cv2
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from onehot import onehot

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])


class ValDataset(Dataset):

    def __init__(self, transform=None):
        self.transform = transform
        self.image_file = 'dataset/validation/image'
        self.mask_file = 'dataset/validation/mask'
        # Cache a sorted listing so indexing is cheap and deterministic.
        self.img_names = sorted(os.listdir(self.image_file))

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        img_name = self.img_names[idx]
        imgA = cv2.imread(os.path.join(self.image_file, img_name))
        imgA = cv2.resize(imgA, (160, 160))
        # Masks are named like 'im00001_gt.png'; change this line to match your mask format.
        imgB = cv2.imread(self.mask_file + '/' + img_name[0:7] + '_gt.png', 0)
        imgB = cv2.resize(imgB, (160, 160))
        imgB = (imgB / 255).astype('uint8')        # binary labels: 0 = background, 1 = skin
        imgB = onehot(imgB, 2)                     # (160, 160, 2)
        imgB = imgB.swapaxes(0, 2).swapaxes(1, 2)  # -> (2, 160, 160), channels first
        imgB = torch.FloatTensor(imgB)
        if self.transform:
            imgA = self.transform(imgA)
        return {'A': imgA, 'B': imgB}


validation_data = ValDataset(transform)
dataloader_val = DataLoader(validation_data, batch_size=4, shuffle=True, num_workers=4)

if __name__ == '__main__':
    for batch in dataloader_val:
        print(len(dataloader_val))  # number of validation batches
        break
--------------------------------------------------------------------------------
/FCN/data_train.py:
--------------------------------------------------------------------------------
import os

import cv2
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from onehot import onehot

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])


class TrainDataset(Dataset):

    def __init__(self, transform=None):
        self.transform = transform
        self.image_file = 'dataset/train/image'
        self.mask_file = 'dataset/train/mask'
        # Cache a sorted listing so indexing is cheap and deterministic.
        self.img_names = sorted(os.listdir(self.image_file))

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        img_name = self.img_names[idx]
        imgA = cv2.imread(os.path.join(self.image_file, img_name))
        imgA = cv2.resize(imgA, (160, 160))
        # Change this line to match your mask format (see the sketch after this file).
        imgB = cv2.imread(self.mask_file + '/' + img_name[0:7] + '_gt.png', 0)
        imgB = cv2.resize(imgB, (160, 160))
        imgB = (imgB / 255).astype('uint8')        # binary labels: 0 = background, 1 = skin
        imgB = onehot(imgB, 2)                     # (160, 160, 2)
        imgB = imgB.swapaxes(0, 2).swapaxes(1, 2)  # -> (2, 160, 160), channels first
        imgB = torch.FloatTensor(imgB)
        if self.transform:
            imgA = self.transform(imgA)
        return {'A': imgA, 'B': imgB}


training_data = TrainDataset(transform)
dataloader = DataLoader(training_data, batch_size=4, shuffle=True, num_workers=4)

if __name__ == '__main__':
    for batch in dataloader:
        print(len(dataloader))  # number of training batches
        break
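
The `img_name[0:7]` slice above assumes image names exactly like `im00001.jpg`. If your own files use longer or differently patterned names, a more robust way to derive the mask path is to split off the extension instead. A sketch, assuming your masks keep the `<image stem>_gt.png` convention; `mask_path_for` is a hypothetical helper, not a function in this repository:

```python
import os

def mask_path_for(image_name, mask_dir):
    # Works for any image filename length, not just 7-character stems.
    stem = os.path.splitext(image_name)[0]
    return os.path.join(mask_dir, stem + '_gt.png')

print(mask_path_for('im00001.jpg', 'dataset/train/mask'))
# dataset/train/mask/im00001_gt.png
```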
--------------------------------------------------------------------------------
/FCN/prediction.py:
--------------------------------------------------------------------------------
import os

import cv2
import numpy as np
import torch
from torchvision import transforms
from tqdm import tqdm

# The FCN classes must be importable so torch.load can unpickle the saved model.
from FCN import FCN8s, FCN16s, FCN32s, FCNs, VGGNet

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.load('checkpoints/fcn_model_2.pt')  # load the model
model = model.to(device)
model.eval()
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])


if __name__ == '__main__':
    img_file = 'C:/Users/xuhan/Desktop/ColorSpace_github/test_images'  # your test folder

    img_names = os.listdir(img_file)

    save_file = './output'
    if not os.path.exists(save_file):
        os.makedirs(save_file)
    for img_name in tqdm(img_names):
        imgA = cv2.imread(os.path.join(img_file, img_name))
        size = imgA.shape

        imgA = cv2.resize(imgA, (160, 160))
        imgA = transform(imgA)
        imgA = imgA.to(device)
        imgA = imgA.unsqueeze(0)
        output = model(imgA)
        output = torch.sigmoid(output)
        output_np = output.cpu().detach().numpy().copy()
        output_np = np.squeeze(output_np) * 255

        output_npA = output_np[0]  # background channel ([1 0])
        output_npB = output_np[1]  # skin channel ([0 1])

        # Normalize the skin channel against the sum of both channels,
        # giving a soft skin-probability map scaled to [0, 255].
        output_sigmoid = output_npB / (output_npA + output_npB) * 255

        cv2.imwrite(os.path.join(save_file, img_name), output_sigmoid)

        # Resize back to the original image size and binarize.
        output = cv2.imread(os.path.join(save_file, img_name), 0)
        output = cv2.resize(output, (size[1], size[0]))
        ret, output = cv2.threshold(output, 127, 255, cv2.THRESH_BINARY)

        cv2.imwrite(os.path.join(save_file, img_name), output)
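
Since the two output channels are complementary, an equivalent and slightly simpler post-processing is a per-pixel argmax over the class dimension instead of the ratio-then-threshold route. A minimal sketch, assuming the same `output_np` array of shape (2, 160, 160) produced in the loop above:

```python
import numpy as np

# output_np has shape (2, H, W): channel 0 = background, channel 1 = skin.
labels = np.argmax(output_np, axis=0)          # (H, W) array of 0/1 class ids
binary_mask = (labels * 255).astype('uint8')   # 255 where the skin channel wins
```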
--------------------------------------------------------------------------------
/U-Net/train.py:
--------------------------------------------------------------------------------
from model import *
from data import *
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf


time_start = time.time()
# tf.compat.v1.disable_eager_execution()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# ----------Shape Augmentation----------
data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')

# ----------Training setup----------
batch_size = 8
train_size = len(os.listdir('./dataset/train/image'))
val_size = len(os.listdir('./dataset/validation/image'))
model_save_file = './checkpoints'
if not os.path.exists(model_save_file):
    os.makedirs(model_save_file)
print(train_size, val_size)
trainGene = trainGenerator(batch_size, './dataset/train', 'image', 'mask', data_gen_args, save_to_dir=None)
validGene = validGenerator(batch_size, './dataset/validation', 'image', 'mask', save_to_dir=None)  # validation
model = unet()
model_checkpoint = ModelCheckpoint(model_save_file + '/unet_ECU-{epoch:02d}.hdf5', monitor='loss', verbose=1)
epochs = 10


history = model.fit_generator(
    trainGene,
    validation_data=validGene,
    validation_steps=int(np.ceil(val_size / batch_size)),    # total validation set / batch size
    steps_per_epoch=int(np.ceil(train_size / batch_size)),   # total training set / batch size
    epochs=epochs,
    verbose=2,
    shuffle=True,
    callbacks=[model_checkpoint])

# ----------Plot the loss curves----------
y1 = history.history['loss']
y2 = history.history['val_loss']
x = np.array(range(epochs))
plt.plot(x, y1, label='training loss')
plt.plot(x, y2, label='validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.savefig('Loss.png')
plt.show()

# ----------Plot the accuracy curves----------
plt.figure()  # start a fresh figure so the curves do not overlap the loss plot
y1 = history.history['accuracy']
y2 = history.history['val_accuracy']
plt.plot(x, y1, label='training accuracy')
plt.plot(x, y2, label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Acc')
plt.legend()
plt.savefig('Acc.png')
plt.show()
--------------------------------------------------------------------------------
/U-Net/model.py:
--------------------------------------------------------------------------------
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True


def unet(pretrained_weights=None, input_size=(256, 256, 3)):  # use (256, 256, 1) for grayscale input
    inputs = Input(input_size)
    # ----------Contracting path----------
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    # ----------Expanding path with skip connections----------
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)  # single-channel skin-probability map

    model = Model(inputs, conv10)

    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

    # model.summary()

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
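
As a quick smoke test of the architecture, the following sketch builds the network and runs one dummy image through it; it assumes the default `input_size` and is illustrative only, not part of the repository:

```python
import numpy as np
from model import unet

model = unet()                      # 256 x 256 x 3 input by default
x = np.random.rand(1, 256, 256, 3)  # one dummy RGB image
y = model.predict(x)
print(y.shape)                      # (1, 256, 256, 1): per-pixel skin probability
```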
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Color Invariant Skin Segmentation

This is the implementation of the paper [Color Invariant Skin Segmentation](https://openaccess.thecvf.com/content/CVPR2022W/FaDE-TCV/papers/Xu_Color_Invariant_Skin_Segmentation_CVPRW_2022_paper.pdf) using [FCN](https://github.com/yunlongdong/FCN-pytorch) [1] and [U-Net](https://github.com/zhixuhao/unet) [2].

## Color Space Augmentation

Images are augmented in HSV color space: we change the HSV values of the images to enlarge the training set, using the parameters shown in the image below.


![color augmentation](https://github.com/HanXuMartin/Color-Invariant-Skin-Segmentation/blob/main/color%20augmentation/color_augmentation.png)

Here is the pipeline using color space augmentation for skin segmentation.

![pipeline](https://github.com/HanXuMartin/Color-Invariant-Skin-Segmentation/blob/main/examples/pipeline.png)

[Here](https://github.com/HanXuMartin/Color-Invariant-Skin-Segmentation/blob/main/color%20augmentation/HSV_converter.py) is one example of how to change the HSV values.
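For a sense of what a single augmentation step does, the minimal sketch below shifts the hue channel of one image with OpenCV. The 30-step hue shift mirrors the default in HSV_converter.py; `example.jpg` is a placeholder file name:

```python
import cv2

img = cv2.imread('example.jpg')  # placeholder input image
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
hsv[:, :, 0] += 30               # shift hue by 30 (OpenCV hue range is 0-179)
hsv[:, :, 0] %= 180              # wrap around the hue circle
shifted = cv2.cvtColor(hsv.astype('uint8'), cv2.COLOR_HSV2BGR)
cv2.imwrite('example_hue30.png', shifted)
```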
## Output
Color space augmentation can help skin segmentation models deal with complex illumination conditions. Below are some examples. Label A means the model was trained with color space augmentation, while label B means it was trained without it.
![examples](https://github.com/HanXuMartin/Color-Invariant-Skin-Segmentation/blob/main/examples/results%20examples%201.png)
# How to use
# The code has been rebuilt. We recommend this [repo](https://github.com/HanXuMartin/SkinSegmentation)

## Requirements
- Python 3.8.5
- PyTorch
- Tensorflow
- Keras
## Test
1. Clone the repo:
```
git clone https://github.com/HanXuMartin/Color-Invariant-Skin-Segmentation.git
```
2. cd to the local path
```
cd Color-Invariant-Skin-Segmentation
```
3. Download the models from [here](https://drive.google.com/drive/folders/1QfoxabLN-UrsLwZjYXqmCYdHUkHxDJsf?usp=sharing)

4. For U-Net: change the model path, test path and output path in test.py, then run test.py.
```
cd U-Net
python test.py
```
5. For FCN: change the model path, test path and output path in prediction.py, then run prediction.py.
```
cd FCN
python prediction.py
```


## Train models with your own dataset

We trained our models on the [ECU](https://ieeexplore.ieee.org/document/1359760) [3] dataset. Follow these steps if you want to train your own models. We suggest keeping the ground truth in an image format (such as jpg or png).
## Dataset organization
Organize your dataset as follows:
```
dataset
|-----train
      |-----image
      |-----mask
|-----validation
      |-----image
      |-----mask
```
## U-Net training
1. Open U-Net/train.py and change the parameters in the "Training setup" section
2. Run the train.py file
```
python train.py
```
Checkpoints will be saved in U-Net/checkpoints by default.
## FCN training
1. Open data_train.py/data_val.py and change the training/validation paths. Remember to adapt the mask filename format in the line
```
imgB = cv2.imread(self.mask_file + '/' + img_name[0:7] + '_gt.png', 0)
```
2. Open FCN.py, change the training parameters and the checkpoint save path, then run FCN.py.
```
python FCN.py
```
Checkpoints will be saved in FCN/checkpoints by default.


## Reference
[1]. https://github.com/yunlongdong/FCN-pytorch

[2]. https://github.com/zhixuhao/unet

[3]. S. L. Phung, A. Bouzerdoum and D. Chai, "Skin segmentation using color pixel classification: analysis and comparison," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 27, no. 1, pp. 148-154, Jan. 2005, doi: 10.1109/TPAMI.2005.17.

[4]. M. Wang, W. Deng, J. Hu, X. Tao and Y. Huang, "Racial Faces in the Wild: Reducing Racial Bias by Information Maximization Adaptation Network," 2019 IEEE/CVF International Conference on Computer Vision (ICCV), 2019, pp. 692-702, doi: 10.1109/ICCV.2019.00078.
## Cite this repo
If you find this repo useful, please consider citing it as follows:
```
@InProceedings{Xu_2022_CVPR,
    author    = {Xu, Han and Sarkar, Abhijit and Abbott, A. Lynn},
    title     = {Color Invariant Skin Segmentation},
    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
    month     = {June},
    year      = {2022},
    pages     = {2906-2915}
}
```
--------------------------------------------------------------------------------
/color augmentation/HSV_converter.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 19:27:28 2020

@author: xuhan
"""

import os

import cv2
import numpy as np
from tqdm import tqdm


def Change_HUE(image, rgb_path, hue_path):
    img = cv2.imread(os.path.join(rgb_path, image))
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
    dhue = 30
    for i in range(int(180 / dhue)):
        img_hsv[:, :, 0] += dhue
        # OpenCV hue lives in [0, 180); wrap values that overflow.
        img_hsv[:, :, 0] -= (img_hsv[:, :, 0] >= 180) * 180

        img_hue = cv2.cvtColor(img_hsv.astype('uint8'), cv2.COLOR_HSV2BGR)
        save_hue = image.split('.')[0] + '_%d' % (dhue * (i + 1)) + '.png'

        if not os.path.exists(hue_path + '/' + '%d' % (dhue * (i + 1))):
            os.makedirs(hue_path + '/' + '%d' % (dhue * (i + 1)))

        save_path = os.path.join(hue_path + '/' + '%d' % (dhue * (i + 1)), save_hue)
        cv2.imwrite(save_path, img_hue)


def Change_Saturation(image, rgb_path, saturation_path):
    img = cv2.imread(os.path.join(rgb_path, image))
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
    image_H, image_S, image_V = cv2.split(img_hsv)
    dsat = 0.2
    for i in range(6):
        change_s = image_S * (1 - i * dsat)
        img_change = cv2.merge([image_H, change_s, image_V])
        img_sat = cv2.cvtColor(img_change.astype('uint8'), cv2.COLOR_HSV2BGR)
        if not os.path.exists(saturation_path + '/' + '%.1f' % (1 - i * dsat)):
            os.makedirs(saturation_path + '/' + '%.1f' % (1 - i * dsat))

        save_path = os.path.join(saturation_path + '/' + '%.1f' % (1 - i * dsat), image)
        cv2.imwrite(save_path, img_sat)


def Change_Value(image, rgb_path, value_path):
    img = cv2.imread(os.path.join(rgb_path, image))
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
    image_H, image_S, image_V = cv2.split(img_hsv)
    dvalue = 0.2
    for i in range(int(1 / dvalue)):
        change_v = image_V * (1 - i * dvalue)
        img_change = cv2.merge([image_H, image_S, change_v])
        img_value = cv2.cvtColor(img_change.astype('uint8'), cv2.COLOR_HSV2BGR)
        if not os.path.exists(value_path + '/' + '%.1f' % (1 - i * dvalue)):
            os.makedirs(value_path + '/' + '%.1f' % (1 - i * dvalue))

        save_path = os.path.join(value_path + '/' + '%.1f' % (1 - i * dvalue), image)
        cv2.imwrite(save_path, img_value)


if __name__ == "__main__":
    image_path = 'G:/UNet2/data/ECU/test/image'
    save_path = 'G:/UNet2/data/ECU/test/HSV2'
    image_names = os.listdir(image_path)
    dH = 30
    dS = 0.2
    dV = 0.2
    for image in tqdm(image_names):
        img = cv2.imread(os.path.join(image_path, image))
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
        image_H, image_S, image_V = cv2.split(img_hsv)
        # Sweep the full grid: 6 hue shifts x 6 saturation scales x 5 value scales.
        for h in range(6):
            change_H = image_H + dH * h
            change_H -= (change_H >= 180) * 180  # wrap hue into [0, 180)

            for s in range(6):
                change_S = image_S * (1 - s * dS)
                for v in range(5):
                    change_V = image_V * (1 - v * dV)
                    img_change = cv2.merge([change_H, change_S, change_V])
                    img_change = cv2.cvtColor(img_change.astype('uint8'), cv2.COLOR_HSV2BGR)
                    change_path = save_path + '/' + 'H%dS%.1fV%.1f' % (dH * h, 1 - dS * s, 1 - dV * v)
                    if not os.path.exists(change_path):
                        os.makedirs(change_path)

                    cv2.imwrite(change_path + '/' + image, img_change)
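
To sanity-check the wrap-around arithmetic used in the hue loops above, here is a tiny numeric example; it is illustrative only and not part of the script:

```python
import numpy as np

hue = np.array([10., 150., 170.])
hue += 30                    # shift by 30 on the OpenCV hue scale
hue -= (hue >= 180) * 180    # 10 -> 40, 150 -> 0, 170 -> 20
print(hue)                   # [40.  0. 20.]
```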
50 |         cv2.imwrite(save_path,img_sat)
51 |
52 |
53 |
54 | def Change_Value(image, rgb_path, value_path):
55 |     img = cv2.imread(os.path.join(rgb_path,image))
56 |     img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
57 |     image_H, image_S, image_V = cv2.split(img_hsv)
58 |     dvalue = 0.2
59 |     for i in range(int(1/dvalue)):  # value scale 1.0 down to 0.2 in steps of 0.2
60 |         # print(dsat)
61 |         change_v = image_V*(1-i*dvalue)
62 |         img_change = cv2.merge([image_H,image_S,change_v])
63 |         img_value = cv2.cvtColor(img_change.astype('uint8'), cv2.COLOR_HSV2BGR)
64 |         # save_value = image.split('.')[0]+'_%.1f' %(i*dvalue)+'.png'
65 |         if not os.path.exists(value_path+'/'+'%.1f' %(1-i*dvalue)):
66 |             os.makedirs(value_path+'/'+'%.1f' %(1-i*dvalue))
67 |
68 |         save_path = os.path.join(value_path+'/'+'%.1f' %(1-i*dvalue),image)
69 |         # print(save_path)
70 |         cv2.imwrite(save_path,img_value)
71 |
72 |
73 |
74 | if __name__ == "__main__":
75 |     image_path = 'G:/UNet2/data/ECU/test/image'
76 |     save_path = 'G:/UNet2/data/ECU/test/HSV2'
77 |     # value_path = 'G:/UNet2/data/ECU/test/HSV/Value/change'
78 |     # sat_path = 'G:/UNet2/data/ECU/test/HSV/Saturation/change'
79 |     image_names = os.listdir(image_path)
80 |     dH = 30
81 |     dS = 0.2
82 |     dV = 0.2
83 |     # print(image_names)
84 |     for image in tqdm(image_names):
85 |         # Change_Value(image, image_path, value_path)
86 |         # Change_Saturation(image, image_path, sat_path)
87 |         # break
88 |         # print()
89 |         img = cv2.imread(os.path.join(image_path,image))
90 |         img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
91 |         image_H, image_S, image_V = cv2.split(img_hsv)
92 |         for h in range(6):
93 |             change_H = image_H + dH*h
94 |             change_H -= (change_H >= 180) * 180  # wrap into OpenCV's [0, 180) hue range
95 |
96 |             for s in range(6):
97 |                 change_S = image_S*(1-s*dS)
98 |                 for v in range(5):
99 |                     change_V = image_V*(1-v*dV)
100 |                     img_change = cv2.merge([change_H,change_S,change_V])
101 |                     img_change = cv2.cvtColor(img_change.astype('uint8'), cv2.COLOR_HSV2BGR)
102 |                     change_path = save_path+'/'+'H%dS%.1fV%.1f' %(dH*h, 1-dS*s, 1-dV*v)
103 |                     if not os.path.exists(change_path):
104 |                         os.makedirs(change_path)
105 |
106 |
107 |                     cv2.imwrite(change_path+'/'+image,img_change)
108 |
109 |     # break
110 | #
111 |
--------------------------------------------------------------------------------
/U-Net/data.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from keras.preprocessing.image import ImageDataGenerator
3 | import numpy as np
4 | import os
5 | import glob
6 | import skimage.io as io
7 | import skimage.transform as trans
8 | from skimage import img_as_ubyte, img_as_float
9 | import cv2
10 | from tqdm import tqdm
11 | from PIL import ImageFile
12 | ImageFile.LOAD_TRUNCATED_IMAGES = True
13 |
14 | Sky = [128,128,128]
15 | Building = [128,0,0]
16 | Pole = [192,192,128]
17 | Road = [128,64,128]
18 | Pavement = [60,40,222]
19 | Tree = [128,128,0]
20 | SignSymbol = [192,128,128]
21 | Fence = [64,64,128]
22 | Car = [64,0,128]
23 | Pedestrian = [64,64,0]
24 | Bicyclist = [0,128,192]
25 | Unlabelled = [0,0,0]
26 |
27 | COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
28 |                        Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
29 |
30 |
31 | def adjustData(img,mask,flag_multi_class,num_class):
32 |     if(flag_multi_class):
33 |         img = img / 255
34 |         mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]
35 |         new_mask = np.zeros(mask.shape + (num_class,))
36 |         for i in range(num_class):
37 |             # for one pixel in the image, find the class in mask and convert it into a one-hot vector
38 |             #index = np.where(mask == i)
39 |             #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
40 |             #new_mask[index_mask] = 1
41 |             new_mask[mask == i,i] = 1
42 |         new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))
43 |         mask = new_mask
44 |     elif(np.max(img) > 1):
45 |         img = img / 255
46 |         mask = mask / 255
47 |         mask[mask > 0.5] = 1
48 |         mask[mask <= 0.5] = 0
49 |     return (img,mask)
50 |
51 |
52 |
53 | def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "rgb",
54 |                    mask_color_mode = "grayscale",image_save_prefix = "image",mask_save_prefix = "mask",
55 |                    flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (256,256),seed = 1):
56 |     '''
57 |     can generate image and mask at the same time
58 |     use the same seed for image_datagen and mask_datagen to ensure the transformations for image and mask are the same
59 |     if you want to visualize the results of the generator, set save_to_dir = "your path"
60 |     '''
61 |     image_datagen = ImageDataGenerator(**aug_dict)
62 |     mask_datagen = ImageDataGenerator(**aug_dict)
63 |     image_generator = image_datagen.flow_from_directory(
64 |         train_path,
65 |         classes = [image_folder],
66 |         class_mode = None,
67 |         color_mode = image_color_mode,
68 |         target_size = target_size,
69 |         batch_size = batch_size,
70 |         save_to_dir = save_to_dir,
71 |         save_prefix = image_save_prefix,
72 |         seed = seed)
73 |     mask_generator = mask_datagen.flow_from_directory(
74 |         train_path,
75 |         classes = [mask_folder],
76 |         class_mode = None,
77 |         color_mode = mask_color_mode,
78 |         target_size = target_size,
79 |         batch_size = batch_size,
80 |         save_to_dir = save_to_dir,
81 |         save_prefix = mask_save_prefix,
82 |         seed = seed)
83 |     train_generator = zip(image_generator, mask_generator)
84 |     for (img,mask) in train_generator:
85 |         img,mask = adjustData(img,mask,flag_multi_class,num_class)
86 |         yield (img,mask)
87 |
88 | def validGenerator(batch_size, val_path, image_folder, mask_folder, image_color_mode="rgb",
89 |                    mask_color_mode="grayscale", image_save_prefix="val_image", mask_save_prefix="val_mask",
90 |                    flag_multi_class=False, num_class=2, save_to_dir=None, target_size=(256,256), seed=1):
91 |     image_datagen = ImageDataGenerator()
92 |     mask_datagen = ImageDataGenerator()
93 |
94 |     image_generator = image_datagen.flow_from_directory(
95 |         val_path,
96 |         classes = [image_folder],
97 |         class_mode = None,
98 |         color_mode = image_color_mode,
99 |         target_size = target_size,
100 |         batch_size = batch_size,
101 |         save_to_dir = save_to_dir,
102 |         save_prefix = image_save_prefix,
103 |         seed = seed)
104 |
105 |     mask_generator = mask_datagen.flow_from_directory(
106 |         val_path,
107 |         classes = [mask_folder],
108 |         class_mode = None,
109 |         color_mode = mask_color_mode,
110 |         target_size = target_size,
111 |         batch_size = batch_size,
112 |         save_to_dir = save_to_dir,
113 |         save_prefix = mask_save_prefix,
114 |         seed = seed)
115 |
116 |     validation_generator = zip(image_generator, mask_generator)
117 |
118 |     for (img, mask) in validation_generator:
119 |         img, mask = adjustData(img,mask, flag_multi_class, num_class)
120 |         yield img, mask
121 |
122 |
123 | def testGenerator(test_path,num_image,target_size = (256,256),flag_multi_class = False,as_gray = False):
124 |     print('testGenerator working')
125 |     images = os.listdir(test_path)
126 |     for i in tqdm(range(num_image)):
127 |         # img = io.imread(os.path.join(test_path,images[i]), as_gray = as_gray)
128 |         img = cv2.imread(os.path.join(test_path,images[i]))[...,::-1]  # BGR -> RGB
129 |         # print(images[i],img.shape)
130 |         #print(os.path.join(test_path,"im0%d.jpg" %(2001+i)))
131 |         img = img / 255
132 |         img = trans.resize(img,target_size)
133 |         # img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img
134 |         img = np.reshape(img,(1,)+img.shape)
135 |         yield img
136 |
137 |
138 | def geneTrainNpy(image_path,mask_path,flag_multi_class = False,num_class = 2,image_prefix = "image",mask_prefix = "mask",image_as_gray = True,mask_as_gray = True):
139 |     image_name_arr = glob.glob(os.path.join(image_path,"%s*.png"%image_prefix))
140 |     image_arr = []
141 |     mask_arr = []
142 |     for index,item in enumerate(image_name_arr):
143 |         img = io.imread(item,as_gray = image_as_gray)
144 |         img = np.reshape(img,img.shape + (1,)) if image_as_gray else img
145 |         mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)
146 |         mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask
147 |         img,mask = adjustData(img,mask,flag_multi_class,num_class)
148 |         image_arr.append(img)
149 |         mask_arr.append(mask)
150 |     image_arr = np.array(image_arr)
151 |     mask_arr = np.array(mask_arr)
152 |     return image_arr,mask_arr
153 |
154 |
155 | def labelVisualize(num_class,color_dict,img):
156 |     img = img[:,:,0] if len(img.shape) == 3 else img
157 |     img_out = np.zeros(img.shape + (3,))
158 |     for i in range(num_class):
159 |         img_out[img == i,:] = color_dict[i]
160 |     return img_out / 255
161 |
162 |
163 |
164 | def saveResult(save_path,save_names, npyfile,flag_multi_class = False,num_class = 2):
165 |     print('Saving begins')
166 |     for i,item in enumerate(tqdm(npyfile)):
167 |         # print(i)
168 |         img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]
169 |         # print(img.shape)
170 |
171 |         #io.imsave(os.path.join(save_path,"%d_predict.png"%i),img)
172 |         io.imsave(os.path.join(save_path, save_names[i]),img_as_ubyte(img))
173 |
174 |         # cv2.imwrite(os.path.join(save_path,"%d_predict.png"%i),img)
--------------------------------------------------------------------------------
/FCN/FCN.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 | import torch
5 | import torch.nn as nn
6 | import torch.optim as optim
7 | from torchvision import models
8 | from torchvision.models.vgg import VGG
9 | # from BagData import dataloader
10 | from data_train import dataloader
11 | from data_val import dataloader_val
12 | import pdb
13 | import numpy as np
14 | import time
15 | import matplotlib.pyplot as plt
16 | # import visdom
17 | import os
18 |
19 |
20 |
21 |
22 | class FCN32s(nn.Module):
23 |
24 |     def __init__(self, pretrained_net, n_class):
25 |         super().__init__()
26 |         self.n_class = n_class
27 |         self.pretrained_net = pretrained_net
28 |         self.relu = nn.ReLU(inplace=True)
29 |         self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
30 |         self.bn1 = nn.BatchNorm2d(512)
31 |         self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
32 |         self.bn2 = nn.BatchNorm2d(256)
33 |         self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
34 |         self.bn3 = nn.BatchNorm2d(128)
35 |         self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
36 |         self.bn4 = nn.BatchNorm2d(64)
37 |         self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
38 |         self.bn5 = nn.BatchNorm2d(32)
39 |         self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
40 |
41 |     def forward(self, x):
42 |         output = self.pretrained_net(x)
43 |         x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
44 |
45 |         score = self.bn1(self.relu(self.deconv1(x5)))     # size=(N, 512, x.H/16, x.W/16)
46 |         score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
47 |         score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
48 |         score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
49 |         score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
50 |         score = self.classifier(score)                    # size=(N, n_class, x.H/1, x.W/1)
51 |
52 |         return score  # size=(N, n_class, x.H/1, x.W/1)
53 |
54 |
55 | class FCN16s(nn.Module):
56 |
57 |     def __init__(self, pretrained_net, n_class):
58 |         super().__init__()
59 |         self.n_class = n_class
60 |         self.pretrained_net = pretrained_net
61 |         self.relu = nn.ReLU(inplace=True)
62 |         self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
63 |         self.bn1 = nn.BatchNorm2d(512)
64 |         self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
65 |         self.bn2 = nn.BatchNorm2d(256)
66 |         self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
67 |         self.bn3 = nn.BatchNorm2d(128)
68 |         self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
69 |         self.bn4 = nn.BatchNorm2d(64)
70 |         self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
71 |         self.bn5 = nn.BatchNorm2d(32)
72 |         self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
73 |
74 |     def forward(self, x):
75 |         output = self.pretrained_net(x)
76 |         x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
77 |         x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
78 |
79 |         score = self.relu(self.deconv1(x5))               # size=(N, 512, x.H/16, x.W/16)
80 |         score = self.bn1(score + x4)                      # element-wise add, size=(N, 512, x.H/16, x.W/16)
81 |         score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
82 |         score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
83 |         score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
84 |         score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
85 |         score = self.classifier(score)                    # size=(N, n_class, x.H/1, x.W/1)
86 |
87 |         return score  # size=(N, n_class, x.H/1, x.W/1)
88 |
89 |
90 | class FCN8s(nn.Module):
91 |
92 |     def __init__(self, pretrained_net, n_class):
93 |         super().__init__()
94 |         self.n_class = n_class
95 |         self.pretrained_net = pretrained_net
96 |         self.relu = nn.ReLU(inplace=True)
97 |         self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
98 |         self.bn1 = nn.BatchNorm2d(512)
99 |         self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
100 |         self.bn2 = nn.BatchNorm2d(256)
101 |         self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
102 |         self.bn3 = nn.BatchNorm2d(128)
103 |         self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
104 |         self.bn4 = nn.BatchNorm2d(64)
105 |         self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
106 |         self.bn5 = nn.BatchNorm2d(32)
107 |         self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
108 |
109 |     def forward(self, x):
110 |         output = self.pretrained_net(x)
111 |         x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
112 |         x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
113 |         x3 = output['x3']  # size=(N, 256, x.H/8, x.W/8)
114 |
115 |         score = self.relu(self.deconv1(x5))               # size=(N, 512, x.H/16, x.W/16)
116 |         score = self.bn1(score + x4)                      # element-wise add, size=(N, 512, x.H/16, x.W/16)
117 |         score = self.relu(self.deconv2(score))            # size=(N, 256, x.H/8, x.W/8)
118 |         score = self.bn2(score + x3)                      # element-wise add, size=(N, 256, x.H/8, x.W/8)
119 |         score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
120 |         score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
121 |         score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
122 |         score = self.classifier(score)                    # size=(N, n_class, x.H/1, x.W/1)
123 |
124 |         return score  # size=(N, n_class, x.H/1, x.W/1)
125 |
126 |
127 | class FCNs(nn.Module):
128 |
129 |     def __init__(self, pretrained_net, n_class):
130 |         super().__init__()
131 |         self.n_class = n_class
132 |         self.pretrained_net = pretrained_net
133 |         self.relu = nn.ReLU(inplace=True)
134 |         self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
135 |         self.bn1 = nn.BatchNorm2d(512)
136 |         self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
137 |         self.bn2 = nn.BatchNorm2d(256)
138 |         self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
139 |         self.bn3 = nn.BatchNorm2d(128)
140 |         self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
141 |         self.bn4 = nn.BatchNorm2d(64)
142 |         self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
143 |         self.bn5 = nn.BatchNorm2d(32)
144 |         self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
145 |
146 |     def forward(self, x):
147 |         output = self.pretrained_net(x)
148 |         x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
149 |         x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
150 |         x3 = output['x3']  # size=(N, 256, x.H/8, x.W/8)
151 |         x2 = output['x2']  # size=(N, 128, x.H/4, x.W/4)
152 |         x1 = output['x1']  # size=(N, 64, x.H/2, x.W/2)
153 |
154 |         score = self.bn1(self.relu(self.deconv1(x5)))     # size=(N, 512, x.H/16, x.W/16)
155 |         score = score + x4                                # element-wise add, size=(N, 512, x.H/16, x.W/16)
156 |         score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
157 |         score = score + x3                                # element-wise add, size=(N, 256, x.H/8, x.W/8)
158 |         score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
159 |         score = score + x2                                # element-wise add, size=(N, 128, x.H/4, x.W/4)
160 |         score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
161 |         score = score + x1                                # element-wise add, size=(N, 64, x.H/2, x.W/2)
162 |         score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
163 |         score = self.classifier(score)                    # size=(N, n_class, x.H/1, x.W/1)
164 |
165 |         return score  # size=(N, n_class, x.H/1, x.W/1)
166 |
167 |
168 | class VGGNet(VGG):
169 |     def __init__(self, pretrained=True, model='vgg16', requires_grad=True, remove_fc=True, show_params=False):
170 |         super().__init__(make_layers(cfg[model]))
171 |         self.ranges = ranges[model]
172 |
173 |         if pretrained:
174 |             exec("self.load_state_dict(models.%s(pretrained=True).state_dict())" % model)
175 |
176 |         if not requires_grad:
177 |             for param in super().parameters():
178 |                 param.requires_grad = False
179 |
180 |         if remove_fc:  # delete redundant fully-connected layer params, can save memory
181 |             del self.classifier
182 |
183 |         if show_params:
184 |             for name, param in self.named_parameters():
185 |                 print(name, param.size())
186 |
187 |     def forward(self, x):
188 |         output = {}
189 |         # get the output of each maxpooling layer (5 maxpool in VGG net)
190 |         for idx in range(len(self.ranges)):
191 |             for layer in range(self.ranges[idx][0], self.ranges[idx][1]):
192 |                 x = self.features[layer](x)
193 |             output["x%d"%(idx+1)] = x
194 |
195 |         return output
196 |
197 |
198 | ranges = {
199 |     'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),
200 |     'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),
201 |     'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),
202 |     'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37))
203 | }
204 |
205 | # cropped version from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
206 | cfg = {
207 |     'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
208 |     'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
209 |     'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
210 |     'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
211 | }
212 |
213 | def make_layers(cfg, batch_norm=False):
214 |     layers = []
215 |     in_channels = 3
216 |     for v in cfg:
217 |         if v == 'M':
218 |             layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
219 |         else:
220 |             conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
221 |             if batch_norm:
222 |                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
223 |             else:
224 |                 layers += [conv2d, nn.ReLU(inplace=True)]
225 |             in_channels = v
226 |     return nn.Sequential(*layers)
227 |
228 |
229 | if __name__ == "__main__":
230 |     # vis = visdom.Visdom()
231 |     vgg_model = VGGNet(requires_grad=True)
232 |     fcn_model = FCNs(pretrained_net=vgg_model, n_class=2)
233 |     fcn_model = fcn_model.cuda()
234 |     criterion = nn.BCELoss().cuda()
235 |     optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.7)
236 |     #input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
237 |     #y = torch.autograd.Variable(torch.randn(batch_size, n_class, h, w), requires_grad=False)
238 |     # saving_index = 0
239 |     print('Train {}, Validation {}'.format(len(dataloader)*4, len(dataloader_val)*4))  # *4: the dataloaders' batch size
240 |     epochs = 10
241 |     train_loss = []
242 |     valid_loss = []
243 |     for epo in range(epochs):
244 |         start = time.time()
245 |
246 |         epo_loss = 0
247 |         fcn_model.train()
248 |         for item in dataloader:
249 |             input = item['A']
250 |             y = item['B']
251 |             input = torch.autograd.Variable(input)
252 |             y = torch.autograd.Variable(y)
253 |             input = input.cuda()
254 |             y = y.cuda()
255 |             optimizer.zero_grad()
256 |             output = fcn_model(input)
257 |             output = torch.sigmoid(output)  # nn.functional.sigmoid is deprecated
258 |             loss = criterion(output, y)
259 |             loss.backward()
260 |             epo_loss += loss.item()
261 |             optimizer.step()
262 |
263 |             # output_np = output.cpu().data.numpy().copy()
264 |             # output_np = np.argmin(output_np, axis=1)
265 |             # y_np = y.cpu().data.numpy().copy()
266 |             # y_np = np.argmin(y_np, axis=1)
267 |
268 |         val_loss = 0
269 |         fcn_model.eval()
270 |         with torch.no_grad():
271 |             for item in dataloader_val:
272 |                 input = item['A']
273 |                 y = item['B']
274 |                 input = torch.autograd.Variable(input)
275 |                 y = torch.autograd.Variable(y)
276 |                 input = input.cuda()
277 |                 y = y.cuda()
278 |
279 |                 output = fcn_model(input)
280 |                 output = torch.sigmoid(output)  # nn.functional.sigmoid is deprecated
281 |                 loss = criterion(output, y)
282 |
283 |                 val_loss += loss.item()
284 |                 # optimizer.step()
285 |
286 |                 # output_np = output.cpu().data.numpy().copy()
287 |                 # output_np = np.argmin(output_np, axis=1)
288 |                 # y_np = y.cpu().data.numpy().copy()
289 |                 # y_np = np.argmin(y_np, axis=1)
290 |
291 |
292 |         end = time.time()
293 |         trainingLoss = epo_loss/len(dataloader)
294 |         validationLoss = val_loss/len(dataloader_val)
295 |         train_loss.append(trainingLoss)
296 |         valid_loss.append(validationLoss)
297 |         print('epoch loss = %f'%(trainingLoss),
298 |               'validation loss = %f'%(validationLoss),
299 |               'time cost',end-start,'s')
300 |
301 |         # if np.mod(saving_index, 2)==1:
302 |         if not os.path.exists('checkpoints'):
303 |             os.makedirs('checkpoints')
304 |         torch.save(fcn_model, 'checkpoints/fcn_model_{}.pt'.format(epo))
305 |         print('saving checkpoints/fcn_model_{}.pt \n'.format(epo))
306 |
307 |     np.save('training loss', train_loss)
308 |     np.save('validation loss', valid_loss)
309 |
310 |     # imagename = 'Loss'
311 |     # print(a)
312 |     y1 = train_loss
313 |     y2 = valid_loss
314 |     # print(y)
315 |     x = np.array(range(epochs))
316 |     # print(x.shape)
317 |     plt.plot(x,y1, label = 'training loss')
318 |     plt.plot(x,y2, label = 'validation loss')
319 |     plt.xlabel('Epoch')
320 |     plt.ylabel('Loss')
321 |     plt.legend()
322 |     plt.savefig('Loss'+'.png')
323 |     plt.show()
--------------------------------------------------------------------------------