├── code ├── util │ ├── CelebAMask-HQ │ │ ├── __init__.py │ │ └── Data_preprocessing │ │ │ ├── __pycache__ │ │ │ └── utils.cpython-37.pyc │ │ │ ├── utils.py │ │ │ ├── g_mask.py │ │ │ └── v_mask.py │ ├── __pycache__ │ │ ├── coco.cpython-36.pyc │ │ ├── coco.cpython-37.pyc │ │ ├── coco.cpython-38.pyc │ │ ├── html.cpython-37.pyc │ │ ├── html.cpython-38.pyc │ │ ├── util.cpython-36.pyc │ │ ├── util.cpython-37.pyc │ │ ├── util.cpython-38.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── visualizer.cpython-37.pyc │ │ ├── visualizer.cpython-38.pyc │ │ ├── iter_counter.cpython-37.pyc │ │ └── iter_counter.cpython-38.pyc │ ├── __init__.py │ ├── html.py │ ├── iter_counter.py │ ├── coco.py │ ├── visualizer.py │ └── util.py ├── networks │ ├── __pycache__ │ │ ├── enet.cpython-36.pyc │ │ ├── enet.cpython-38.pyc │ │ ├── pnet.cpython-36.pyc │ │ ├── pnet.cpython-38.pyc │ │ ├── unet.cpython-36.pyc │ │ ├── unet.cpython-38.pyc │ │ ├── cenet.cpython-36.pyc │ │ ├── config.cpython-36.pyc │ │ ├── config.cpython-38.pyc │ │ ├── nnunet.cpython-36.pyc │ │ ├── nnunet.cpython-38.pyc │ │ ├── scse2d.cpython-36.pyc │ │ ├── unet2d.cpython-36.pyc │ │ ├── attention.cpython-36.pyc │ │ ├── attention.cpython-38.pyc │ │ ├── unet2d_2.cpython-36.pyc │ │ ├── net_factory.cpython-36.pyc │ │ ├── net_factory.cpython-38.pyc │ │ ├── unet2d_nest.cpython-36.pyc │ │ ├── unet2d_scse.cpython-36.pyc │ │ ├── unet_2Plus.cpython-36.pyc │ │ ├── unet_3Plus.cpython-36.pyc │ │ ├── efficientunet.cpython-36.pyc │ │ ├── efficientunet.cpython-38.pyc │ │ ├── neural_network.cpython-36.pyc │ │ ├── neural_network.cpython-38.pyc │ │ ├── unet2d_attention.cpython-36.pyc │ │ ├── efficient_encoder.cpython-36.pyc │ │ ├── efficient_encoder.cpython-38.pyc │ │ ├── vision_transformer.cpython-36.pyc │ │ ├── vision_transformer.cpython-38.pyc │ │ ├── swin_transformer_unet_skip_expand_decoder_sys.cpython-36.pyc │ │ └── swin_transformer_unet_skip_expand_decoder_sys.cpython-38.pyc │ ├── net_factory.py │ └── unet2d.py ├── utils │ ├── __pycache__ │ │ ├── losses.cpython-36.pyc │ │ ├── losses.cpython-38.pyc │ │ ├── metrics.cpython-36.pyc │ │ ├── metrics.cpython-38.pyc │ │ ├── ramps.cpython-36.pyc │ │ ├── ramps.cpython-38.pyc │ │ ├── distance_metric.cpython-36.pyc │ │ └── distance_metric.cpython-38.pyc │ ├── metrics.py │ ├── ramps.py │ ├── util.py │ └── losses.py ├── models │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── pix2pix_model.cpython-36.pyc │ │ ├── pix2pix_model.cpython-37.pyc │ │ └── pix2pix_model.cpython-38.pyc │ ├── networks │ │ ├── __pycache__ │ │ │ ├── loss.cpython-36.pyc │ │ │ ├── loss.cpython-37.pyc │ │ │ ├── loss.cpython-38.pyc │ │ │ ├── encoder.cpython-36.pyc │ │ │ ├── encoder.cpython-37.pyc │ │ │ ├── encoder.cpython-38.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── generator.cpython-36.pyc │ │ │ ├── generator.cpython-37.pyc │ │ │ ├── generator.cpython-38.pyc │ │ │ ├── architecture.cpython-36.pyc │ │ │ ├── architecture.cpython-37.pyc │ │ │ ├── architecture.cpython-38.pyc │ │ │ ├── base_network.cpython-36.pyc │ │ │ ├── base_network.cpython-37.pyc │ │ │ ├── base_network.cpython-38.pyc │ │ │ ├── discriminator.cpython-36.pyc │ │ │ ├── discriminator.cpython-37.pyc │ │ │ ├── discriminator.cpython-38.pyc │ │ │ ├── normalization.cpython-36.pyc │ │ │ ├── normalization.cpython-37.pyc │ │ │ └── normalization.cpython-38.pyc │ │ ├── sync_batchnorm │ │ │ ├── __pycache__ │ │ │ │ ├── comm.cpython-36.pyc │ │ │ │ ├── comm.cpython-37.pyc │ │ │ │ ├── comm.cpython-38.pyc │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── batchnorm.cpython-36.pyc │ │ │ │ ├── batchnorm.cpython-37.pyc │ │ │ │ ├── batchnorm.cpython-38.pyc │ │ │ │ ├── replicate.cpython-36.pyc │ │ │ │ ├── replicate.cpython-37.pyc │ │ │ │ └── replicate.cpython-38.pyc │ │ │ ├── __init__.py │ │ │ ├── unittest.py │ │ │ ├── batchnorm_reimpl.py │ │ │ ├── replicate.py │ │ │ └── comm.py │ │ ├── encoder.py │ │ ├── __init__.py │ │ ├── base_network.py │ │ ├── discriminator.py │ │ ├── generator.py │ │ ├── loss.py │ │ ├── architecture.py │ │ └── normalization.py │ └── __init__.py ├── dataloaders │ ├── __pycache__ │ │ ├── utils.cpython-36.pyc │ │ ├── utils.cpython-38.pyc │ │ ├── dataset.cpython-38.pyc │ │ ├── dataset_covid.cpython-36.pyc │ │ └── dataset_covid.cpython-38.pyc │ ├── dataset_covid.py │ ├── dataset.py │ └── utils.py ├── test.py ├── test_covid.py ├── train_template.py ├── train_SAST.py └── train_SACPS.py ├── dataset.png ├── framework.png ├── data ├── MOS1000 │ ├── val_slice.xlsx │ ├── test_slice.xlsx │ ├── test_volume.xlsx │ ├── val_volume.xlsx │ ├── train_slice_label.xlsx │ └── train_slice_unlabel.xlsx └── COVID249 │ ├── test_slice.xlsx │ ├── test_volume.xlsx │ ├── train_0.1_l.xlsx │ ├── train_0.1_u.xlsx │ ├── train_0.2_l.xlsx │ ├── train_0.2_u.xlsx │ ├── train_0.3_l.xlsx │ ├── train_0.3_u.xlsx │ ├── val_slice.xlsx │ └── val_volume.xlsx └── README.md /code/util/CelebAMask-HQ/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /dataset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/dataset.png -------------------------------------------------------------------------------- /framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/framework.png -------------------------------------------------------------------------------- /data/MOS1000/val_slice.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/MOS1000/val_slice.xlsx -------------------------------------------------------------------------------- /data/COVID249/test_slice.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/test_slice.xlsx -------------------------------------------------------------------------------- /data/COVID249/test_volume.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/test_volume.xlsx -------------------------------------------------------------------------------- /data/COVID249/train_0.1_l.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/train_0.1_l.xlsx -------------------------------------------------------------------------------- /data/COVID249/train_0.1_u.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/train_0.1_u.xlsx -------------------------------------------------------------------------------- /data/COVID249/train_0.2_l.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/train_0.2_l.xlsx -------------------------------------------------------------------------------- /data/COVID249/train_0.2_u.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/train_0.2_u.xlsx -------------------------------------------------------------------------------- /data/COVID249/train_0.3_l.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/train_0.3_l.xlsx -------------------------------------------------------------------------------- /data/COVID249/train_0.3_u.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/train_0.3_u.xlsx -------------------------------------------------------------------------------- /data/COVID249/val_slice.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/val_slice.xlsx -------------------------------------------------------------------------------- /data/COVID249/val_volume.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/COVID249/val_volume.xlsx -------------------------------------------------------------------------------- /data/MOS1000/test_slice.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/MOS1000/test_slice.xlsx -------------------------------------------------------------------------------- /data/MOS1000/test_volume.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/MOS1000/test_volume.xlsx -------------------------------------------------------------------------------- /data/MOS1000/val_volume.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/MOS1000/val_volume.xlsx -------------------------------------------------------------------------------- /data/MOS1000/train_slice_label.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/MOS1000/train_slice_label.xlsx -------------------------------------------------------------------------------- /data/MOS1000/train_slice_unlabel.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/data/MOS1000/train_slice_unlabel.xlsx -------------------------------------------------------------------------------- /code/util/__pycache__/coco.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/coco.cpython-36.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/coco.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/coco.cpython-37.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/coco.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/coco.cpython-38.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/html.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/html.cpython-37.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/html.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/html.cpython-38.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/util.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/util.cpython-36.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/util.cpython-37.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/util.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/enet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/enet.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/enet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/enet.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/pnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/pnet.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/pnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/pnet.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet.cpython-38.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/losses.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/losses.cpython-36.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/losses.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/losses.cpython-38.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/metrics.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/metrics.cpython-36.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/metrics.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/metrics.cpython-38.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/ramps.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/ramps.cpython-36.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/ramps.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/ramps.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/cenet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/cenet.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/config.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/config.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/nnunet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/nnunet.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/nnunet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/nnunet.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/scse2d.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/scse2d.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet2d.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet2d.cpython-36.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/visualizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/visualizer.cpython-37.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/visualizer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/visualizer.cpython-38.pyc -------------------------------------------------------------------------------- /code/dataloaders/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/dataloaders/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /code/dataloaders/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/dataloaders/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/attention.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/attention.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/attention.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/attention.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet2d_2.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet2d_2.cpython-36.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/iter_counter.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/iter_counter.cpython-37.pyc -------------------------------------------------------------------------------- /code/util/__pycache__/iter_counter.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/__pycache__/iter_counter.cpython-38.pyc -------------------------------------------------------------------------------- /code/dataloaders/__pycache__/dataset.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/dataloaders/__pycache__/dataset.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/__pycache__/pix2pix_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/__pycache__/pix2pix_model.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/__pycache__/pix2pix_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/__pycache__/pix2pix_model.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/__pycache__/pix2pix_model.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/__pycache__/pix2pix_model.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/loss.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/loss.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/loss.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/loss.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/net_factory.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/net_factory.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/net_factory.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/net_factory.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet2d_nest.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet2d_nest.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet2d_scse.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet2d_scse.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet_2Plus.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet_2Plus.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet_3Plus.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet_3Plus.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/encoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/encoder.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/encoder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/encoder.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/encoder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/encoder.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/efficientunet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/efficientunet.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/efficientunet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/efficientunet.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/neural_network.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/neural_network.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/neural_network.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/neural_network.cpython-38.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/distance_metric.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/distance_metric.cpython-36.pyc -------------------------------------------------------------------------------- /code/utils/__pycache__/distance_metric.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/utils/__pycache__/distance_metric.cpython-38.pyc -------------------------------------------------------------------------------- /code/dataloaders/__pycache__/dataset_covid.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/dataloaders/__pycache__/dataset_covid.cpython-36.pyc -------------------------------------------------------------------------------- /code/dataloaders/__pycache__/dataset_covid.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/dataloaders/__pycache__/dataset_covid.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/generator.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/generator.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/generator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/generator.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/generator.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/generator.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/unet2d_attention.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/unet2d_attention.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/architecture.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/architecture.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/architecture.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/architecture.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/architecture.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/architecture.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/base_network.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/base_network.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/base_network.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/base_network.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/base_network.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/base_network.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/efficient_encoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/efficient_encoder.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/efficient_encoder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/efficient_encoder.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/vision_transformer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/vision_transformer.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/vision_transformer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/vision_transformer.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/discriminator.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/discriminator.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/discriminator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/discriminator.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/discriminator.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/discriminator.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/normalization.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/normalization.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/normalization.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/normalization.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/__pycache__/normalization.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/__pycache__/normalization.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/comm.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/comm.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/comm.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/comm.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/comm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/comm.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/batchnorm.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/batchnorm.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/batchnorm.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/batchnorm.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/batchnorm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/batchnorm.cpython-38.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/replicate.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/replicate.cpython-36.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/replicate.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/replicate.cpython-37.pyc -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__pycache__/replicate.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/models/networks/sync_batchnorm/__pycache__/replicate.cpython-38.pyc -------------------------------------------------------------------------------- /code/util/CelebAMask-HQ/Data_preprocessing/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/util/CelebAMask-HQ/Data_preprocessing/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /code/util/CelebAMask-HQ/Data_preprocessing/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def make_folder(path): 4 | if not os.path.exists(os.path.join(path)): 5 | os.makedirs(os.path.join(path)) 6 | 7 | 8 | -------------------------------------------------------------------------------- /code/util/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | -------------------------------------------------------------------------------- /code/networks/__pycache__/swin_transformer_unet_skip_expand_decoder_sys.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/swin_transformer_unet_skip_expand_decoder_sys.cpython-36.pyc -------------------------------------------------------------------------------- /code/networks/__pycache__/swin_transformer_unet_skip_expand_decoder_sys.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeiLyu/SASSL/HEAD/code/networks/__pycache__/swin_transformer_unet_skip_expand_decoder_sys.cpython-38.pyc -------------------------------------------------------------------------------- /code/networks/net_factory.py: -------------------------------------------------------------------------------- 1 | from networks.unet2d import UNet2D 2 | 3 | def net_factory(net_type="unet"): 4 | if net_type == "unet": 5 | net = UNet2D().cuda() 6 | else: 7 | net = None 8 | return net -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : __init__.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d 12 | from .batchnorm import convert_model 13 | from .replicate import DataParallelWithCallback, patch_replication_callback 14 | -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : unittest.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import unittest 12 | import torch 13 | 14 | 15 | class TorchTestCase(unittest.TestCase): 16 | def assertTensorClose(self, x, y): 17 | adiff = float((x - y).abs().max()) 18 | if (y == 0).all(): 19 | rdiff = 'NaN' 20 | else: 21 | rdiff = float((adiff / y).abs().max()) 22 | 23 | message = ( 24 | 'Tensor close check failed\n' 25 | 'adiff={}\n' 26 | 'rdiff={}\n' 27 | ).format(adiff, rdiff) 28 | self.assertTrue(torch.allclose(x, y), message) 29 | 30 | -------------------------------------------------------------------------------- /code/util/CelebAMask-HQ/Data_preprocessing/g_mask.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import glob 4 | import numpy as np 5 | from utils import make_folder 6 | 7 | 8 | label_list = ['skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip', 9 | 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'] 10 | 11 | folder_base = '/media/zhup/Data/CelebAMask-HQ/CelebAMaskHQ-mask-anno' 12 | folder_save = '/media/zhup/Data/CelebAMask-HQ/CelebAMaskHQ-mask' 13 | img_num = 30000 14 | 15 | make_folder(folder_save) 16 | 17 | for k in range(img_num): 18 | folder_num = int(k / 2000) 19 | im_base = np.zeros((512, 512)) 20 | for idx, label in enumerate(label_list): 21 | filename = os.path.join(folder_base, str(folder_num), str(k).rjust(5, '0') + '_' + label + '.png') 22 | if (os.path.exists(filename)): 23 | print(label, idx + 1) 24 | im = cv2.imread(filename) 25 | im = im[:, :, 0] 26 | im_base[im != 0] = (idx + 1) 27 | 28 | filename_save = os.path.join(folder_save, str(k) + '.png') 29 | print(filename_save) 30 | cv2.imwrite(filename_save, im_base) 31 | 32 | -------------------------------------------------------------------------------- /code/utils/metrics.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 2019/12/14 下午4:41 4 | # @Author : chuyu zhang 5 | # @File : metrics.py 6 | # @Software: PyCharm 7 | 8 | 9 | import numpy as np 10 | from medpy import metric 11 | 12 | 13 | def cal_dice(prediction, label, num=2): 14 | total_dice = np.zeros(num-1) 15 | for i in range(1, num): 16 | prediction_tmp = (prediction == i) 17 | label_tmp = (label == i) 18 | prediction_tmp = prediction_tmp.astype(np.float) 19 | label_tmp = label_tmp.astype(np.float) 20 | 21 | dice = 2 * np.sum(prediction_tmp * label_tmp) / (np.sum(prediction_tmp) + np.sum(label_tmp)) 22 | total_dice[i - 1] += dice 23 | 24 | return total_dice 25 | 26 | 27 | def calculate_metric_percase(pred, gt): 28 | dc = metric.binary.dc(pred, gt) 29 | jc = metric.binary.jc(pred, gt) 30 | hd = metric.binary.hd95(pred, gt) 31 | asd = metric.binary.asd(pred, gt) 32 | 33 | return dc, jc, hd, asd 34 | 35 | 36 | def dice(input, target, ignore_index=None): 37 | smooth = 1. 38 | # using clone, so that it can do change to original target. 39 | iflat = input.clone().view(-1) 40 | tflat = target.clone().view(-1) 41 | if ignore_index is not None: 42 | mask = tflat == ignore_index 43 | tflat[mask] = 0 44 | iflat[mask] = 0 45 | intersection = (iflat * tflat).sum() 46 | 47 | return (2. * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth) -------------------------------------------------------------------------------- /code/utils/ramps.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018, Curious AI Ltd. All rights reserved. 2 | # 3 | # This work is licensed under the Creative Commons Attribution-NonCommercial 4 | # 4.0 International License. To view a copy of this license, visit 5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to 6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 7 | 8 | """Functions for ramping hyperparameters up or down 9 | 10 | Each function takes the current training step or epoch, and the 11 | ramp length in the same format, and returns a multiplier between 12 | 0 and 1. 13 | """ 14 | 15 | 16 | import numpy as np 17 | 18 | 19 | def sigmoid_rampup(current, rampup_length): 20 | """Exponential rampup from https://arxiv.org/abs/1610.02242""" 21 | if rampup_length == 0: 22 | return 1.0 23 | else: 24 | current = np.clip(current, 0.0, rampup_length) 25 | phase = 1.0 - current / rampup_length 26 | return float(np.exp(-5.0 * phase * phase)) 27 | 28 | 29 | def linear_rampup(current, rampup_length): 30 | """Linear rampup""" 31 | assert current >= 0 and rampup_length >= 0 32 | if current >= rampup_length: 33 | return 1.0 34 | else: 35 | return current / rampup_length 36 | 37 | 38 | def cosine_rampdown(current, rampdown_length): 39 | """Cosine rampdown from https://arxiv.org/abs/1608.03983""" 40 | assert 0 <= current <= rampdown_length 41 | return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1)) 42 | -------------------------------------------------------------------------------- /code/models/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import importlib 7 | import torch 8 | 9 | 10 | def find_model_using_name(model_name): 11 | # Given the option --model [modelname], 12 | # the file "models/modelname_model.py" 13 | # will be imported. 14 | model_filename = "models." + model_name + "_model" 15 | modellib = importlib.import_module(model_filename) 16 | 17 | # In the file, the class called ModelNameModel() will 18 | # be instantiated. It has to be a subclass of torch.nn.Module, 19 | # and it is case-insensitive. 20 | model = None 21 | target_model_name = model_name.replace('_', '') + 'model' 22 | for name, cls in modellib.__dict__.items(): 23 | if name.lower() == target_model_name.lower() \ 24 | and issubclass(cls, torch.nn.Module): 25 | model = cls 26 | 27 | if model is None: 28 | print("In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase." % (model_filename, target_model_name)) 29 | exit(0) 30 | 31 | return model 32 | 33 | 34 | def get_option_setter(model_name): 35 | model_class = find_model_using_name(model_name) 36 | return model_class.modify_commandline_options 37 | 38 | 39 | def create_model(opt): 40 | model = find_model_using_name(opt.model) 41 | instance = model(opt) 42 | print("model [%s] was created" % (type(instance).__name__)) 43 | 44 | return instance 45 | -------------------------------------------------------------------------------- /code/models/networks/encoder.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch.nn as nn 7 | import numpy as np 8 | import torch.nn.functional as F 9 | from models.networks.base_network import BaseNetwork 10 | from models.networks.normalization import get_nonspade_norm_layer 11 | 12 | 13 | class ConvEncoder(BaseNetwork): 14 | """ Same architecture as the image discriminator """ 15 | 16 | def __init__(self, opt): 17 | super().__init__() 18 | 19 | kw = 3 20 | pw = int(np.ceil((kw - 1.0) / 2)) 21 | ndf = opt.ngf 22 | norm_layer = get_nonspade_norm_layer(opt, opt.norm_E) 23 | self.layer1 = norm_layer(nn.Conv2d(3, ndf, kw, stride=2, padding=pw)) 24 | self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, kw, stride=2, padding=pw)) 25 | self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=2, padding=pw)) 26 | self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=2, padding=pw)) 27 | self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw)) 28 | if opt.crop_size >= 256: 29 | self.layer6 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw)) 30 | 31 | self.so = s0 = 4 32 | self.fc_mu = nn.Linear(ndf * 8 * s0 * s0, 256) 33 | self.fc_var = nn.Linear(ndf * 8 * s0 * s0, 256) 34 | 35 | self.actvn = nn.LeakyReLU(0.2, False) 36 | self.opt = opt 37 | 38 | def forward(self, x): 39 | if x.size(2) != 256 or x.size(3) != 256: 40 | x = F.interpolate(x, size=(256, 256), mode='bilinear') 41 | 42 | x = self.layer1(x) 43 | x = self.layer2(self.actvn(x)) 44 | x = self.layer3(self.actvn(x)) 45 | x = self.layer4(self.actvn(x)) 46 | x = self.layer5(self.actvn(x)) 47 | if self.opt.crop_size >= 256: 48 | x = self.layer6(self.actvn(x)) 49 | x = self.actvn(x) 50 | 51 | x = x.view(x.size(0), -1) 52 | mu = self.fc_mu(x) 53 | logvar = self.fc_var(x) 54 | 55 | return mu, logvar 56 | -------------------------------------------------------------------------------- /code/models/networks/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch 7 | from models.networks.base_network import BaseNetwork 8 | from models.networks.loss import * 9 | from models.networks.discriminator import * 10 | from models.networks.generator import * 11 | from models.networks.encoder import * 12 | import util.util as util 13 | 14 | 15 | def find_network_using_name(target_network_name, filename): 16 | target_class_name = target_network_name + filename 17 | module_name = 'models.networks.' + filename 18 | network = util.find_class_in_module(target_class_name, module_name) 19 | 20 | assert issubclass(network, BaseNetwork), \ 21 | "Class %s should be a subclass of BaseNetwork" % network 22 | 23 | return network 24 | 25 | 26 | def modify_commandline_options(parser, is_train): 27 | opt, _ = parser.parse_known_args() 28 | 29 | netG_cls = find_network_using_name(opt.netG, 'generator') 30 | parser = netG_cls.modify_commandline_options(parser, is_train) 31 | if is_train: 32 | netD_cls = find_network_using_name(opt.netD, 'discriminator') 33 | parser = netD_cls.modify_commandline_options(parser, is_train) 34 | netE_cls = find_network_using_name('conv', 'encoder') 35 | parser = netE_cls.modify_commandline_options(parser, is_train) 36 | 37 | return parser 38 | 39 | 40 | def create_network(cls, opt): 41 | net = cls(opt) 42 | net.print_network() 43 | if len(opt.gpu_ids) > 0: 44 | assert(torch.cuda.is_available()) 45 | net.cuda() 46 | net.init_weights(opt.init_type, opt.init_variance) 47 | return net 48 | 49 | 50 | def define_G(opt): 51 | netG_cls = find_network_using_name(opt.netG, 'generator') 52 | return create_network(netG_cls, opt) 53 | 54 | 55 | def define_D(opt): 56 | netD_cls = find_network_using_name(opt.netD, 'discriminator') 57 | return create_network(netD_cls, opt) 58 | 59 | 60 | def define_E(opt): 61 | # there exists only one encoder type 62 | netE_cls = find_network_using_name('conv', 'encoder') 63 | return create_network(netE_cls, opt) 64 | -------------------------------------------------------------------------------- /code/util/CelebAMask-HQ/Data_preprocessing/v_mask.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from utils import make_folder 4 | import skimage.io 5 | 6 | 7 | def labelcolormap(N): 8 | if N == 19: # CelebAMask-HQ 9 | cmap = np.array([(0, 0, 0), (204, 0, 0), (76, 153, 0), 10 | (204, 204, 0), (51, 51, 255), (204, 0, 204), (0, 255, 255), 11 | (51, 255, 255), (102, 51, 0), (255, 0, 0), (102, 204, 0), 12 | (255, 255, 0), (0, 0, 153), (0, 0, 204), (255, 51, 153), 13 | (0, 204, 204), (0, 51, 0), (255, 153, 51), (0, 204, 0)], 14 | dtype=np.uint8) 15 | else: 16 | cmap = np.zeros((N, 3), dtype=np.uint8) 17 | for i in range(N): 18 | r, g, b = 0, 0, 0 19 | id = i 20 | for j in range(7): 21 | str_id = uint82bin(id) 22 | r = r ^ (np.uint8(str_id[-1]) << (7-j)) 23 | g = g ^ (np.uint8(str_id[-2]) << (7-j)) 24 | b = b ^ (np.uint8(str_id[-3]) << (7-j)) 25 | id = id >> 3 26 | cmap[i, 0] = r 27 | cmap[i, 1] = g 28 | cmap[i, 2] = b 29 | return cmap 30 | 31 | def uint82bin(n, count=8): 32 | """returns the binary of integer n, count refers to amount of bits""" 33 | return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)]) 34 | 35 | 36 | 37 | def colorize(gray_image, cmap): 38 | size = gray_image.shape 39 | color_image = np.zeros((size[0], size[1], 3), np.uint8) 40 | 41 | for label in range(0, len(cmap)): 42 | mask = (label == gray_image[:, :]) 43 | color_image[:, :, 0][mask] = cmap[label][0] 44 | color_image[:, :, 1][mask] = cmap[label][1] 45 | color_image[:, :, 2][mask] = cmap[label][2] 46 | 47 | return color_image 48 | 49 | 50 | 51 | folder_base = '/media/zhup/Data/CelebAMask-HQ/CelebAMaskHQ-mask' 52 | folder_save = '/media/zhup/Data/CelebAMask-HQ/CelebAMaskHQ-mask-vis' 53 | img_num = 30000 54 | 55 | make_folder(folder_save) 56 | my_cmp = labelcolormap(19) 57 | 58 | for k in range(img_num): 59 | 60 | filename = os.path.join(folder_base, str(k) + '.png') 61 | if (os.path.exists(filename)): 62 | print(k + 1) 63 | im = skimage.io.imread(filename) 64 | im_vis = colorize(im,my_cmp) 65 | 66 | filename_save = os.path.join(folder_save, str(k) + '.png') 67 | print(filename_save) 68 | skimage.io.imsave(filename_save,im_vis) -------------------------------------------------------------------------------- /code/models/networks/base_network.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch.nn as nn 7 | from torch.nn import init 8 | 9 | 10 | class BaseNetwork(nn.Module): 11 | def __init__(self): 12 | super(BaseNetwork, self).__init__() 13 | 14 | @staticmethod 15 | def modify_commandline_options(parser, is_train): 16 | return parser 17 | 18 | def print_network(self): 19 | if isinstance(self, list): 20 | self = self[0] 21 | num_params = 0 22 | for param in self.parameters(): 23 | num_params += param.numel() 24 | print('Network [%s] was created. Total number of parameters: %.1f million. ' 25 | 'To see the architecture, do print(network).' 26 | % (type(self).__name__, num_params / 1000000)) 27 | 28 | def init_weights(self, init_type='normal', gain=0.02): 29 | def init_func(m): 30 | classname = m.__class__.__name__ 31 | if classname.find('BatchNorm2d') != -1: 32 | if hasattr(m, 'weight') and m.weight is not None: 33 | init.normal_(m.weight.data, 1.0, gain) 34 | if hasattr(m, 'bias') and m.bias is not None: 35 | init.constant_(m.bias.data, 0.0) 36 | elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): 37 | if init_type == 'normal': 38 | init.normal_(m.weight.data, 0.0, gain) 39 | elif init_type == 'xavier': 40 | init.xavier_normal_(m.weight.data, gain=gain) 41 | elif init_type == 'xavier_uniform': 42 | init.xavier_uniform_(m.weight.data, gain=1.0) 43 | elif init_type == 'kaiming': 44 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') 45 | elif init_type == 'orthogonal': 46 | init.orthogonal_(m.weight.data, gain=gain) 47 | elif init_type == 'none': # uses pytorch's default init method 48 | m.reset_parameters() 49 | else: 50 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 51 | if hasattr(m, 'bias') and m.bias is not None: 52 | init.constant_(m.bias.data, 0.0) 53 | 54 | self.apply(init_func) 55 | 56 | # propagate to children 57 | for m in self.children(): 58 | if hasattr(m, 'init_weights'): 59 | m.init_weights(init_type, gain) 60 | -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/batchnorm_reimpl.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : batchnorm_reimpl.py 4 | # Author : acgtyrant 5 | # Date : 11/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.init as init 14 | 15 | __all__ = ['BatchNormReimpl'] 16 | 17 | 18 | class BatchNorm2dReimpl(nn.Module): 19 | """ 20 | A re-implementation of batch normalization, used for testing the numerical 21 | stability. 22 | 23 | Author: acgtyrant 24 | See also: 25 | https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14 26 | """ 27 | def __init__(self, num_features, eps=1e-5, momentum=0.1): 28 | super().__init__() 29 | 30 | self.num_features = num_features 31 | self.eps = eps 32 | self.momentum = momentum 33 | self.weight = nn.Parameter(torch.empty(num_features)) 34 | self.bias = nn.Parameter(torch.empty(num_features)) 35 | self.register_buffer('running_mean', torch.zeros(num_features)) 36 | self.register_buffer('running_var', torch.ones(num_features)) 37 | self.reset_parameters() 38 | 39 | def reset_running_stats(self): 40 | self.running_mean.zero_() 41 | self.running_var.fill_(1) 42 | 43 | def reset_parameters(self): 44 | self.reset_running_stats() 45 | init.uniform_(self.weight) 46 | init.zeros_(self.bias) 47 | 48 | def forward(self, input_): 49 | batchsize, channels, height, width = input_.size() 50 | numel = batchsize * height * width 51 | input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel) 52 | sum_ = input_.sum(1) 53 | sum_of_square = input_.pow(2).sum(1) 54 | mean = sum_ / numel 55 | sumvar = sum_of_square - sum_ * mean 56 | 57 | self.running_mean = ( 58 | (1 - self.momentum) * self.running_mean 59 | + self.momentum * mean.detach() 60 | ) 61 | unbias_var = sumvar / (numel - 1) 62 | self.running_var = ( 63 | (1 - self.momentum) * self.running_var 64 | + self.momentum * unbias_var.detach() 65 | ) 66 | 67 | bias_var = sumvar / numel 68 | inv_std = 1 / (bias_var + self.eps).pow(0.5) 69 | output = ( 70 | (input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) * 71 | self.weight.unsqueeze(1) + self.bias.unsqueeze(1)) 72 | 73 | return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous() 74 | 75 | -------------------------------------------------------------------------------- /code/util/html.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import datetime 7 | import dominate 8 | from dominate.tags import * 9 | import os 10 | 11 | 12 | class HTML: 13 | def __init__(self, web_dir, title, refresh=0): 14 | if web_dir.endswith('.html'): 15 | web_dir, html_name = os.path.split(web_dir) 16 | else: 17 | web_dir, html_name = web_dir, 'index.html' 18 | self.title = title 19 | self.web_dir = web_dir 20 | self.html_name = html_name 21 | self.img_dir = os.path.join(self.web_dir, 'images') 22 | if len(self.web_dir) > 0 and not os.path.exists(self.web_dir): 23 | os.makedirs(self.web_dir) 24 | if len(self.web_dir) > 0 and not os.path.exists(self.img_dir): 25 | os.makedirs(self.img_dir) 26 | 27 | self.doc = dominate.document(title=title) 28 | with self.doc: 29 | h1(datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")) 30 | if refresh > 0: 31 | with self.doc.head: 32 | meta(http_equiv="refresh", content=str(refresh)) 33 | 34 | def get_image_dir(self): 35 | return self.img_dir 36 | 37 | def add_header(self, str): 38 | with self.doc: 39 | h3(str) 40 | 41 | def add_table(self, border=1): 42 | self.t = table(border=border, style="table-layout: fixed;") 43 | self.doc.add(self.t) 44 | 45 | def add_images(self, ims, txts, links, width=512): 46 | self.add_table() 47 | with self.t: 48 | with tr(): 49 | for im, txt, link in zip(ims, txts, links): 50 | with td(style="word-wrap: break-word;", halign="center", valign="top"): 51 | with p(): 52 | with a(href=os.path.join('images', link)): 53 | img(style="width:%dpx" % (width), src=os.path.join('images', im)) 54 | br() 55 | p(txt.encode('utf-8')) 56 | 57 | def save(self): 58 | html_file = os.path.join(self.web_dir, self.html_name) 59 | f = open(html_file, 'wt') 60 | f.write(self.doc.render()) 61 | f.close() 62 | 63 | 64 | if __name__ == '__main__': 65 | html = HTML('web/', 'test_html') 66 | html.add_header('hello world') 67 | 68 | ims = [] 69 | txts = [] 70 | links = [] 71 | for n in range(4): 72 | ims.append('image_%d.jpg' % n) 73 | txts.append('text_%d' % n) 74 | links.append('image_%d.jpg' % n) 75 | html.add_images(ims, txts, links) 76 | html.save() 77 | -------------------------------------------------------------------------------- /code/test.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import random 5 | import shutil 6 | import sys 7 | import time 8 | 9 | import numpy as np 10 | import torch 11 | import torch.backends.cudnn as cudnn 12 | import torch.nn as nn 13 | import torch.nn.functional as F 14 | import torch.optim as optim 15 | from tensorboardX import SummaryWriter 16 | from torch.nn import BCEWithLogitsLoss 17 | from torch.nn.modules.loss import CrossEntropyLoss 18 | from torch.utils.data import DataLoader 19 | from torchvision import transforms 20 | from torchvision.utils import make_grid 21 | from tqdm import tqdm 22 | from itertools import cycle 23 | import numpy as np 24 | import cv2 25 | 26 | from dataloaders import utils 27 | from dataloaders.dataset_covid import (CovidDataSets, RandomGenerator) 28 | from networks.net_factory import net_factory 29 | from utils import losses, metrics, ramps 30 | from test_covid import get_model_metric 31 | 32 | 33 | parser = argparse.ArgumentParser() 34 | parser.add_argument('--root_path', type=str, default='/home/code/SSL/', help='Name of Experiment') 35 | parser.add_argument('--exp', type=str, default='Test', help='experiment_name') 36 | parser.add_argument('--model', type=str, default='unet2', help='model_name') 37 | parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training') 38 | parser.add_argument('--patch_size', type=list, default=[512, 512], help='patch size of network input') 39 | parser.add_argument('--num_classes', type=int, default=2, help='output channel of network') 40 | # label and unlabel 41 | parser.add_argument('--labeled_per', type=float, default=0.1, help='percent of labeled data') 42 | 43 | if False: 44 | parser.add_argument('--dataset_name', type=str, default='COVID249', help='Name of dataset') 45 | parser.add_argument('--model_path', type=str, default='/home/code/SSL/exp/COVID249/model.pth', help='path of teacher model') 46 | else: 47 | parser.add_argument('--dataset_name', type=str, default='MOS1000', help='Name of dataset') 48 | parser.add_argument('--model_path', type=str, default='/home/code/SSL/exp/MOS1000/model.pth', help='path of teacher model') 49 | args = parser.parse_args() 50 | 51 | 52 | 53 | def test(args, snapshot_path): 54 | model = net_factory(net_type=args.model) 55 | model.load_state_dict(torch.load(args.model_path)) 56 | model.eval() 57 | 58 | nsd, dice = get_model_metric(args = args, model = model, snapshot_path=snapshot_path, model_name='model', mode='test') 59 | print('nsd : %f dice : %f ' % (nsd, dice)) 60 | 61 | 62 | 63 | if __name__ == "__main__": 64 | snapshot_path = "{}exp/{}/test_{}_{}_{}".format(args.root_path, args.dataset_name, args.exp, args.labeled_per, args.model) 65 | if not os.path.exists(snapshot_path): 66 | os.makedirs(snapshot_path) 67 | test(args, snapshot_path) -------------------------------------------------------------------------------- /code/util/iter_counter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import os 7 | import time 8 | import numpy as np 9 | 10 | 11 | # Helper class that keeps track of training iterations 12 | class IterationCounter(): 13 | def __init__(self, opt, dataset_size): 14 | self.opt = opt 15 | self.dataset_size = dataset_size 16 | 17 | self.first_epoch = 1 18 | self.total_epochs = opt.niter + opt.niter_decay + 1000 19 | self.epoch_iter = 0 # iter number within each epoch 20 | self.iter_record_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'iter.txt') 21 | if opt.isTrain and opt.continue_train: 22 | try: 23 | self.first_epoch, self.epoch_iter = np.loadtxt( 24 | self.iter_record_path, delimiter=',', dtype=int) 25 | print('Resuming from epoch %d at iteration %d' % (self.first_epoch, self.epoch_iter)) 26 | except: 27 | print('Could not load iteration record at %s. Starting from beginning.' % 28 | self.iter_record_path) 29 | 30 | self.total_steps_so_far = (self.first_epoch - 1) * dataset_size + self.epoch_iter 31 | 32 | # return the iterator of epochs for the training 33 | def training_epochs(self): 34 | return range(self.first_epoch, self.total_epochs + 1) 35 | 36 | def record_epoch_start(self, epoch): 37 | self.epoch_start_time = time.time() 38 | self.epoch_iter = 0 39 | self.last_iter_time = time.time() 40 | self.current_epoch = epoch 41 | 42 | def record_one_iteration(self): 43 | current_time = time.time() 44 | 45 | # the last remaining batch is dropped (see data/__init__.py), 46 | # so we can assume batch size is always opt.batchSize 47 | self.time_per_iter = (current_time - self.last_iter_time) / self.opt.batchSize 48 | self.last_iter_time = current_time 49 | self.total_steps_so_far += self.opt.batchSize 50 | self.epoch_iter += self.opt.batchSize 51 | 52 | def record_epoch_end(self): 53 | current_time = time.time() 54 | self.time_per_epoch = current_time - self.epoch_start_time 55 | print('End of epoch %d / %d \t Time Taken: %d sec' % 56 | (self.current_epoch, self.total_epochs, self.time_per_epoch)) 57 | if self.current_epoch % self.opt.save_epoch_freq == 0: 58 | np.savetxt(self.iter_record_path, (self.current_epoch + 1, 0), 59 | delimiter=',', fmt='%d') 60 | print('Saved current iteration count at %s.' % self.iter_record_path) 61 | 62 | def record_current_iter(self): 63 | np.savetxt(self.iter_record_path, (self.current_epoch, self.epoch_iter), 64 | delimiter=',', fmt='%d') 65 | print('Saved current iteration count at %s.' % self.iter_record_path) 66 | 67 | def needs_saving(self): 68 | return (self.total_steps_so_far % self.opt.save_latest_freq) < self.opt.batchSize 69 | 70 | def needs_printing(self): 71 | return (self.total_steps_so_far % self.opt.print_freq) < self.opt.batchSize 72 | 73 | def needs_displaying(self): 74 | return (self.total_steps_so_far % self.opt.display_freq) < self.opt.batchSize 75 | -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/replicate.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : replicate.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import functools 12 | 13 | from torch.nn.parallel.data_parallel import DataParallel 14 | 15 | __all__ = [ 16 | 'CallbackContext', 17 | 'execute_replication_callbacks', 18 | 'DataParallelWithCallback', 19 | 'patch_replication_callback' 20 | ] 21 | 22 | 23 | class CallbackContext(object): 24 | pass 25 | 26 | 27 | def execute_replication_callbacks(modules): 28 | """ 29 | Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. 30 | 31 | The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` 32 | 33 | Note that, as all modules are isomorphism, we assign each sub-module with a context 34 | (shared among multiple copies of this module on different devices). 35 | Through this context, different copies can share some information. 36 | 37 | We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback 38 | of any slave copies. 39 | """ 40 | master_copy = modules[0] 41 | nr_modules = len(list(master_copy.modules())) 42 | ctxs = [CallbackContext() for _ in range(nr_modules)] 43 | 44 | for i, module in enumerate(modules): 45 | for j, m in enumerate(module.modules()): 46 | if hasattr(m, '__data_parallel_replicate__'): 47 | m.__data_parallel_replicate__(ctxs[j], i) 48 | 49 | 50 | class DataParallelWithCallback(DataParallel): 51 | """ 52 | Data Parallel with a replication callback. 53 | 54 | An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by 55 | original `replicate` function. 56 | The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` 57 | 58 | Examples: 59 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) 60 | > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) 61 | # sync_bn.__data_parallel_replicate__ will be invoked. 62 | """ 63 | 64 | def replicate(self, module, device_ids): 65 | modules = super(DataParallelWithCallback, self).replicate(module, device_ids) 66 | execute_replication_callbacks(modules) 67 | return modules 68 | 69 | 70 | def patch_replication_callback(data_parallel): 71 | """ 72 | Monkey-patch an existing `DataParallel` object. Add the replication callback. 73 | Useful when you have customized `DataParallel` implementation. 74 | 75 | Examples: 76 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) 77 | > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) 78 | > patch_replication_callback(sync_bn) 79 | # this is equivalent to 80 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) 81 | > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) 82 | """ 83 | 84 | assert isinstance(data_parallel, DataParallel) 85 | 86 | old_replicate = data_parallel.replicate 87 | 88 | @functools.wraps(old_replicate) 89 | def new_replicate(module, device_ids): 90 | modules = old_replicate(module, device_ids) 91 | execute_replication_callbacks(modules) 92 | return modules 93 | 94 | data_parallel.replicate = new_replicate 95 | -------------------------------------------------------------------------------- /code/networks/unet2d.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | An implementation of the U-Net paper: 4 | Olaf Ronneberger, Philipp Fischer, Thomas Brox: 5 | U-Net: Convolutional Networks for Biomedical Image Segmentation. 6 | MICCAI (3) 2015: 234-241 7 | Note that there are some modifications from the original paper, such as 8 | the use of batch normalization, dropout, and leaky relu here. 9 | """ 10 | 11 | 12 | import torch 13 | import torch.nn as nn 14 | from torchvision import models 15 | import torch.nn.functional as F 16 | 17 | 18 | 19 | class double_conv(nn.Module): 20 | def __init__(self, in_ch, out_ch): 21 | super(double_conv, self).__init__() 22 | self.conv = nn.Sequential( 23 | nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), 24 | #nn.BatchNorm2d(out_ch), 25 | nn.InstanceNorm2d(out_ch), 26 | nn.ReLU(inplace=True), 27 | nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1), 28 | #nn.BatchNorm2d(out_ch), 29 | nn.InstanceNorm2d(out_ch), 30 | nn.ReLU(inplace=True) 31 | ) 32 | 33 | def forward(self, x): 34 | x = self.conv(x) 35 | return x 36 | 37 | 38 | class inconv(nn.Module): 39 | def __init__(self, in_ch, out_ch): 40 | super(inconv, self).__init__() 41 | self.conv = double_conv(in_ch, out_ch) 42 | 43 | def forward(self, x): 44 | x = self.conv(x) 45 | return x 46 | 47 | 48 | class down(nn.Module): 49 | def __init__(self, in_ch, out_ch): 50 | super(down, self).__init__() 51 | self.max_pool_conv = nn.Sequential( 52 | nn.MaxPool2d(2), 53 | double_conv(in_ch, out_ch) 54 | ) 55 | 56 | def forward(self, x): 57 | x = self.max_pool_conv(x) 58 | return x 59 | 60 | 61 | class up(nn.Module): 62 | def __init__(self, in_ch, out_ch, bilinear=True): 63 | super(up, self).__init__() 64 | if bilinear: 65 | #self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 66 | self.up = nn.Upsample(scale_factor=2, mode='nearest') 67 | else: 68 | self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2) 69 | 70 | self.conv = double_conv(in_ch, out_ch) 71 | 72 | def forward(self, x1, x2): 73 | x1 = self.up(x1) 74 | diffX = x1.size()[2] - x2.size()[2] 75 | diffY = x1.size()[3] - x2.size()[3] 76 | x2 = F.pad(x2, (diffX // 2, int(diffX / 2), diffY // 2, int(diffY / 2))) 77 | x = torch.cat([x2, x1], dim=1) 78 | x = self.conv(x) 79 | return x 80 | 81 | 82 | class outconv(nn.Module): 83 | def __init__(self, in_ch, out_ch): 84 | super(outconv, self).__init__() 85 | self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=1) 86 | 87 | def forward(self, x): 88 | x = self.conv(x) 89 | return x 90 | 91 | 92 | class UNet2D(nn.Module): 93 | def __init__(self, n_channels=3, n_classes=2): 94 | super(UNet2D, self).__init__() 95 | self.inc = inconv(n_channels, 32) 96 | self.down1 = down(32, 64) 97 | self.down2 = down(64, 128) 98 | self.down3 = down(128, 256) 99 | self.down4 = down(256, 256) 100 | self.up1 = up(512, 128) 101 | self.up2 = up(256, 64) 102 | self.up3 = up(128, 32) 103 | self.up4 = up(64, 32) 104 | self.outc = outconv(32, n_classes) 105 | self.relu = nn.ReLU() 106 | 107 | def forward(self, x): 108 | x1 = self.inc(x) 109 | x2 = self.down1(x1) 110 | x3 = self.down2(x2) 111 | x4 = self.down3(x3) 112 | x5 = self.down4(x4) 113 | x = self.up1(x5, x4) 114 | x = self.up2(x, x3) 115 | x = self.up3(x, x2) 116 | x = self.up4(x, x1) 117 | x = self.outc(x) 118 | #x = self.relu(x) 119 | return x 120 | -------------------------------------------------------------------------------- /code/dataloaders/dataset_covid.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path 3 | import cv2 4 | import torch 5 | import random 6 | import numpy as np 7 | from glob import glob 8 | from torch.utils.data import Dataset 9 | from scipy.ndimage.interpolation import zoom 10 | import itertools 11 | from scipy import ndimage 12 | from torch.utils.data.sampler import Sampler 13 | import pandas as pd 14 | from PIL import Image 15 | 16 | 17 | class CovidDataSets(Dataset): 18 | def __init__(self, root_path=None, dataset_name='COVID249', file_name = 'val_slice.xlsx', aug=False): 19 | self.root_path = root_path 20 | self.file_name = file_name 21 | self.dataset_name = dataset_name 22 | self.file_path = root_path + "data/{}/{}".format(dataset_name, file_name) 23 | self.aug = aug 24 | 25 | 26 | excelData = pd.read_excel(self.file_path) 27 | length = excelData.shape[0] 28 | self.paths = [] 29 | for i in range(length): 30 | file_name_i = excelData.iloc[i][0] 31 | self.paths.append(file_name_i) 32 | 33 | def __len__(self): 34 | return len(self.paths) 35 | 36 | 37 | def __getitem__(self, idx): 38 | case = self.paths[idx] 39 | 40 | case_img_path = self.root_path + "data/{}/PNG/images/{}".format(self.dataset_name, case) 41 | case_label_path = self.root_path + "data/{}/PNG/labels/{}".format(self.dataset_name, case) 42 | case_lung_path = self.root_path + "data/{}/PNG/lung/{}".format(self.dataset_name, case) 43 | 44 | image = Image.open(case_img_path) 45 | 46 | if os.path.exists(case_label_path): 47 | label = Image.open(case_label_path) 48 | else: 49 | label = Image.open(case_lung_path) 50 | 51 | lung = Image.open(case_lung_path) 52 | 53 | if self.aug: 54 | if random.random() > 0.5: 55 | image, label, lung = random_rot_flip(image, label, lung) 56 | elif random.random() > 0.5: 57 | image, label, lung = random_rotate(image, label, lung) 58 | 59 | image = (torch.from_numpy(np.asarray(image).astype(np.float32)).permute(2, 0, 1).contiguous())/255.0 60 | label = torch.from_numpy(np.asarray(label).astype(np.uint8)) 61 | lung = torch.from_numpy(np.asarray(lung).astype(np.uint8)) 62 | 63 | return image, label, case, lung 64 | 65 | 66 | def random_rot_flip(image, label, lung): 67 | k = np.random.randint(0, 4) 68 | image = np.rot90(image, k) 69 | label = np.rot90(label, k) 70 | lung = np.rot90(lung, k) 71 | axis = np.random.randint(0, 2) 72 | image = np.flip(image, axis=axis).copy() 73 | label = np.flip(label, axis=axis).copy() 74 | lung = np.flip(lung, axis=axis).copy() 75 | return image, label, lung 76 | 77 | 78 | def random_rotate(image, label, lung): 79 | angle = np.random.randint(-20, 20) 80 | image = ndimage.rotate(image, angle, order=0, reshape=False) 81 | label = ndimage.rotate(label, angle, order=0, reshape=False) 82 | lung = ndimage.rotate(lung, angle, order=0, reshape=False) 83 | return image, label, lung 84 | 85 | 86 | def rotate_90(image, label, lung): 87 | angle = 90 88 | image = ndimage.rotate(image, angle, order=0, reshape=False) 89 | label = ndimage.rotate(label, angle, order=0, reshape=False) 90 | lung = ndimage.rotate(lung, angle, order=0, reshape=False) 91 | return image, label, lung 92 | 93 | def rotate_n90(image, label, lung): 94 | angle = -90 95 | image = ndimage.rotate(image, angle, order=0, reshape=False) 96 | label = ndimage.rotate(label, angle, order=0, reshape=False) 97 | lung = ndimage.rotate(lung, angle, order=0, reshape=False) 98 | return image, label, lung 99 | 100 | 101 | 102 | class RandomGenerator(object): 103 | def __init__(self, output_size): 104 | self.output_size = output_size 105 | 106 | def __call__(self, image, label): 107 | if random.random() > 0.5: 108 | image, label = random_rot_flip(image, label) 109 | elif random.random() > 0.5: 110 | image, label = random_rotate(image, label) 111 | x, y = image.shape 112 | image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0) 113 | label = torch.from_numpy(label.astype(np.uint8)) 114 | return image, label 115 | 116 | 117 | -------------------------------------------------------------------------------- /code/models/networks/discriminator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch.nn as nn 7 | import numpy as np 8 | import torch.nn.functional as F 9 | from models.networks.base_network import BaseNetwork 10 | from models.networks.normalization import get_nonspade_norm_layer 11 | import util.util as util 12 | 13 | 14 | class MultiscaleDiscriminator(BaseNetwork): 15 | @staticmethod 16 | def modify_commandline_options(parser, is_train): 17 | parser.add_argument('--netD_subarch', type=str, default='n_layer', 18 | help='architecture of each discriminator') 19 | parser.add_argument('--num_D', type=int, default=2, 20 | help='number of discriminators to be used in multiscale') 21 | opt, _ = parser.parse_known_args() 22 | 23 | # define properties of each discriminator of the multiscale discriminator 24 | subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator', 25 | 'models.networks.discriminator') 26 | subnetD.modify_commandline_options(parser, is_train) 27 | 28 | return parser 29 | 30 | def __init__(self, opt): 31 | super().__init__() 32 | self.opt = opt 33 | 34 | for i in range(opt.num_D): 35 | subnetD = self.create_single_discriminator(opt) 36 | self.add_module('discriminator_%d' % i, subnetD) 37 | 38 | def create_single_discriminator(self, opt): 39 | subarch = opt.netD_subarch 40 | if subarch == 'n_layer': 41 | netD = NLayerDiscriminator(opt) 42 | else: 43 | raise ValueError('unrecognized discriminator subarchitecture %s' % subarch) 44 | return netD 45 | 46 | def downsample(self, input): 47 | return F.avg_pool2d(input, kernel_size=3, 48 | stride=2, padding=[1, 1], 49 | count_include_pad=False) 50 | 51 | # Returns list of lists of discriminator outputs. 52 | # The final result is of size opt.num_D x opt.n_layers_D 53 | def forward(self, input): 54 | result = [] 55 | get_intermediate_features = not self.opt.no_ganFeat_loss 56 | for name, D in self.named_children(): 57 | out = D(input) 58 | if not get_intermediate_features: 59 | out = [out] 60 | result.append(out) 61 | input = self.downsample(input) 62 | 63 | return result 64 | 65 | 66 | # Defines the PatchGAN discriminator with the specified arguments. 67 | class NLayerDiscriminator(BaseNetwork): 68 | @staticmethod 69 | def modify_commandline_options(parser, is_train): 70 | parser.add_argument('--n_layers_D', type=int, default=3, 71 | help='# layers in each discriminator') 72 | return parser 73 | 74 | def __init__(self, opt): 75 | super().__init__() 76 | self.opt = opt 77 | 78 | kw = 4 79 | padw = int(np.ceil((kw - 1.0) / 2)) 80 | nf = opt.ndf 81 | input_nc = self.compute_D_input_nc(opt) 82 | 83 | norm_layer = get_nonspade_norm_layer(opt, opt.norm_D) 84 | sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw), 85 | nn.LeakyReLU(0.2, False)]] 86 | 87 | for n in range(1, opt.n_layers_D): 88 | nf_prev = nf 89 | nf = min(nf * 2, 512) 90 | sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw, 91 | stride=2, padding=padw)), 92 | nn.LeakyReLU(0.2, False) 93 | ]] 94 | 95 | sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] 96 | 97 | # We divide the layers into groups to extract intermediate layer outputs 98 | for n in range(len(sequence)): 99 | self.add_module('model' + str(n), nn.Sequential(*sequence[n])) 100 | 101 | def compute_D_input_nc(self, opt): 102 | input_nc = opt.label_nc + opt.output_nc 103 | if opt.contain_dontcare_label: 104 | input_nc += 1 105 | if not opt.no_instance: 106 | input_nc += 1 107 | return input_nc 108 | 109 | def forward(self, input): 110 | results = [input] 111 | for submodel in self.children(): 112 | intermediate_output = submodel(results[-1]) 113 | results.append(intermediate_output) 114 | 115 | get_intermediate_features = not self.opt.no_ganFeat_loss 116 | if get_intermediate_features: 117 | return results[1:] 118 | else: 119 | return results[-1] 120 | -------------------------------------------------------------------------------- /code/models/networks/sync_batchnorm/comm.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : comm.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import queue 12 | import collections 13 | import threading 14 | 15 | __all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] 16 | 17 | 18 | class FutureResult(object): 19 | """A thread-safe future implementation. Used only as one-to-one pipe.""" 20 | 21 | def __init__(self): 22 | self._result = None 23 | self._lock = threading.Lock() 24 | self._cond = threading.Condition(self._lock) 25 | 26 | def put(self, result): 27 | with self._lock: 28 | assert self._result is None, 'Previous result has\'t been fetched.' 29 | self._result = result 30 | self._cond.notify() 31 | 32 | def get(self): 33 | with self._lock: 34 | if self._result is None: 35 | self._cond.wait() 36 | 37 | res = self._result 38 | self._result = None 39 | return res 40 | 41 | 42 | _MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) 43 | _SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) 44 | 45 | 46 | class SlavePipe(_SlavePipeBase): 47 | """Pipe for master-slave communication.""" 48 | 49 | def run_slave(self, msg): 50 | self.queue.put((self.identifier, msg)) 51 | ret = self.result.get() 52 | self.queue.put(True) 53 | return ret 54 | 55 | 56 | class SyncMaster(object): 57 | """An abstract `SyncMaster` object. 58 | 59 | - During the replication, as the data parallel will trigger an callback of each module, all slave devices should 60 | call `register(id)` and obtain an `SlavePipe` to communicate with the master. 61 | - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, 62 | and passed to a registered callback. 63 | - After receiving the messages, the master device should gather the information and determine to message passed 64 | back to each slave devices. 65 | """ 66 | 67 | def __init__(self, master_callback): 68 | """ 69 | 70 | Args: 71 | master_callback: a callback to be invoked after having collected messages from slave devices. 72 | """ 73 | self._master_callback = master_callback 74 | self._queue = queue.Queue() 75 | self._registry = collections.OrderedDict() 76 | self._activated = False 77 | 78 | def __getstate__(self): 79 | return {'master_callback': self._master_callback} 80 | 81 | def __setstate__(self, state): 82 | self.__init__(state['master_callback']) 83 | 84 | def register_slave(self, identifier): 85 | """ 86 | Register an slave device. 87 | 88 | Args: 89 | identifier: an identifier, usually is the device id. 90 | 91 | Returns: a `SlavePipe` object which can be used to communicate with the master device. 92 | 93 | """ 94 | if self._activated: 95 | assert self._queue.empty(), 'Queue is not clean before next initialization.' 96 | self._activated = False 97 | self._registry.clear() 98 | future = FutureResult() 99 | self._registry[identifier] = _MasterRegistry(future) 100 | return SlavePipe(identifier, self._queue, future) 101 | 102 | def run_master(self, master_msg): 103 | """ 104 | Main entry for the master device in each forward pass. 105 | The messages were first collected from each devices (including the master device), and then 106 | an callback will be invoked to compute the message to be sent back to each devices 107 | (including the master device). 108 | 109 | Args: 110 | master_msg: the message that the master want to send to itself. This will be placed as the first 111 | message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. 112 | 113 | Returns: the message to be sent back to the master device. 114 | 115 | """ 116 | self._activated = True 117 | 118 | intermediates = [(0, master_msg)] 119 | for i in range(self.nr_slaves): 120 | intermediates.append(self._queue.get()) 121 | 122 | results = self._master_callback(intermediates) 123 | assert results[0][0] == 0, 'The first result should belongs to the master.' 124 | 125 | for i, res in results: 126 | if i == 0: 127 | continue 128 | self._registry[i].result.put(res) 129 | 130 | for i in range(self.nr_slaves): 131 | assert self._queue.get() is True 132 | 133 | return results[0][1] 134 | 135 | @property 136 | def nr_slaves(self): 137 | return len(self._registry) 138 | -------------------------------------------------------------------------------- /code/models/networks/generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | from models.networks.base_network import BaseNetwork 10 | from models.networks.normalization import get_nonspade_norm_layer 11 | from models.networks.architecture import ResnetBlock as ResnetBlock 12 | from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock 13 | from models.networks.architecture import Zencoder 14 | import random 15 | 16 | class SPADEGenerator(BaseNetwork): 17 | @staticmethod 18 | def modify_commandline_options(parser, is_train): 19 | parser.set_defaults(norm_G='spectralspadesyncbatch3x3') 20 | parser.add_argument('--num_upsampling_layers', 21 | choices=('normal', 'more', 'most'), default='normal', 22 | help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator") 23 | 24 | return parser 25 | 26 | def __init__(self, opt): 27 | super().__init__() 28 | self.opt = opt 29 | nf = opt.ngf 30 | 31 | self.sw, self.sh = self.compute_latent_vector_size(opt) 32 | 33 | self.Zencoder = Zencoder(3, 512) 34 | 35 | 36 | self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1) 37 | 38 | self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt, Block_Name='head_0') 39 | 40 | self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt, Block_Name='G_middle_0') 41 | self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt, Block_Name='G_middle_1') 42 | 43 | self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt, Block_Name='up_0') 44 | self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt, Block_Name='up_1') 45 | self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt, Block_Name='up_2') 46 | self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt, Block_Name='up_3', use_rgb=False) 47 | 48 | final_nc = nf 49 | 50 | if opt.num_upsampling_layers == 'most': 51 | self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt, Block_Name='up_4') 52 | final_nc = nf // 2 53 | 54 | self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1) 55 | 56 | self.up = nn.Upsample(scale_factor=2) 57 | #self.up = nn.Upsample(scale_factor=2, mode='bilinear') 58 | 59 | 60 | def compute_latent_vector_size(self, opt): 61 | if opt.num_upsampling_layers == 'normal': 62 | num_up_layers = 5 63 | elif opt.num_upsampling_layers == 'more': 64 | num_up_layers = 6 65 | elif opt.num_upsampling_layers == 'most': 66 | num_up_layers = 7 67 | else: 68 | raise ValueError('opt.num_upsampling_layers [%s] not recognized' % 69 | opt.num_upsampling_layers) 70 | 71 | sw = opt.crop_size // (2**num_up_layers) 72 | sh = round(sw / opt.aspect_ratio) 73 | 74 | return sw, sh 75 | 76 | def forward(self, input, rgb_img, obj_dic=None, return_style = False, style_input=None, alpha=0): 77 | seg = input 78 | 79 | x = F.interpolate(seg, size=(self.sh, self.sw)) 80 | x = self.fc(x) 81 | 82 | style_codes = self.Zencoder(input=rgb_img, segmap=seg) 83 | 84 | #---------------------------------------------------------------------------------------------------------- 85 | if return_style: 86 | return style_codes 87 | 88 | 89 | if style_input is not None and len(style_input)>0: 90 | extra_codes = torch.mean(torch.cat(style_input, 0), 0, keepdim=True) 91 | style_codes = alpha*style_codes + (1-alpha)*extra_codes 92 | 93 | #---------------------------------------------------------------------------------------------------------- 94 | 95 | 96 | x = self.head_0(x, seg, style_codes, obj_dic=obj_dic) 97 | 98 | x = self.up(x) 99 | x = self.G_middle_0(x, seg, style_codes, obj_dic=obj_dic) 100 | 101 | if self.opt.num_upsampling_layers == 'more' or \ 102 | self.opt.num_upsampling_layers == 'most': 103 | x = self.up(x) 104 | 105 | x = self.G_middle_1(x, seg, style_codes, obj_dic=obj_dic) 106 | 107 | x = self.up(x) 108 | x = self.up_0(x, seg, style_codes, obj_dic=obj_dic) 109 | x = self.up(x) 110 | x = self.up_1(x, seg, style_codes, obj_dic=obj_dic) 111 | x = self.up(x) 112 | x = self.up_2(x, seg, style_codes, obj_dic=obj_dic) 113 | x = self.up(x) 114 | x = self.up_3(x, seg, style_codes, obj_dic=obj_dic) 115 | 116 | # if self.opt.num_upsampling_layers == 'most': 117 | # x = self.up(x) 118 | # x= self.up_4(x, seg, style_codes, obj_dic=obj_dic) 119 | 120 | x = self.conv_img(F.leaky_relu(x, 2e-1)) 121 | x = F.tanh(x) 122 | return x 123 | -------------------------------------------------------------------------------- /code/models/networks/loss.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | from models.networks.architecture import VGG19 10 | 11 | 12 | # Defines the GAN loss which uses either LSGAN or the regular GAN. 13 | # When LSGAN is used, it is basically same as MSELoss, 14 | # but it abstracts away the need to create the target label tensor 15 | # that has the same size as the input 16 | class GANLoss(nn.Module): 17 | def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0, 18 | tensor=torch.FloatTensor, opt=None): 19 | super(GANLoss, self).__init__() 20 | self.real_label = target_real_label 21 | self.fake_label = target_fake_label 22 | self.real_label_tensor = None 23 | self.fake_label_tensor = None 24 | self.zero_tensor = None 25 | self.Tensor = tensor 26 | self.gan_mode = gan_mode 27 | self.opt = opt 28 | if gan_mode == 'ls': 29 | pass 30 | elif gan_mode == 'original': 31 | pass 32 | elif gan_mode == 'w': 33 | pass 34 | elif gan_mode == 'hinge': 35 | pass 36 | else: 37 | raise ValueError('Unexpected gan_mode {}'.format(gan_mode)) 38 | 39 | def get_target_tensor(self, input, target_is_real): 40 | if target_is_real: 41 | if self.real_label_tensor is None: 42 | self.real_label_tensor = self.Tensor(1).fill_(self.real_label) 43 | self.real_label_tensor.requires_grad_(False) 44 | return self.real_label_tensor.expand_as(input) 45 | else: 46 | if self.fake_label_tensor is None: 47 | self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label) 48 | self.fake_label_tensor.requires_grad_(False) 49 | return self.fake_label_tensor.expand_as(input) 50 | 51 | def get_zero_tensor(self, input): 52 | if self.zero_tensor is None: 53 | self.zero_tensor = self.Tensor(1).fill_(0) 54 | self.zero_tensor.requires_grad_(False) 55 | return self.zero_tensor.expand_as(input) 56 | 57 | def loss(self, input, target_is_real, for_discriminator=True): 58 | if self.gan_mode == 'original': # cross entropy loss 59 | target_tensor = self.get_target_tensor(input, target_is_real) 60 | loss = F.binary_cross_entropy_with_logits(input, target_tensor) 61 | return loss 62 | elif self.gan_mode == 'ls': 63 | target_tensor = self.get_target_tensor(input, target_is_real) 64 | return F.mse_loss(input, target_tensor) 65 | elif self.gan_mode == 'hinge': 66 | if for_discriminator: 67 | if target_is_real: 68 | minval = torch.min(input - 1, self.get_zero_tensor(input)) 69 | loss = -torch.mean(minval) 70 | else: 71 | minval = torch.min(-input - 1, self.get_zero_tensor(input)) 72 | loss = -torch.mean(minval) 73 | else: 74 | assert target_is_real, "The generator's hinge loss must be aiming for real" 75 | loss = -torch.mean(input) 76 | return loss 77 | else: 78 | # wgan 79 | if target_is_real: 80 | return -input.mean() 81 | else: 82 | return input.mean() 83 | 84 | def __call__(self, input, target_is_real, for_discriminator=True): 85 | # computing loss is a bit complicated because |input| may not be 86 | # a tensor, but list of tensors in case of multiscale discriminator 87 | if isinstance(input, list): 88 | loss = 0 89 | for pred_i in input: 90 | if isinstance(pred_i, list): 91 | pred_i = pred_i[-1] 92 | loss_tensor = self.loss(pred_i, target_is_real, for_discriminator) 93 | bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0) 94 | new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1) 95 | loss += new_loss 96 | return loss / len(input) 97 | else: 98 | return self.loss(input, target_is_real, for_discriminator) 99 | 100 | 101 | # Perceptual loss that uses a pretrained VGG network 102 | class VGGLoss(nn.Module): 103 | def __init__(self, gpu_ids): 104 | super(VGGLoss, self).__init__() 105 | self.vgg = VGG19().cuda() 106 | self.criterion = nn.L1Loss() 107 | self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] 108 | 109 | def forward(self, x, y): 110 | x_vgg, y_vgg = self.vgg(x), self.vgg(y) 111 | loss = 0 112 | for i in range(len(x_vgg)): 113 | loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) 114 | return loss 115 | 116 | 117 | # KL Divergence loss used in VAE with an image encoder 118 | # class KLDLoss(nn.Module): 119 | # def forward(self, mu, logvar): 120 | # return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) 121 | -------------------------------------------------------------------------------- /code/utils/util.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017-present, Facebook, Inc. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | # 7 | import os 8 | import pickle 9 | import numpy as np 10 | from scipy.ndimage import distance_transform_edt as distance 11 | from skimage import segmentation as skimage_seg 12 | import torch 13 | from torch.utils.data.sampler import Sampler 14 | 15 | import networks 16 | 17 | def load_model(path): 18 | """Loads model and return it without DataParallel table.""" 19 | if os.path.isfile(path): 20 | print("=> loading checkpoint '{}'".format(path)) 21 | checkpoint = torch.load(path) 22 | 23 | # size of the top layer 24 | N = checkpoint['state_dict']['top_layer.bias'].size() 25 | 26 | # build skeleton of the model 27 | sob = 'sobel.0.weight' in checkpoint['state_dict'].keys() 28 | model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0])) 29 | 30 | # deal with a dataparallel table 31 | def rename_key(key): 32 | if not 'module' in key: 33 | return key 34 | return ''.join(key.split('.module')) 35 | 36 | checkpoint['state_dict'] = {rename_key(key): val 37 | for key, val 38 | in checkpoint['state_dict'].items()} 39 | 40 | # load weights 41 | model.load_state_dict(checkpoint['state_dict']) 42 | print("Loaded") 43 | else: 44 | model = None 45 | print("=> no checkpoint found at '{}'".format(path)) 46 | return model 47 | 48 | 49 | class UnifLabelSampler(Sampler): 50 | """Samples elements uniformely accross pseudolabels. 51 | Args: 52 | N (int): size of returned iterator. 53 | images_lists: dict of key (target), value (list of data with this target) 54 | """ 55 | 56 | def __init__(self, N, images_lists): 57 | self.N = N 58 | self.images_lists = images_lists 59 | self.indexes = self.generate_indexes_epoch() 60 | 61 | def generate_indexes_epoch(self): 62 | size_per_pseudolabel = int(self.N / len(self.images_lists)) + 1 63 | res = np.zeros(size_per_pseudolabel * len(self.images_lists)) 64 | 65 | for i in range(len(self.images_lists)): 66 | indexes = np.random.choice( 67 | self.images_lists[i], 68 | size_per_pseudolabel, 69 | replace=(len(self.images_lists[i]) <= size_per_pseudolabel) 70 | ) 71 | res[i * size_per_pseudolabel: (i + 1) * size_per_pseudolabel] = indexes 72 | 73 | np.random.shuffle(res) 74 | return res[:self.N].astype('int') 75 | 76 | def __iter__(self): 77 | return iter(self.indexes) 78 | 79 | def __len__(self): 80 | return self.N 81 | 82 | 83 | class AverageMeter(object): 84 | """Computes and stores the average and current value""" 85 | def __init__(self): 86 | self.reset() 87 | 88 | def reset(self): 89 | self.val = 0 90 | self.avg = 0 91 | self.sum = 0 92 | self.count = 0 93 | 94 | def update(self, val, n=1): 95 | self.val = val 96 | self.sum += val * n 97 | self.count += n 98 | self.avg = self.sum / self.count 99 | 100 | 101 | def learning_rate_decay(optimizer, t, lr_0): 102 | for param_group in optimizer.param_groups: 103 | lr = lr_0 / np.sqrt(1 + lr_0 * param_group['weight_decay'] * t) 104 | param_group['lr'] = lr 105 | 106 | 107 | class Logger(): 108 | """ Class to update every epoch to keep trace of the results 109 | Methods: 110 | - log() log and save 111 | """ 112 | 113 | def __init__(self, path): 114 | self.path = path 115 | self.data = [] 116 | 117 | def log(self, train_point): 118 | self.data.append(train_point) 119 | with open(os.path.join(self.path), 'wb') as fp: 120 | pickle.dump(self.data, fp, -1) 121 | 122 | 123 | def compute_sdf(img_gt, out_shape): 124 | """ 125 | compute the signed distance map of binary mask 126 | input: segmentation, shape = (batch_size, x, y, z) 127 | output: the Signed Distance Map (SDM) 128 | sdf(x) = 0; x in segmentation boundary 129 | -inf|x-y|; x in segmentation 130 | +inf|x-y|; x out of segmentation 131 | normalize sdf to [-1,1] 132 | """ 133 | 134 | img_gt = img_gt.astype(np.uint8) 135 | normalized_sdf = np.zeros(out_shape) 136 | 137 | for b in range(out_shape[0]): # batch size 138 | posmask = img_gt[b].astype(np.bool) 139 | if posmask.any(): 140 | negmask = ~posmask 141 | posdis = distance(posmask) 142 | negdis = distance(negmask) 143 | boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8) 144 | sdf = (negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis)) - (posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis)) 145 | sdf[boundary==1] = 0 146 | normalized_sdf[b] = sdf 147 | # assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis)) 148 | # assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis)) 149 | 150 | return normalized_sdf -------------------------------------------------------------------------------- /code/dataloaders/dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import torch 4 | import random 5 | import numpy as np 6 | from glob import glob 7 | from torch.utils.data import Dataset 8 | import h5py 9 | from scipy.ndimage.interpolation import zoom 10 | import itertools 11 | from scipy import ndimage 12 | from torch.utils.data.sampler import Sampler 13 | 14 | 15 | class BaseDataSets(Dataset): 16 | def __init__(self, base_dir=None, split='train', num=None, transform=None): 17 | self._base_dir = base_dir 18 | self.sample_list = [] 19 | self.split = split 20 | self.transform = transform 21 | if self.split == 'train': 22 | with open(self._base_dir + '/train_slices.list', 'r') as f1: 23 | self.sample_list = f1.readlines() 24 | self.sample_list = [item.replace('\n', '') 25 | for item in self.sample_list] 26 | 27 | elif self.split == 'val': 28 | with open(self._base_dir + '/val.list', 'r') as f: 29 | self.sample_list = f.readlines() 30 | self.sample_list = [item.replace('\n', '') 31 | for item in self.sample_list] 32 | if num is not None and self.split == "train": 33 | self.sample_list = self.sample_list[:num] 34 | print("total {} samples".format(len(self.sample_list))) 35 | 36 | def __len__(self): 37 | return len(self.sample_list) 38 | 39 | def __getitem__(self, idx): 40 | case = self.sample_list[idx] 41 | if self.split == "train": 42 | h5f = h5py.File(self._base_dir + 43 | "/data/slices/{}.h5".format(case), 'r') 44 | else: 45 | h5f = h5py.File(self._base_dir + "/data/{}.h5".format(case), 'r') 46 | image = h5f['image'][:] 47 | label = h5f['label'][:] 48 | sample = {'image': image, 'label': label} 49 | if self.split == "train": 50 | sample = self.transform(sample) 51 | sample["idx"] = idx 52 | return sample 53 | 54 | 55 | def random_rot_flip(image, label): 56 | k = np.random.randint(0, 4) 57 | image = np.rot90(image, k) 58 | label = np.rot90(label, k) 59 | axis = np.random.randint(0, 2) 60 | image = np.flip(image, axis=axis).copy() 61 | label = np.flip(label, axis=axis).copy() 62 | return image, label 63 | 64 | 65 | def random_rotate(image, label): 66 | angle = np.random.randint(-20, 20) 67 | image = ndimage.rotate(image, angle, order=0, reshape=False) 68 | label = ndimage.rotate(label, angle, order=0, reshape=False) 69 | return image, label 70 | 71 | 72 | class RandomGenerator(object): 73 | def __init__(self, output_size): 74 | self.output_size = output_size 75 | 76 | def __call__(self, sample): 77 | image, label = sample['image'], sample['label'] 78 | # ind = random.randrange(0, img.shape[0]) 79 | # image = img[ind, ...] 80 | # label = lab[ind, ...] 81 | if random.random() > 0.5: 82 | image, label = random_rot_flip(image, label) 83 | elif random.random() > 0.5: 84 | image, label = random_rotate(image, label) 85 | x, y = image.shape 86 | image = zoom( 87 | image, (self.output_size[0] / x, self.output_size[1] / y), order=0) 88 | label = zoom( 89 | label, (self.output_size[0] / x, self.output_size[1] / y), order=0) 90 | image = torch.from_numpy( 91 | image.astype(np.float32)).unsqueeze(0) 92 | label = torch.from_numpy(label.astype(np.uint8)) 93 | sample = {'image': image, 'label': label} 94 | return sample 95 | 96 | 97 | class TwoStreamBatchSampler(Sampler): 98 | """Iterate two sets of indices 99 | 100 | An 'epoch' is one iteration through the primary indices. 101 | During the epoch, the secondary indices are iterated through 102 | as many times as needed. 103 | """ 104 | 105 | def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size): 106 | self.primary_indices = primary_indices 107 | self.secondary_indices = secondary_indices 108 | self.secondary_batch_size = secondary_batch_size 109 | self.primary_batch_size = batch_size - secondary_batch_size 110 | 111 | assert len(self.primary_indices) >= self.primary_batch_size > 0 112 | assert len(self.secondary_indices) >= self.secondary_batch_size > 0 113 | 114 | def __iter__(self): 115 | primary_iter = iterate_once(self.primary_indices) 116 | secondary_iter = iterate_eternally(self.secondary_indices) 117 | return ( 118 | primary_batch + secondary_batch 119 | for (primary_batch, secondary_batch) 120 | in zip(grouper(primary_iter, self.primary_batch_size), 121 | grouper(secondary_iter, self.secondary_batch_size)) 122 | ) 123 | 124 | def __len__(self): 125 | return len(self.primary_indices) // self.primary_batch_size 126 | 127 | 128 | def iterate_once(iterable): 129 | return np.random.permutation(iterable) 130 | 131 | 132 | def iterate_eternally(indices): 133 | def infinite_shuffles(): 134 | while True: 135 | yield np.random.permutation(indices) 136 | return itertools.chain.from_iterable(infinite_shuffles()) 137 | 138 | 139 | def grouper(iterable, n): 140 | "Collect data into fixed-length chunks or blocks" 141 | # grouper('ABCDEFG', 3) --> ABC DEF" 142 | args = [iter(iterable)] * n 143 | return zip(*args) 144 | -------------------------------------------------------------------------------- /code/util/coco.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | 7 | def id2label(id): 8 | if id == 182: 9 | id = 0 10 | else: 11 | id = id + 1 12 | labelmap = \ 13 | {0: 'unlabeled', 14 | 1: 'person', 15 | 2: 'bicycle', 16 | 3: 'car', 17 | 4: 'motorcycle', 18 | 5: 'airplane', 19 | 6: 'bus', 20 | 7: 'train', 21 | 8: 'truck', 22 | 9: 'boat', 23 | 10: 'traffic light', 24 | 11: 'fire hydrant', 25 | 12: 'street sign', 26 | 13: 'stop sign', 27 | 14: 'parking meter', 28 | 15: 'bench', 29 | 16: 'bird', 30 | 17: 'cat', 31 | 18: 'dog', 32 | 19: 'horse', 33 | 20: 'sheep', 34 | 21: 'cow', 35 | 22: 'elephant', 36 | 23: 'bear', 37 | 24: 'zebra', 38 | 25: 'giraffe', 39 | 26: 'hat', 40 | 27: 'backpack', 41 | 28: 'umbrella', 42 | 29: 'shoe', 43 | 30: 'eye glasses', 44 | 31: 'handbag', 45 | 32: 'tie', 46 | 33: 'suitcase', 47 | 34: 'frisbee', 48 | 35: 'skis', 49 | 36: 'snowboard', 50 | 37: 'sports ball', 51 | 38: 'kite', 52 | 39: 'baseball bat', 53 | 40: 'baseball glove', 54 | 41: 'skateboard', 55 | 42: 'surfboard', 56 | 43: 'tennis racket', 57 | 44: 'bottle', 58 | 45: 'plate', 59 | 46: 'wine glass', 60 | 47: 'cup', 61 | 48: 'fork', 62 | 49: 'knife', 63 | 50: 'spoon', 64 | 51: 'bowl', 65 | 52: 'banana', 66 | 53: 'apple', 67 | 54: 'sandwich', 68 | 55: 'orange', 69 | 56: 'broccoli', 70 | 57: 'carrot', 71 | 58: 'hot dog', 72 | 59: 'pizza', 73 | 60: 'donut', 74 | 61: 'cake', 75 | 62: 'chair', 76 | 63: 'couch', 77 | 64: 'potted plant', 78 | 65: 'bed', 79 | 66: 'mirror', 80 | 67: 'dining table', 81 | 68: 'window', 82 | 69: 'desk', 83 | 70: 'toilet', 84 | 71: 'door', 85 | 72: 'tv', 86 | 73: 'laptop', 87 | 74: 'mouse', 88 | 75: 'remote', 89 | 76: 'keyboard', 90 | 77: 'cell phone', 91 | 78: 'microwave', 92 | 79: 'oven', 93 | 80: 'toaster', 94 | 81: 'sink', 95 | 82: 'refrigerator', 96 | 83: 'blender', 97 | 84: 'book', 98 | 85: 'clock', 99 | 86: 'vase', 100 | 87: 'scissors', 101 | 88: 'teddy bear', 102 | 89: 'hair drier', 103 | 90: 'toothbrush', 104 | 91: 'hair brush', # Last class of Thing 105 | 92: 'banner', # Beginning of Stuff 106 | 93: 'blanket', 107 | 94: 'branch', 108 | 95: 'bridge', 109 | 96: 'building-other', 110 | 97: 'bush', 111 | 98: 'cabinet', 112 | 99: 'cage', 113 | 100: 'cardboard', 114 | 101: 'carpet', 115 | 102: 'ceiling-other', 116 | 103: 'ceiling-tile', 117 | 104: 'cloth', 118 | 105: 'clothes', 119 | 106: 'clouds', 120 | 107: 'counter', 121 | 108: 'cupboard', 122 | 109: 'curtain', 123 | 110: 'desk-stuff', 124 | 111: 'dirt', 125 | 112: 'door-stuff', 126 | 113: 'fence', 127 | 114: 'floor-marble', 128 | 115: 'floor-other', 129 | 116: 'floor-stone', 130 | 117: 'floor-tile', 131 | 118: 'floor-wood', 132 | 119: 'flower', 133 | 120: 'fog', 134 | 121: 'food-other', 135 | 122: 'fruit', 136 | 123: 'furniture-other', 137 | 124: 'grass', 138 | 125: 'gravel', 139 | 126: 'ground-other', 140 | 127: 'hill', 141 | 128: 'house', 142 | 129: 'leaves', 143 | 130: 'light', 144 | 131: 'mat', 145 | 132: 'metal', 146 | 133: 'mirror-stuff', 147 | 134: 'moss', 148 | 135: 'mountain', 149 | 136: 'mud', 150 | 137: 'napkin', 151 | 138: 'net', 152 | 139: 'paper', 153 | 140: 'pavement', 154 | 141: 'pillow', 155 | 142: 'plant-other', 156 | 143: 'plastic', 157 | 144: 'platform', 158 | 145: 'playingfield', 159 | 146: 'railing', 160 | 147: 'railroad', 161 | 148: 'river', 162 | 149: 'road', 163 | 150: 'rock', 164 | 151: 'roof', 165 | 152: 'rug', 166 | 153: 'salad', 167 | 154: 'sand', 168 | 155: 'sea', 169 | 156: 'shelf', 170 | 157: 'sky-other', 171 | 158: 'skyscraper', 172 | 159: 'snow', 173 | 160: 'solid-other', 174 | 161: 'stairs', 175 | 162: 'stone', 176 | 163: 'straw', 177 | 164: 'structural-other', 178 | 165: 'table', 179 | 166: 'tent', 180 | 167: 'textile-other', 181 | 168: 'towel', 182 | 169: 'tree', 183 | 170: 'vegetable', 184 | 171: 'wall-brick', 185 | 172: 'wall-concrete', 186 | 173: 'wall-other', 187 | 174: 'wall-panel', 188 | 175: 'wall-stone', 189 | 176: 'wall-tile', 190 | 177: 'wall-wood', 191 | 178: 'water-other', 192 | 179: 'waterdrops', 193 | 180: 'window-blind', 194 | 181: 'window-other', 195 | 182: 'wood'} 196 | if id in labelmap: 197 | return labelmap[id] 198 | else: 199 | return 'unknown' 200 | -------------------------------------------------------------------------------- /code/test_covid.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import shutil 4 | 5 | import h5py 6 | import nibabel as nib 7 | import numpy as np 8 | import SimpleITK as sitk 9 | import torch 10 | from medpy import metric 11 | from scipy.ndimage import zoom 12 | from scipy.ndimage.interpolation import zoom 13 | from tqdm import tqdm 14 | 15 | # from networks.efficientunet import UNet 16 | from networks.net_factory import net_factory 17 | from dataloaders.dataset_covid import (CovidDataSets, RandomGenerator) 18 | from torch.utils.data import DataLoader 19 | import cv2 20 | import pandas as pd 21 | from utils.distance_metric import compute_surface_distances, compute_surface_dice_at_tolerance, compute_dice_coefficient, compute_robust_hausdorff 22 | 23 | 24 | parser = argparse.ArgumentParser() 25 | parser.add_argument('--root_path', type=str, default='home/code/SSL/', help='Name of Experiment') 26 | parser.add_argument('--dataset_name', type=str, default='COVID249', help='Name of dataset') 27 | parser.add_argument('--exp', type=str, default='Cross_Pseudo_Supervision', help='experiment_name') 28 | parser.add_argument('--model', type=str, default='unet', help='model_name') 29 | 30 | 31 | def save_sample_png(png_results_path, file_name , out, rot = False): 32 | split_list = file_name.split('_') 33 | if len(split_list)>2: 34 | volume_num = split_list[0]+'_'+split_list[1] 35 | save_img_name = split_list[2] 36 | else: 37 | volume_num = split_list[0] 38 | save_img_name = split_list[1] 39 | 40 | volume_path = os.path.join(png_results_path, volume_num) 41 | if not os.path.exists(volume_path): 42 | os.makedirs(volume_path) 43 | img = out * 255 44 | img = np.clip(img, 0, 255) 45 | img = img.astype(np.uint8) 46 | #cv2.imshow('result', th2) 47 | #cv2.waitKey(0) 48 | #------------------------------------------------------------------------ 49 | # Save to certain path 50 | save_img_path = os.path.join(volume_path, save_img_name) 51 | cv2.imwrite(save_img_path, img) 52 | 53 | 54 | def pngs_2_niigz(args, png_results_path, nii_results_path, file_volume_name): 55 | volume_files = pd.read_excel(args.root_path + "data/{}/{}".format(args.dataset_name, file_volume_name)) 56 | length = volume_files.shape[0] 57 | for idx in range(length): 58 | volume_file = volume_files.iloc[idx][0] 59 | # load original nii 60 | ori_path= args.root_path + "data/{}/NII/{}".format(args.dataset_name, volume_file+'_ct.nii.gz') 61 | ori_nii = sitk.ReadImage(ori_path , sitk.sitkUInt8) 62 | ori_data = sitk.GetArrayFromImage(ori_nii) 63 | 64 | volume_png_folder = os.path.join(png_results_path, volume_file) 65 | if os.path.exists(volume_png_folder): 66 | png_files = os.listdir(volume_png_folder) 67 | for png_file in png_files: 68 | png_file_slice = int(png_file.split('.')[0]) 69 | png_file_data = cv2.imread(os.path.join(volume_png_folder, png_file), -1) 70 | ori_data_slice = ori_data[png_file_slice, :,:] 71 | ori_data_slice = 0*ori_data_slice 72 | ori_data_slice[png_file_data==255]=1 73 | ori_data[png_file_slice, :,:] = ori_data_slice 74 | 75 | #save nii 76 | out_path = os.path.join(nii_results_path, volume_file+'.nii.gz') 77 | img_new = sitk.GetImageFromArray(ori_data) 78 | img_new.CopyInformation(ori_nii) 79 | sitk.WriteImage(img_new, out_path) 80 | print(volume_file) 81 | 82 | 83 | def evaluate_nii(args, nii_results_path, file_volume_name): 84 | nsd_sum = 0 85 | dice_sum = 0 86 | hau_sum = 0 87 | volume_files = pd.read_excel(args.root_path + "data/{}/{}".format(args.dataset_name, file_volume_name)) 88 | length = volume_files.shape[0] 89 | for idx in range(length): 90 | volume_file = volume_files.iloc[idx][0] 91 | # load gt nii 92 | gt_path= args.root_path + "data/{}/NII/{}".format(args.dataset_name, volume_file+'_seg.nii.gz') 93 | gt_nii = nib.load(gt_path) 94 | gt_data = np.uint8(gt_nii.get_fdata()) 95 | 96 | pred_path= nii_results_path + volume_file+'.nii.gz' 97 | pred_nii = nib.load(pred_path) 98 | pred_data = np.uint8(pred_nii.get_fdata()) 99 | 100 | spacing = gt_nii.header.get_zooms() 101 | 102 | surface_distances = compute_surface_distances(gt_data, pred_data, spacing_mm=spacing) 103 | nsd = compute_surface_dice_at_tolerance(surface_distances, 1) 104 | dice = compute_dice_coefficient(gt_data, pred_data) 105 | print(nsd) 106 | print(dice) 107 | nsd_sum += nsd 108 | dice_sum +=dice 109 | mean_nsd = nsd_sum/length 110 | mean_dice = dice_sum/length 111 | 112 | return mean_nsd, mean_dice 113 | 114 | 115 | 116 | 117 | def get_model_metric(args, model, snapshot_path, model_name, mode='test'): 118 | model.eval() 119 | 120 | file_slice_name = '{}_slice.xlsx'.format(mode) 121 | file_volume_name = '{}_volume.xlsx'.format(mode) 122 | val_dataset = CovidDataSets(root_path=args.root_path, dataset_name=args.dataset_name, file_name = file_slice_name) 123 | print('The overall number of validation images equals to %d' % len(val_dataset)) 124 | val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1) 125 | 126 | png_results_path = os.path.join(snapshot_path, '{}_png/'.format(model_name)) 127 | if os.path.isdir(png_results_path) is False: 128 | os.mkdir(png_results_path) 129 | 130 | for batch_idx, (image, label, file_name, _) in enumerate(val_dataloader): 131 | image = image.cuda() 132 | label = label.cuda() 133 | 134 | with torch.no_grad(): 135 | out_main = model(image) 136 | out = torch.argmax(torch.softmax(out_main, dim=1), dim=1).squeeze(0) 137 | out = out.cpu().detach().numpy() 138 | save_sample_png(png_results_path, file_name = file_name[0], out=out) 139 | 140 | # png results to nii.gz label 141 | nii_results_path = os.path.join(snapshot_path, '{}_nii/'.format(model_name)) 142 | if os.path.isdir(nii_results_path) is False: 143 | os.mkdir(nii_results_path) 144 | pngs_2_niigz(args= args, png_results_path = png_results_path, nii_results_path=nii_results_path, file_volume_name = file_volume_name) 145 | # evaluate result 146 | nsd, dice = evaluate_nii(args = args, nii_results_path=nii_results_path, file_volume_name = file_volume_name) 147 | return nsd, dice 148 | 149 | 150 | if __name__ == '__main__': 151 | args = parser.parse_args() 152 | 153 | -------------------------------------------------------------------------------- /code/utils/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | import numpy as np 4 | import torch.nn as nn 5 | from torch.autograd import Variable 6 | 7 | 8 | def dice_loss(score, target): 9 | target = target.float() 10 | smooth = 1e-5 11 | intersect = torch.sum(score * target) 12 | y_sum = torch.sum(target * target) 13 | z_sum = torch.sum(score * score) 14 | loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth) 15 | loss = 1 - loss 16 | return loss 17 | 18 | 19 | def dice_loss1(score, target): 20 | target = target.float() 21 | smooth = 1e-5 22 | intersect = torch.sum(score * target) 23 | y_sum = torch.sum(target) 24 | z_sum = torch.sum(score) 25 | loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth) 26 | loss = 1 - loss 27 | return loss 28 | 29 | 30 | def entropy_loss(p, C=2): 31 | # p N*C*W*H*D 32 | y1 = -1*torch.sum(p*torch.log(p+1e-6), dim=1) / \ 33 | torch.tensor(np.log(C)).cuda() 34 | ent = torch.mean(y1) 35 | 36 | return ent 37 | 38 | 39 | def softmax_dice_loss(input_logits, target_logits): 40 | """Takes softmax on both sides and returns MSE loss 41 | 42 | Note: 43 | - Returns the sum over all examples. Divide by the batch size afterwards 44 | if you want the mean. 45 | - Sends gradients to inputs but not the targets. 46 | """ 47 | assert input_logits.size() == target_logits.size() 48 | input_softmax = F.softmax(input_logits, dim=1) 49 | target_softmax = F.softmax(target_logits, dim=1) 50 | n = input_logits.shape[1] 51 | dice = 0 52 | for i in range(0, n): 53 | dice += dice_loss1(input_softmax[:, i], target_softmax[:, i]) 54 | mean_dice = dice / n 55 | 56 | return mean_dice 57 | 58 | 59 | def entropy_loss_map(p, C=2): 60 | ent = -1*torch.sum(p * torch.log(p + 1e-6), dim=1, 61 | keepdim=True)/torch.tensor(np.log(C)).cuda() 62 | return ent 63 | 64 | 65 | def softmax_mse_loss(input_logits, target_logits, sigmoid=False): 66 | """Takes softmax on both sides and returns MSE loss 67 | 68 | Note: 69 | - Returns the sum over all examples. Divide by the batch size afterwards 70 | if you want the mean. 71 | - Sends gradients to inputs but not the targets. 72 | """ 73 | assert input_logits.size() == target_logits.size() 74 | if sigmoid: 75 | input_softmax = torch.sigmoid(input_logits) 76 | target_softmax = torch.sigmoid(target_logits) 77 | else: 78 | input_softmax = F.softmax(input_logits, dim=1) 79 | target_softmax = F.softmax(target_logits, dim=1) 80 | 81 | mse_loss = (input_softmax-target_softmax)**2 82 | return mse_loss 83 | 84 | 85 | def softmax_kl_loss(input_logits, target_logits, sigmoid=False): 86 | """Takes softmax on both sides and returns KL divergence 87 | 88 | Note: 89 | - Returns the sum over all examples. Divide by the batch size afterwards 90 | if you want the mean. 91 | - Sends gradients to inputs but not the targets. 92 | """ 93 | assert input_logits.size() == target_logits.size() 94 | if sigmoid: 95 | input_log_softmax = torch.log(torch.sigmoid(input_logits)) 96 | target_softmax = torch.sigmoid(target_logits) 97 | else: 98 | input_log_softmax = F.log_softmax(input_logits, dim=1) 99 | target_softmax = F.softmax(target_logits, dim=1) 100 | 101 | # return F.kl_div(input_log_softmax, target_softmax) 102 | kl_div = F.kl_div(input_log_softmax, target_softmax, reduction='mean') 103 | # mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...]) 104 | return kl_div 105 | 106 | 107 | def symmetric_mse_loss(input1, input2): 108 | """Like F.mse_loss but sends gradients to both directions 109 | 110 | Note: 111 | - Returns the sum over all examples. Divide by the batch size afterwards 112 | if you want the mean. 113 | - Sends gradients to both input1 and input2. 114 | """ 115 | assert input1.size() == input2.size() 116 | return torch.mean((input1 - input2)**2) 117 | 118 | 119 | class FocalLoss(nn.Module): 120 | def __init__(self, gamma=2, alpha=None, size_average=True): 121 | super(FocalLoss, self).__init__() 122 | self.gamma = gamma 123 | self.alpha = alpha 124 | if isinstance(alpha, (float, int)): 125 | self.alpha = torch.Tensor([alpha, 1-alpha]) 126 | if isinstance(alpha, list): 127 | self.alpha = torch.Tensor(alpha) 128 | self.size_average = size_average 129 | 130 | def forward(self, input, target): 131 | if input.dim() > 2: 132 | # N,C,H,W => N,C,H*W 133 | input = input.view(input.size(0), input.size(1), -1) 134 | input = input.transpose(1, 2) # N,C,H*W => N,H*W,C 135 | input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C 136 | target = target.view(-1, 1) 137 | 138 | logpt = F.log_softmax(input, dim=1) 139 | logpt = logpt.gather(1, target) 140 | logpt = logpt.view(-1) 141 | pt = Variable(logpt.data.exp()) 142 | 143 | if self.alpha is not None: 144 | if self.alpha.type() != input.data.type(): 145 | self.alpha = self.alpha.type_as(input.data) 146 | at = self.alpha.gather(0, target.data.view(-1)) 147 | logpt = logpt * Variable(at) 148 | 149 | loss = -1 * (1-pt)**self.gamma * logpt 150 | if self.size_average: 151 | return loss.mean() 152 | else: 153 | return loss.sum() 154 | 155 | 156 | class DiceLoss(nn.Module): 157 | def __init__(self, n_classes): 158 | super(DiceLoss, self).__init__() 159 | self.n_classes = n_classes 160 | 161 | def _one_hot_encoder(self, input_tensor): 162 | tensor_list = [] 163 | for i in range(self.n_classes): 164 | temp_prob = input_tensor == i * torch.ones_like(input_tensor) 165 | tensor_list.append(temp_prob) 166 | output_tensor = torch.cat(tensor_list, dim=1) 167 | return output_tensor.float() 168 | 169 | def _dice_loss(self, score, target): 170 | target = target.float() 171 | smooth = 1e-5 172 | intersect = torch.sum(score * target) 173 | y_sum = torch.sum(target * target) 174 | z_sum = torch.sum(score * score) 175 | loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth) 176 | loss = 1 - loss 177 | return loss 178 | 179 | def forward(self, inputs, target, weight=None, softmax=False): 180 | if softmax: 181 | inputs = torch.softmax(inputs, dim=1) 182 | target = self._one_hot_encoder(target) 183 | if weight is None: 184 | weight = [1] * self.n_classes 185 | assert inputs.size() == target.size(), 'predict & target shape do not match' 186 | class_wise_dice = [] 187 | loss = 0.0 188 | for i in range(0, self.n_classes): 189 | dice = self._dice_loss(inputs[:, i], target[:, i]) 190 | class_wise_dice.append(1.0 - dice.item()) 191 | loss += dice * weight[i] 192 | return loss / self.n_classes 193 | 194 | 195 | def entropy_minmization(p): 196 | y1 = -1*torch.sum(p*torch.log(p+1e-6), dim=1) 197 | ent = torch.mean(y1) 198 | 199 | return ent 200 | 201 | 202 | def entropy_map(p): 203 | ent_map = -1*torch.sum(p * torch.log(p + 1e-6), dim=1, 204 | keepdim=True) 205 | return ent_map 206 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pseudo-Label Guided Image Synthesis for Semi-Supervised COVID-19 Pneumonia Infection Segmentation. 2 | 3 | Implementation of [Pseudo-Label Guided Image Synthesis for Semi-Supervised COVID-19 Pneumonia Infection Segmentation](https://ieeexplore.ieee.org/document/9931157). 4 | 5 |
6 |
7 |
126 |
127 |