├── assests
│   ├── SACA.png
│   ├── scSE.PNG
│   ├── 3dVNet.png
│   ├── AGModel.PNG
│   ├── AGVnet.PNG
│   ├── DCVNet.png
│   ├── ETVNet.PNG
│   ├── GAVnet.png
│   ├── MA-UNet.png
│   ├── PEVnet.png
│   ├── PEmodel.png
│   ├── SEModel.png
│   ├── SEVNet.png
│   ├── V2-Net.png
│   ├── Vnet++.PNG
│   ├── ETModule.PNG
│   ├── FusionVnet.bmp
│   ├── NonLocal.PNG
│   ├── Res-Path.png
│   ├── ResUModule.png
│   ├── scSEVnet.png
│   ├── NonLocalVnet.PNG
│   ├── multiResVnet.png
│   ├── GrideAttention.png
│   ├── VNettripleplus.bmp
│   ├── multiResmodule.png
│   ├── mutil-task Vnet.bmp
│   ├── mutil-task-Vnet.bmp
│   ├── wetchatnumber.jpg
│   ├── dualchannelmodule.png
│   ├── mutil_depthfusion.bmp
│   ├── dual_attention_model.PNG
│   ├── dual_attention_net.PNG
│   ├── mutil_depth_deepVNet.bmp
│   └── FullScaleSkipConncetion.bmp
├── VnetFamily
│   ├── ETVnet
│   │   ├── __init__.py
│   │   └── model_ETvnet3d.py
│   ├── GAVNet
│   │   ├── __init__.py
│   │   └── GAVnet.py
│   ├── PEVnet
│   │   ├── __init__.py
│   │   └── PEVnet.py
│   ├── SCSEVnet
│   │   ├── __init__.py
│   │   └── model_scsevnet3d.py
│   ├── SEVnet
│   │   ├── __init__.py
│   │   └── model_sevnet3d.py
│   ├── Vnet
│   │   ├── __init__.py
│   │   └── model_vnet3d.py
│   ├── FusionVnet
│   │   ├── __init__.py
│   │   └── model_fusionvnet3d.py
│   ├── NestedVnet
│   │   ├── __init__.py
│   │   ├── layer.py
│   │   └── model_Nestedvnet3d.py
│   ├── NonLocalVNet
│   │   ├── __init__.py
│   │   └── NonLocalVnet.py
│   ├── Vnettripleplus
│   │   ├── __init__.py
│   │   └── model_vnet3dtripleplus.py
│   ├── mutildepthVnet
│   │   ├── __init__.py
│   │   ├── layer.py
│   │   └── mutildepth_vnet.py
│   ├── AttentionGatedVnet
│   │   ├── __init__.py
│   │   ├── layer.py
│   │   └── model_attention_vnet3d.py
│   ├── DualAttentionVnet
│   │   ├── __init__.py
│   │   ├── layer.py
│   │   └── model_dualattention_vnet3d.py
│   └── mutiltask-Vnet
│       ├── __init__.py
│       └── model_vnet3d_distancemap_multilabel.py
└── README.md

/assests/SACA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/SACA.png
--------------------------------------------------------------------------------
/assests/scSE.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/scSE.PNG
--------------------------------------------------------------------------------
/assests/3dVNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/3dVNet.png
--------------------------------------------------------------------------------
/assests/AGModel.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/AGModel.PNG
--------------------------------------------------------------------------------
/assests/AGVnet.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/AGVnet.PNG
--------------------------------------------------------------------------------
/assests/DCVNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/DCVNet.png
--------------------------------------------------------------------------------
/assests/ETVNet.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/ETVNet.PNG
--------------------------------------------------------------------------------
/assests/GAVnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/GAVnet.png
--------------------------------------------------------------------------------
/assests/MA-UNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/MA-UNet.png
--------------------------------------------------------------------------------
/assests/PEVnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/PEVnet.png
--------------------------------------------------------------------------------
/assests/PEmodel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/PEmodel.png
--------------------------------------------------------------------------------
/assests/SEModel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/SEModel.png
--------------------------------------------------------------------------------
/assests/SEVNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/SEVNet.png
--------------------------------------------------------------------------------
/assests/V2-Net.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/V2-Net.png
--------------------------------------------------------------------------------
/assests/Vnet++.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/Vnet++.PNG
--------------------------------------------------------------------------------
/assests/ETModule.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/ETModule.PNG
--------------------------------------------------------------------------------
/assests/FusionVnet.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/FusionVnet.bmp
--------------------------------------------------------------------------------
/assests/NonLocal.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/NonLocal.PNG
--------------------------------------------------------------------------------
/assests/Res-Path.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/Res-Path.png
--------------------------------------------------------------------------------
/assests/ResUModule.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/ResUModule.png
--------------------------------------------------------------------------------
/assests/scSEVnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/scSEVnet.png
--------------------------------------------------------------------------------
/assests/NonLocalVnet.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/NonLocalVnet.PNG
--------------------------------------------------------------------------------
/assests/multiResVnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/multiResVnet.png
--------------------------------------------------------------------------------
/assests/GrideAttention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/GrideAttention.png
--------------------------------------------------------------------------------
/assests/VNettripleplus.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/VNettripleplus.bmp
--------------------------------------------------------------------------------
/assests/multiResmodule.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/multiResmodule.png
--------------------------------------------------------------------------------
/assests/mutil-task Vnet.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/mutil-task Vnet.bmp
--------------------------------------------------------------------------------
/assests/mutil-task-Vnet.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/mutil-task-Vnet.bmp
--------------------------------------------------------------------------------
/assests/wetchatnumber.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/wetchatnumber.jpg
--------------------------------------------------------------------------------
/VnetFamily/ETVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2020.1.14'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/GAVNet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.11.1'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/PEVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.4.9'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/SCSEVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.10.24'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/SEVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.10.24'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/Vnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.10.24'
4 | 
--------------------------------------------------------------------------------
/assests/dualchannelmodule.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/dualchannelmodule.png
--------------------------------------------------------------------------------
/assests/mutil_depthfusion.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/mutil_depthfusion.bmp
--------------------------------------------------------------------------------
/VnetFamily/FusionVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.10.21'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/NestedVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.10.21'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/NonLocalVNet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.4.9'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/Vnettripleplus/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2020.4.23'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/mutildepthVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2020.3.5'
4 | 
--------------------------------------------------------------------------------
/assests/dual_attention_model.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/dual_attention_model.PNG
--------------------------------------------------------------------------------
/assests/dual_attention_net.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/dual_attention_net.PNG
--------------------------------------------------------------------------------
/assests/mutil_depth_deepVNet.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/mutil_depth_deepVNet.bmp
--------------------------------------------------------------------------------
/VnetFamily/AttentionGatedVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.10.9'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/DualAttentionVnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.4.9'
4 | 
--------------------------------------------------------------------------------
/VnetFamily/mutiltask-Vnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.0.0'
3 | __Time__ = '2019.12.13'
4 | 
--------------------------------------------------------------------------------
/assests/FullScaleSkipConncetion.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/VNetFamily/HEAD/assests/FullScaleSkipConncetion.bmp
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # VNetFamily-Tensorflow
2 | > A collection of VNet variant networks implemented with TensorFlow
3 | 
4 | ## Requirements
5 | * Tensorflow-gpu == 1.8
6 | * Python == 3.5.5
7 | 
8 | ## VNet
9 | You can read the ***paper*** via this [link](http://campar.in.tum.de/pub/milletari2016Vnet/milletari2016Vnet.pdf)
10 | ### What is the VNet
11 | ![senet](./assests/3dVNet.png)
12 | 
13 | ## AttentionGatedVNet
14 | You can read the ***paper*** via this [link](https://openreview.net/pdf?id=BJtn7-3sM)
15 | ### What is the AttentionGate
16 | ![senet](./assests/AGModel.PNG)
17 | ### What is the AttentionGateVNet
18 | ![senet](./assests/AGVnet.PNG)
19 | 
20 | ## DualAttentionVNet
21 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1809.02983)
22 | ### What is the DualAttention
23 | ![senet](./assests/dual_attention_model.PNG)
24 | ### What is the DualAttentionNet
25 | ![senet](./assests/dual_attention_net.PNG)
26 | 
27 | ## FusionVNet
28 | You can read the ***paper*** via this [link](https://hal-univ-bourgogne.archives-ouvertes.fr/hal-02060222/document)
29 | ### What is the FusionVNet
30 | ![senet](./assests/FusionVnet.bmp)
31 | 
32 | ## NestedVNet(VNet++)
33 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1807.10165)
34 | ### What is the VNet++
35 | ![senet](./assests/Vnet++.PNG)
36 | 
37 | ## SEVNet
38 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1709.01507)
39 | ### What is the SEModel
40 | ![senet](./assests/SEModel.png)
41 | ### What is the SEVNet
42 | ![senet](./assests/SEVNet.png)
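43 | 
44 | For intuition, here is a minimal, hedged sketch of the channel squeeze-and-excitation block in the TF1 style this repo uses (the function name `squeeze_excitation_3d` and the `ratio` bottleneck are illustrative, not taken from the repo code):
45 | ```python
46 | import tensorflow as tf
47 | 
48 | def squeeze_excitation_3d(x, out_dim, ratio=4, scope='se'):
49 |     """Channel SE block for a 5-D tensor [batch, z, y, x, channel]."""
50 |     with tf.variable_scope(scope):
51 |         # squeeze: global average pool over z, y, x -> [batch, channel]
52 |         squeeze = tf.reduce_mean(x, axis=(1, 2, 3))
53 |         # excitation: bottleneck MLP with relu, then a sigmoid gate
54 |         fc1 = tf.layers.dense(squeeze, units=out_dim // ratio, activation=tf.nn.relu)
55 |         fc2 = tf.layers.dense(fc1, units=out_dim, activation=tf.nn.sigmoid)
56 |         # rescale the input feature map channel-wise
57 |         return x * tf.reshape(fc2, [-1, 1, 1, 1, out_dim])
58 | ```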
59 | 
60 | ## SCSEVNet
61 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1803.02579)
62 | ### What is the SCSEModel
63 | ![senet](./assests/scSE.PNG)
64 | ### What is the SCSEVNet
65 | ![senet](./assests/scSEVnet.png)
66 | 
67 | ## GAVNet
68 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1907.12930)
69 | ### What is the GridAttentionModel
70 | ![senet](./assests/GrideAttention.png)
71 | ### What is the GAVNet
72 | ![senet](./assests/GAVnet.png)
73 | 
74 | ## NonLocalVNet
75 | You can read the ***paper*** via this [link](http://openaccess.thecvf.com/content_cvpr_2018/papers/Wang_Non-Local_Neural_Networks_CVPR_2018_paper.pdf)
76 | ### What is the NonLocalBlock
77 | ![senet](./assests/NonLocal.PNG)
78 | ### What is the NonLocalVNet
79 | ![senet](./assests/NonLocalVnet.PNG)
80 | 
81 | ## PEVNet
82 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1906.04649)
83 | ### What is the PEModel
84 | ![senet](./assests/PEmodel.png)
85 | ### What is the PEVNet
86 | ![senet](./assests/PEVnet.png)
87 | 
88 | ## ETVNet
89 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1907.10936)
90 | ### What is the ETModel
91 | ![senet](./assests/ETModule.PNG)
92 | ### What is the ETVNet
93 | ![senet](./assests/ETVNet.PNG)
94 | 
95 | ## MutilTask-VNet
96 | You can read the ***paper*** via this [link](https://arxiv.org/abs/1902.04099)
97 | ### What is the MutilTask-VNet
98 | ![senet](./assests/mutil-task-Vnet.bmp)
99 | 
100 | ## MutilDepthFusion-VNet
101 | You can read the ***paper*** via this [link](https://www.researchgate.net/publication/331145628_Multi-Depth_Fusion_Network_for_Whole-Heart_CT_Image_Segmentation)
102 | ### What is the MutilDepthFusion-VNet
103 | ![senet](./assests/mutil_depthfusion.bmp)
104 | ### What is the MutilDepthFusion module
105 | ![senet](./assests/mutil_depth_deepVNet.bmp)
106 | 
107 | ## VNettripleplus
108 | You can read the ***paper*** via this [link](https://arxiv.org/abs/2004.08790)
109 | ### What is the VNettripleplus
110 | ![senet](./assests/VNettripleplus.bmp)
111 | ### What is the full-scale skip connection
112 | ![senet](./assests/FullScaleSkipConncetion.bmp)
113 | 
114 | ## MultiResVNet
115 | You can read the ***paper*** via this [link](https://arxiv.org/abs/2006.00414)
116 | ### What is the MultiResModule
117 | ![senet](./assests/multiResmodule.png)
118 | ### What is the Res-Path
119 | ![senet](./assests/Res-Path.png)
120 | ### What is the MultiResVNet
121 | ![senet](./assests/multiResVnet.png)
122 | 
123 | ## DCVNet
124 | You can read the ***paper*** via this [link](https://arxiv.org/abs/2006.00414)
125 | ### What is the Dual-ChannelModule
126 | ![senet](./assests/dualchannelmodule.png)
127 | ### What is the Res-Path
128 | ![senet](./assests/Res-Path.png)
129 | ### What is the DCVNet
130 | ![senet](./assests/DCVNet.png)
131 | 
132 | ## V2-Net
133 | You can read the ***paper*** via this [link](https://arxiv.org/abs/2005.09007)
134 | ### What is the ResUModule
135 | ![senet](./assests/ResUModule.png)
136 | ### What is the V2-Net
137 | ![senet](./assests/V2-Net.png)
138 | 
139 | ## MA-UNet
140 | You can read the ***paper*** via this [link](https://arxiv.org/abs/2012.10952)
141 | ### What is the Spatial Attention and Channel Attention
142 | ![senet](./assests/SACA.png)
143 | ### What is the MA-UNet
144 | ![senet](./assests/MA-UNet.png)
145 | 
146 | ## Author
147 | junqiangchen
148 | 
149 | ## Contact
150 | * https://github.com/junqiangchen
151 | * email: 1207173174@qq.com
152 | * WeChat Public number: 最新医学影像技术 (latest medical imaging technology)
153 | * ![senet](./assests/wetchatnumber.jpg)
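154 | 
155 | ## Quick usage sketch
156 | A hedged sketch of wiring up the plain VNet graph from `VnetFamily/Vnet/model_vnet3d.py` (the placeholder sizes are illustrative; this assumes the repo root is on `PYTHONPATH` and that the helper layer module `model_vnet3d.py` imports is available):
157 | ```python
158 | import tensorflow as tf
159 | from VnetFamily.Vnet.model_vnet3d import _createVnet
160 | 
161 | Z, H, W, C = 64, 128, 128, 1
162 | X = tf.placeholder(tf.float32, shape=[None, Z, H, W, C])
163 | phase = tf.placeholder(tf.bool)      # True while training (controls normalization)
164 | drop = tf.placeholder(tf.float32)    # keep probability for dropout
165 | logits, probs = _createVnet(X, Z, H, W, C, phase, drop, n_class=1)
166 | ```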
--------------------------------------------------------------------------------
/VnetFamily/Vnet/model_vnet3d.py:
--------------------------------------------------------------------------------
1 | '''
2 | 3D VNet model definition.
3 | '''
4 | from .layer import (conv_bn_relu_drop, down_sampling, deconv_relu, crop_and_concat, resnet_Add, conv_sigmod)
5 | import tensorflow as tf
6 | 
7 | 
8 | # Create Conv Net
9 | def _createVnet(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1):
10 |     inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel])  # 5-D input: [batch, z, width, height, channel]
11 |     # Vnet model
12 |     # encoder stage 1: convolution
13 |     layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
14 |                                scope='layer0')
15 |     layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
16 |                                scope='layer1')
17 |     layer1 = resnet_Add(x1=layer0, x2=layer1)
18 |     # down sampling1
19 |     down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1')
20 |     # encoder stage 2: convolution
21 |     layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
22 |                                scope='layer2_1')
23 |     layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
24 |                                scope='layer2_2')
25 |     layer2 = resnet_Add(x1=down1, x2=layer2)
26 |     # down sampling2
27 |     down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2')
28 |     # encoder stage 3: convolution
29 |     layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
30 |                                scope='layer3_1')
31 |     layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
32 |                                scope='layer3_2')
33 |     layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
34 |                                scope='layer3_3')
35 |     layer3 = resnet_Add(x1=down2, x2=layer3)
36 |     # down sampling3
37 |     down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3')
38 |     # encoder stage 4: convolution
39 |     layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
40 |                                scope='layer4_1')
41 |     layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
42 |                                scope='layer4_2')
43 |     layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
44 |                                scope='layer4_3')
45 |     layer4 = resnet_Add(x1=down3, x2=layer4)
46 |     # down sampling4
47 |     down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4')
48 |     # encoder stage 5 (bottleneck): convolution
49 |     layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
50 |                                scope='layer5_1')
51 |     layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
52 |                                scope='layer5_2')
53 |     layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
54 |                                scope='layer5_3')
55 |     layer5 = resnet_Add(x1=down4, x2=layer5)
56 | 
57 |     # decoder stage 1: deconvolution
58 |     deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1')
59 |     # decoder stage 1: skip connection + convolution
60 |     layer6 = crop_and_concat(layer4, deconv1)
61 |     _, Z, H, W, _ = layer4.get_shape().as_list()
62 |     layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase,
63 |                                drop=drop, scope='layer6_1')
64 |     layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
65 |                                drop=drop, scope='layer6_2')
66 |     layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
67 |                                drop=drop, scope='layer6_3')
68 |     layer6 = resnet_Add(x1=deconv1, x2=layer6)
69 |     # decoder stage 2: deconvolution
70 |     deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2')
71 |     # decoder stage 2: skip connection + convolution
72 |     layer7 = crop_and_concat(layer3, deconv2)
73 |     _, Z, H, W, _ = layer3.get_shape().as_list()
74 |     layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase,
75 |                                drop=drop, scope='layer7_1')
76 |     layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
77 |                                drop=drop, scope='layer7_2')
78 |     layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
79 |                                drop=drop, scope='layer7_3')
80 |     layer7 = resnet_Add(x1=deconv2, x2=layer7)
81 |     # decoder stage 3: deconvolution
82 |     deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3')
83 |     # decoder stage 3: skip connection + convolution
84 |     layer8 = crop_and_concat(layer2, deconv3)
85 |     _, Z, H, W, _ = layer2.get_shape().as_list()
86 |     layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase,
87 |                                drop=drop, scope='layer8_1')
88 |     layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
89 |                                drop=drop, scope='layer8_2')
90 |     layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
91 |                                drop=drop, scope='layer8_3')
92 |     layer8 = resnet_Add(x1=deconv3, x2=layer8)
93 |     # decoder stage 4: deconvolution
94 |     deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4')
95 |     # decoder stage 4: skip connection + convolution
96 |     layer9 = crop_and_concat(layer1, deconv4)
97 |     _, Z, H, W, _ = layer1.get_shape().as_list()
98 |     layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 16), image_z=Z, height=H, width=W, phase=phase,
99 |                                drop=drop, scope='layer9_1')
100 |     layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 16, 16), image_z=Z, height=H, width=W, phase=phase,
101 |                                drop=drop, scope='layer9_2')
102 |     layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 16, 16), image_z=Z, height=H, width=W, phase=phase,
103 |                                drop=drop, scope='layer9_3')
104 |     layer9 = resnet_Add(x1=deconv4, x2=layer9)
105 |     # output: 1x1x1 convolution, returning logits and the sigmoid map
106 |     output_map_logit = conv_sigmod(x=layer9, kernal=(1, 1, 1, 16, n_class), scope='output', activeflag=False)
107 |     output_map = tf.nn.sigmoid(output_map_logit)
108 |     return output_map_logit, output_map
109 | 
--------------------------------------------------------------------------------
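
The V-Net paper linked in the README trains this kind of network with a soft Dice loss. The repo's training code is not part of this dump, so the following is only a minimal sketch of such a loss against the sigmoid map returned by `_createVnet` (the function name `soft_dice_loss` and the `smooth` constant are illustrative):

```python
import tensorflow as tf

def soft_dice_loss(probs, labels, smooth=1e-5):
    # probs, labels: [batch, z, y, x, 1] tensors, labels in {0, 1}
    axes = (1, 2, 3, 4)
    intersection = tf.reduce_sum(probs * labels, axis=axes)
    denominator = tf.reduce_sum(probs, axis=axes) + tf.reduce_sum(labels, axis=axes)
    dice = (2.0 * intersection + smooth) / (denominator + smooth)
    return 1.0 - tf.reduce_mean(dice)
```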
/VnetFamily/mutildepthVnet/layer.py:
--------------------------------------------------------------------------------
1 | '''
2 | convolution layer, pool layer, initialization, etc.
3 | '''
4 | from __future__ import division
5 | import tensorflow as tf
6 | import numpy as np
7 | import cv2
8 | 
9 | 
10 | # Weight initialization (Xavier's init)
11 | def weight_xavier_init(shape, n_inputs, n_outputs, activefunction='sigomd', uniform=True, variable_name=None):
12 |     if activefunction == 'sigomd':
13 |         if uniform:
14 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
15 |             initial = tf.random_uniform(shape, -init_range, init_range)
16 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
17 |         else:
18 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs))
19 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
20 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
21 |     elif activefunction == 'relu':
22 |         if uniform:
23 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * np.sqrt(2)
24 |             initial = tf.random_uniform(shape, -init_range, init_range)
25 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
26 |         else:
27 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * np.sqrt(2)
28 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
29 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
30 |     elif activefunction == 'tan':
31 |         if uniform:
32 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * 4
33 |             initial = tf.random_uniform(shape, -init_range, init_range)
34 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
35 |         else:
36 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * 4
37 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
38 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
39 | 
40 | 
41 | # Bias initialization
42 | def bias_variable(shape, variable_name=None):
43 |     initial = tf.constant(0.1, shape=shape)
44 |     return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
45 | 
46 | 
47 | # 3D convolution
48 | def conv3d(x, W, stride=1, dilation=1):
49 |     conv_3d = tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding='SAME',
50 |                            dilations=[1, dilation, dilation, dilation, 1])
51 |     return conv_3d
52 | 
53 | 
54 | # 3D upsampling
55 | def upsample3d(x, scale_factor, scope=None):
56 |     '''
57 |     X shape is [nsample, dim, rows, cols, channel]
58 |     out shape is [nsample, dim*scale_factor, rows*scale_factor, cols*scale_factor, channel]
59 |     '''
60 |     x_shape = tf.shape(x)
61 |     k = tf.ones([scale_factor, scale_factor, scale_factor, x_shape[-1], x_shape[-1]])
62 |     # note k.shape = [dim, rows, cols, depth_in, depth_output]
63 |     output_shape = tf.stack(
64 |         [x_shape[0], x_shape[1] * scale_factor, x_shape[2] * scale_factor, x_shape[3] * scale_factor, x_shape[4]])
65 |     upsample = tf.nn.conv3d_transpose(value=x, filter=k, output_shape=output_shape,
66 |                                       strides=[1, scale_factor, scale_factor, scale_factor, 1],
67 |                                       padding='SAME', name=scope)
68 |     return upsample
69 | 
70 | 
71 | # 3D deconvolution
72 | def deconv3d(x, W, samefeature=False, depth=False):
73 |     """
74 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the output z axis is twice the input
75 |     """
76 |     x_shape = tf.shape(x)
77 |     if depth:
78 |         if samefeature:
79 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4]])
80 |         else:
81 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4] // 2])
82 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 2, 1], padding='SAME')
83 |     else:
84 |         if samefeature:
85 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4]])
86 |         else:
87 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4] // 2])
88 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 1, 1], padding='SAME')
89 |     return deconv
90 | 
91 | 
92 | # Max Pooling
93 | def max_pool3d(x, depth=False):
94 |     """
95 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the z axis is pooled by 2 as well
96 |     """
97 |     if depth:
98 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
99 |     else:
100 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 1, 1], strides=[1, 2, 2, 1, 1], padding='SAME')
101 |     return pool3d
102 | 
103 | 
104 | # Unet crop and concat
105 | def crop_and_concat(x1, x2):
106 |     """
107 |     concat x1 and x2
108 |     :param x1:
109 |     :param x2:
110 |     :return:
111 |     """
112 |     x1_shape = tf.shape(x1)
113 |     x2_shape = tf.shape(x2)
114 |     # offsets for the top left corner of the crop
115 |     offsets = [0, (x1_shape[1] - x2_shape[1]) // 2,
116 |                (x1_shape[2] - x2_shape[2]) // 2, (x1_shape[3] - x2_shape[3]) // 2, 0]
117 |     size = [-1, x2_shape[1], x2_shape[2], x2_shape[3], -1]
118 |     x1_crop = tf.slice(x1, offsets, size)
119 |     return tf.concat([x1_crop, x2], 4)
120 | 
121 | 
122 | # Batch Normalization
123 | def normalizationlayer(x, is_train, height=None, width=None, image_z=None, norm_type=None, G=16, esp=1e-5, scope=None):
124 |     """
125 |     normalizationlayer
126 |     :param x: input data with shape of [batch, height, width, channel]
127 |     :param is_train: flag of normalizationlayer, True is training, False is testing
128 |     :param height: in some conditions the data height is determined at runtime, such as after a deconv layer
129 |     :param width: in some conditions the data width is determined at runtime
130 |     :param image_z:
131 |     :param norm_type: normalization type, supports "batch", "group", None
132 |     :param G: in group normalization, channels are separated into G groups
133 |     :param esp: prevents the divisor from being zero
134 |     :param scope: normalizationlayer scope
135 |     :return:
136 |     """
137 |     with tf.name_scope(scope + norm_type):
138 |         if norm_type == None:
139 |             output = x
140 |         elif norm_type == 'batch':
141 |             output = tf.contrib.layers.batch_norm(x, center=True, scale=True, is_training=is_train)
142 |         elif norm_type == "group":
143 |             # transpose: [bs,z,h,w,c] to [bs,c,z,h,w] following the paper
144 |             x = tf.transpose(x, [0, 4, 1, 2, 3])
145 |             N, C, Z, H, W = x.get_shape().as_list()
146 |             G = min(G, C)
147 |             if H == None and W == None and Z == None:
148 |                 Z, H, W = image_z, height, width
149 |             x = tf.reshape(x, [-1, G, C // G, Z, H, W])
150 |             mean, var = tf.nn.moments(x, [2, 3, 4, 5], keep_dims=True)
151 |             x = (x - mean) / tf.sqrt(var + esp)
152 |             gama = tf.get_variable(scope + norm_type + 'group_gama', [C], initializer=tf.constant_initializer(1.0))
153 |             beta = tf.get_variable(scope + norm_type + 'group_beta', [C], initializer=tf.constant_initializer(0.0))
154 |             gama = tf.reshape(gama, [1, C, 1, 1, 1])
155 |             beta = tf.reshape(beta, [1, C, 1, 1, 1])
156 |             output = tf.reshape(x, [-1, C, Z, H, W]) * gama + beta
157 |             # transpose: [bs,c,z,h,w] to [bs,z,h,w,c] following the paper
158 |             output = tf.transpose(output, [0, 2, 3, 4, 1])
159 |     return output
160 | 
161 | 
162 | # resnet add_connect
163 | def resnet_Add(x1, x2):
164 |     """
165 |     add x1 and x2
166 |     :param x1:
167 |     :param x2:
168 |     :return:
169 |     """
170 |     residual_connection = tf.add(x1, x2)
171 |     return residual_connection
--------------------------------------------------------------------------------
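
To make the `depth` and `samefeature` flags in `deconv3d` concrete, here is a hedged shape check (the input sizes and filter shape are arbitrary illustrations, and the import path is assumed from the tree above):

```python
import tensorflow as tf
# from VnetFamily.mutildepthVnet.layer import deconv3d, weight_xavier_init  # path assumed

x = tf.ones([1, 8, 16, 16, 32])                    # [batch, z, y, x, channel]
W = weight_xavier_init(shape=(3, 3, 3, 16, 32), n_inputs=3 * 3 * 3 * 32,
                       n_outputs=16, activefunction='relu', variable_name='check_W')
y = deconv3d(x, W, samefeature=False, depth=True)  # runtime shape: [1, 16, 32, 32, 16]
```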
/VnetFamily/NestedVnet/layer.py:
--------------------------------------------------------------------------------
1 | '''
2 | convolution layer, pool layer, initialization, etc.
3 | '''
4 | from __future__ import division
5 | import tensorflow as tf
6 | import numpy as np
7 | import cv2
8 | 
9 | 
10 | # Weight initialization (Xavier's init)
11 | def weight_xavier_init(shape, n_inputs, n_outputs, activefunction='sigomd', uniform=True, variable_name=None):
12 |     if activefunction == 'sigomd':
13 |         if uniform:
14 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
15 |             initial = tf.random_uniform(shape, -init_range, init_range)
16 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
17 |         else:
18 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs))
19 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
20 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
21 |     elif activefunction == 'relu':
22 |         if uniform:
23 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * np.sqrt(2)
24 |             initial = tf.random_uniform(shape, -init_range, init_range)
25 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
26 |         else:
27 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * np.sqrt(2)
28 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
29 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
30 |     elif activefunction == 'tan':
31 |         if uniform:
32 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * 4
33 |             initial = tf.random_uniform(shape, -init_range, init_range)
34 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
35 |         else:
36 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * 4
37 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
38 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
39 | 
40 | 
41 | # Bias initialization
42 | def bias_variable(shape, variable_name=None):
43 |     initial = tf.constant(0.1, shape=shape)
44 |     return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
45 | 
46 | 
47 | # 3D convolution
48 | def conv3d(x, W, stride=1):
49 |     conv_3d = tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding='SAME')
50 |     return conv_3d
51 | 
52 | 
53 | # 3D upsampling
54 | def upsample3d(x, scale_factor, scope=None):
55 |     '''
56 |     X shape is [nsample, dim, rows, cols, channel]
57 |     out shape is [nsample, dim*scale_factor, rows*scale_factor, cols*scale_factor, channel]
58 |     '''
59 |     x_shape = tf.shape(x)
60 |     k = tf.ones([scale_factor, scale_factor, scale_factor, x_shape[-1], x_shape[-1]])
61 |     # note k.shape = [dim, rows, cols, depth_in, depth_output]
62 |     output_shape = tf.stack(
63 |         [x_shape[0], x_shape[1] * scale_factor, x_shape[2] * scale_factor, x_shape[3] * scale_factor, x_shape[4]])
64 |     upsample = tf.nn.conv3d_transpose(value=x, filter=k, output_shape=output_shape,
65 |                                       strides=[1, scale_factor, scale_factor, scale_factor, 1],
66 |                                       padding='SAME', name=scope)
67 |     return upsample
68 | 
69 | 
70 | # 3D deconvolution
71 | def deconv3d(x, W, samefeature=False, depth=False):
72 |     """
73 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the output z axis is twice the input
74 |     """
75 |     x_shape = tf.shape(x)
76 |     if depth:
77 |         if samefeature:
78 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4]])
79 |         else:
80 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4] // 2])
81 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 2, 1], padding='SAME')
82 |     else:
83 |         if samefeature:
84 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4]])
85 |         else:
86 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4] // 2])
87 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 1, 1], padding='SAME')
88 |     return deconv
89 | 
90 | 
91 | # Max Pooling
92 | def max_pool3d(x, depth=False):
93 |     """
94 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the z axis is pooled by 2 as well
95 |     """
96 |     if depth:
97 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
98 |     else:
99 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 1, 1], strides=[1, 2, 2, 1, 1], padding='SAME')
100 |     return pool3d
101 | 
102 | 
103 | # Unet crop and concat
104 | def crop_and_concat(x1, x2):
105 |     x1_shape = tf.shape(x1)
106 |     x2_shape = tf.shape(x2)
107 |     # offsets for the top left corner of the crop
108 |     offsets = [0, (x1_shape[1] - x2_shape[1]) // 2,
109 |                (x1_shape[2] - x2_shape[2]) // 2, (x1_shape[3] - x2_shape[3]) // 2, 0]
110 |     size = [-1, x2_shape[1], x2_shape[2], x2_shape[3], -1]
111 |     x1_crop = tf.slice(x1, offsets, size)
112 |     return tf.concat([x1_crop, x2], 4)
113 | 
114 | 
115 | # Batch Normalization
116 | def normalizationlayer(x, is_train, height=None, width=None, image_z=None, norm_type=None, G=16, esp=1e-5, scope=None):
117 |     """
118 |     :param x: input data with shape of [batch, height, width, channel]
119 |     :param is_train: flag of normalizationlayer, True is training, False is testing
120 |     :param height: in some conditions the data height is determined at runtime, such as after a deconv layer
121 |     :param width: in some conditions the data width is determined at runtime
122 |     :param image_z:
123 |     :param norm_type: normalization type, supports "batch", "group", None
124 |     :param G: in group normalization, channels are separated into G groups
125 |     :param esp: prevents the divisor from being zero
126 |     :param scope: normalizationlayer scope
127 |     :return:
128 |     """
129 |     with tf.name_scope(scope + norm_type):
130 |         if norm_type == None:
131 |             output = x
132 |         elif norm_type == 'batch':
133 |             output = tf.contrib.layers.batch_norm(x, center=True, scale=True, is_training=is_train)
134 |         elif norm_type == "group":
135 |             # transpose: [bs,z,h,w,c] to [bs,c,z,h,w] following the paper
136 |             x = tf.transpose(x, [0, 4, 1, 2, 3])
137 |             N, C, Z, H, W = x.get_shape().as_list()
138 |             G = min(G, C)
139 |             if H == None and W == None and Z == None:
140 |                 Z, H, W = image_z, height, width
141 |             x = tf.reshape(x, [-1, G, C // G, Z, H, W])
142 |             mean, var = tf.nn.moments(x, [2, 3, 4, 5], keep_dims=True)
143 |             x = (x - mean) / tf.sqrt(var + esp)
144 |             gama = tf.get_variable(scope + norm_type + 'group_gama', [C], initializer=tf.constant_initializer(1.0))
145 |             beta = tf.get_variable(scope + norm_type + 'group_beta', [C], initializer=tf.constant_initializer(0.0))
146 |             gama = tf.reshape(gama, [1, C, 1, 1, 1])
147 |             beta = tf.reshape(beta, [1, C, 1, 1, 1])
148 |             output = tf.reshape(x, [-1, C, Z, H, W]) * gama + beta
149 |             # transpose: [bs,c,z,h,w] to [bs,z,h,w,c] following the paper
150 |             output = tf.transpose(output, [0, 2, 3, 4, 1])
151 |     return output
152 | 
153 | 
154 | # resnet add_connect
155 | def resnet_Add(x1, x2):
156 |     if x1.get_shape().as_list()[4] != x2.get_shape().as_list()[4]:
157 |         # Option A: Zero-padding
158 |         residual_connection = x2 + tf.pad(x1, [[0, 0], [0, 0], [0, 0], [0, 0],
159 |                                                [0, x2.get_shape().as_list()[4] -
160 |                                                 x1.get_shape().as_list()[4]]])
161 |     else:
162 |         residual_connection = x2 + x1
163 |     return residual_connection
164 | 
165 | # merge a batch of 2-D images into one grid image and write it to disk
166 | def save_images(images, size, path):
167 |     img = (images + 1.0) / 2.0
168 |     h, w = img.shape[1], img.shape[2]
169 |     merge_img = np.zeros((h * size[0], w * size[1]))
170 |     for idx, image in enumerate(images):
171 |         i = idx % size[1]
172 |         j = idx // size[1]
173 |         merge_img[j * h:j * h + h, i * w:i * w + w] = image
174 |     result = merge_img * 255.
175 |     result = np.clip(result, 0, 255).astype('uint8')
176 |     return cv2.imwrite(path, result)
177 | 
--------------------------------------------------------------------------------
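
Note that this `resnet_Add` differs from the `mutildepthVnet` version: when channel counts differ it zero-pads the thinner tensor (ResNet "Option A") instead of requiring equal shapes. A hedged illustration (import path assumed):

```python
import tensorflow as tf
# from VnetFamily.NestedVnet.layer import resnet_Add  # path assumed

x1 = tf.ones([1, 4, 8, 8, 16])    # 16 channels
x2 = tf.ones([1, 4, 8, 8, 32])    # 32 channels
out = resnet_Add(x1, x2)          # x1 is zero-padded to 32 channels, then added
print(out.get_shape())            # (1, 4, 8, 8, 32)
```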
/VnetFamily/DualAttentionVnet/layer.py:
--------------------------------------------------------------------------------
1 | '''
2 | convolution layer, pool layer, initialization, etc.
3 | '''
4 | from __future__ import division
5 | import tensorflow as tf
6 | import numpy as np
7 | import cv2
8 | 
9 | 
10 | # Weight initialization (Xavier's init)
11 | def weight_xavier_init(shape, n_inputs, n_outputs, activefunction='sigomd', uniform=True, variable_name=None):
12 |     if activefunction == 'sigomd':
13 |         if uniform:
14 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
15 |             initial = tf.random_uniform(shape, -init_range, init_range)
16 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
17 |         else:
18 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs))
19 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
20 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
21 |     elif activefunction == 'relu':
22 |         if uniform:
23 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * np.sqrt(2)
24 |             initial = tf.random_uniform(shape, -init_range, init_range)
25 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
26 |         else:
27 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * np.sqrt(2)
28 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
29 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
30 |     elif activefunction == 'tan':
31 |         if uniform:
32 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * 4
33 |             initial = tf.random_uniform(shape, -init_range, init_range)
34 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
35 |         else:
36 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * 4
37 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
38 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
39 | 
40 | 
41 | # Bias initialization
42 | def bias_variable(shape, variable_name=None):
43 |     initial = tf.constant(0.1, shape=shape)
44 |     return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
45 | 
46 | 
47 | # 3D convolution
48 | def conv3d(x, W, stride=1):
49 |     conv_3d = tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding='SAME')
50 |     return conv_3d
51 | 
52 | 
53 | # 3D upsampling
54 | def upsample3d(x, scale_factor, scope=None):
55 |     '''
56 |     X shape is [nsample, dim, rows, cols, channel]
57 |     out shape is [nsample, dim*scale_factor, rows*scale_factor, cols*scale_factor, channel]
58 |     '''
59 |     x_shape = tf.shape(x)
60 |     k = tf.ones([scale_factor, scale_factor, scale_factor, x_shape[-1], x_shape[-1]])
61 |     # note k.shape = [dim, rows, cols, depth_in, depth_output]
62 |     output_shape = tf.stack(
63 |         [x_shape[0], x_shape[1] * scale_factor, x_shape[2] * scale_factor, x_shape[3] * scale_factor, x_shape[4]])
64 |     upsample = tf.nn.conv3d_transpose(value=x, filter=k, output_shape=output_shape,
65 |                                       strides=[1, scale_factor, scale_factor, scale_factor, 1],
66 |                                       padding='SAME', name=scope)
67 |     return upsample
68 | 
69 | 
70 | # 3D deconvolution
71 | def deconv3d(x, W, samefeature=False, depth=False):
72 |     """
73 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the output z axis is twice the input
74 |     """
75 |     x_shape = tf.shape(x)
76 |     if depth:
77 |         if samefeature:
78 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4]])
79 |         else:
80 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4] // 2])
81 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 2, 1], padding='SAME')
82 |     else:
83 |         if samefeature:
84 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4]])
85 |         else:
86 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4] // 2])
87 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 1, 1], padding='SAME')
88 |     return deconv
89 | 
90 | 
91 | # Max Pooling
92 | def max_pool3d(x, depth=False):
93 |     """
94 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the z axis is pooled by 2 as well
95 |     """
96 |     if depth:
97 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
98 |     else:
99 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 1, 1], strides=[1, 2, 2, 1, 1], padding='SAME')
100 |     return pool3d
101 | 
102 | 
103 | # Unet crop and concat
104 | def crop_and_concat(x1, x2):
105 |     x1_shape = tf.shape(x1)
106 |     x2_shape = tf.shape(x2)
107 |     # offsets for the top left corner of the crop
108 |     offsets = [0, (x1_shape[1] - x2_shape[1]) // 2,
109 |                (x1_shape[2] - x2_shape[2]) // 2, (x1_shape[3] - x2_shape[3]) // 2, 0]
110 |     size = [-1, x2_shape[1], x2_shape[2], x2_shape[3], -1]
111 |     x1_crop = tf.slice(x1, offsets, size)
112 |     return tf.concat([x1_crop, x2], 4)
113 | 
114 | 
115 | # Batch Normalization
116 | def normalizationlayer(x, is_train, height=None, width=None, image_z=None, norm_type=None, G=16, esp=1e-5, scope=None):
117 |     """
118 |     :param x: input data with shape of [batch, height, width, channel]
119 |     :param is_train: flag of normalizationlayer, True is training, False is testing
120 |     :param height: in some conditions the data height is determined at runtime, such as after a deconv layer
121 |     :param width: in some conditions the data width is determined at runtime
122 |     :param image_z:
123 |     :param norm_type: normalization type, supports "batch", "group", None
124 |     :param G: in group normalization, channels are separated into G groups
125 |     :param esp: prevents the divisor from being zero
126 |     :param scope: normalizationlayer scope
127 |     :return:
128 |     """
129 |     with tf.name_scope(scope + norm_type):
130 |         if norm_type == None:
131 |             output = x
132 |         elif norm_type == 'batch':
133 |             output = tf.contrib.layers.batch_norm(x, center=True, scale=True, is_training=is_train)
134 |         elif norm_type == "group":
135 |             # transpose: [bs,z,h,w,c] to [bs,c,z,h,w] following the paper
136 |             x = tf.transpose(x, [0, 4, 1, 2, 3])
137 |             N, C, Z, H, W = x.get_shape().as_list()
138 |             G = min(G, C)
139 |             if H == None and W == None and Z == None:
140 |                 Z, H, W = image_z, height, width
141 |             x = tf.reshape(x, [-1, G, C // G, Z, H, W])
142 |             mean, var = tf.nn.moments(x, [2, 3, 4, 5], keep_dims=True)
143 |             x = (x - mean) / tf.sqrt(var + esp)
144 |             gama = tf.get_variable(scope + norm_type + 'group_gama', [C], initializer=tf.constant_initializer(1.0))
145 |             beta = tf.get_variable(scope + norm_type + 'group_beta', [C], initializer=tf.constant_initializer(0.0))
146 |             gama = tf.reshape(gama, [1, C, 1, 1, 1])
147 |             beta = tf.reshape(beta, [1, C, 1, 1, 1])
148 |             output = tf.reshape(x, [-1, C, Z, H, W]) * gama + beta
149 |             # transpose: [bs,c,z,h,w] to [bs,z,h,w,c] following the paper
150 |             output = tf.transpose(output, [0, 2, 3, 4, 1])
151 |     return output
152 | 
153 | 
154 | # resnet add_connect
155 | def resnet_Add(x1, x2):
156 |     if x1.get_shape().as_list()[4] != x2.get_shape().as_list()[4]:
157 |         # Option A: Zero-padding
158 |         residual_connection = x2 + tf.pad(x1, [[0, 0], [0, 0], [0, 0], [0, 0],
159 |                                                [0, x2.get_shape().as_list()[4] -
160 |                                                 x1.get_shape().as_list()[4]]])
161 |     else:
162 |         residual_connection = x2 + x1
163 |     return residual_connection
164 | 
165 | # merge a batch of 2-D images into one grid image and write it to disk
166 | def save_images(images, size, path):
167 |     img = (images + 1.0) / 2.0
168 |     h, w = img.shape[1], img.shape[2]
169 |     merge_img = np.zeros((h * size[0], w * size[1]))
170 |     for idx, image in enumerate(images):
171 |         i = idx % size[1]
172 |         j = idx // size[1]
173 |         merge_img[j * h:j * h + h, i * w:i * w + w] = image
174 |     result = merge_img * 255.
175 |     result = np.clip(result, 0, 255).astype('uint8')
176 |     return cv2.imwrite(path, result)
177 | 
--------------------------------------------------------------------------------
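
For intuition about the group-normalization branch of `normalizationlayer` above: with C = 32 channels and G = 16 groups, each group normalizes C // G = 2 channels jointly over (channel pair, z, y, x). A hedged numerical check of that math in plain NumPy (independent of the TF1 code):

```python
import numpy as np

x = np.random.randn(1, 4, 8, 8, 32).astype(np.float32)  # [bs, z, h, w, c]
G, eps = 16, 1e-5
xt = x.transpose(0, 4, 1, 2, 3)                          # [bs, c, z, h, w]
xg = xt.reshape(1, G, 32 // G, 4, 8, 8)                  # split channels into groups
mean = xg.mean(axis=(2, 3, 4, 5), keepdims=True)
var = xg.var(axis=(2, 3, 4, 5), keepdims=True)
norm = (xg - mean) / np.sqrt(var + eps)                  # per-group zero mean, unit variance
print(norm.reshape(1, G, -1).mean(axis=2).round(6))      # ~0 for every group
```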
/VnetFamily/AttentionGatedVnet/layer.py:
--------------------------------------------------------------------------------
1 | '''
2 | convolution layer, pool layer, initialization, etc.
3 | '''
4 | from __future__ import division
5 | import tensorflow as tf
6 | import numpy as np
7 | import cv2
8 | 
9 | 
10 | # Weight initialization (Xavier's init)
11 | def weight_xavier_init(shape, n_inputs, n_outputs, activefunction='sigomd', uniform=True, variable_name=None):
12 |     if activefunction == 'sigomd':
13 |         if uniform:
14 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
15 |             initial = tf.random_uniform(shape, -init_range, init_range)
16 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
17 |         else:
18 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs))
19 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
20 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
21 |     elif activefunction == 'relu':
22 |         if uniform:
23 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * np.sqrt(2)
24 |             initial = tf.random_uniform(shape, -init_range, init_range)
25 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
26 |         else:
27 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * np.sqrt(2)
28 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
29 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
30 |     elif activefunction == 'tan':
31 |         if uniform:
32 |             init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * 4
33 |             initial = tf.random_uniform(shape, -init_range, init_range)
34 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
35 |         else:
36 |             stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * 4
37 |             initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
38 |             return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
39 | 
40 | 
41 | # Bias initialization
42 | def bias_variable(shape, variable_name=None):
43 |     initial = tf.constant(0.1, shape=shape)
44 |     return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
45 | 
46 | 
47 | # 3D convolution
48 | def conv3d(x, W, stride=1):
49 |     conv_3d = tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding='SAME')
50 |     return conv_3d
51 | 
52 | 
53 | # 3D downsampling
54 | def downsample3d(x):
55 |     pool3d = tf.nn.avg_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
56 |     return pool3d
57 | 
58 | 
59 | # 3D upsampling
60 | def upsample3d(x, scale_factor, scope=None):
61 |     '''
62 |     X shape is [nsample, dim, rows, cols, channel]
63 |     out shape is [nsample, dim*scale_factor, rows*scale_factor, cols*scale_factor, channel]
64 |     '''
65 |     x_shape = tf.shape(x)
66 |     k = tf.ones([scale_factor, scale_factor, scale_factor, x_shape[-1], x_shape[-1]])
67 |     # note k.shape = [dim, rows, cols, depth_in, depth_output]
68 |     output_shape = tf.stack(
69 |         [x_shape[0], x_shape[1] * scale_factor, x_shape[2] * scale_factor, x_shape[3] * scale_factor, x_shape[4]])
70 |     upsample = tf.nn.conv3d_transpose(value=x, filter=k, output_shape=output_shape,
71 |                                       strides=[1, scale_factor, scale_factor, scale_factor, 1],
72 |                                       padding='SAME', name=scope)
73 |     return upsample
74 | 
75 | 
76 | # 3D deconvolution
77 | def deconv3d(x, W, samefeature=False, depth=False):
78 |     """
79 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the output z axis is twice the input
80 |     """
81 |     x_shape = tf.shape(x)
82 |     if depth:
83 |         if samefeature:
84 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4]])
85 |         else:
86 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4] // 2])
87 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 2, 1], padding='SAME')
88 |     else:
89 |         if samefeature:
90 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4]])
91 |         else:
92 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4] // 2])
93 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 1, 1], padding='SAME')
94 |     return deconv
95 | 
96 | 
97 | # Max Pooling
98 | def max_pool3d(x, depth=False):
99 |     """
100 |     depth flag: if False, the z axis size is unchanged between input and output; if True, the z axis is pooled by 2 as well
101 |     """
102 |     if depth:
103 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
104 |     else:
105 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 1, 1], strides=[1, 2, 2, 1, 1], padding='SAME')
106 |     return pool3d
107 | 
108 | 
109 | # Unet crop and concat
110 | def crop_and_concat(x1, x2):
111 |     x1_shape = tf.shape(x1)
112 |     x2_shape = tf.shape(x2)
113 |     # offsets for the top left corner of the crop
114 |     offsets = [0, (x1_shape[1] - x2_shape[1]) // 2,
115 |                (x1_shape[2] - x2_shape[2]) // 2, (x1_shape[3] - x2_shape[3]) // 2, 0]
116 |     size = [-1, x2_shape[1], x2_shape[2], x2_shape[3], -1]
117 |     x1_crop = tf.slice(x1, offsets, size)
118 |     return tf.concat([x1_crop, x2], 4)
119 | 
120 | 
121 | # Batch Normalization
122 | def normalizationlayer(x, is_train, height=None, width=None, image_z=None, norm_type=None, G=16, esp=1e-5, scope=None):
123 |     """
124 |     :param x: input data with shape of [batch, height, width, channel]
125 |     :param is_train: flag of normalizationlayer, True is training, False is testing
126 |     :param height: in some conditions the data height is determined at runtime, such as after a deconv layer
127 |     :param width: in some conditions the data width is determined at runtime
128 |     :param image_z:
129 |     :param norm_type: normalization type, supports "batch", "group", None
130 |     :param G: in group normalization, channels are separated into G groups
131 |     :param esp: prevents the divisor from being zero
132 |     :param scope: normalizationlayer scope
133 |     :return:
134 |     """
135 |     with tf.name_scope(scope + norm_type):
136 |         if norm_type == None:
137 |             output = x
138 |         elif norm_type == 'batch':
139 |             output = tf.contrib.layers.batch_norm(x, center=True, scale=True, is_training=is_train)
140 |         elif norm_type == "group":
141 |             # transpose: [bs,z,h,w,c] to [bs,c,z,h,w] following the paper
142 |             x = tf.transpose(x, [0, 4, 1, 2, 3])
143 |             N, C, Z, H, W = x.get_shape().as_list()
144 |             G = min(G, C)
145 |             if H == None and W == None and Z == None:
146 |                 Z, H, W = image_z, height, width
147 |             x = tf.reshape(x, [-1, G, C // G, Z, H, W])
148 |             mean, var = tf.nn.moments(x, [2, 3, 4, 5], keep_dims=True)
149 |             x = (x - mean) / tf.sqrt(var + esp)
150 |             gama = tf.get_variable(scope + norm_type + 'group_gama', [C], initializer=tf.constant_initializer(1.0))
151 |             beta = tf.get_variable(scope + norm_type + 'group_beta', [C], initializer=tf.constant_initializer(0.0))
152 |             gama = tf.reshape(gama, [1, C, 1, 1, 1])
153 |             beta = tf.reshape(beta, [1, C, 1, 1, 1])
154 |             output = tf.reshape(x, [-1, C, Z, H, W]) * gama + beta
155 |             # transpose: [bs,c,z,h,w] to [bs,z,h,w,c] following the paper
156 |             output = tf.transpose(output, [0, 2, 3, 4, 1])
157 |     return output
158 | 
159 | 
160 | # resnet add_connect
161 | def resnet_Add(x1, x2):
162 |     if x1.get_shape().as_list()[4] != x2.get_shape().as_list()[4]:
163 |         # Option A: Zero-padding
164 |         residual_connection = x2 + tf.pad(x1, [[0, 0], [0, 0], [0, 0], [0, 0],
165 |                                                [0, x2.get_shape().as_list()[4] -
166 |                                                 x1.get_shape().as_list()[4]]])
167 |     else:
168 |         residual_connection = x2 + x1
169 |     return residual_connection
170 | 
171 | # merge a batch of 2-D images into one grid image and write it to disk
172 | def save_images(images, size, path):
173 |     img = (images + 1.0) / 2.0
174 |     h, w = img.shape[1], img.shape[2]
175 |     merge_img = np.zeros((h * size[0], w * size[1]))
176 |     for idx, image in enumerate(images):
177 |         i = idx % size[1]
178 |         j = idx // size[1]
179 |         merge_img[j * h:j * h + h, i * w:i * w + w] = image
180 |     result = merge_img * 255.
181 |     result = np.clip(result, 0, 255).astype('uint8')
182 |     return cv2.imwrite(path, result)
183 | 
--------------------------------------------------------------------------------
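
The repo's own attention-gated model lives in `model_attention_vnet3d.py`, which is not included in this dump. As a hedged orientation only, here is a minimal sketch of the additive attention gate described in the paper linked from the README (function and argument names are illustrative, and it assumes `x` and `g` already share a spatial size):

```python
import tensorflow as tf

def attention_gate_3d(x, g, inter_channels, scope='att_gate'):
    # x: skip-connection features, g: coarser gating signal (same spatial size assumed)
    with tf.variable_scope(scope):
        theta_x = tf.layers.conv3d(x, inter_channels, 1, use_bias=True)
        phi_g = tf.layers.conv3d(g, inter_channels, 1, use_bias=True)
        f = tf.nn.relu(theta_x + phi_g)          # additive attention
        psi = tf.layers.conv3d(f, 1, 1, use_bias=True)
        alpha = tf.nn.sigmoid(psi)               # [batch, z, y, x, 1] attention map
        return x * alpha                         # re-weight the skip features
```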
kernal=kernal, phase=phase, drop=drop, image_z=image_z, height=height, 34 | width=width, scope=scope + 'branch3_1') 35 | branch3_1 = (branch2 + branch3_1) / 2. 36 | 37 | branch2_1 = conv_bn_relu_drop(branch3_1, kernal=kernal, phase=phase, drop=drop, image_z=image_z, height=height, 38 | width=width, scope=scope + 'branch2_1') 39 | branch3_2 = conv_bn_relu_drop(branch3_1, kernal=kernal, phase=phase, drop=drop, image_z=image_z, height=height, 40 | width=width, scope=scope + 'branch3_2') 41 | branch3_3 = conv_bn_relu_drop(branch3_2, kernal=kernal, phase=phase, drop=drop, image_z=image_z, height=height, 42 | width=width, scope=scope + 'branch3_3') 43 | branch3_3 = (branch3_3 + branch2_1 + branch1) / 3. 44 | 45 | branch3_4 = conv_bn_relu_drop(branch3_3, kernal=kernal, phase=phase, drop=drop, image_z=image_z, height=height, 46 | width=width, scope=scope + 'branch3_4') 47 | output = resnet_Add(x, branch3_4) 48 | return output 49 | 50 | 51 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 52 | with tf.name_scope(scope): 53 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 54 | n_outputs=kernal[-1], 55 | activefunction='relu', variable_name=scope + 'W') 56 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 57 | conv = conv3d(x, W, 2) + B 58 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 59 | G=20, scope=scope) 60 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 61 | return conv 62 | 63 | 64 | def deconv_relu(x, kernal, samefeture=False, scope=None): 65 | with tf.name_scope(scope): 66 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 67 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 68 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 69 | conv = deconv3d(x, W, samefeture, True) + B 70 | conv = tf.nn.relu(conv) 71 | return conv 72 | 73 | 74 | def conv_softmax(x, kernal, scope=None): 75 | with tf.name_scope(scope): 76 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 77 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 78 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 79 | conv = conv3d(x, W) + B 80 | conv = tf.nn.softmax(conv) 81 | return conv 82 | 83 | 84 | def _create_mutildepth_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=2): 85 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 86 | # Vnet model 87 | # layer1->convolution 88 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, 89 | scope='layer0') 90 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, 91 | scope='layer1') 92 | layer1 = resnet_Add(x1=layer0, x2=layer1) 93 | # down sampling1 94 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 95 | # layer2->convolution 96 | layer2 = mutildepthModel(x=down1, featuremap=40, phase=phase, drop=drop, scope='layer2_1') 97 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 98 | scope='layer2_2') 99 | layer2 = resnet_Add(x1=down1, x2=layer2) 100 | # down sampling2 101 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 102 | # layer3->convolution 103 | layer3 = 
mutildepthModel(x=down2, featuremap=80, phase=phase, drop=drop, scope='layer3_1') 104 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 105 | scope='layer3_2') 106 | layer3 = resnet_Add(x1=down2, x2=layer3) 107 | # down sampling3 108 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 109 | # layer4->convolution 110 | layer4 = mutildepthModel(x=down3, featuremap=160, phase=phase, drop=drop, scope='layer4_1') 111 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 112 | scope='layer4_2') 113 | layer4 = resnet_Add(x1=down3, x2=layer4) 114 | # down sampling4 115 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 116 | # layer5->convolution 117 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 118 | scope='layer5_1') 119 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 120 | scope='layer5_2') 121 | layer5 = resnet_Add(x1=down4, x2=layer5) 122 | # layer9->deconvolution 123 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 124 | # layer8->convolution 125 | layer6 = crop_and_concat(layer4, deconv1) 126 | _, Z, H, W, _ = layer4.get_shape().as_list() 127 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 128 | drop=drop, scope='layer6_1') 129 | layer6 = mutildepthModel(x=layer6, featuremap=160, phase=phase, drop=drop, image_z=Z, height=H, width=W, 130 | scope='layer6_2') 131 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 132 | # layer9->deconvolution 133 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 134 | # layer8->convolution 135 | layer7 = crop_and_concat(layer3, deconv2) 136 | _, Z, H, W, _ = layer3.get_shape().as_list() 137 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 138 | drop=drop, scope='layer7_1') 139 | layer7 = mutildepthModel(x=layer7, featuremap=80, phase=phase, drop=drop, image_z=Z, height=H, width=W, 140 | scope='layer7_2') 141 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 142 | # layer9->deconvolution 143 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 144 | # layer8->convolution 145 | layer8 = crop_and_concat(layer2, deconv3) 146 | _, Z, H, W, _ = layer2.get_shape().as_list() 147 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 148 | drop=drop, scope='layer8_1') 149 | layer8 = mutildepthModel(x=layer8, featuremap=40, phase=phase, drop=drop, image_z=Z, height=H, width=W, 150 | scope='layer8_2') 151 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 152 | # layer9->deconvolution 153 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 154 | # layer8->convolution 155 | layer9 = crop_and_concat(layer1, deconv4) 156 | _, Z, H, W, _ = layer1.get_shape().as_list() 157 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase, 158 | drop=drop, scope='layer9_1') 159 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 160 | drop=drop, scope='layer9_2') 161 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 162 | # layer14->output 163 | output_map = conv_softmax(x=layer9, kernal=(1, 1, 1, 20, n_class), scope='output') 164 | return output_map 
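A minimal usage sketch for the graph factory above (not part of the original file): it assumes TensorFlow 1.x and that Vnet.layer provides the ops imported at the top of mutildepth_vnet.py; the 64x96x96 patch size, the zero batch, and the feed values are illustrative only. Note that drop is fed straight to tf.nn.dropout, so it is a keep probability (feed 1.0 to disable dropout), and each spatial dimension must be divisible by 16 because the encoder has four stride-2 downsampling stages.

import numpy as np
import tensorflow as tf

X = tf.placeholder("float", shape=[None, 64, 96, 96, 1])  # [batch, z, h, w, c]
phase = tf.placeholder(tf.bool)                           # training flag for the normalization layer
drop = tf.placeholder("float")                            # dropout keep probability
Y_pred = _create_mutildepth_conv_net(X, 64, 96, 96, 1, phase, drop, n_class=2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros((1, 64, 96, 96, 1), dtype=np.float32)  # placeholder volume; a real CT/MR patch goes here
    prob = sess.run(Y_pred, feed_dict={X: batch, phase: False, drop: 1.0})  # per-voxel softmax, shape (1, 64, 96, 96, 2)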
--------------------------------------------------------------------------------
/VnetFamily/GAVNet/GAVnet.py:
--------------------------------------------------------------------------------
1 | '''
2 | GAVNet: 3D VNet with grid attention blocks gating the skip connections
3 | '''
4 | from .layer import (conv3d, deconv3d, normalizationlayer, resnet_Add, weight_xavier_init, bias_variable, conv_sigmod)
5 | import tensorflow as tf
6 |
7 |
8 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
9 | with tf.name_scope(scope):
10 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
11 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W')
12 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
13 | conv = conv3d(x, W) + B
14 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
15 | G=20, scope=scope)
16 | conv = tf.nn.dropout(tf.nn.relu(conv), drop)
17 | return conv
18 |
19 |
20 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
21 | with tf.name_scope(scope):
22 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
23 | n_outputs=kernal[-1],
24 | activefunction='relu', variable_name=scope + 'W')
25 | B = bias_variable([kernal[-1]], variable_name=scope + 'B')
26 | conv = conv3d(x, W, 2) + B
27 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
28 | G=20, scope=scope)
29 | conv = tf.nn.dropout(tf.nn.relu(conv), drop)
30 | return conv
31 |
32 |
33 | def gridattentionblock(theta_x, phi_g, in_channels, scope=None):  # fuses skip features (theta_x) with gating features (phi_g) into an attention map
34 | with tf.name_scope(scope):
35 | kernal = (1, 1, 1, in_channels, in_channels)
36 | thetaW = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
37 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'thetaW')
38 | thetaB = bias_variable([kernal[-1]], variable_name=scope + 'thetaB')
39 | convtheta = conv3d(theta_x, thetaW) + thetaB
40 |
41 | phiW = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
42 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'phiW')
43 | phiB = bias_variable([kernal[-1]], variable_name=scope + 'phiB')
44 | convphi = conv3d(phi_g, phiW) + phiB
45 |
46 | f = resnet_Add(convtheta, convphi)
47 | f = tf.nn.relu(f)
48 |
49 | psiW = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
50 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'psiW')
51 | psiB = bias_variable([kernal[-1]], variable_name=scope + 'psiB')
52 | convpsi = conv3d(f, psiW) + psiB
53 | softmax_psi_f = tf.nn.softmax(convpsi)
54 | return softmax_psi_f
55 |
56 |
57 | def deconv_relu(x, kernal, samefeture=False, scope=None):
58 | with tf.name_scope(scope):
59 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1],
60 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W')
61 | B = bias_variable([kernal[-2]], variable_name=scope + 'B')
62 | conv = deconv3d(x, W, samefeture, True) + B
63 | conv = tf.nn.relu(conv)
64 | return conv
65 |
66 |
67 | def _create_gaconv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1):
68 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel])  # shape=(?, z, h, w, c)
69 | # Vnet model
70 | # layer1->convolution
71 | layer0 =
conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, 72 | scope='layer0') 73 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, 74 | scope='layer1') 75 | layer1 = resnet_Add(x1=layer0, x2=layer1) 76 | # down sampling1 77 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 78 | # layer2->convolution 79 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 80 | scope='layer2_1') 81 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 82 | scope='layer2_2') 83 | layer2 = resnet_Add(x1=down1, x2=layer2) 84 | # down sampling2 85 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 86 | # layer3->convolution 87 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 88 | scope='layer3_1') 89 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 90 | scope='layer3_2') 91 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 92 | scope='layer3_3') 93 | layer3 = resnet_Add(x1=down2, x2=layer3) 94 | # down sampling3 95 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 96 | # layer4->convolution 97 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 98 | scope='layer4_1') 99 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 100 | scope='layer4_2') 101 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 102 | scope='layer4_3') 103 | layer4 = resnet_Add(x1=down3, x2=layer4) 104 | # down sampling4 105 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 106 | # layer5->convolution 107 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 108 | scope='layer5_1') 109 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 110 | scope='layer5_2') 111 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 112 | scope='layer5_3') 113 | layer5 = resnet_Add(x1=down4, x2=layer5) 114 | # layer9->deconvolution 115 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 116 | # layer8->convolution 117 | layer6 = gridattentionblock(layer4, deconv1, 160, scope='GAB1') 118 | _, Z, H, W, _ = layer4.get_shape().as_list() 119 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 120 | drop=drop, scope='layer6_1') 121 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 122 | drop=drop, scope='layer6_2') 123 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 124 | drop=drop, scope='layer6_3') 125 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 126 | # layer9->deconvolution 127 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 128 | # layer8->convolution 129 | layer7 = gridattentionblock(layer3, deconv2, 80, scope='GAB2') 130 | _, Z, H, W, _ = layer3.get_shape().as_list() 131 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 132 | drop=drop, scope='layer7_1') 133 | layer7 = 
conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 134 | drop=drop, scope='layer7_2') 135 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 136 | drop=drop, scope='layer7_3') 137 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 138 | # layer9->deconvolution 139 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 140 | # layer8->convolution 141 | layer8 = gridattentionblock(layer2, deconv3, 40, scope='GAB3') 142 | _, Z, H, W, _ = layer2.get_shape().as_list() 143 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 144 | drop=drop, scope='layer8_1') 145 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 146 | drop=drop, scope='layer8_2') 147 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 148 | drop=drop, scope='layer8_3') 149 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 150 | # layer9->deconvolution 151 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 152 | # layer8->convolution 153 | layer9 = gridattentionblock(layer1, deconv4, 20, scope='GAB4') 154 | _, Z, H, W, _ = layer1.get_shape().as_list() 155 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 156 | drop=drop, scope='layer9_1') 157 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 158 | drop=drop, scope='layer9_2') 159 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 160 | drop=drop, scope='layer9_3') 161 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 162 | # layer14->output 163 | output_map = conv_sigmod(x=layer9, kernal=(1, 1, 1, 20, n_class), scope='output') 164 | return output_map 165 | -------------------------------------------------------------------------------- /VnetFamily/PEVnet/PEVnet.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from Vnet.layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, 5 | weight_xavier_init, bias_variable) 6 | import tensorflow as tf 7 | import numpy as np 8 | import os 9 | 10 | 11 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 12 | with tf.name_scope(scope): 13 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 14 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 15 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 16 | conv = conv3d(x, W) + B 17 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 18 | G=20, scope=scope) 19 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 20 | return conv 21 | 22 | 23 | def conv_relu(x, kernal, scope=None): 24 | with tf.name_scope(scope): 25 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 26 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W') 27 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 28 | conv = conv3d(x, W) + B 29 | conv = tf.nn.relu(conv) 30 | return conv 31 | 32 | 33 | def conv_sigomd(x, kernal, scope=None): 34 | with tf.name_scope(scope): 35 | W = weight_xavier_init(shape=kernal, 
n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 36 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 37 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 38 | conv = conv3d(x, W) + B 39 | conv = tf.nn.sigmoid(conv) 40 | return conv 41 | 42 | 43 | def project_excitation_layer(x, out_dim, ratio=4, height=None, width=None, image_z=None, scope=None): 44 | with tf.name_scope(scope): 45 | _, Z, H, W, C = x.get_shape().as_list() 46 | if H == None and W == None and Z == None: 47 | Z, H, W, C = image_z, height, width, out_dim 48 | # Global_Average_Pooling,whz_squeeze 49 | squeezew = tf.reduce_mean(x, axis=(1, 2), name=scope + 'squeezew') 50 | squeezeh = tf.reduce_mean(x, axis=(1, 3), name=scope + 'squeezeh') 51 | squeezez = tf.reduce_mean(x, axis=(2, 3), name=scope + 'squeezez') 52 | squeezew = tf.reshape(squeezew, (-1, 1, 1, W, C)) 53 | squeezeh = tf.reshape(squeezeh, (-1, 1, H, 1, C)) 54 | squeezez = tf.reshape(squeezez, (-1, Z, 1, 1, C)) 55 | final_squeeze = tf.add(squeezew, squeezeh) 56 | final_squeeze = tf.add(final_squeeze, squeezez) 57 | # full_connect 58 | excitation = conv_relu(final_squeeze, kernal=(1, 1, 1, out_dim, out_dim // ratio), 59 | scope=scope + 'excitation1') 60 | excitation = conv_sigomd(excitation, kernal=(1, 1, 1, out_dim // ratio, out_dim), 61 | scope=scope + 'excitation2') 62 | # scale the x 63 | scale = tf.multiply(x, excitation) 64 | return scale 65 | 66 | 67 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 68 | with tf.name_scope(scope): 69 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 70 | n_outputs=kernal[-1], 71 | activefunction='relu', variable_name=scope + 'W') 72 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 73 | conv = conv3d(x, W, 2) + B 74 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 75 | G=20, scope=scope) 76 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 77 | return conv 78 | 79 | 80 | def deconv_relu(x, kernal, samefeture=False, scope=None): 81 | with tf.name_scope(scope): 82 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 83 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 84 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 85 | conv = deconv3d(x, W, samefeture, True) + B 86 | conv = tf.nn.relu(conv) 87 | return conv 88 | 89 | 90 | def _create_peconv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=2): 91 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 92 | # Vnet model 93 | # layer1->convolution 94 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, scope='layer0') 95 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, scope='layer1') 96 | layer1 = project_excitation_layer(layer1, out_dim=20, scope='pe1') 97 | layer1 = resnet_Add(x1=layer0, x2=layer1) 98 | # down sampling1 99 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 100 | # layer2->convolution 101 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, scope='layer2_1') 102 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, scope='layer2_2') 103 | layer2 = project_excitation_layer(layer2, out_dim=40, 
scope='pe2') 104 | layer2 = resnet_Add(x1=down1, x2=layer2) 105 | # down sampling2 106 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 107 | # layer3->convolution 108 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_1') 109 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_2') 110 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_3') 111 | layer3 = project_excitation_layer(layer3, out_dim=80, scope='pe3') 112 | layer3 = resnet_Add(x1=down2, x2=layer3) 113 | # down sampling3 114 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 115 | # layer4->convolution 116 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_1') 117 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_2') 118 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_4') 119 | layer4 = project_excitation_layer(layer4, out_dim=160, scope='pe4') 120 | layer4 = resnet_Add(x1=down3, x2=layer4) 121 | # down sampling4 122 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 123 | # layer5->convolution 124 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_1') 125 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_2') 126 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_3') 127 | layer5 = project_excitation_layer(layer5, out_dim=320, scope='pe5') 128 | layer5 = resnet_Add(x1=down4, x2=layer5) 129 | 130 | # layer9->deconvolution 131 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 132 | # layer8->convolution 133 | layer6 = crop_and_concat(layer4, deconv1) 134 | _, Z, H, W, _ = layer4.get_shape().as_list() 135 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 136 | drop=drop, scope='layer6_1') 137 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 138 | drop=drop, scope='layer6_2') 139 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 140 | drop=drop, scope='layer6_3') 141 | layer6 = project_excitation_layer(layer6, out_dim=160, image_z=Z, height=H, width=W, scope='pe6') 142 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 143 | # layer9->deconvolution 144 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 145 | # layer8->convolution 146 | layer7 = crop_and_concat(layer3, deconv2) 147 | _, Z, H, W, _ = layer3.get_shape().as_list() 148 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 149 | drop=drop, scope='layer7_1') 150 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 151 | drop=drop, scope='layer7_2') 152 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 153 | drop=drop, scope='layer7_3') 154 | layer7 = project_excitation_layer(layer7, out_dim=80, image_z=Z, height=H, width=W, scope='pe7') 155 | layer7 = 
resnet_Add(x1=deconv2, x2=layer7) 156 | # layer9->deconvolution 157 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 158 | # layer8->convolution 159 | layer8 = crop_and_concat(layer2, deconv3) 160 | _, Z, H, W, _ = layer2.get_shape().as_list() 161 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 162 | drop=drop, scope='layer8_1') 163 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 164 | drop=drop, scope='layer8_2') 165 | layer8 = project_excitation_layer(layer8, out_dim=40, image_z=Z, height=H, width=W, scope='pe8') 166 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 167 | # layer9->deconvolution 168 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 169 | # layer8->convolution 170 | layer9 = crop_and_concat(layer1, deconv4) 171 | _, Z, H, W, _ = layer1.get_shape().as_list() 172 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase, 173 | drop=drop, scope='layer9_1') 174 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 175 | drop=drop, scope='layer9_2') 176 | layer9 = project_excitation_layer(layer9, out_dim=20, image_z=Z, height=H, width=W, scope='pe9') 177 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 178 | # layer14->output 179 | output_map = conv_sigomd(x=layer9, kernal=(1, 1, 1, 20, n_class), scope='output') 180 | 181 | return output_map -------------------------------------------------------------------------------- /VnetFamily/SEVnet/model_sevnet3d.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from .layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, weight_xavier_init, 5 | bias_variable) 6 | import tensorflow as tf 7 | 8 | 9 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 10 | with tf.name_scope(scope): 11 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 12 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 13 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 14 | conv = conv3d(x, W) + B 15 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 16 | G=20, scope=scope) 17 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 18 | return conv 19 | 20 | 21 | def full_connected_relu(x, kernal, activefunction='relu', scope=None): 22 | with tf.name_scope(scope): 23 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1], 24 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W') 25 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 26 | FC = tf.matmul(x, W) + B 27 | if activefunction == 'relu': 28 | FC = tf.nn.relu(FC) 29 | elif activefunction == 'softmax': 30 | FC = tf.nn.softmax(FC) 31 | elif activefunction == 'sigmoid': 32 | FC = tf.nn.sigmoid(FC) 33 | return FC 34 | 35 | 36 | def conv_sigomd(x, kernal, scope=None): 37 | with tf.name_scope(scope): 38 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 39 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 40 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 41 | conv = conv3d(x, W) + B 42 | conv = tf.nn.sigmoid(conv) 43 | return conv 44 | 45 | 46 | def 
squeeze_excitation_model(x, out_dim, name='ssce', ratio=4, scope=None): 47 | with tf.name_scope(scope): 48 | if name == 'ssce': 49 | recalibrate = spatial_squeeze_channel_excitation_layer(x, out_dim, ratio, scope=scope + 'ssce') 50 | return recalibrate 51 | if name == 'csse': 52 | recalibrate = channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=scope + 'csse') 53 | return recalibrate 54 | 55 | 56 | def spatial_squeeze_channel_excitation_layer(x, out_dim, ratio=4, scope=None): 57 | with tf.name_scope(scope): 58 | # Global_Average_Pooling,channel_squeeze 59 | squeeze = tf.reduce_mean(x, axis=(1, 2, 3), name=scope + 'channel_squeeze') 60 | # full_connect 61 | excitation = full_connected_relu(squeeze, kernal=(out_dim, out_dim // ratio), activefunction='relu', 62 | scope=scope + '_fully_connected1') 63 | excitation = full_connected_relu(excitation, kernal=(out_dim // ratio, out_dim), 64 | activefunction='sigmoid', scope=scope + '_fully_connected2') 65 | # scale the x 66 | excitation = tf.reshape(excitation, [-1, 1, 1, 1, out_dim]) 67 | scale = x * excitation 68 | return scale 69 | 70 | 71 | def channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=None): 72 | with tf.name_scope(scope): 73 | squeeze = conv_sigomd(x, kernal=(1, 1, 1, out_dim, 1), scope=scope + 'spatial_squeeze') 74 | scale = x * squeeze 75 | return scale 76 | 77 | 78 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 79 | with tf.name_scope(scope): 80 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 81 | n_outputs=kernal[-1], 82 | activefunction='relu', variable_name=scope + 'W') 83 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 84 | conv = conv3d(x, W, 2) + B 85 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 86 | G=20, scope=scope) 87 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 88 | return conv 89 | 90 | 91 | def deconv_relu(x, kernal, samefeture=False, scope=None): 92 | with tf.name_scope(scope): 93 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 94 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 95 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 96 | conv = deconv3d(x, W, samefeture, True) + B 97 | conv = tf.nn.relu(conv) 98 | return conv 99 | 100 | 101 | def _createSEvnet(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1): 102 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 103 | # Vnet model 104 | # layer1->convolution 105 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, 106 | scope='layer0') 107 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, 108 | scope='layer1') 109 | layer1 = squeeze_excitation_model(layer1, out_dim=20, scope='sem1') 110 | layer1 = resnet_Add(x1=layer0, x2=layer1) 111 | # down sampling1 112 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 113 | # layer2->convolution 114 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 115 | scope='layer2_1') 116 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 117 | scope='layer2_2') 118 | layer2 = squeeze_excitation_model(layer2, out_dim=40, scope='sem2') 119 | layer2 = resnet_Add(x1=down1, 
x2=layer2) 120 | # down sampling2 121 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 122 | # layer3->convolution 123 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 124 | scope='layer3_1') 125 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 126 | scope='layer3_2') 127 | layer3 = squeeze_excitation_model(layer3, out_dim=80, scope='sem3') 128 | layer3 = resnet_Add(x1=down2, x2=layer3) 129 | # down sampling3 130 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 131 | # layer4->convolution 132 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 133 | scope='layer4_1') 134 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 135 | scope='layer4_2') 136 | layer4 = squeeze_excitation_model(layer4, out_dim=160, scope='sem4') 137 | layer4 = resnet_Add(x1=down3, x2=layer4) 138 | # down sampling4 139 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 140 | # layer5->convolution 141 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 142 | scope='layer5_1') 143 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 144 | scope='layer5_2') 145 | layer5 = squeeze_excitation_model(layer5, out_dim=320, scope='sem5') 146 | layer5 = resnet_Add(x1=down4, x2=layer5) 147 | 148 | # layer9->deconvolution 149 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 150 | # layer8->convolution 151 | layer6 = crop_and_concat(layer4, deconv1) 152 | _, Z, H, W, _ = layer4.get_shape().as_list() 153 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 154 | drop=drop, scope='layer6_1') 155 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 156 | drop=drop, scope='layer6_2') 157 | layer6 = squeeze_excitation_model(layer6, out_dim=160, scope='sem6') 158 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 159 | # layer9->deconvolution 160 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 161 | # layer8->convolution 162 | layer7 = crop_and_concat(layer3, deconv2) 163 | _, Z, H, W, _ = layer3.get_shape().as_list() 164 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 165 | drop=drop, scope='layer7_1') 166 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 167 | drop=drop, scope='layer7_2') 168 | layer7 = squeeze_excitation_model(layer7, out_dim=80, scope='sem7') 169 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 170 | # layer9->deconvolution 171 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 172 | # layer8->convolution 173 | layer8 = crop_and_concat(layer2, deconv3) 174 | _, Z, H, W, _ = layer2.get_shape().as_list() 175 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 176 | drop=drop, scope='layer8_1') 177 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 178 | drop=drop, scope='layer8_2') 179 | layer8 = squeeze_excitation_model(layer8, out_dim=40, scope='sem8') 180 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 181 | # 
layer9->deconvolution 182 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 183 | # layer8->convolution 184 | layer9 = crop_and_concat(layer1, deconv4) 185 | _, Z, H, W, _ = layer1.get_shape().as_list() 186 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase, 187 | drop=drop, scope='layer9_1') 188 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 189 | drop=drop, scope='layer9_2') 190 | layer9 = squeeze_excitation_model(layer9, out_dim=20, scope='sem9') 191 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 192 | # layer14->output 193 | output_map = conv_sigomd(x=layer9, kernal=(1, 1, 1, 20, n_class), scope='output') 194 | return output_map 195 | -------------------------------------------------------------------------------- /VnetFamily/mutiltask-Vnet/model_vnet3d_distancemap_multilabel.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from .layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, weight_xavier_init, 5 | bias_variable) 6 | import tensorflow as tf 7 | 8 | 9 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 10 | with tf.name_scope(scope): 11 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 12 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 13 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 14 | conv = conv3d(x, W) + B 15 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 16 | G=20, scope=scope) 17 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 18 | return conv 19 | 20 | 21 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 22 | with tf.name_scope(scope): 23 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 24 | n_outputs=kernal[-1], 25 | activefunction='relu', variable_name=scope + 'W') 26 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 27 | conv = conv3d(x, W, 2) + B 28 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 29 | G=20, scope=scope) 30 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 31 | return conv 32 | 33 | 34 | def deconv_relu(x, kernal, samefeture=False, scope=None): 35 | with tf.name_scope(scope): 36 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 37 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 38 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 39 | conv = deconv3d(x, W, samefeture, True) + B 40 | conv = tf.nn.relu(conv) 41 | return conv 42 | 43 | 44 | def conv_softmax(x, kernal, scope=None): 45 | with tf.name_scope(scope): 46 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 47 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 48 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 49 | conv = conv3d(x, W) + B 50 | conv = tf.nn.softmax(conv) 51 | return conv 52 | 53 | 54 | def conv_sigmoid(x, kernal, scope=None): 55 | with tf.name_scope(scope): 56 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 57 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope 
+ 'W') 58 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 59 | conv = conv3d(x, W) + B 60 | conv = tf.nn.sigmoid(conv) 61 | return conv 62 | 63 | 64 | def _create_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=2): 65 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 66 | # Vnet model 67 | # layer1->convolution 68 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, 69 | scope='layer0') 70 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, 71 | scope='layer1') 72 | layer1 = resnet_Add(x1=layer0, x2=layer1) 73 | # down sampling1 74 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 75 | # layer2->convolution 76 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 77 | scope='layer2_1') 78 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 79 | scope='layer2_2') 80 | layer2 = resnet_Add(x1=down1, x2=layer2) 81 | # down sampling2 82 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 83 | # layer3->convolution 84 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 85 | scope='layer3_1') 86 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 87 | scope='layer3_2') 88 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 89 | scope='layer3_3') 90 | layer3 = resnet_Add(x1=down2, x2=layer3) 91 | # down sampling3 92 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 93 | # layer4->convolution 94 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 95 | scope='layer4_1') 96 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 97 | scope='layer4_2') 98 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 99 | scope='layer4_3') 100 | layer4 = resnet_Add(x1=down3, x2=layer4) 101 | # down sampling4 102 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 103 | # layer5->convolution 104 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 105 | scope='layer5_1') 106 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 107 | scope='layer5_2') 108 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 109 | scope='layer5_3') 110 | layer5 = resnet_Add(x1=down4, x2=layer5) 111 | 112 | # layer9->deconvolution 113 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 114 | # layer8->convolution 115 | layer6 = crop_and_concat(layer4, deconv1) 116 | _, Z, H, W, _ = layer4.get_shape().as_list() 117 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 118 | drop=drop, scope='layer6_1') 119 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 120 | drop=drop, scope='layer6_2') 121 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 122 | # layer9->deconvolution 123 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 124 | # layer8->convolution 125 | layer7 = crop_and_concat(layer3, 
deconv2) 126 | _, Z, H, W, _ = layer3.get_shape().as_list() 127 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 128 | drop=drop, scope='layer7_1') 129 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 130 | drop=drop, scope='layer7_2') 131 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 132 | # layer9->deconvolution 133 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 134 | # layer8->convolution 135 | layer8 = crop_and_concat(layer2, deconv3) 136 | _, Z, H, W, _ = layer2.get_shape().as_list() 137 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 138 | drop=drop, scope='layer8_1') 139 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 140 | drop=drop, scope='layer8_2') 141 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 142 | # layer9->deconvolution 143 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 144 | # layer8->convolution 145 | layer9 = crop_and_concat(layer1, deconv4) 146 | _, Z, H, W, _ = layer1.get_shape().as_list() 147 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase, 148 | drop=drop, scope='layer9_1') 149 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 150 | drop=drop, scope='layer9_2') 151 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 152 | # layer14->output 153 | output_map = conv_softmax(x=layer9, kernal=(1, 1, 1, 20, n_class), scope='output_map') 154 | 155 | # layer9->deconvolution 156 | deconv1_1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1_1') 157 | # layer8->convolution 158 | layer6_1 = crop_and_concat(layer4, deconv1_1) 159 | _, Z, H, W, _ = layer4.get_shape().as_list() 160 | layer6_1 = conv_bn_relu_drop(x=layer6_1, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 161 | drop=drop, scope='layer6_1_1') 162 | layer6_1 = conv_bn_relu_drop(x=layer6_1, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 163 | drop=drop, scope='layer6_2_1') 164 | layer6_1 = resnet_Add(x1=deconv1_1, x2=layer6_1) 165 | # layer9->deconvolution 166 | deconv2_1 = deconv_relu(x=layer6_1, kernal=(3, 3, 3, 80, 160), scope='deconv2_1') 167 | # layer8->convolution 168 | layer7_1 = crop_and_concat(layer3, deconv2_1) 169 | _, Z, H, W, _ = layer3.get_shape().as_list() 170 | layer7_1 = conv_bn_relu_drop(x=layer7_1, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 171 | drop=drop, scope='layer7_1_1') 172 | layer7_1 = conv_bn_relu_drop(x=layer7_1, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 173 | drop=drop, scope='layer7_2_1') 174 | layer7_1 = resnet_Add(x1=deconv2_1, x2=layer7_1) 175 | # layer9->deconvolution 176 | deconv3_1 = deconv_relu(x=layer7_1, kernal=(3, 3, 3, 40, 80), scope='deconv3_1') 177 | # layer8->convolution 178 | layer8_1 = crop_and_concat(layer2, deconv3_1) 179 | _, Z, H, W, _ = layer2.get_shape().as_list() 180 | layer8_1 = conv_bn_relu_drop(x=layer8_1, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 181 | drop=drop, scope='layer8_1_1') 182 | layer8_1 = conv_bn_relu_drop(x=layer8_1, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 183 | drop=drop, scope='layer8_2_1') 184 | layer8_1 = resnet_Add(x1=deconv3_1, x2=layer8_1) 185 | # layer9->deconvolution 
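# NOTE: the lines below close the second, parallel decoder of this network: it reuses the shared
# encoder features (layer1..layer5) and, unlike the softmax segmentation head above, ends in a
# sigmoid head that regresses a distance map with n_class - 1 channels (output_distance_map),
# which is what makes this VNet variant multi-task.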
186 | deconv4_1 = deconv_relu(x=layer8_1, kernal=(3, 3, 3, 20, 40), scope='deconv4_1')
187 | # layer8->convolution
188 | layer9_1 = crop_and_concat(layer1, deconv4_1)
189 | _, Z, H, W, _ = layer1.get_shape().as_list()
190 | layer9_1 = conv_bn_relu_drop(x=layer9_1, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase,
191 | drop=drop, scope='layer9_1_1')
192 | layer9_1 = conv_bn_relu_drop(x=layer9_1, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase,
193 | drop=drop, scope='layer9_2_1')
194 | layer9_1 = resnet_Add(x1=deconv4_1, x2=layer9_1)
195 | # layer14->output
196 | output_distance_map = conv_sigmoid(x=layer9_1, kernal=(1, 1, 1, 20, n_class - 1), scope='output_distance_map')
197 |
198 | return output_map, output_distance_map
199 |
--------------------------------------------------------------------------------
/VnetFamily/NonLocalVNet/NonLocalVnet.py:
--------------------------------------------------------------------------------
1 | '''
2 | Non-local VNet (3D): VNet with a non-local self-attention block at the bottleneck
3 | '''
4 | from .layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add,
5 | weight_xavier_init, bias_variable)
6 | import tensorflow as tf
7 |
8 |
9 |
10 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
11 | with tf.name_scope(scope):
12 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
13 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W')
14 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
15 | conv = conv3d(x, W) + B
16 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
17 | G=20, scope=scope)
18 | conv = tf.nn.dropout(tf.nn.relu(conv), drop)
19 | return conv
20 |
21 |
22 | def full_connected_relu(x, kernal, activefunction='relu', scope=None):
23 | with tf.name_scope(scope):
24 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1],
25 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W')
26 | B = bias_variable([kernal[-1]], variable_name=scope + 'B')
27 | FC = tf.matmul(x, W) + B
28 | if activefunction == 'relu':
29 | FC = tf.nn.relu(FC)
30 | elif activefunction == 'softmax':
31 | FC = tf.nn.softmax(FC)
32 | elif activefunction == 'sigmoid':
33 | FC = tf.nn.sigmoid(FC)
34 | return FC
35 |
36 |
37 | def conv_relu(x, kernal, scope=None):
38 | with tf.name_scope(scope):
39 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
40 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W')
41 | B = bias_variable([kernal[-1]], variable_name=scope + 'B')
42 | conv = conv3d(x, W) + B
43 | conv = tf.nn.relu(conv)
44 | return conv
45 |
46 |
47 | def non_local_block(x, compression=2, scope=None):
48 | """
49 | Non-local block from "Non-local Neural Networks" (Wang et al., CVPR 2018)
50 | :param x: input feature map of shape [batch, z, h, w, c]
51 | :param compression: channel reduction ratio for the theta/phi/g embeddings
52 | :param scope: name scope
53 | :return: input with a residual non-local (self-attention) response added
54 | """
55 | with tf.name_scope(scope):
56 | _, Z, H, W, C = x.get_shape().as_list()
57 | # step 1: theta and phi embeddings via 1x1x1 convolutions
58 | kernal1 = (1, 1, 1, C, C // compression)
59 | thetaW = weight_xavier_init(shape=kernal1, n_inputs=kernal1[0] * kernal1[1] * kernal1[2] * kernal1[3],
60 | n_outputs=kernal1[-1], activefunction='relu', variable_name=scope + 'thetaW')
61 | thetaB = bias_variable([kernal1[-1]], variable_name=scope + 'thetaB')
62 | theta = conv3d(x, thetaW) + thetaB
63 | theta = tf.reshape(theta, [-1, Z * H * W, C // compression])
64 |
65 | phiW =
weight_xavier_init(shape=kernal1, n_inputs=kernal1[0] * kernal1[1] * kernal1[2] * kernal1[3], 66 | n_outputs=kernal1[-1], activefunction='relu', variable_name=scope + 'phiW') 67 | phiB = bias_variable([kernal1[-1]], variable_name=scope + 'phiB') 68 | phi = conv3d(x, phiW) + phiB 69 | phi = tf.reshape(phi, [-1, Z * H * W, C // compression]) 70 | phi = tf.transpose(phi, [0, 2, 1]) 71 | # step2 72 | energy = tf.matmul(theta, phi) 73 | energy = tf.nn.softmax(energy) 74 | # step3 75 | gW = weight_xavier_init(shape=kernal1, n_inputs=kernal1[0] * kernal1[1] * kernal1[2] * kernal1[3], 76 | n_outputs=kernal1[-1], activefunction='relu', variable_name=scope + 'gW') 77 | gB = bias_variable([kernal1[-1]], variable_name=scope + 'gB') 78 | g = conv3d(x, gW) + gB 79 | g = tf.reshape(g, [-1, Z * H * W, C // compression]) 80 | # step4 81 | y = tf.matmul(energy, g) 82 | y = tf.reshape(y, [-1, Z, H, W, C // compression]) 83 | kernal2 = (1, 1, 1, C // compression, C) 84 | yW = weight_xavier_init(shape=kernal2, n_inputs=kernal2[0] * kernal2[1] * kernal2[2] * kernal2[3], 85 | n_outputs=kernal2[-1], activefunction='relu', variable_name=scope + 'yW') 86 | yB = bias_variable([kernal2[-1]], variable_name=scope + 'yB') 87 | y = conv3d(y, yW) + yB 88 | y = resnet_Add(x, y) 89 | return y 90 | 91 | 92 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 93 | with tf.name_scope(scope): 94 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 95 | n_outputs=kernal[-1], 96 | activefunction='relu', variable_name=scope + 'W') 97 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 98 | conv = conv3d(x, W, 2) + B 99 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 100 | G=20, scope=scope) 101 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 102 | return conv 103 | 104 | 105 | def deconv_relu(x, kernal, samefeture=False, scope=None): 106 | with tf.name_scope(scope): 107 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 108 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 109 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 110 | conv = deconv3d(x, W, samefeture, True) + B 111 | conv = tf.nn.relu(conv) 112 | return conv 113 | 114 | 115 | def conv_softmax(x, kernal, scope=None): 116 | with tf.name_scope(scope): 117 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 118 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 119 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 120 | conv = conv3d(x, W) + B 121 | conv = tf.nn.softmax(conv) 122 | return conv 123 | 124 | 125 | def _create_nonlocal_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=2): 126 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 127 | # Vnet model 128 | # layer1->convolution 129 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, scope='layer0') 130 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, scope='layer1') 131 | layer1 = resnet_Add(x1=layer0, x2=layer1) 132 | # down sampling1 133 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 134 | # layer2->convolution 135 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 
40), phase=phase, drop=drop, scope='layer2_1') 136 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, scope='layer2_2') 137 | layer2 = resnet_Add(x1=down1, x2=layer2) 138 | # down sampling2 139 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 140 | # layer3->convolution 141 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_1') 142 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_2') 143 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_3') 144 | layer3 = resnet_Add(x1=down2, x2=layer3) 145 | # down sampling3 146 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 147 | # layer4->convolution 148 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_1') 149 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_2') 150 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_4') 151 | layer4 = resnet_Add(x1=down3, x2=layer4) 152 | # down sampling4 153 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 154 | # layer5->convolution 155 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_1') 156 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_2') 157 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_3') 158 | layer5 = resnet_Add(x1=down4, x2=layer5) 159 | layer5 = non_local_block(layer5, 2, scope='nlb1') 160 | # layer9->deconvolution 161 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 162 | # layer8->convolution 163 | layer6 = crop_and_concat(layer4, deconv1) 164 | _, Z, H, W, _ = layer4.get_shape().as_list() 165 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 166 | drop=drop, scope='layer6_1') 167 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 168 | drop=drop, scope='layer6_2') 169 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 170 | drop=drop, scope='layer6_3') 171 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 172 | # layer9->deconvolution 173 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 174 | # layer8->convolution 175 | layer7 = crop_and_concat(layer3, deconv2) 176 | _, Z, H, W, _ = layer3.get_shape().as_list() 177 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 178 | drop=drop, scope='layer7_1') 179 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 180 | drop=drop, scope='layer7_2') 181 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 182 | drop=drop, scope='layer7_3') 183 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 184 | # layer9->deconvolution 185 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 186 | # layer8->convolution 187 | layer8 = crop_and_concat(layer2, deconv3) 188 | _, Z, H, W, _ = 
layer2.get_shape().as_list() 189 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 190 | drop=drop, scope='layer8_1') 191 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 192 | drop=drop, scope='layer8_2') 193 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 194 | # layer9->deconvolution 195 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 196 | # layer8->convolution 197 | layer9 = crop_and_concat(layer1, deconv4) 198 | _, Z, H, W, _ = layer1.get_shape().as_list() 199 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase, 200 | drop=drop, scope='layer9_1') 201 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 202 | drop=drop, scope='layer9_2') 203 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 204 | # layer14->output 205 | output_map = conv_softmax(x=layer9, kernal=(1, 1, 1, 20, n_class), scope='output') 206 | 207 | return output_map -------------------------------------------------------------------------------- /VnetFamily/SCSEVnet/model_scsevnet3d.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from .layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, 5 | weight_xavier_init, bias_variable) 6 | import tensorflow as tf 7 | import numpy as np 8 | import os 9 | 10 | 11 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 12 | with tf.name_scope(scope): 13 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 14 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 15 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 16 | conv = conv3d(x, W) + B 17 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 18 | G=20, scope=scope) 19 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 20 | return conv 21 | 22 | 23 | def full_connected_relu(x, kernal, activefunction='relu', scope=None): 24 | with tf.name_scope(scope): 25 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1], 26 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W') 27 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 28 | FC = tf.matmul(x, W) + B 29 | if activefunction == 'relu': 30 | FC = tf.nn.relu(FC) 31 | elif activefunction == 'softmax': 32 | FC = tf.nn.softmax(FC) 33 | elif activefunction == 'sigmoid': 34 | FC = tf.nn.sigmoid(FC) 35 | return FC 36 | 37 | 38 | def conv_sigomd(x, kernal, scope=None): 39 | with tf.name_scope(scope): 40 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 41 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 42 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 43 | conv = conv3d(x, W) + B 44 | conv = tf.nn.sigmoid(conv) 45 | return conv 46 | 47 | 48 | def squeeze_excitation_model(x, out_dim, name='ssceaddcsse', ratio=4, scope=None): 49 | with tf.name_scope(scope): 50 | if name == 'sscemaxcsse': 51 | recalibrate1 = spatial_squeeze_channel_excitation_layer(x, out_dim, ratio, scope=scope + 'ssce') 52 | recalibrate2 = channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=scope + 'csse') 53 | recalibrate = tf.maximum(recalibrate1, recalibrate2) 54 | return 
recalibrate 55 | if name == 'ssceaddcsse': 56 | recalibrate1 = spatial_squeeze_channel_excitation_layer(x, out_dim, ratio, scope=scope + 'ssce') 57 | recalibrate2 = channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=scope + 'csse') 58 | recalibrate = tf.add(recalibrate1, recalibrate2) 59 | return recalibrate 60 | if name == 'sscemutiplycsse': 61 | recalibrate1 = spatial_squeeze_channel_excitation_layer(x, out_dim, ratio, scope=scope + 'ssce') 62 | recalibrate2 = channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=scope + 'csse') 63 | recalibrate = tf.multiply(recalibrate1, recalibrate2) 64 | return recalibrate 65 | if name == 'ssceconcatcsse': 66 | recalibrate1 = spatial_squeeze_channel_excitation_layer(x, out_dim, ratio, scope=scope + 'ssce') 67 | recalibrate2 = channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=scope + 'csse') 68 | recalibrate = crop_and_concat(recalibrate1, recalibrate2) 69 | recalibrate = conv_sigomd(recalibrate, kernal=(1, 1, 1, out_dim * 2, out_dim), 70 | scope=scope + 'ssceconcatcsse') 71 | return recalibrate 72 | 73 | 74 | def spatial_squeeze_channel_excitation_layer(x, out_dim, ratio=4, scope=None): 75 | with tf.name_scope(scope): 76 | # Global_Average_Pooling,channel_squeeze 77 | squeeze = tf.reduce_mean(x, axis=(1, 2, 3), name=scope + 'channel_squeeze') 78 | # full_connect 79 | excitation = full_connected_relu(squeeze, kernal=(out_dim, out_dim // ratio), activefunction='relu', 80 | scope=scope + '_fully_connected1') 81 | excitation = full_connected_relu(excitation, kernal=(out_dim // ratio, out_dim), 82 | activefunction='sigmoid', scope=scope + '_fully_connected2') 83 | # scale the x 84 | excitation = tf.reshape(excitation, [-1, 1, 1, 1, out_dim]) 85 | scale = x * excitation 86 | return scale 87 | 88 | 89 | def channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=None): 90 | with tf.name_scope(scope): 91 | squeeze = conv_sigomd(x, kernal=(1, 1, 1, out_dim, 1), scope=scope + 'spatial_squeeze') 92 | scale = x * squeeze 93 | return scale 94 | 95 | 96 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 97 | with tf.name_scope(scope): 98 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 99 | n_outputs=kernal[-1], 100 | activefunction='relu', variable_name=scope + 'W') 101 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 102 | conv = conv3d(x, W, 2) + B 103 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 104 | G=20, scope=scope) 105 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 106 | return conv 107 | 108 | 109 | def deconv_relu(x, kernal, samefeture=False, scope=None): 110 | with tf.name_scope(scope): 111 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 112 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 113 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 114 | conv = deconv3d(x, W, samefeture, True) + B 115 | conv = tf.nn.relu(conv) 116 | return conv 117 | 118 | 119 | def _createscsevnet(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1): 120 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 121 | # Vnet model 122 | # layer1->convolution 123 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, 124 | scope='layer0') 125 | layer1 = conv_bn_relu_drop(x=layer0, 
kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, 126 | scope='layer1') 127 | layer1 = squeeze_excitation_model(layer1, out_dim=20, scope='sem1') 128 | layer1 = resnet_Add(x1=layer0, x2=layer1) 129 | # down sampling1 130 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 131 | # layer2->convolution 132 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 133 | scope='layer2_1') 134 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 135 | scope='layer2_2') 136 | layer2 = squeeze_excitation_model(layer2, out_dim=40, scope='sem2') 137 | layer2 = resnet_Add(x1=down1, x2=layer2) 138 | # down sampling2 139 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 140 | # layer3->convolution 141 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 142 | scope='layer3_1') 143 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 144 | scope='layer3_2') 145 | layer3 = squeeze_excitation_model(layer3, out_dim=80, scope='sem3') 146 | layer3 = resnet_Add(x1=down2, x2=layer3) 147 | # down sampling3 148 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 149 | # layer4->convolution 150 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 151 | scope='layer4_1') 152 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 153 | scope='layer4_2') 154 | layer4 = squeeze_excitation_model(layer4, out_dim=160, scope='sem4') 155 | layer4 = resnet_Add(x1=down3, x2=layer4) 156 | # down sampling4 157 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 158 | # layer5->convolution 159 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 160 | scope='layer5_1') 161 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 162 | scope='layer5_2') 163 | layer5 = squeeze_excitation_model(layer5, out_dim=320, scope='sem5') 164 | layer5 = resnet_Add(x1=down4, x2=layer5) 165 | 166 | # layer9->deconvolution 167 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 168 | # layer8->convolution 169 | layer6 = crop_and_concat(layer4, deconv1) 170 | _, Z, H, W, _ = layer4.get_shape().as_list() 171 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 172 | drop=drop, scope='layer6_1') 173 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 174 | drop=drop, scope='layer6_2') 175 | layer6 = squeeze_excitation_model(layer6, out_dim=160, scope='sem6') 176 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 177 | # layer9->deconvolution 178 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 179 | # layer8->convolution 180 | layer7 = crop_and_concat(layer3, deconv2) 181 | _, Z, H, W, _ = layer3.get_shape().as_list() 182 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 183 | drop=drop, scope='layer7_1') 184 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 185 | drop=drop, scope='layer7_2') 186 | layer7 = squeeze_excitation_model(layer7, out_dim=80, scope='sem7') 187 | layer7 = resnet_Add(x1=deconv2, 
x2=layer7) 188 | # layer9->deconvolution 189 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 190 | # layer8->convolution 191 | layer8 = crop_and_concat(layer2, deconv3) 192 | _, Z, H, W, _ = layer2.get_shape().as_list() 193 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 194 | drop=drop, scope='layer8_1') 195 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 196 | drop=drop, scope='layer8_2') 197 | layer8 = squeeze_excitation_model(layer8, out_dim=40, scope='sem8') 198 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 199 | # layer9->deconvolution 200 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 201 | # layer8->convolution 202 | layer9 = crop_and_concat(layer1, deconv4) 203 | _, Z, H, W, _ = layer1.get_shape().as_list() 204 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase, 205 | drop=drop, scope='layer9_1') 206 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 207 | drop=drop, scope='layer9_2') 208 | layer9 = squeeze_excitation_model(layer9, out_dim=20, scope='sem9') 209 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 210 | # layer14->output 211 | output_map = conv_sigomd(x=layer9, kernal=(1, 1, 1, 20, n_class), scope='output') 212 | return output_map 213 | -------------------------------------------------------------------------------- /VnetFamily/Vnettripleplus/model_vnet3dtripleplus.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from Vnet.layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, max_pool3d, upsample3d, 5 | weight_xavier_init, bias_variable) 6 | import tensorflow as tf 7 | 8 | 9 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 10 | with tf.name_scope(scope): 11 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 12 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 13 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 14 | conv = conv3d(x, W) + B 15 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 16 | G=4, scope=scope) 17 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 18 | return conv 19 | 20 | 21 | def conv_relu(x, kernal, scope=None): 22 | with tf.name_scope(scope): 23 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 24 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W') 25 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 26 | conv = conv3d(x, W) + B 27 | conv = tf.nn.relu(conv) 28 | return conv 29 | 30 | 31 | 32 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 33 | with tf.name_scope(scope): 34 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 35 | n_outputs=kernal[-1], 36 | activefunction='relu', variable_name=scope + 'W') 37 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 38 | conv = conv3d(x, W, 2) + B 39 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 40 | G=4, scope=scope) 41 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 42 | return conv 43 | 44 | 45 | 
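# A minimal sketch (illustrative only, names prefixed sk_ are hypothetical) of the UNet3+-style
# full-scale skip pattern used by _create_convtripleplus_net below: every decode stage gathers
# all five scales at one resolution -- deeper features are enlarged with upsample3d, shallower
# encoder features are shrunk with repeated max_pool3d, each source is reduced to an equal
# channel share with conv_bn_relu_drop, and the shares are concatenated on the channel axis and
# fused by one more convolution. One two-source stage, assuming tensors `deep` (160 channels,
# half resolution) and `shallow` (20 channels, double resolution) feeding a 40-channel stage:
#
#     src_deep = upsample3d(x=deep, scale_factor=2, scope='sk_up')            # enlarge deeper features
#     src_deep = conv_bn_relu_drop(src_deep, kernal=(3, 3, 3, 160, 20),
#                                  phase=phase, drop=drop, scope='sk_deep')   # equal channel share
#     src_shallow = max_pool3d(x=shallow, depth=True)                         # shrink shallower features
#     src_shallow = conv_bn_relu_drop(src_shallow, kernal=(3, 3, 3, 20, 20),
#                                     phase=phase, drop=drop, scope='sk_shallow')
#     stage = tf.concat([src_deep, src_shallow], axis=4)                      # shares add up: 20 + 20 = 40
#     stage = conv_bn_relu_drop(x=stage, kernal=(3, 3, 3, 40, 40),
#                               phase=phase, drop=drop, scope='sk_fuse')      # fuse the concatenation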
def conv_sigmod(x, kernal, scope=None):
46 |     with tf.name_scope(scope):
47 |         W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
48 |                                n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W')
49 |         B = bias_variable([kernal[-1]], variable_name=scope + 'B')
50 |         conv = conv3d(x, W) + B
51 |         conv = tf.nn.sigmoid(conv)
52 |         return conv
53 | 
54 | 
55 | def deconv_relu(x, kernal, samefeture=False, scope=None):
56 |     with tf.name_scope(scope):
57 |         W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1],
58 |                                n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W')
59 |         B = bias_variable([kernal[-2]], variable_name=scope + 'B')
60 |         conv = deconv3d(x, W, samefeture, True) + B
61 |         conv = tf.nn.relu(conv)
62 |         return conv
63 | 
64 | 
65 | def _create_convtripleplus_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=2):
66 |     inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel])  # shape=(?, image_z, image_width, image_height, image_channel)
67 |     # Vnettripleplus model
68 |     # layer1->convolution
69 |     layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, scope='layer1_1')
70 |     layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, scope='layer1_2')
71 |     layer1 = resnet_Add(x1=layer0, x2=layer1)
72 |     # down sampling1
73 |     down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1')
74 |     # layer2->convolution
75 |     layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, scope='layer2_1')
76 |     layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, scope='layer2_2')
77 |     layer2 = resnet_Add(x1=down1, x2=layer2)
78 |     # down sampling2
79 |     down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2')
80 |     # layer3->convolution
81 |     layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_1')
82 |     layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_2')
83 |     layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, scope='layer3_3')
84 |     layer3 = resnet_Add(x1=down2, x2=layer3)
85 |     # down sampling3
86 |     down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3')
87 |     # layer4->convolution
88 |     layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_1')
89 |     layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_2')
90 |     layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, scope='layer4_3')
91 |     layer4 = resnet_Add(x1=down3, x2=layer4)
92 |     # down sampling4
93 |     down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4')
94 |     # layer5->convolution
95 |     layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_1')
96 |     layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_2')
97 |     layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, scope='layer5_3')
98 |     layer5 = resnet_Add(x1=down4, x2=layer5)
99 | 
100 |     _, Z, H, W, _ = layer4.get_shape().as_list()
101 |     # decode1: full-scale aggregation at the layer4 scale
102 |     upsample1_1 = upsample3d(x=layer5, scale_factor=2,
scope='upsample1_1') 103 | decode1_1 = conv_bn_relu_drop(upsample1_1, kernal=(3, 3, 3, 160, 32), image_z=Z, height=H, width=W, phase=phase, 104 | drop=drop, scope='decode1_1') 105 | decode1_2 = conv_bn_relu_drop(layer4, kernal=(3, 3, 3, 160, 32), image_z=Z, height=H, width=W, phase=phase, 106 | drop=drop, scope='decode1_2') 107 | decode1_3 = max_pool3d(x=layer3, depth=True) 108 | decode1_3 = conv_bn_relu_drop(decode1_3, kernal=(3, 3, 3, 80, 32), image_z=Z, height=H, width=W, phase=phase, 109 | drop=drop, scope='decode1_3') 110 | decode1_4 = max_pool3d(x=layer2, depth=True) 111 | decode1_4 = max_pool3d(x=decode1_4, depth=True) 112 | decode1_4 = conv_bn_relu_drop(decode1_4, kernal=(3, 3, 3, 40, 32), image_z=Z, height=H, width=W, phase=phase, 113 | drop=drop, scope='decode1_4') 114 | decode1_5 = max_pool3d(x=layer1, depth=True) 115 | decode1_5 = max_pool3d(x=decode1_5, depth=True) 116 | decode1_5 = max_pool3d(x=decode1_5, depth=True) 117 | decode1_5 = conv_bn_relu_drop(decode1_5, kernal=(3, 3, 3, 20, 32), image_z=Z, height=H, width=W, phase=phase, 118 | drop=drop, scope='decode1_5') 119 | 120 | decode1 = tf.concat([decode1_1, decode1_2, decode1_3, decode1_4, decode1_5], axis=4) 121 | decode1 = conv_bn_relu_drop(x=decode1, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 122 | drop=drop, scope='decode1_6') 123 | # layer9->decode2 124 | _, Z, H, W, _ = layer3.get_shape().as_list() 125 | upsample2_1 = upsample3d(x=layer5, scale_factor=4, scope='upsample2_1') 126 | decode2_1 = conv_bn_relu_drop(upsample2_1, kernal=(3, 3, 3, 80, 16), image_z=Z, height=H, width=W, phase=phase, 127 | drop=drop, scope='decode2_1') 128 | decode2_2 = upsample3d(x=decode1, scale_factor=2, scope='upsample2_2') 129 | decode2_2 = conv_bn_relu_drop(decode2_2, kernal=(3, 3, 3, 80, 16), image_z=Z, height=H, width=W, phase=phase, 130 | drop=drop, scope='decode2_2') 131 | decode2_3 = conv_bn_relu_drop(layer3, kernal=(3, 3, 3, 80, 16), image_z=Z, height=H, width=W, phase=phase, 132 | drop=drop, scope='decode2_3') 133 | decode2_4 = max_pool3d(x=layer2, depth=True) 134 | decode2_4 = conv_bn_relu_drop(decode2_4, kernal=(3, 3, 3, 40, 16), image_z=Z, height=H, width=W, phase=phase, 135 | drop=drop, scope='decode2_4') 136 | decode2_5 = max_pool3d(x=layer1, depth=True) 137 | decode2_5 = max_pool3d(x=decode2_5, depth=True) 138 | decode2_5 = conv_bn_relu_drop(decode2_5, kernal=(3, 3, 3, 20, 16), image_z=Z, height=H, width=W, phase=phase, 139 | drop=drop, scope='decode2_5') 140 | 141 | decode2 = tf.concat([decode2_1, decode2_2, decode2_3, decode2_4, decode2_5], axis=4) 142 | decode2 = conv_bn_relu_drop(x=decode2, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 143 | drop=drop, scope='decode2_6') 144 | # layer9->decode3 145 | _, Z, H, W, _ = layer2.get_shape().as_list() 146 | upsample3_1 = upsample3d(x=layer5, scale_factor=8, scope='upsample3_1') 147 | decode3_1 = conv_bn_relu_drop(upsample3_1, kernal=(3, 3, 3, 40, 8), image_z=Z, height=H, width=W, phase=phase, 148 | drop=drop, scope='decode3_1') 149 | decode3_2 = upsample3d(x=decode1, scale_factor=4, scope='upsample3_2') 150 | decode3_2 = conv_bn_relu_drop(decode3_2, kernal=(3, 3, 3, 40, 8), image_z=Z, height=H, width=W, phase=phase, 151 | drop=drop, scope='decode3_2') 152 | decode3_3 = upsample3d(x=decode2, scale_factor=2, scope='upsample3_3') 153 | decode3_3 = conv_bn_relu_drop(decode3_3, kernal=(3, 3, 3, 40, 8), image_z=Z, height=H, width=W, phase=phase, 154 | drop=drop, scope='decode3_3') 155 | decode3_4 = conv_bn_relu_drop(layer2, kernal=(3, 
3, 3, 40, 8), image_z=Z, height=H, width=W, phase=phase, 156 | drop=drop, scope='decode3_4') 157 | decode3_5 = max_pool3d(x=layer1, depth=True) 158 | decode3_5 = conv_bn_relu_drop(decode3_5, kernal=(3, 3, 3, 20, 8), image_z=Z, height=H, width=W, phase=phase, 159 | drop=drop, scope='decode3_5') 160 | 161 | decode3 = tf.concat([decode3_1, decode3_2, decode3_3, decode3_4, decode3_5], axis=4) 162 | decode3 = conv_bn_relu_drop(x=decode3, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 163 | drop=drop, scope='decode3_6') 164 | # layer9->decode4 165 | _, Z, H, W, _ = layer1.get_shape().as_list() 166 | upsample4_1 = upsample3d(x=layer5, scale_factor=16, scope='upsample4_1') 167 | decode4_1 = conv_bn_relu_drop(upsample4_1, kernal=(3, 3, 3, 20, 4), image_z=Z, height=H, width=W, phase=phase, 168 | drop=drop, scope='decode4_1') 169 | decode4_2 = upsample3d(x=decode1, scale_factor=8, scope='upsample4_2') 170 | decode4_2 = conv_bn_relu_drop(decode4_2, kernal=(3, 3, 3, 20, 4), image_z=Z, height=H, width=W, phase=phase, 171 | drop=drop, scope='decode4_2') 172 | decode4_3 = upsample3d(x=decode2, scale_factor=4, scope='upsample4_3') 173 | decode4_3 = conv_bn_relu_drop(decode4_3, kernal=(3, 3, 3, 20, 4), image_z=Z, height=H, width=W, phase=phase, 174 | drop=drop, scope='decode4_3') 175 | decode4_4 = upsample3d(x=decode3, scale_factor=2, scope='upsample4_4') 176 | decode4_4 = conv_bn_relu_drop(decode4_4, kernal=(3, 3, 3, 20, 4), image_z=Z, height=H, width=W, phase=phase, 177 | drop=drop, scope='decode4_4') 178 | decode4_5 = conv_bn_relu_drop(layer1, kernal=(3, 3, 3, 20, 4), image_z=Z, height=H, width=W, phase=phase, 179 | drop=drop, scope='decode4_5') 180 | 181 | decode4 = tf.concat([decode4_1, decode4_2, decode4_3, decode4_4, decode4_5], axis=4) 182 | decode4 = conv_bn_relu_drop(x=decode4, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, 183 | phase=phase, drop=drop, scope='decode4_6') 184 | # layer14->output 185 | output_map = conv_sigmod(x=decode4, kernal=(1, 1, 1, 20, n_class), scope='output') 186 | return output_map -------------------------------------------------------------------------------- /VnetFamily/NestedVnet/model_Nestedvnet3d.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from .layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, weight_xavier_init, 5 | bias_variable) 6 | import tensorflow as tf 7 | 8 | 9 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 10 | """ 11 | :param x: 12 | :param kernal: 13 | :param phase: 14 | :param drop: 15 | :param image_z: 16 | :param height: 17 | :param width: 18 | :param scope: 19 | :return: 20 | """ 21 | with tf.name_scope(scope): 22 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 23 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 24 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 25 | conv = conv3d(x, W) + B 26 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 27 | scope=scope) 28 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 29 | return conv 30 | 31 | 32 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 33 | with tf.name_scope(scope): 34 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 35 | n_outputs=kernal[-1], 36 | activefunction='relu', 
variable_name=scope + 'W') 37 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 38 | conv = conv3d(x, W, 2) + B 39 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 40 | scope=scope) 41 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 42 | return conv 43 | 44 | 45 | def deconv_relu(x, kernal, samefeture=False, scope=None): 46 | with tf.name_scope(scope): 47 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 48 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 49 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 50 | conv = deconv3d(x, W, samefeture, True) + B 51 | conv = tf.nn.relu(conv) 52 | return conv 53 | 54 | 55 | def conv_sigmod(x, kernal, scope=None): 56 | with tf.name_scope(scope): 57 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 58 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 59 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 60 | conv = conv3d(x, W) + B 61 | conv = tf.nn.sigmoid(conv) 62 | return conv 63 | 64 | 65 | def _createNestednet(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1): 66 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 67 | # Vnet model 68 | # layer1->convolution 69 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop, 70 | scope='layer0') 71 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop, 72 | scope='layer1') 73 | layer1 = resnet_Add(x1=layer0, x2=layer1) 74 | # down sampling1 75 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1') 76 | # layer2->convolution 77 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, 78 | scope='layer2_1') 79 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, 80 | scope='layer2_2') 81 | layer2 = resnet_Add(x1=down1, x2=layer2) 82 | # down sampling2 83 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2') 84 | # Nested block1 85 | deconv1_1 = deconv_relu(x=layer2, kernal=(3, 3, 3, 16, 32), scope='deconv1_1') 86 | layer1_1 = crop_and_concat(layer1, deconv1_1) 87 | _, Z, H, W, _ = layer1.get_shape().as_list() 88 | layer1_1 = conv_bn_relu_drop(x=layer1_1, kernal=(3, 3, 3, 32, 16), phase=phase, drop=drop, image_z=Z, 89 | height=H, width=W, scope='layer1_1_1') 90 | layer1_1 = conv_bn_relu_drop(x=layer1_1, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop, image_z=Z, 91 | height=H, width=W, scope='layer1_1_2') 92 | # layer3->convolution 93 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, scope='layer3_1') 94 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, scope='layer3_2') 95 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, scope='layer3_3') 96 | layer3 = resnet_Add(x1=down2, x2=layer3) 97 | # down sampling3 98 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3') 99 | # Nested block2 100 | deconv2_1 = deconv_relu(x=layer3, kernal=(3, 3, 3, 32, 64), scope='deconv2_1') 101 | layer2_1 = crop_and_concat(layer2, deconv2_1) 102 | _, Z, H, W, _ = layer2.get_shape().as_list() 103 | layer2_1 = 
conv_bn_relu_drop(x=layer2_1, kernal=(3, 3, 3, 64, 32), phase=phase, drop=drop, image_z=Z, 104 | height=H, width=W, scope='layer2_1_1') 105 | layer2_1 = conv_bn_relu_drop(x=layer2_1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, image_z=Z, 106 | height=H, width=W, scope='layer2_1_2') 107 | # Nested block3 108 | deconv1_2 = deconv_relu(x=layer2_1, kernal=(3, 3, 3, 16, 32), scope='deconv1_2') 109 | layer1_2 = crop_and_concat(layer1_1, deconv1_2) 110 | layer1_2 = crop_and_concat(layer1_2, layer1) 111 | _, Z, H, W, _ = layer1.get_shape().as_list() 112 | layer1_2 = conv_bn_relu_drop(x=layer1_2, kernal=(3, 3, 3, 16 * 3, 16), phase=phase, drop=drop, image_z=Z, 113 | height=H, width=W, scope='layer1_2_1') 114 | layer1_2 = conv_bn_relu_drop(x=layer1_2, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop, image_z=Z, 115 | height=H, width=W, scope='layer1_2_2') 116 | # layer4->convolution 117 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, scope='layer4_1') 118 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, scope='layer4_2') 119 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, scope='layer4_3') 120 | layer4 = resnet_Add(x1=down3, x2=layer4) 121 | # down sampling4 122 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4') 123 | # Nested block4 124 | deconv3_1 = deconv_relu(x=layer4, kernal=(3, 3, 3, 64, 128), scope='deconv3_1') 125 | layer3_1 = crop_and_concat(layer3, deconv3_1) 126 | _, Z, H, W, _ = layer3.get_shape().as_list() 127 | layer3_1 = conv_bn_relu_drop(x=layer3_1, kernal=(3, 3, 3, 128, 64), phase=phase, drop=drop, image_z=Z, 128 | height=H, width=W, scope='layer3_1_1') 129 | layer3_1 = conv_bn_relu_drop(x=layer3_1, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, image_z=Z, 130 | height=H, width=W, scope='layer3_1_2') 131 | # Nested block5 132 | deconv2_2 = deconv_relu(x=layer3_1, kernal=(3, 3, 3, 32, 64), scope='deconv2_2') 133 | layer2_2 = crop_and_concat(layer2_1, deconv2_2) 134 | layer2_2 = crop_and_concat(layer2_2, layer2) 135 | _, Z, H, W, _ = layer2.get_shape().as_list() 136 | layer2_2 = conv_bn_relu_drop(x=layer2_2, kernal=(3, 3, 3, 32 * 3, 32), phase=phase, drop=drop, image_z=Z, 137 | height=H, width=W, scope='layer2_2_1') 138 | layer2_2 = conv_bn_relu_drop(x=layer2_2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, image_z=Z, 139 | height=H, width=W, scope='layer2_2_2') 140 | # Nested block6 141 | deconv1_3 = deconv_relu(x=layer2_2, kernal=(3, 3, 3, 16, 32), scope='deconv1_3') 142 | layer1_3 = crop_and_concat(layer1_2, deconv1_3) 143 | layer1_3 = crop_and_concat(layer1_3, layer1_1) 144 | layer1_3 = crop_and_concat(layer1_3, layer1) 145 | _, Z, H, W, _ = layer1.get_shape().as_list() 146 | layer1_3 = conv_bn_relu_drop(x=layer1_3, kernal=(3, 3, 3, 16 * 4, 16), phase=phase, drop=drop, image_z=Z, 147 | height=H, width=W, scope='layer1_3_1') 148 | layer1_3 = conv_bn_relu_drop(x=layer1_3, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop, image_z=Z, 149 | height=H, width=W, scope='layer1_3_2') 150 | # layer5->convolution 151 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 152 | scope='layer5_1') 153 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 154 | scope='layer5_2') 155 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, scope='layer5_3') 156 | layer5 = resnet_Add(x1=down4, x2=layer5) 157 
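    # Naming convention in this nested (UNet++-style) decoder: layer{d}_{j} is the j-th
    # node at depth d, computed from the upsampled deeper node and a dense concatenation
    # of all earlier same-depth nodes plus the encoder output. That is why the fusion
    # kernels grow as 16 * 3, 16 * 4, 16 * 5 at depth 1: node layer1_j concatenates
    # deconv1_j with layer1_{j-1}, ..., layer1_1 and layer1, i.e. (j + 1) tensors of 16
    # channels each, before being reduced back to 16.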
| # Nested block7
158 |     deconv4_1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv4_1')
159 |     layer4_1 = crop_and_concat(layer4, deconv4_1)
160 |     _, Z, H, W, _ = layer4.get_shape().as_list()
161 |     layer4_1 = conv_bn_relu_drop(x=layer4_1, kernal=(3, 3, 3, 256, 128), phase=phase, drop=drop, image_z=Z,
162 |                                  height=H, width=W, scope='layer4_1_1')
163 |     layer4_1 = conv_bn_relu_drop(x=layer4_1, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, image_z=Z,
164 |                                  height=H, width=W, scope='layer4_1_2')
165 |     # Nested block8
166 |     deconv3_2 = deconv_relu(x=layer4_1, kernal=(3, 3, 3, 64, 128), scope='deconv3_2')
167 |     layer3_2 = crop_and_concat(layer3_1, deconv3_2)
168 |     layer3_2 = crop_and_concat(layer3_2, layer3)
169 |     _, Z, H, W, _ = layer3.get_shape().as_list()
170 |     layer3_2 = conv_bn_relu_drop(x=layer3_2, kernal=(3, 3, 3, 64 * 3, 64), phase=phase, drop=drop, image_z=Z,
171 |                                  height=H, width=W, scope='layer3_2_1')
172 |     layer3_2 = conv_bn_relu_drop(x=layer3_2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, image_z=Z,
173 |                                  height=H, width=W, scope='layer3_2_2')
174 |     # Nested block9
175 |     deconv2_3 = deconv_relu(x=layer3_2, kernal=(3, 3, 3, 32, 64), scope='deconv2_3')
176 |     layer2_3 = crop_and_concat(layer2_2, deconv2_3)
177 |     layer2_3 = crop_and_concat(layer2_3, layer2_1)
178 |     layer2_3 = crop_and_concat(layer2_3, layer2)
179 |     _, Z, H, W, _ = layer2.get_shape().as_list()
180 |     layer2_3 = conv_bn_relu_drop(x=layer2_3, kernal=(3, 3, 3, 32 * 4, 32), phase=phase, drop=drop, image_z=Z,
181 |                                  height=H, width=W, scope='layer2_3_1')
182 |     layer2_3 = conv_bn_relu_drop(x=layer2_3, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, image_z=Z,
183 |                                  height=H, width=W, scope='layer2_3_2')
184 |     # Nested block10
185 |     deconv1_4 = deconv_relu(x=layer2_3, kernal=(3, 3, 3, 16, 32), scope='deconv1_4')
186 |     layer1_4 = crop_and_concat(layer1_3, deconv1_4)
187 |     layer1_4 = crop_and_concat(layer1_4, layer1_2)
188 |     layer1_4 = crop_and_concat(layer1_4, layer1_1)
189 |     layer1_4 = crop_and_concat(layer1_4, layer1)
190 |     _, Z, H, W, _ = layer1.get_shape().as_list()
191 |     layer1_4 = conv_bn_relu_drop(x=layer1_4, kernal=(3, 3, 3, 16 * 5, 16), phase=phase, drop=drop, image_z=Z,
192 |                                  height=H, width=W, scope='layer1_4_1')
193 |     layer1_4 = conv_bn_relu_drop(x=layer1_4, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop, image_z=Z,
194 |                                  height=H, width=W, scope='layer1_4_2')
195 |     # deep-supervision outputs, one per nested depth-1 node
196 |     output_map1 = conv_sigmod(x=layer1_1, kernal=(1, 1, 1, 16, n_class), scope='output1')
197 |     output_map2 = conv_sigmod(x=layer1_2, kernal=(1, 1, 1, 16, n_class), scope='output2')
198 |     output_map3 = conv_sigmod(x=layer1_3, kernal=(1, 1, 1, 16, n_class), scope='output3')
199 |     output_map4 = conv_sigmod(x=layer1_4, kernal=(1, 1, 1, 16, n_class), scope='output4')
200 |     return output_map1, output_map2, output_map3, output_map4
201 | 
--------------------------------------------------------------------------------
/VnetFamily/ETVnet/model_ETvnet3d.py:
--------------------------------------------------------------------------------
1 | '''
2 | ETVnet: 3D VNet variant with an edge guidance module and a weighted aggregation module.
3 | '''
4 | from .layer import (conv3d, deconv3d, upsample3d, normalizationlayer, crop_and_concat, resnet_Add,
5 |                     weight_xavier_init, bias_variable, save_images)
6 | import tensorflow as tf
7 | 
8 | 
9 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
10 |     with tf.name_scope(scope):
11 |         W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
12 |                                n_outputs=kernal[-1], activefunction='relu', variable_name=scope
+ 'conv_W')
13 |         B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
14 |         conv = conv3d(x, W) + B
15 |         conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
16 |                                   G=20, scope=scope)
17 |         conv = tf.nn.dropout(tf.nn.relu(conv), drop)
18 |         return conv
19 | 
20 | 
21 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
22 |     with tf.name_scope(scope):
23 |         W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
24 |                                n_outputs=kernal[-1],
25 |                                activefunction='relu', variable_name=scope + 'W')
26 |         B = bias_variable([kernal[-1]], variable_name=scope + 'B')
27 |         conv = conv3d(x, W, 2) + B
28 |         conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
29 |                                   G=20, scope=scope)
30 |         conv = tf.nn.dropout(tf.nn.relu(conv), drop)
31 |         return conv
32 | 
33 | 
34 | def full_connected_relu(x, kernal, activefunction='relu', scope=None):
35 |     with tf.name_scope(scope):
36 |         W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1],
37 |                                n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W')
38 |         B = bias_variable([kernal[-1]], variable_name=scope + 'B')
39 |         FC = tf.matmul(x, W) + B
40 |         if activefunction == 'relu':
41 |             FC = tf.nn.relu(FC)
42 |         elif activefunction == 'softmax':
43 |             FC = tf.nn.softmax(FC)
44 |         elif activefunction == 'sigmoid':
45 |             FC = tf.nn.sigmoid(FC)
46 |         return FC
47 | 
48 | 
49 | def deconv_relu(x, kernal, samefeture=False, scope=None):
50 |     with tf.name_scope(scope):
51 |         W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1],
52 |                                n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W')
53 |         B = bias_variable([kernal[-2]], variable_name=scope + 'B')
54 |         conv = deconv3d(x, W, samefeture, True) + B
55 |         conv = tf.nn.relu(conv)
56 |         return conv
57 | 
58 | 
59 | 
60 | def conv_active(x, kernal, active=None, scope=None):
61 |     with tf.name_scope(scope):
62 |         W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
63 |                                n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W')
64 |         B = bias_variable([kernal[-1]], variable_name=scope + 'B')
65 |         conv = conv3d(x, W) + B
66 |         if active == 'relu':
67 |             conv = tf.nn.relu(conv)
68 |         if active == 'sigmod':
69 |             conv = tf.nn.sigmoid(conv)
70 |         return conv
71 | 
72 | 
73 | def Spatial_squeeze_Channel_excitation_layer(x, out_dim, ratio=4, scope=None):
74 |     with tf.name_scope(scope):
75 |         # global average pooling: squeeze the spatial dimensions
76 |         squeeze = tf.reduce_mean(x, axis=(1, 2, 3), name=scope + 'channel_squeeze')
77 |         # two fully connected layers: reduce by `ratio`, then restore and gate
78 |         excitation = full_connected_relu(squeeze, kernal=(out_dim, out_dim // ratio), activefunction='relu',
79 |                                          scope=scope + '_fully_connected1')
80 |         excitation = full_connected_relu(excitation, kernal=(out_dim // ratio, out_dim),
81 |                                          activefunction='sigmoid', scope=scope + '_fully_connected2')
82 |         # rescale x channel-wise
83 |         excitation = tf.reshape(excitation, [-1, 1, 1, 1, out_dim])
84 |         scale = x * excitation
85 |         return scale
86 | 
87 | 
88 | def weighted_aggregation_model(x1, x2, x3, x4, Channel, scope=None):
89 |     """
90 |     weighted aggregation model: gate each decoder feature with channel attention, then merge deep-to-shallow with upsampling
91 |     :param x1: deepest decoder feature, with `Channel` channels
92 |     :param x2: decoder feature with Channel // 2 channels
93 |     :param x3: decoder feature with Channel // 4 channels
94 |     :param x4: shallowest decoder feature, with Channel // 8 channels
95 |     :param Channel: channel count of x1
96 |     :param scope: name scope
97 |     :return: aggregated feature map at the resolution of x4
98 |     """
99 |     with tf.name_scope(scope):
100 |         wb1 = Spatial_squeeze_Channel_excitation_layer(x1, Channel, scope=scope + 'wb1')
101 |         wb1 =
conv_active(wb1, kernal=(1, 1, 1, Channel, Channel // 2), active='relu', scope=scope + 'layer1') 102 | wb1 = upsample3d(wb1, 2, scope=scope + 'up1') 103 | wb2 = Spatial_squeeze_Channel_excitation_layer(x2, Channel // 2, scope=scope + 'wb2') 104 | wb2 = resnet_Add(wb1, wb2) 105 | wb2 = conv_active(wb2, kernal=(1, 1, 1, Channel // 2, Channel // 4), active='relu', scope=scope + 'layer2') 106 | wb2 = upsample3d(wb2, 2, scope=scope + 'up2') 107 | wb3 = Spatial_squeeze_Channel_excitation_layer(x3, Channel // 4, scope=scope + 'wb3') 108 | wb3 = resnet_Add(wb3, wb2) 109 | wb3 = conv_active(wb3, kernal=(1, 1, 1, Channel // 4, Channel // 8), active='relu', scope=scope + 'layer3') 110 | wb3 = upsample3d(wb3, 2, scope=scope + 'up3') 111 | wb4 = Spatial_squeeze_Channel_excitation_layer(x4, Channel // 8, scope=scope + 'wb4') 112 | wb4 = resnet_Add(wb3, wb4) 113 | return wb4 114 | 115 | 116 | def edge_guidance_model(x1, x2, scope=None): 117 | """ 118 | edge_guidance_model 119 | :param x1: 120 | :param x2: 121 | :param scope: 122 | :return: 123 | """ 124 | with tf.name_scope(scope): 125 | C1 = x1.get_shape().as_list()[4] 126 | layer1 = conv_active(x1, kernal=(1, 1, 1, C1, C1), active='relu', scope=scope + 'layer1_0') 127 | layer1 = conv_active(layer1, kernal=(3, 3, 3, C1, C1), active='relu', scope=scope + 'layer1_1') 128 | 129 | C2 = x2.get_shape().as_list()[4] 130 | layer2 = upsample3d(x2, scale_factor=2, scope=scope + 'up1') 131 | layer2 = conv_active(layer2, kernal=(1, 1, 1, C2, C2), active='relu', scope=scope + 'layer2_0') 132 | layer2 = conv_active(layer2, kernal=(3, 3, 3, C2, C2), active='relu', scope=scope + 'layer2_1') 133 | 134 | egm = crop_and_concat(layer1, layer2) 135 | 136 | C = C1 + C2 137 | egm = conv_active(egm, kernal=(1, 1, 1, C, C), scope=scope + 'layer3') 138 | return egm 139 | 140 | 141 | def _create_etconv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=2): 142 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 143 | # Vnet model 144 | # layer1->convolution 145 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 20), phase=phase, drop=drop, 146 | scope='layer0') 147 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop, 148 | scope='layer1') 149 | layer1 = resnet_Add(x1=layer0, x2=layer1) 150 | # down sampling1 151 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop, scope='down1') 152 | # layer2->convolution 153 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 154 | scope='layer2_1') 155 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 40, 40), phase=phase, drop=drop, 156 | scope='layer2_2') 157 | layer2 = resnet_Add(x1=down1, x2=layer2) 158 | # down sampling2 159 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 40, 80), phase=phase, drop=drop, scope='down2') 160 | # layer3->convolution 161 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 162 | scope='layer3_1') 163 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 164 | scope='layer3_2') 165 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 80, 80), phase=phase, drop=drop, 166 | scope='layer3_3') 167 | layer3 = resnet_Add(x1=down2, x2=layer3) 168 | # down sampling3 169 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 80, 160), phase=phase, drop=drop, scope='down3') 170 | # layer4->convolution 171 | layer4 = conv_bn_relu_drop(x=down3, 
kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 172 | scope='layer4_1') 173 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 174 | scope='layer4_2') 175 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 160, 160), phase=phase, drop=drop, 176 | scope='layer4_3') 177 | layer4 = resnet_Add(x1=down3, x2=layer4) 178 | # down sampling4 179 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 160, 320), phase=phase, drop=drop, scope='down4') 180 | # layer5->convolution 181 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 182 | scope='layer5_1') 183 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 184 | scope='layer5_2') 185 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 320, 320), phase=phase, drop=drop, 186 | scope='layer5_3') 187 | layer5 = resnet_Add(x1=down4, x2=layer5) 188 | 189 | # layer6->deconvolution 190 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 160, 320), scope='deconv1') 191 | # layer7->convolution 192 | layer6 = crop_and_concat(layer4, deconv1) 193 | _, Z, H, W, _ = layer4.get_shape().as_list() 194 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 320, 160), image_z=Z, height=H, width=W, phase=phase, 195 | drop=drop, scope='layer6_1') 196 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 197 | drop=drop, scope='layer6_2') 198 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 160, 160), image_z=Z, height=H, width=W, phase=phase, 199 | drop=drop, scope='layer6_3') 200 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 201 | # layer8->deconvolution 202 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 80, 160), scope='deconv2') 203 | # layer9->convolution 204 | layer7 = crop_and_concat(layer3, deconv2) 205 | _, Z, H, W, _ = layer3.get_shape().as_list() 206 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 160, 80), image_z=Z, height=H, width=W, phase=phase, 207 | drop=drop, scope='layer7_1') 208 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 209 | drop=drop, scope='layer7_2') 210 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 80, 80), image_z=Z, height=H, width=W, phase=phase, 211 | drop=drop, scope='layer7_3') 212 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 213 | # layer10->deconvolution 214 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 40, 80), scope='deconv3') 215 | # layer11->convolution 216 | layer8 = crop_and_concat(layer2, deconv3) 217 | _, Z, H, W, _ = layer2.get_shape().as_list() 218 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 80, 40), image_z=Z, height=H, width=W, phase=phase, 219 | drop=drop, scope='layer8_1') 220 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 221 | drop=drop, scope='layer8_2') 222 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 40, 40), image_z=Z, height=H, width=W, phase=phase, 223 | drop=drop, scope='layer8_3') 224 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 225 | # layer12->deconvolution 226 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 20, 40), scope='deconv4') 227 | # layer13->convolution 228 | layer9 = crop_and_concat(layer1, deconv4) 229 | _, Z, H, W, _ = layer1.get_shape().as_list() 230 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 40, 20), image_z=Z, height=H, width=W, phase=phase, 231 | drop=drop, scope='layer9_1') 232 | layer9 = conv_bn_relu_drop(x=layer9, 
kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 233 | drop=drop, scope='layer9_2') 234 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 20, 20), image_z=Z, height=H, width=W, phase=phase, 235 | drop=drop, scope='layer9_3') 236 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 237 | # layer14->edge_guidance_model 238 | egm_output = edge_guidance_model(x1=layer1, x2=layer2, scope='edm') 239 | _, _, _, _, egm_output_C = egm_output.get_shape().as_list() 240 | # layer15->weighted_aggregation_model 241 | wam_output = weighted_aggregation_model(x1=layer6, x2=layer7, x3=layer8, x4=layer9, Channel=160, scope='wam') 242 | # # layer16->output 243 | fusion_output = crop_and_concat(x1=egm_output, x2=wam_output) 244 | output_map = conv_active(x=fusion_output, kernal=(1, 1, 1, 20 + egm_output_C, n_class),active='sigmod', scope='output') 245 | return output_map -------------------------------------------------------------------------------- /VnetFamily/AttentionGatedVnet/model_attention_vnet3d.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from Vnet.layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, upsample3d, 5 | weight_xavier_init, bias_variable, save_images) 6 | import tensorflow as tf 7 | import numpy as np 8 | import os 9 | 10 | 11 | def gatingsignal3d(x, kernal, phase, image_z=None, height=None, width=None, scope=None): 12 | """this is simply 1x1x1 convolution, bn, activation,Gating Signal(Query) 13 | :param x: 14 | :param kernal:(1,1,1,inputfilters,outputfilters) 15 | :param phase: 16 | :param drop: 17 | :param image_z: 18 | :param height: 19 | :param width: 20 | :param scope: 21 | :return: 22 | """ 23 | with tf.name_scope(scope): 24 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 25 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 26 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 27 | conv = conv3d(x, W) + B 28 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 29 | scope=scope) 30 | conv = tf.nn.relu(conv) 31 | return conv 32 | 33 | 34 | def attngatingblock(x, g, inputfilters, outfilters, scale_factor, phase, image_z=None, height=None, width=None, 35 | scope=None): 36 | """ 37 | take g which is the spatially smaller signal, do a conv to get the same number of feature channels as x (bigger spatially) 38 | do a conv on x to also get same feature channels (theta_x) 39 | then, upsample g to be same size as x add x and g (concat_xg) relu, 1x1x1 conv, then sigmoid then upsample the final - 40 | this gives us attn coefficients 41 | :param x: 42 | :param g: 43 | :param inputfilters: 44 | :param outfilters: 45 | :param scale_factor:2 46 | :param scope: 47 | :return: 48 | """ 49 | with tf.name_scope(scope): 50 | kernalx = (1, 1, 1, inputfilters, outfilters) 51 | Wx = weight_xavier_init(shape=kernalx, n_inputs=kernalx[0] * kernalx[1] * kernalx[2] * kernalx[3], 52 | n_outputs=kernalx[-1], activefunction='relu', variable_name=scope + 'conv_Wx') 53 | Bx = bias_variable([kernalx[-1]], variable_name=scope + 'conv_Bx') 54 | theta_x = conv3d(x, Wx, scale_factor) + Bx 55 | kernalg = (1, 1, 1, inputfilters, outfilters) 56 | Wg = weight_xavier_init(shape=kernalg, n_inputs=kernalg[0] * kernalg[1] * kernalg[2] * kernalg[3], 57 | n_outputs=kernalg[-1], activefunction='relu', variable_name=scope + 'conv_Wg') 58 | Bg = bias_variable([kernalg[-1]], 
variable_name=scope + 'conv_Bg') 59 | phi_g = conv3d(g, Wg) + Bg 60 | 61 | add_xg = resnet_Add(theta_x, phi_g) 62 | act_xg = tf.nn.relu(add_xg) 63 | 64 | kernalpsi = (1, 1, 1, outfilters, 1) 65 | Wpsi = weight_xavier_init(shape=kernalpsi, n_inputs=kernalpsi[0] * kernalpsi[1] * kernalpsi[2] * kernalpsi[3], 66 | n_outputs=kernalpsi[-1], activefunction='relu', variable_name=scope + 'conv_Wpsi') 67 | Bpsi = bias_variable([kernalpsi[-1]], variable_name=scope + 'conv_Bpsi') 68 | psi = conv3d(act_xg, Wpsi) + Bpsi 69 | sigmoid_psi = tf.nn.sigmoid(psi) 70 | 71 | upsample_psi = upsample3d(sigmoid_psi, scale_factor=scale_factor, scope=scope + "resampler") 72 | 73 | # Attention: upsample_psi * x 74 | # upsample_psi = layers.Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=4), 75 | # arguments={'repnum': outfilters})(upsample_psi) 76 | gat_x = tf.multiply(upsample_psi, x) 77 | kernal_gat_x = (1, 1, 1, outfilters, outfilters) 78 | Wgatx = weight_xavier_init(shape=kernal_gat_x, 79 | n_inputs=kernal_gat_x[0] * kernal_gat_x[1] * kernal_gat_x[2] * kernal_gat_x[3], 80 | n_outputs=kernal_gat_x[-1], activefunction='relu', 81 | variable_name=scope + 'conv_Wgatx') 82 | Bgatx = bias_variable([kernalpsi[-1]], variable_name=scope + 'conv_Bgatx') 83 | gat_x_out = conv3d(gat_x, Wgatx) + Bgatx 84 | gat_x_out = normalizationlayer(gat_x_out, is_train=phase, height=height, width=width, image_z=image_z, 85 | norm_type='group', scope=scope) 86 | return gat_x_out 87 | 88 | 89 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 90 | """ 91 | :param x: 92 | :param kernal: 93 | :param phase: 94 | :param drop: 95 | :param image_z: 96 | :param height: 97 | :param width: 98 | :param scope: 99 | :return: 100 | """ 101 | with tf.name_scope(scope): 102 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 103 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 104 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 105 | conv = conv3d(x, W) + B 106 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 107 | scope=scope) 108 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 109 | return conv 110 | 111 | 112 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 113 | with tf.name_scope(scope): 114 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 115 | n_outputs=kernal[-1], 116 | activefunction='relu', variable_name=scope + 'W') 117 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 118 | conv = conv3d(x, W, 2) + B 119 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 120 | scope=scope) 121 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 122 | return conv 123 | 124 | 125 | def deconv_relu(x, kernal, samefeture=False, scope=None): 126 | with tf.name_scope(scope): 127 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 128 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 129 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 130 | conv = deconv3d(x, W, samefeture, True) + B 131 | conv = tf.nn.relu(conv) 132 | return conv 133 | 134 | 135 | def conv_sigmod(x, kernal, scope=None): 136 | with tf.name_scope(scope): 137 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 
138 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 139 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 140 | conv = conv3d(x, W) + B 141 | conv = tf.nn.sigmoid(conv) 142 | return conv 143 | 144 | 145 | def _createattentionnet(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1): 146 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 147 | # Vnet model 148 | # layer1->convolution 149 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop, 150 | scope='layer0') 151 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop, 152 | scope='layer1') 153 | layer1 = resnet_Add(x1=layer0, x2=layer1) 154 | # down sampling1 155 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1') 156 | # layer2->convolution 157 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, 158 | scope='layer2_1') 159 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, 160 | scope='layer2_2') 161 | layer2 = resnet_Add(x1=down1, x2=layer2) 162 | # down sampling2 163 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2') 164 | # layer3->convolution 165 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, 166 | scope='layer3_1') 167 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, 168 | scope='layer3_2') 169 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, 170 | scope='layer3_3') 171 | layer3 = resnet_Add(x1=down2, x2=layer3) 172 | # down sampling3 173 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3') 174 | # layer4->convolution 175 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, 176 | scope='layer4_1') 177 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, 178 | scope='layer4_2') 179 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, 180 | scope='layer4_3') 181 | layer4 = resnet_Add(x1=down3, x2=layer4) 182 | # down sampling4 183 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4') 184 | # layer5->convolution 185 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 186 | scope='layer5_1') 187 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 188 | scope='layer5_2') 189 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 190 | scope='layer5_3') 191 | layer5 = resnet_Add(x1=down4, x2=layer5) 192 | 193 | # layer9->attngating 194 | g1 = gatingsignal3d(layer5, kernal=(1, 1, 1, 256, 128), phase=phase, scope='g1') 195 | attn1 = attngatingblock(layer4, g1, 128, 128, scale_factor=2, phase=phase, scope='attn1') 196 | # layer9->deconvolution 197 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1') 198 | # layer8->convolution 199 | layer6 = crop_and_concat(attn1, deconv1) 200 | _, Z, H, W, _ = attn1.get_shape().as_list() 201 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase, 202 | drop=drop, scope='layer6_1') 203 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 
3, 128, 128), image_z=Z, height=H, width=W, phase=phase, 204 | drop=drop, scope='layer6_2') 205 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase, 206 | drop=drop, scope='layer6_3') 207 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 208 | # layer9->attngating 209 | g2 = gatingsignal3d(layer6, kernal=(1, 1, 1, 128, 64), phase=phase, scope='g2') 210 | attn2 = attngatingblock(layer3, g2, 64, 64, scale_factor=2, phase=phase, scope='attn2') 211 | # layer9->deconvolution 212 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2') 213 | # layer8->convolution 214 | layer7 = crop_and_concat(attn2, deconv2) 215 | _, Z, H, W, _ = attn2.get_shape().as_list() 216 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase, 217 | drop=drop, scope='layer7_1') 218 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase, 219 | drop=drop, scope='layer7_2') 220 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase, 221 | drop=drop, scope='layer7_3') 222 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 223 | # layer9->attngating 224 | g3 = gatingsignal3d(layer7, kernal=(1, 1, 1, 64, 32), phase=phase, scope='g3') 225 | attn3 = attngatingblock(layer2, g3, 32, 32, scale_factor=2, phase=phase, scope='attn3') 226 | # layer9->deconvolution 227 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3') 228 | # layer8->convolution 229 | layer8 = crop_and_concat(attn3, deconv3) 230 | _, Z, H, W, _ = attn3.get_shape().as_list() 231 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase, 232 | drop=drop, scope='layer8_1') 233 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 234 | drop=drop, scope='layer8_2') 235 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 236 | drop=drop, scope='layer8_3') 237 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 238 | # layer9->attngating 239 | g4 = gatingsignal3d(layer8, kernal=(1, 1, 1, 32, 16), phase=phase, scope='g4') 240 | attn4 = attngatingblock(layer1, g4, 16, 16, scale_factor=2, phase=phase, scope='attn4') 241 | # layer9->deconvolution 242 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4') 243 | # layer8->convolution 244 | layer9 = crop_and_concat(attn4, deconv4) 245 | _, Z, H, W, _ = attn4.get_shape().as_list() 246 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 247 | drop=drop, scope='layer9_1') 248 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 249 | drop=drop, scope='layer9_2') 250 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 251 | drop=drop, scope='layer9_3') 252 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 253 | # layer14->output 254 | output_map = conv_sigmod(x=layer9, kernal=(1, 1, 1, 32, n_class), scope='output') 255 | return output_map 256 | -------------------------------------------------------------------------------- /VnetFamily/DualAttentionVnet/model_dualattention_vnet3d.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from .layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, 
resnet_Add, weight_xavier_init, 5 | bias_variable) 6 | import tensorflow as tf 7 | 8 | 9 | def positionAttentionblock(x, inputfilters, outfilters, kernal_size=1, scope=None): 10 | """ 11 | Position attention module 12 | :param x: 13 | :param inputfilters:inputfilter number 14 | :param outfilters:outputfilter number 15 | :param scope: 16 | :return: 17 | """ 18 | with tf.name_scope(scope): 19 | m_batchsize, Z, H, W, C = x.get_shape().as_list() 20 | 21 | kernalquery = (kernal_size, kernal_size, kernal_size, inputfilters, outfilters) 22 | Wquery = weight_xavier_init(shape=kernalquery, 23 | n_inputs=kernalquery[0] * kernalquery[1] * kernalquery[2] * kernalquery[3], 24 | n_outputs=kernalquery[-1], activefunction='relu', 25 | variable_name=scope + 'conv_Wquery') 26 | Bquery = bias_variable([kernalquery[-1]], variable_name=scope + 'conv_Bquery') 27 | query_conv = conv3d(x, Wquery) + Bquery 28 | query_conv_new = tf.reshape(query_conv, [-1, Z * H * W]) 29 | 30 | kernalkey = (kernal_size, kernal_size, kernal_size, inputfilters, outfilters) 31 | Wkey = weight_xavier_init(shape=kernalkey, n_inputs=kernalkey[0] * kernalkey[1] * kernalkey[2] * kernalkey[3], 32 | n_outputs=kernalkey[-1], activefunction='relu', variable_name=scope + 'conv_Wkey') 33 | Bkey = bias_variable([kernalkey[-1]], variable_name=scope + 'conv_Bkey') 34 | key_conv = conv3d(x, Wkey) + Bkey 35 | key_conv_new = tf.reshape(key_conv, [-1, Z * H * W]) 36 | 37 | # OOM,such as 512x512x32 then matric is 8388608x8388608 38 | # key_conv_new = tf.transpose(key_conv_new, [0, 2, 1]) 39 | # (2,2,2,3)*(2,2,3,4)=(2,2,2,4),(2,2,3)*(2,3,4)=(2,2,4) 40 | # energy = tf.matmul(query_conv_new, key_conv_new) # (m_batchsize,Z*H*W,Z*H*W) 41 | 42 | energy = tf.multiply(query_conv_new, key_conv_new) 43 | attention = tf.nn.sigmoid(energy) 44 | 45 | kernalproj = (kernal_size, kernal_size, kernal_size, inputfilters, outfilters) 46 | Wproj = weight_xavier_init(shape=kernalproj, 47 | n_inputs=kernalproj[0] * kernalproj[1] * kernalproj[2] * kernalproj[3], 48 | n_outputs=kernalproj[-1], activefunction='relu', variable_name=scope + 'conv_Wproj') 49 | Bproj = bias_variable([kernalproj[-1]], variable_name=scope + 'conv_Bproj') 50 | proj_value = conv3d(x, Wproj) + Bproj 51 | proj_value_new = tf.reshape(proj_value, [-1, Z * H * W]) 52 | 53 | out = tf.multiply(attention, proj_value_new) 54 | out_new = tf.reshape(out, [-1, Z, H, W, C]) 55 | 56 | out_new = resnet_Add(out_new, x) 57 | return out_new 58 | 59 | 60 | def channelAttentionblock(x, scope=None): 61 | """ 62 | Channel attention module 63 | :param x:input 64 | :param scope: scope name 65 | :return:channelattention result 66 | """ 67 | with tf.name_scope(scope): 68 | m_batchsize, Z, H, W, C = x.get_shape().as_list() 69 | 70 | proj_query = tf.reshape(x, [-1, C]) 71 | proj_key = tf.reshape(x, [-1, C]) 72 | proj_query = tf.transpose(proj_query, [1, 0]) 73 | 74 | energy = tf.matmul(proj_query, proj_key) # (C,C) 75 | attention = tf.nn.sigmoid(energy) 76 | 77 | proj_value = tf.reshape(x, [-1, C]) 78 | proj_value = tf.transpose(proj_value, [1, 0]) 79 | out = tf.matmul(attention, proj_value) # (C,-1) 80 | 81 | out = tf.reshape(out, [-1, Z, H, W, C]) 82 | out = resnet_Add(out, x) 83 | return out 84 | 85 | 86 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 87 | """ 88 | :param x: 89 | :param kernal: 90 | :param phase: 91 | :param drop: 92 | :param image_z: 93 | :param height: 94 | :param width: 95 | :param scope: 96 | :return: 97 | """ 98 | with tf.name_scope(scope): 99 | W = 


def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
    """
    3D convolution + group normalization + ReLU + dropout.
    :param x: input tensor
    :param kernal: weight shape (k_z, k_h, k_w, in_channels, out_channels)
    :param phase: training-phase flag passed to the normalization layer
    :param drop: dropout keep probability
    :param image_z: input depth (for the normalization layer)
    :param height: input height
    :param width: input width
    :param scope: scope name
    :return: convolved feature map
    """
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv


def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1],
                               activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W, 2) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv


def deconv_relu(x, kernal, samefeture=False, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1],
                               n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        conv = deconv3d(x, W, samefeture, True) + B
        conv = tf.nn.relu(conv)
        return conv


def conv_sigmod(x, kernal, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W) + B
        conv = tf.nn.sigmoid(conv)
        return conv
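
# --- Annotation (added; not in the original source) ---
# Weight-tuple convention used by the helpers above, inferred from the code:
#   conv weights:   (k_z, k_h, k_w, in_channels, out_channels)  -> output size kernal[-1]
#   deconv weights: (k_z, k_h, k_w, out_channels, in_channels)  -> output size kernal[-2]
# The deconv layout matches tf.nn.conv3d_transpose's filter shape, which is why
# deconv_relu reads n_outputs=kernal[-2] and sizes its bias with kernal[-2].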


def _createdualattentionnet(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1):
    inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel])  # (batch, Z, H, W, C)
    # Vnet model
    # layer1->convolution
    layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                               scope='layer0')
    layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                               scope='layer1')
    layer1 = resnet_Add(x1=layer0, x2=layer1)
    # down sampling1
    down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1')
    # layer2->convolution
    layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_1')
    layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_2')
    layer2 = resnet_Add(x1=down1, x2=layer2)
    # down sampling2
    down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2')
    # layer3->convolution
    layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_1')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_2')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_3')
    layer3 = resnet_Add(x1=down2, x2=layer3)
    # down sampling3
    down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3')
    # layer4->convolution
    layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_1')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_2')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_3')
    layer4 = resnet_Add(x1=down3, x2=layer4)
    # down sampling4
    down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4')
    # layer5->convolution
    layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_1')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_2')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_3')
    layer5 = resnet_Add(x1=down4, x2=layer5)
    # layer5->deconvolution
    deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1')
    # dual attention module 1
    pos_attenfeat1 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128 // 2), phase=phase, drop=drop,
                                       scope='dual_layer1_1')
    pos_attenfeat1 = positionAttentionblock(pos_attenfeat1, 128 // 2, 128 // 2, scope='dual_pos_atten1')
    pos_attenfeat1 = conv_bn_relu_drop(x=pos_attenfeat1, kernal=(3, 3, 3, 128 // 2, 128 // 2), phase=phase, drop=drop,
                                       scope='dual_layer1_2')

    cha_attenfeat1 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128 // 2), phase=phase, drop=drop,
                                       scope='dual_layer1_3')
    cha_attenfeat1 = channelAttentionblock(cha_attenfeat1, scope='dual_cha_atten1')
    cha_attenfeat1 = conv_bn_relu_drop(x=cha_attenfeat1, kernal=(3, 3, 3, 128 // 2, 128 // 2), phase=phase, drop=drop,
                                       scope='dual_layer1_4')

    feat_sum1 = resnet_Add(pos_attenfeat1, cha_attenfeat1)
    sasc_output1 = conv_bn_relu_drop(x=feat_sum1, kernal=(1, 1, 1, 128 // 2, 128), phase=phase, drop=drop,
                                     scope='dual_layer1_5')
    # layer6->convolution
    layer6 = crop_and_concat(sasc_output1, deconv1)
    _, Z, H, W, _ = sasc_output1.get_shape().as_list()
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_1')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_2')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_3')
    layer6 = resnet_Add(x1=deconv1, x2=layer6)

    # layer6->deconvolution
    deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2')
    # dual attention module 2
    pos_attenfeat2 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64 // 2), phase=phase, drop=drop,
                                       scope='dual_layer2_1')
    pos_attenfeat2 = positionAttentionblock(pos_attenfeat2, 64 // 2, 64 // 2, scope='dual_pos_atten2')
    pos_attenfeat2 = conv_bn_relu_drop(x=pos_attenfeat2, kernal=(3, 3, 3, 64 // 2, 64 // 2), phase=phase, drop=drop,
                                       scope='dual_layer2_2')
    cha_attenfeat2 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64 // 2), phase=phase, drop=drop,
                                       scope='dual_layer2_3')
    cha_attenfeat2 = channelAttentionblock(cha_attenfeat2, scope='dual_cha_atten2')
    cha_attenfeat2 = conv_bn_relu_drop(x=cha_attenfeat2, kernal=(3, 3, 3, 64 // 2, 64 // 2), phase=phase, drop=drop,
                                       scope='dual_layer2_4')
    feat_sum2 = resnet_Add(pos_attenfeat2, cha_attenfeat2)
    sasc_output2 = conv_bn_relu_drop(x=feat_sum2, kernal=(1, 1, 1, 64 // 2, 64), phase=phase, drop=drop,
                                     scope='dual_layer2_5')
    # layer7->convolution
    layer7 = crop_and_concat(sasc_output2, deconv2)
    _, Z, H, W, _ = sasc_output2.get_shape().as_list()
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_1')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_2')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_3')
    layer7 = resnet_Add(x1=deconv2, x2=layer7)
    # layer7->deconvolution
    deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3')
    # dual attention module 3
    pos_attenfeat3 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32 // 2), phase=phase, drop=drop,
                                       scope='dual_layer3_1')
    pos_attenfeat3 = positionAttentionblock(pos_attenfeat3, 32 // 2, 32 // 2, scope='dual_pos_atten3')
    pos_attenfeat3 = conv_bn_relu_drop(x=pos_attenfeat3, kernal=(3, 3, 3, 32 // 2, 32 // 2), phase=phase, drop=drop,
                                       scope='dual_layer3_2')
    cha_attenfeat3 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32 // 2), phase=phase, drop=drop,
                                       scope='dual_layer3_3')
    cha_attenfeat3 = channelAttentionblock(cha_attenfeat3, scope='dual_cha_atten3')
    cha_attenfeat3 = conv_bn_relu_drop(x=cha_attenfeat3, kernal=(3, 3, 3, 32 // 2, 32 // 2), phase=phase, drop=drop,
                                       scope='dual_layer3_4')
    feat_sum3 = resnet_Add(pos_attenfeat3, cha_attenfeat3)
    sasc_output3 = conv_bn_relu_drop(x=feat_sum3, kernal=(1, 1, 1, 32 // 2, 32), phase=phase, drop=drop,
                                     scope='dual_layer3_5')
    # layer8->convolution
    layer8 = crop_and_concat(sasc_output3, deconv3)
    _, Z, H, W, _ = sasc_output3.get_shape().as_list()
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_1')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_2')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_3')
    layer8 = resnet_Add(x1=deconv3, x2=layer8)
    # layer8->deconvolution
    deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4')
    # dual attention module 4
    pos_attenfeat4 = conv_bn_relu_drop(x=layer1, kernal=(3, 3, 3, 16, 16 // 2), phase=phase, drop=drop,
                                       scope='dual_layer4_1')
    pos_attenfeat4 = positionAttentionblock(pos_attenfeat4, 16 // 2, 16 // 2, scope='dual_pos_atten4')
    pos_attenfeat4 = conv_bn_relu_drop(x=pos_attenfeat4, kernal=(3, 3, 3, 16 // 2, 16 // 2), phase=phase, drop=drop,
                                       scope='dual_layer4_2')
    cha_attenfeat4 = conv_bn_relu_drop(x=layer1, kernal=(3, 3, 3, 16, 16 // 2), phase=phase, drop=drop,
                                       scope='dual_layer4_3')
    cha_attenfeat4 = channelAttentionblock(cha_attenfeat4, scope='dual_cha_atten4')
    cha_attenfeat4 = conv_bn_relu_drop(x=cha_attenfeat4, kernal=(3, 3, 3, 16 // 2, 16 // 2), phase=phase, drop=drop,
                                       scope='dual_layer4_4')
    feat_sum4 = resnet_Add(pos_attenfeat4, cha_attenfeat4)
    sasc_output4 = conv_bn_relu_drop(x=feat_sum4, kernal=(1, 1, 1, 16 // 2, 16), phase=phase, drop=drop,
                                     scope='dual_layer4_5')
    # layer9->convolution
    layer9 = crop_and_concat(sasc_output4, deconv4)
    _, Z, H, W, _ = sasc_output4.get_shape().as_list()
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 16), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_1')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 16, 16), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_2')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 16, 16), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_3')
    layer9 = resnet_Add(x1=deconv4, x2=layer9)
    # layer9->output
    output_map = conv_sigmod(x=layer9, kernal=(1, 1, 1, 16, n_class), scope='output')
    return output_map
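
# --- Usage sketch (added for illustration; not part of the original file) ---
# Minimal TF 1.x graph construction for the model above. The 16x96x96 single-
# channel input and the placeholder names are assumptions; spatial sizes must
# be divisible by 16 (four stride-2 down-samplings). Because of the relative
# import at the top, this module is normally driven from a separate script.
if __name__ == '__main__':
    X = tf.placeholder(tf.float32, shape=[None, 16, 96, 96, 1], name='X')
    phase = tf.placeholder(tf.bool, name='phase')    # training flag for the normalization layer
    drop = tf.placeholder(tf.float32, name='drop')   # dropout keep probability (TF1 tf.nn.dropout)
    Y = _createdualattentionnet(X, image_z=16, image_width=96, image_height=96,
                                image_channel=1, phase=phase, drop=drop, n_class=1)
    print(Y)  # sigmoid probability map, shape (?, 16, 96, 96, 1)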
--------------------------------------------------------------------------------
/VnetFamily/FusionVnet/model_fusionvnet3d.py:
--------------------------------------------------------------------------------
'''
Fusion 3D V-Net: four parallel V-Net encoders fused at every scale.
'''
from .layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, weight_xavier_init,
                    bias_variable)
import tensorflow as tf


def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
                                  G=16, scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv


def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1],
                               activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W, 2) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
                                  G=16, scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv


def deconv_relu(x, kernal, samefeture=False, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1],
                               n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        conv = deconv3d(x, W, samefeture, True) + B
        conv = tf.nn.relu(conv)
        return conv


def conv_sigmoid(x, kernal, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W) + B
        conv = tf.nn.sigmoid(conv)
        return conv


def _createfusionnet(X1, X2, X3, X4, image_z, image_width, image_height, image_channel, phase, drop, n_class=1):
    inputX1 = tf.reshape(X1, [-1, image_z, image_width, image_height, image_channel])  # (batch, Z, H, W, C)
    inputX2 = tf.reshape(X2, [-1, image_z, image_width, image_height, image_channel])
    inputX3 = tf.reshape(X3, [-1, image_z, image_width, image_height, image_channel])
    inputX4 = tf.reshape(X4, [-1, image_z, image_width, image_height, image_channel])
    # Vnet model
    # layer1->convolution (one branch per input)
    layer0_1 = conv_bn_relu_drop(x=inputX1, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                                 scope='layer0_1')
    layer1_1 = conv_bn_relu_drop(x=layer0_1, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                                 scope='layer1_1')
    layer1_1 = resnet_Add(x1=layer0_1, x2=layer1_1)
    layer0_2 = conv_bn_relu_drop(x=inputX2, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                                 scope='layer0_2')
    layer1_2 = conv_bn_relu_drop(x=layer0_2, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                                 scope='layer1_2')
    layer1_2 = resnet_Add(x1=layer0_2, x2=layer1_2)
    layer0_3 = conv_bn_relu_drop(x=inputX3, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                                 scope='layer0_3')
    layer1_3 = conv_bn_relu_drop(x=layer0_3, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                                 scope='layer1_3')
    layer1_3 = resnet_Add(x1=layer0_3, x2=layer1_3)
    layer0_4 = conv_bn_relu_drop(x=inputX4, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                                 scope='layer0_4')
    layer1_4 = conv_bn_relu_drop(x=layer0_4, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                                 scope='layer1_4')
    layer1_4 = resnet_Add(x1=layer0_4, x2=layer1_4)

    layer1 = crop_and_concat(crop_and_concat(layer1_1, layer1_2), crop_and_concat(layer1_3, layer1_4))
    layer1 = conv_bn_relu_drop(x=layer1, kernal=(3, 3, 3, 16 * 4, 16), phase=phase, drop=drop,
                               scope='layer1')
    # down sampling1
    down1_1 = down_sampling(x=layer1_1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1_1')
    down1_2 = down_sampling(x=layer1_2, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1_2')
    down1_3 = down_sampling(x=layer1_3, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1_3')
    down1_4 = down_sampling(x=layer1_4, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1_4')
    # layer2->convolution
    layer2_1 = conv_bn_relu_drop(x=down1_1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_1')
    layer2_1 = conv_bn_relu_drop(x=layer2_1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_2')
    layer2_1 = resnet_Add(x1=down1_1, x2=layer2_1)
    layer2_2 = conv_bn_relu_drop(x=down1_2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_3')
    layer2_2 = conv_bn_relu_drop(x=layer2_2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_4')
    layer2_2 = resnet_Add(x1=down1_2, x2=layer2_2)
    layer2_3 = conv_bn_relu_drop(x=down1_3, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_5')
    layer2_3 = conv_bn_relu_drop(x=layer2_3, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_6')
    layer2_3 = resnet_Add(x1=down1_3, x2=layer2_3)
    layer2_4 = conv_bn_relu_drop(x=down1_4, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_7')
    layer2_4 = conv_bn_relu_drop(x=layer2_4, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                                 scope='layer2_8')
    layer2_4 = resnet_Add(x1=down1_4, x2=layer2_4)

    layer2 = crop_and_concat(crop_and_concat(layer2_1, layer2_2), crop_and_concat(layer2_3, layer2_4))
    layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32 * 4, 32), phase=phase, drop=drop,
                               scope='layer2')
    # down sampling2
    down2_1 = down_sampling(x=layer2_1, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2_1')
    down2_2 = down_sampling(x=layer2_2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2_2')
    down2_3 = down_sampling(x=layer2_3, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2_3')
    down2_4 = down_sampling(x=layer2_4, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2_4')
    # layer3->convolution
    layer3_1 = conv_bn_relu_drop(x=down2_1, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_1')
    layer3_1 = conv_bn_relu_drop(x=layer3_1, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_2')
    layer3_1 = conv_bn_relu_drop(x=layer3_1, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_2_1')
    layer3_1 = resnet_Add(x1=down2_1, x2=layer3_1)
    layer3_2 = conv_bn_relu_drop(x=down2_2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_3')
    layer3_2 = conv_bn_relu_drop(x=layer3_2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_4')
    layer3_2 = conv_bn_relu_drop(x=layer3_2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_4_1')
    layer3_2 = resnet_Add(x1=down2_2, x2=layer3_2)
    layer3_3 = conv_bn_relu_drop(x=down2_3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_5')
    layer3_3 = conv_bn_relu_drop(x=layer3_3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_6')
    layer3_3 = conv_bn_relu_drop(x=layer3_3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_6_1')
    layer3_3 = resnet_Add(x1=down2_3, x2=layer3_3)
    layer3_4 = conv_bn_relu_drop(x=down2_4, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_7')
    layer3_4 = conv_bn_relu_drop(x=layer3_4, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_8')
    layer3_4 = conv_bn_relu_drop(x=layer3_4, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                                 scope='layer3_8_1')
    layer3_4 = resnet_Add(x1=down2_4, x2=layer3_4)

    layer3 = crop_and_concat(crop_and_concat(layer3_1, layer3_2), crop_and_concat(layer3_3, layer3_4))
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64 * 4, 64), phase=phase, drop=drop,
                               scope='layer3')
    # down sampling3
    down3_1 = down_sampling(x=layer3_1, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3_1')
    down3_2 = down_sampling(x=layer3_2, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3_2')
    down3_3 = down_sampling(x=layer3_3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3_3')
    down3_4 = down_sampling(x=layer3_4, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3_4')
    # layer4->convolution
    layer4_1 = conv_bn_relu_drop(x=down3_1, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_1')
    layer4_1 = conv_bn_relu_drop(x=layer4_1, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_2')
    layer4_1 = conv_bn_relu_drop(x=layer4_1, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_2_1')
    layer4_1 = resnet_Add(x1=down3_1, x2=layer4_1)
    layer4_2 = conv_bn_relu_drop(x=down3_2, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_3')
    layer4_2 = conv_bn_relu_drop(x=layer4_2, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_4')
    layer4_2 = conv_bn_relu_drop(x=layer4_2, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_4_1')
    layer4_2 = resnet_Add(x1=down3_2, x2=layer4_2)
    layer4_3 = conv_bn_relu_drop(x=down3_3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_5')
    layer4_3 = conv_bn_relu_drop(x=layer4_3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_6')
    layer4_3 = conv_bn_relu_drop(x=layer4_3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_6_1')
    layer4_3 = resnet_Add(x1=down3_3, x2=layer4_3)
    layer4_4 = conv_bn_relu_drop(x=down3_4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_7')
    layer4_4 = conv_bn_relu_drop(x=layer4_4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_8')
    layer4_4 = conv_bn_relu_drop(x=layer4_4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                                 scope='layer4_8_1')
    layer4_4 = resnet_Add(x1=down3_4, x2=layer4_4)

    layer4 = crop_and_concat(crop_and_concat(layer4_1, layer4_2), crop_and_concat(layer4_3, layer4_4))
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128 * 4, 128), phase=phase, drop=drop,
                               scope='layer4')
    # down sampling4 (note: all four branches now start from the fused layer4)
    down4_1 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4_1')
    down4_2 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4_2')
    down4_3 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4_3')
    down4_4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4_4')
    # layer5->convolution
    layer5_1 = conv_bn_relu_drop(x=down4_1, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_1')
    layer5_1 = conv_bn_relu_drop(x=layer5_1, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_2')
    layer5_1 = conv_bn_relu_drop(x=layer5_1, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_2_1')
    layer5_1 = resnet_Add(x1=down4_1, x2=layer5_1)
    layer5_2 = conv_bn_relu_drop(x=down4_2, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_3')
    layer5_2 = conv_bn_relu_drop(x=layer5_2, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_4')
    layer5_2 = conv_bn_relu_drop(x=layer5_2, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_4_1')
    layer5_2 = resnet_Add(x1=down4_2, x2=layer5_2)
    layer5_3 = conv_bn_relu_drop(x=down4_3, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_5')
    layer5_3 = conv_bn_relu_drop(x=layer5_3, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_6')
    layer5_3 = conv_bn_relu_drop(x=layer5_3, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_6_1')
    layer5_3 = resnet_Add(x1=down4_3, x2=layer5_3)
    layer5_4 = conv_bn_relu_drop(x=down4_4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_7')
    layer5_4 = conv_bn_relu_drop(x=layer5_4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_8')
    layer5_4 = conv_bn_relu_drop(x=layer5_4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                                 scope='layer5_8_1')
    layer5_4 = resnet_Add(x1=down4_4, x2=layer5_4)

    layer5 = crop_and_concat(crop_and_concat(layer5_1, layer5_2), crop_and_concat(layer5_3, layer5_4))
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256 * 4, 256), phase=phase, drop=drop,
                               scope='layer5')
    # layer5->deconvolution
    deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1')
    # layer6->convolution
    layer6 = crop_and_concat(layer4, deconv1)
    _, Z, H, W, _ = layer4.get_shape().as_list()
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_1')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_2')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_3')
    layer6 = resnet_Add(x1=deconv1, x2=layer6)
    # layer6->deconvolution
    deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2')
    # layer7->convolution
    layer7 = crop_and_concat(layer3, deconv2)
    _, Z, H, W, _ = layer3.get_shape().as_list()
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_1')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_2')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_3')
    layer7 = resnet_Add(x1=deconv2, x2=layer7)
    # layer7->deconvolution
    deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3')
    # layer8->convolution
    layer8 = crop_and_concat(layer2, deconv3)
    _, Z, H, W, _ = layer2.get_shape().as_list()
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_1')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_2')
    layer8 = resnet_Add(x1=deconv3, x2=layer8)
    # layer8->deconvolution
    deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4')
    # layer9->convolution
    layer9 = crop_and_concat(layer1, deconv4)
    _, Z, H, W, _ = layer1.get_shape().as_list()
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 16), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_1')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 16, 16), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_2')
    layer9 = resnet_Add(x1=deconv4, x2=layer9)
    # layer9->output
    output_map = conv_sigmoid(x=layer9, kernal=(1, 1, 1, 16, n_class), scope='output')

    return output_map
--------------------------------------------------------------------------------
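
# --- Usage sketch (added for illustration; not part of the original dump) ---
# Minimal TF 1.x driver for the four-input fusion model above. The input size
# (16x96x96, single channel), the names, and the package-style import of the
# module-private _createfusionnet are all assumptions for this sketch.
import tensorflow as tf
from VnetFamily.FusionVnet.model_fusionvnet3d import _createfusionnet

X1 = tf.placeholder(tf.float32, [None, 16, 96, 96, 1], name='X1')  # e.g. one image modality per input
X2 = tf.placeholder(tf.float32, [None, 16, 96, 96, 1], name='X2')
X3 = tf.placeholder(tf.float32, [None, 16, 96, 96, 1], name='X3')
X4 = tf.placeholder(tf.float32, [None, 16, 96, 96, 1], name='X4')
phase = tf.placeholder(tf.bool, name='phase')   # training flag for the normalization layer
drop = tf.placeholder(tf.float32, name='drop')  # dropout keep probability
Y = _createfusionnet(X1, X2, X3, X4, image_z=16, image_width=96, image_height=96,
                     image_channel=1, phase=phase, drop=drop, n_class=1)
print(Y)  # sigmoid probability map, shape (?, 16, 96, 96, 1)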