├── .gitignore
├── DIRNet-mxnet
│   ├── Cardiac_preprocess.py
│   ├── README.md
│   ├── RegistrationIterator.py
│   ├── convnet.py
│   ├── custom_loss.py
│   ├── evaluate.py
│   ├── helper.py
│   ├── playground.py
│   ├── playground2.py
│   ├── requirements.txt
│   └── similarity.py
└── DIRNet-tensorflow
    ├── AffineST.py
    ├── PlotLogs.py
    ├── README.md
    ├── Resnet_model.py
    ├── WarpST.py
    ├── __pycache__
    │   ├── WarpST.cpython-36.pyc
    │   ├── bicubic_interp.cpython-36.pyc
    │   ├── config.cpython-36.pyc
    │   ├── data.cpython-36.pyc
    │   ├── models.cpython-36.pyc
    │   └── ops.cpython-36.pyc
    ├── bicubic_interp.py
    ├── ckpt
    │   ├── checkpoint
    │   ├── model.ckpt.data-00000-of-00001
    │   ├── model.ckpt.index
    │   └── model.ckpt.meta
    ├── config.py
    ├── create_file_structure.py
    ├── data.py
    ├── data_org.py
    ├── deploy.py
    ├── label.txt
    ├── misc
    │   └── DIRNet.png
    ├── models.py
    ├── ops.py
    ├── tf_playground.py
    └── train.py

/.gitignore:
--------------------------------------------------------------------------------
 1 | .data/*
 2 | .data/
 3 | DIRNet-tensorflow/ckpt/
 4 | DIRNet-tensorflow/ckpt/*
 5 | DIRNet-tensorflow/tf_impl_rslt_all/*
 6 | DIRNet-tensorflow/tf_impl_rslt_all/
 7 | DIRNet-tensorflow/tf_impl_rslt_fix/*
 8 | DIRNet-tensorflow/tf_impl_rslt_fix/
 9 | DIRNet-tensorflow/tf_impl_rslt_mov/
10 | DIRNet-tensorflow/tf_impl_rslt_mov/*
11 | DIRNet-tensorflow/ckpt/
12 | # PyCharm stuff
13 | .idea/*
--------------------------------------------------------------------------------
/DIRNet-mxnet/Cardiac_preprocess.py:
--------------------------------------------------------------------------------
 1 | import sys
 2 | from os import listdir, mkdir
 3 | from os.path import isfile, join
 4 | import scipy.ndimage as ndimage
 5 | import numpy as np
 6 | import scipy.misc as misc
 7 |
 8 |
 9 | def find_moving_img(arr, start_idx, fixed_name):
10 |     # patient035_frame01.nz.10.png
11 |     patient_id = fixed_name[7:10]
12 |     slice_id = fixed_name[22:24]
13 |     for i in range(len(arr)):
14 |         idx = (i + start_idx) % len(arr)  # iterate through the whole array, but don't start at 0
15 |         moving_name = arr[idx]
16 |         if patient_id == moving_name[7:10] and slice_id == moving_name[22:24]:
17 |             return moving_name
18 |     return None
19 |
20 |
21 | if __name__ == '__main__':
22 |     '''
23 |     Reads the fixed (ED) cardio images, looks for a corresponding moving image (ES) by name (same patient, same slice),
24 |     rescales both and writes them as greyscale to disk
25 |     '''
26 |     path_to_root = './Registration/'
27 |     path_to_root = '/home/adrian/Documents/dl2/Cardiac/'
28 |     path_fixed = join(path_to_root, 'ED')
29 |     path_moving = join(path_to_root, 'ES')
30 |     out_fixed = join(path_to_root, 'ED_rescaled')
31 |     out_mov = join(path_to_root, 'ES_rescaled')
32 |     mkdir(out_fixed)
33 |     mkdir(out_mov)
34 |     shape = (222, 247)
35 |     i = 0
36 |
37 |     onlyfiles_fixed = [f for f in listdir(path_fixed) if isfile(join(path_fixed, f))]
38 |     onlyfiles_moving = [f for f in listdir(path_moving) if isfile(join(path_moving, f))]
39 |     for i, fixed in enumerate(onlyfiles_fixed):
40 |         if fixed.endswith('.png'):
41 |             moving = find_moving_img(onlyfiles_moving, i, fixed)
42 |             assert moving is not None  # we have to search for the matching moving image; ED and ES do not contain the same number of images
43 |
44 |             abspath = join(path_fixed, fixed)
45 |             pic_fix = ndimage.imread(abspath, flatten=True)
46 |             pic_fix = misc.imresize(pic_fix, (shape[0], shape[1]))
47 |             misc.imsave(arr=pic_fix, name=join(out_fixed, fixed))
48 |
49 |             abspath = join(path_moving, moving)
50 |             pic_mov = ndimage.imread(abspath, flatten=True)
51 |             pic_mov = misc.imresize(pic_mov, (shape[0], shape[1]))
52 |             misc.imsave(arr=pic_mov, name=join(out_mov, moving))
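            # (Clarifying note: the name-based pairing above relies on a fixed-width
            # naming scheme. For the example name from find_moving_img's comment,
            # 'patient035_frame01.nz.10.png', name[7:10] -> '035' is the patient id
            # and name[22:24] -> '10' is the slice id, so every file must follow
            # exactly this pattern for the matching to work.)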
53 | print(str(i) + 'pics rescaled') -------------------------------------------------------------------------------- /DIRNet-mxnet/README.md: -------------------------------------------------------------------------------- 1 | # DIRNet 2 | 3 | This is an Mxnet implementation of the DIRNet -------------------------------------------------------------------------------- /DIRNet-mxnet/RegistrationIterator.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from collections import OrderedDict, namedtuple 3 | 4 | import sys 5 | import ctypes 6 | import logging 7 | import threading 8 | try: 9 | import h5py 10 | except ImportError: 11 | h5py = None 12 | import numpy as np 13 | #from .base import _LIB 14 | #from .base import c_array, c_str, mx_uint, py_str 15 | #from .base import DataIterHandle, NDArrayHandle 16 | #from .base import mx_real_t 17 | #from .base import check_call, build_param_doc as _build_param_doc 18 | from mxnet.ndarray import NDArray 19 | from mxnet.ndarray.sparse import CSRNDArray 20 | #from .ndarray import _ndarray_cls 21 | from mxnet.ndarray import array 22 | from mxnet.ndarray import concatenate 23 | import mxnet as mx 24 | from mxnet.io import DataDesc 25 | from mxnet.io import DataBatch 26 | from mxnet.io import DataIter 27 | from os import listdir 28 | from os.path import isfile, join 29 | import scipy.ndimage as ndimage 30 | import scipy.misc as misc 31 | 32 | 33 | # The only difference to the mxnet.io NDArrayIter is that provide_label returns None, as we do not have any labels 34 | # This is needed to avoid an assertion error, see https://github.com/apache/incubator-mxnet/issues/8910 35 | class RegistrationIter(DataIter): 36 | 37 | def find_moving_img(self, arr, start_idx, fixed_name): 38 | # patient035_frame01.nz.10.png 39 | patient_id = fixed_name[7:10] 40 | slice_id = fixed_name[22:24] 41 | for i in range(len(arr)): 42 | idx = (i + start_idx) % len(arr) # iterate through the whole array but dont start at 0 43 | moving_name = arr[idx] 44 | if patient_id == moving_name[7:10] and slice_id == moving_name[22:24]: 45 | return moving_name 46 | return None 47 | 48 | def read_cardio_dirs_to_ndarray(self, path_fixed, path_moving, shape): 49 | onlyfiles_fixed = [f for f in listdir(path_fixed) if isfile(join(path_fixed, f))] 50 | onlyfiles_moving = [f for f in listdir(path_moving) if isfile(join(path_moving, f))] 51 | # out_fix = np.empty(shape=(shape[1], shape[2])) 52 | # out_mov = np.empty(shape=(shape[1], shape[2])) 53 | arrays_fix = [] 54 | arrays_mov = [] 55 | for i, fixed in enumerate(onlyfiles_fixed): 56 | if fixed.endswith('.png'): 57 | moving = self.find_moving_img(onlyfiles_moving, i, fixed) 58 | assert moving is not None 59 | 60 | abspath = join(path_fixed, fixed) 61 | pic_fix = ndimage.imread(abspath, flatten=True) 62 | pic_fix = misc.imresize(pic_fix, (shape[1], shape[2])) 63 | 64 | abspath = join(path_moving, moving) 65 | pic_mov = ndimage.imread(abspath, flatten=True) 66 | pic_mov = misc.imresize(pic_mov, (shape[1], shape[2])) 67 | arrays_fix.append(np.stack([pic_fix, pic_mov])) 68 | #arrays_mov.append(pic_mov) 69 | return arrays_fix 70 | # sh = np.shape(arrays_fix) 71 | # out = np.stack(arrays_fix) 72 | # sh2 = np.shape(out) 73 | # return (np.stack(arrays_fix), np.stack(arrays_mov)) 74 | # return (np.stack(arrays_fix), np.stack(arrays_mov)) 75 | 76 | """Returns an iterator for ``mx.nd.NDArray``, ``numpy.ndarray``, ``h5py.Dataset`` 77 | or ``mx.nd.sparse.CSRNDArray``. 
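
    (Each sample produced by this iterator pairs a fixed ED image with its
    moving ES counterpart: read_cardio_dirs_to_ndarray stacks the two into a
    single 'data' array of shape (2, H, W), and provide_label returns None,
    as explained in the class comment above.)
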
78 | Parameters 79 | ---------- 80 | data: array or list of array or dict of string to array 81 | The input data. 82 | label: array or list of array or dict of string to array, optional 83 | The input label. 84 | batch_size: int 85 | Batch size of data. 86 | shuffle: bool, optional 87 | Whether to shuffle the data. 88 | Only supported if no h5py.Dataset inputs are used. 89 | last_batch_handle : str, optional 90 | How to handle the last batch. This parameter can be 'pad', 'discard' or 91 | 'roll_over'. 'roll_over' is intended for training and can cause problems 92 | if used for prediction. 93 | data_name : str, optional 94 | The data name. 95 | label_name : str, optional 96 | The label name. 97 | """ 98 | def __init__(self, ES_path, ED_path, shape, batch_size=1, shuffle=False, 99 | last_batch_handle='pad'): 100 | super(RegistrationIter, self).__init__(batch_size) 101 | 102 | data = self.read_cardio_dirs_to_ndarray(path_fixed=ED_path, path_moving=ES_path, shape=shape) 103 | self.data = mx.io._init_data(data, allow_empty=False, default_name='data') 104 | #self.data_moving = mx.io._init_data(data[1], allow_empty=False, default_name='data_fixed') 105 | 106 | self.idx = np.arange(self.data[0][1].shape[0]) 107 | # shuffle data 108 | if shuffle: 109 | np.random.shuffle(self.idx) 110 | self.data = [(k, array(v.asnumpy()[self.idx], v.context)) 111 | if not (isinstance(v, h5py.Dataset) 112 | if h5py else False) else (k, v) 113 | for k, v in self.data] 114 | self.label = [(k, array(v.asnumpy()[self.idx], v.context)) 115 | if not (isinstance(v, h5py.Dataset) 116 | if h5py else False) else (k, v) 117 | for k, v in self.label] 118 | 119 | # batching 120 | if last_batch_handle == 'discard': 121 | new_n = self.data[0][1].shape[0] - self.data[0][1].shape[0] % batch_size 122 | self.idx = self.idx[:new_n] 123 | 124 | # self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label] 125 | #self.num_source = len(self.data_list) 126 | self.num_data = self.idx.shape[0] 127 | assert self.num_data >= batch_size, \ 128 | "batch_size needs to be smaller than data size." 129 | self.cursor = -batch_size 130 | self.batch_size = batch_size 131 | self.last_batch_handle = last_batch_handle 132 | 133 | @property 134 | def provide_data(self): 135 | """The name and shape of data provided by this iterator.""" 136 | return [ 137 | DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype) 138 | for k, v in self.data 139 | ] 140 | 141 | @property 142 | def provide_label(self): 143 | """The name and shape of label provided by this iterator.""" 144 | return None 145 | 146 | def hard_reset(self): 147 | """Ignore roll over data and set to start.""" 148 | self.cursor = -self.batch_size 149 | 150 | def reset(self): 151 | if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data: 152 | self.cursor = -self.batch_size + (self.cursor%self.num_data)%self.batch_size 153 | else: 154 | self.cursor = -self.batch_size 155 | 156 | def iter_next(self): 157 | self.cursor += self.batch_size 158 | return self.cursor < self.num_data 159 | 160 | def next(self): 161 | if self.iter_next(): 162 | return DataBatch(data=self.getdata(), label=self.getlabel(), \ 163 | pad=self.getpad(), index=None) 164 | else: 165 | raise StopIteration 166 | 167 | def _getdata(self, data_source): 168 | """Load data from underlying arrays, internal use only.""" 169 | assert(self.cursor < self.num_data), "DataIter needs reset." 
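        # (Clarifying note on the branch below: when a full batch remains, a plain
        # slice is returned; otherwise the final partial batch wraps around to the
        # beginning of the data, and getpad() reports how many of the trailing
        # samples are padding. The h5py branches sort the indices first because
        # h5py only supports indexing in increasing order.)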
170 | if self.cursor + self.batch_size <= self.num_data: 171 | return [ 172 | # np.ndarray or NDArray case 173 | x[1][self.cursor:self.cursor + self.batch_size] 174 | if isinstance(x[1], (np.ndarray, NDArray)) else 175 | # h5py (only supports indices in increasing order) 176 | array(x[1][sorted(self.idx[ 177 | self.cursor:self.cursor + self.batch_size])][[ 178 | list(self.idx[self.cursor: 179 | self.cursor + self.batch_size]).index(i) 180 | for i in sorted(self.idx[ 181 | self.cursor:self.cursor + self.batch_size]) 182 | ]]) for x in data_source 183 | ] 184 | else: 185 | pad = self.batch_size - self.num_data + self.cursor 186 | return [ 187 | # np.ndarray or NDArray case 188 | concatenate([x[1][self.cursor:], x[1][:pad]]) 189 | if isinstance(x[1], (np.ndarray, NDArray)) else 190 | # h5py (only supports indices in increasing order) 191 | concatenate([ 192 | array(x[1][sorted(self.idx[self.cursor:])][[ 193 | list(self.idx[self.cursor:]).index(i) 194 | for i in sorted(self.idx[self.cursor:]) 195 | ]]), 196 | array(x[1][sorted(self.idx[:pad])][[ 197 | list(self.idx[:pad]).index(i) 198 | for i in sorted(self.idx[:pad]) 199 | ]]) 200 | ]) for x in data_source 201 | ] 202 | 203 | def getdata(self): 204 | return self._getdata(self.data) 205 | 206 | def getlabel(self): 207 | return None 208 | 209 | def getpad(self): 210 | if self.last_batch_handle == 'pad' and \ 211 | self.cursor + self.batch_size > self.num_data: 212 | return self.cursor + self.batch_size - self.num_data 213 | else: 214 | return 0 -------------------------------------------------------------------------------- /DIRNet-mxnet/convnet.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Implementation of the following paper: 3 | https://arxiv.org/abs/1704.06065 4 | ''' 5 | import logging 6 | logging.getLogger().setLevel(logging.DEBUG) 7 | import mxnet as mx 8 | import numpy as np 9 | import os 10 | import helper as hlp 11 | from os import listdir, mkdir 12 | from os.path import isfile, join 13 | import scipy.ndimage as ndimage 14 | import sys 15 | 16 | 17 | def conv_net_regressor(shape, use_additional_pool=False, bn_mom=0.9): 18 | # We have 2 data sources and concatenate them 19 | data_fixed = mx.sym.Variable(name='data_fixed') 20 | data_moving = mx.sym.Variable(name='data_moving') 21 | concat_data = mx.sym.concat(*[data_fixed, data_moving]) 22 | batched = mx.sym.BatchNorm(data=concat_data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data') 23 | # The number of kernels per layer can be of arbitrary size, but the number of kernels of the output layer is 24 | # determined by the dimensionality of the input images 25 | filter_list = [16, 32, 64, 128] 26 | # four alternating layers of 3 × 3 convolutions with 0-padding and 2 × 2 downsampling layers 27 | for i in range(4): 28 | if i == 0: 29 | body = mx.sym.Convolution(data=batched, num_filter=filter_list[i], kernel=(3, 3), stride=(1, 1), pad=(0, 0), 30 | no_bias=True, name="conv" + str(i)) 31 | else: 32 | body = mx.sym.Convolution(data=body, num_filter=filter_list[i], kernel=(3, 3), stride=(1, 1), pad=(0, 0), 33 | no_bias=True, name="conv" + str(i)) 34 | body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn' + str(i)) 35 | # TO DO: the original authors use exponential linear units as activation 36 | body = mx.sym.LeakyReLU(data=body, act_type='elu', name='relu' + str(i)) 37 | body = mx.sym.Pooling(data=body, kernel=(2, 2), stride=(1, 1), pad=(1, 1), pool_type='avg') 38 | # Subsequently, three 1 × 1 
convolutional layers are applied to make the ConvNet regressor fully convolutional
39 |     for k in range(2):  # two 1x1 conv layers here, while the paper prescribes three
40 |         i = k + 4
41 |         body = mx.sym.Convolution(data=body, num_filter=256, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
42 |                                   no_bias=True, name="conv" + str(i))
43 |         body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn' + str(i))
44 |         # body = mx.sym.Activation(data=body, act_type='relu', name='relu' + str(i))
45 |         body = mx.sym.LeakyReLU(data=body, act_type='elu', name='relu' + str(i))
46 |         if use_additional_pool:
47 |             body = mx.sym.Pooling(data=body, kernel=(2, 2), stride=(2, 2), pad=(1, 1), pool_type='avg')
48 |     # body = mx.sym.Pooling(data=body, kernel=(2, 2), stride=(2, 2), pad=(1, 1), pool_type='avg')
49 |     flatten = mx.sym.flatten(data=body)
50 |     fc3 = mx.sym.FullyConnected(data=flatten, num_hidden=6)
51 |     fc3 = mx.sym.Activation(data=fc3, act_type='tanh', name='tanh_after_fc')
52 |     # The Spatial Transformer applies an affine transformation to the moving image,
53 |     # parametrized by the output of the body network
54 |     stnet = mx.sym.SpatialTransformer(data=data_moving, loc=fc3, target_shape=(shape[2], shape[3]), transform_type='affine',
55 |                                       sampler_type="bilinear", name='SpatialTransformer')
56 |     # cor = mx.sym.Correlation(data1=stnet, data2=data_fixed, kernel_size=28, stride1=2, stride2=2, pad_size=0, max_displacement=0)
57 |     # cor2 = mx.sym.Correlation(data1=data_fixed, data2=stnet, kernel_size=28, stride1=1, stride2=1, max_displacement=0)
58 |     # loss = mx.sym.MakeLoss(hlp.ncc(stnet, data_fixed), normalization='batch')
59 |     loss = mx.sym.MakeLoss(hlp.rmse(stnet, data_fixed), normalization='batch')
60 |     output = mx.sym.Group([mx.sym.BlockGrad(fc3), mx.sym.BlockGrad(stnet), mx.sym.BlockGrad(fc3), loss])  # fc3 is grouped twice so that outputs[0] and outputs[2] both expose the affine parameters
61 |     return output
62 |
63 |
64 | def custom_training_simple_bind(symbol, iterators, ctx=mx.gpu(), epochs=10, save_path='./', lr=0.0000001):
65 |     '''
66 |     Our own training method for the network.
using the low-level simple_bind API 67 | Many code snippets are from https://github.com/apache/incubator-mxnet/blob/5ff545f2345f9b607b81546a168665bd63d02d9f/example/notebooks/simple_bind.ipynb 68 | :param symbol: 69 | :param train_iter: 70 | :return: 71 | ''' 72 | 73 | # helper function 74 | def Init(key, arr): 75 | if "fullyconnected0_weight" in key: 76 | # initialize with identity transformation 77 | arr[:] = 0 78 | elif "fullyconnected0_bias" in key: 79 | # initialize with identity transformation 80 | initial = np.array([[1., 0, 0], [0, 1., 0]]) 81 | initial = initial.astype('float32').flatten() 82 | arr[:] = initial 83 | elif "weight" in key: 84 | arr[:] = mx.random.uniform(-0.07, 0.07, arr.shape) 85 | # or 86 | # arr[:] = np.random.uniform(-0.07, 0.07, arr.shape) 87 | elif "gamma" in key: 88 | # for batch norm slope 89 | arr[:] = 1.0 90 | elif "bias" in key: 91 | arr[:] = 0 92 | elif "beta" in key: 93 | # for batch norm bias 94 | arr[:] = 0 95 | 96 | def customSGD(key, weight, grad, lr=lr, grad_norm=1): 97 | # key is key for weight, we can customize update rule 98 | # weight is weight array 99 | # grad is grad array 100 | # lr is learning rate 101 | # grad_norm is scalar to norm gradient, usually it is batch_size 102 | norm = 1.0 / grad_norm 103 | # here we can bias' learning rate 2 times larger than weight 104 | if "weight" in key or "gamma" in key: 105 | weight[:] -= lr * (grad * norm) 106 | elif "bias" in key or "beta" in key: 107 | weight[:] -= 2.0 * lr * (grad * norm) 108 | else: 109 | pass 110 | 111 | executor = symbol.simple_bind(ctx=ctx, data_moving=(1, 1, 28, 28), data_fixed=(1, 1, 28, 28), 112 | label_shapes=None, grad_req='write') 113 | # get argument arrays 114 | arg_arrays = executor.arg_arrays 115 | # get grad arrays 116 | grad_arrays = executor.grad_arrays 117 | # get aux_states arrays. Note: currently only BatchNorm symbol has auxiliary states, which is moving_mean and moving_var 118 | aux_arrays = executor.aux_arrays 119 | # get outputs from executor 120 | output_arrays = executor.outputs 121 | # The sequence of arrays is in same sequence of symbol arguments 122 | args = dict(zip(symbol.list_arguments(), arg_arrays)) # dict containing parameter names and values 123 | grads = dict(zip(symbol.list_arguments(), grad_arrays)) 124 | outputs = dict(zip(symbol.list_outputs(), output_arrays)) 125 | aux_states = dict(zip(symbol.list_auxiliary_states(), aux_arrays)) 126 | 127 | # initialize parameters by uniform random numbers 128 | #mx.random.seed(2) 129 | for key, arr in args.items(): 130 | Init(key, arr) 131 | keys = symbol.list_arguments() 132 | train_iter = iterators[0] 133 | eval_iter = iterators[1] 134 | debug = False 135 | # train 5 epochs, i.e. 
going over the data iter one pass 136 | for epoch in range(epochs): 137 | train_iter.reset() 138 | fc3 = None 139 | avg_cor = 0 140 | i = 0 141 | fixed_img_data = train_iter.next().data 142 | #hlp.printNumpyArray(fixed_img_data[0][0][0], thresh=0) 143 | for batch in train_iter: 144 | i += 1 145 | executor.forward(is_train=True, data_fixed=fixed_img_data[0], data_moving=batch.data[0]) 146 | cor1 = executor.outputs[0] 147 | stnet = executor.outputs[1] 148 | loss = executor.outputs[3] 149 | fc3 = executor.outputs[2] 150 | if debug: 151 | if np.sum(stnet.asnumpy()) == 0: 152 | print(' STN produces empty feature map!') 153 | else: 154 | print(' STN seems to work') 155 | #sh = stnet.shape 156 | print("Affine transformation parameters Theta: " + str(fc3)) 157 | print("loss " + str(loss.asnumpy()[0])) 158 | #hlp.printNumpyArray(stnet.asnumpy()[0][0], thresh=0) 159 | hlp.printNontZeroGradients(grads) 160 | #if loss != -1.0: # otherwise ncc gradient is NaN 161 | executor.backward() # compute gradients 162 | for key in keys: # update parameters 163 | customSGD(key, args[key], grads[key]) 164 | aval = loss.asnumpy()[0] 165 | avg_cor += float(aval) 166 | print("Affine transformation parameters Theta: " + str(fc3)) 167 | print('Epoch %d, Training avg rmse %s ' % (epoch, avg_cor/i)) 168 | eval_iter.reset() 169 | avg_cor = 0 170 | i = 0 171 | for batch in eval_iter: 172 | executor.forward(is_train=True, data_fixed=fixed_img_data[0], data_moving=batch.data[0]) 173 | loss = executor.outputs[3] 174 | aval = loss.asnumpy()[0] 175 | avg_cor += float(aval) 176 | i += 1 177 | print('Epoch %d, Evaluation avg rmse %s ' % (epoch, avg_cor/i)) 178 | 179 | # # save some to see what the model does 180 | # hlp.saveArrayAsImg(fixed_img_data[0].asnumpy()[0][0], "./fixed.png") 181 | # train_iter.reset() 182 | # for l in range(7): 183 | # batch = train_iter.next() 184 | # moving = batch.data[0] 185 | # executor.forward(is_train=True, data_fixed=fixed_img_data[0], data_moving=moving) 186 | # stnet = executor.outputs[1] 187 | # hlp.saveArrayAsImg(stnet.asnumpy()[0][0], './{0}0warped_{0}.png'.format(l)) 188 | # hlp.saveArrayAsImg(moving.asnumpy()[0][0], './{0}1original_{0}.png'.format(l)) 189 | return executor 190 | 191 | 192 | def cardiac_training(symbol, img_shape, data, ctx=mx.gpu(), epochs=10, lr=0.00000000001): 193 | ''' 194 | Our own training method for the network. 
using the low-level simple_bind API 195 | Many code snippets are from https://github.com/apache/incubator-mxnet/blob/5ff545f2345f9b607b81546a168665bd63d02d9f/example/notebooks/simple_bind.ipynb 196 | nearly the same as training on mnist, just different iterators 197 | :param symbol: 198 | :param train_iter: 199 | :return: 200 | ''' 201 | 202 | # helper function 203 | def Init(key, arr): 204 | if "fullyconnected0_weight" in key: 205 | # initialize with identity transformation 206 | arr[:] = 0 207 | elif "fullyconnected0_bias" in key: 208 | # initialize with identity transformation 209 | initial = np.array([[1., 0, 0], [0, 1., 0]]) 210 | initial = initial.astype('float32').flatten() 211 | arr[:] = initial 212 | elif "weight" in key: 213 | arr[:] = mx.random.uniform(-0.07, 0.07, arr.shape) 214 | # or 215 | # arr[:] = np.random.uniform(-0.07, 0.07, arr.shape) 216 | elif "gamma" in key: 217 | # for batch norm slope 218 | arr[:] = 1.0 219 | elif "bias" in key: 220 | arr[:] = 0 221 | elif "beta" in key: 222 | # for batch norm bias 223 | arr[:] = 0 224 | 225 | def customSGD(key, weight, grad, lr=lr, grad_norm=1): 226 | # key is key for weight, we can customize update rule 227 | # weight is weight array 228 | # grad is grad array 229 | # lr is learning rate 230 | # grad_norm is scalar to norm gradient, usually it is batch_size 231 | norm = 1.0 / grad_norm 232 | # here we can bias' learning rate 2 times larger than weight 233 | if "weight" in key or "gamma" in key: 234 | weight[:] -= lr * (grad * norm) 235 | elif "bias" in key or "beta" in key: 236 | weight[:] -= 2.0 * lr * (grad * norm) 237 | else: 238 | pass 239 | 240 | executor = symbol.simple_bind(ctx=ctx, data_moving=img_shape, data_fixed=img_shape, 241 | label_shapes=None, grad_req='write') 242 | # get argument arrays 243 | arg_arrays = executor.arg_arrays 244 | # get grad arrays 245 | grad_arrays = executor.grad_arrays 246 | # get aux_states arrays. 
Note: currently only BatchNorm symbol has auxiliary states, which is moving_mean and moving_var 247 | aux_arrays = executor.aux_arrays 248 | # get outputs from executor 249 | output_arrays = executor.outputs 250 | # The sequence of arrays is in same sequence of symbol arguments 251 | args = dict(zip(symbol.list_arguments(), arg_arrays)) # dict containing parameter names and values 252 | grads = dict(zip(symbol.list_arguments(), grad_arrays)) 253 | outputs = dict(zip(symbol.list_outputs(), output_arrays)) 254 | aux_states = dict(zip(symbol.list_auxiliary_states(), aux_arrays)) 255 | 256 | # initialize parameters by uniform random numbers 257 | #mx.random.seed(2) 258 | for key, arr in args.items(): 259 | Init(key, arr) 260 | keys = symbol.list_arguments() 261 | debug = False 262 | data_fixed = np.empty((1, 1, img_shape[2], img_shape[3])) 263 | data_mov = np.empty((1, 1, img_shape[2], img_shape[3])) 264 | for epoch in range(epochs): 265 | fc3 = None 266 | avg_cor = 0 267 | i = 0 268 | for batch in data: 269 | i += 1 270 | data_fixed[0][0] = batch[0] 271 | data_mov[0][0] = batch[1] 272 | executor.forward(is_train=True, data_fixed=data_fixed, data_moving=data_mov) 273 | stnet = executor.outputs[1] 274 | loss = executor.outputs[3] 275 | fc3 = executor.outputs[2] 276 | if debug: 277 | if np.sum(stnet.asnumpy()) == 0: 278 | print(' STN produces empty feature map!') 279 | else: 280 | print(' STN seems to work') 281 | #sh = stnet.shape 282 | print("Affine transformation parameters Theta: " + str(fc3)) 283 | print("loss " + str(loss.asnumpy()[0])) 284 | #hlp.printNumpyArray(stnet.asnumpy()[0][0], thresh=0) 285 | hlp.printNontZeroGradients(grads) 286 | #if loss != -1.0: # otherwise ncc gradient is NaN 287 | executor.backward() # compute gradients 288 | for key in keys: # update parameters 289 | customSGD(key, args[key], grads[key]) 290 | aval = loss.asnumpy()[0] 291 | avg_cor += float(aval) 292 | print("Affine transformation parameters Theta: " + str(fc3)) 293 | print('Epoch %d, Training avg rmse %s ' % (epoch, avg_cor/i)) 294 | # eval_iter.reset() 295 | # avg_cor = 0 296 | # i = 0 297 | # for batch in eval_iter: 298 | # executor.forward(is_train=True, data_fixed=fixed_img_data[0], data_moving=batch.data[0]) 299 | # loss = executor.outputs[3] 300 | # aval = loss.asnumpy()[0] 301 | # avg_cor += float(aval) 302 | # i += 1 303 | # print('Epoch %d, Evaluation avg rmse %s ' % (epoch, avg_cor/i)) 304 | 305 | # # save some to see what the model does 306 | # hlp.saveArrayAsImg(fixed_img_data[0].asnumpy()[0][0], "./fixed.png") 307 | # train_iter.reset() 308 | # for l in range(7): 309 | # batch = train_iter.next() 310 | # moving = batch.data[0] 311 | # executor.forward(is_train=True, data_fixed=fixed_img_data[0], data_moving=moving) 312 | # stnet = executor.outputs[1] 313 | # hlp.saveArrayAsImg(stnet.asnumpy()[0][0], './{0}0warped_{0}.png'.format(l)) 314 | # hlp.saveArrayAsImg(moving.asnumpy()[0][0], './{0}1original_{0}.png'.format(l)) 315 | return executor 316 | 317 | 318 | def cardiac_predict(executor, data_fixed, data_moving, outpath='./'): 319 | ''' 320 | Our own training method for the network. 
using the low-level simple_bind API 321 | Many code snippets are from https://github.com/apache/incubator-mxnet/blob/5ff545f2345f9b607b81546a168665bd63d02d9f/example/notebooks/simple_bind.ipynb 322 | nearly the same as training on mnist, just different iterators 323 | :param symbol: 324 | :param train_iter: 325 | :return: 326 | ''' 327 | img_shape = (1, 1, np.shape(data_fixed)[0], np.shape(data_fixed)[1]) 328 | # symbol.simple_bind(ctx=ctx, data_moving=img_shape, data_fixed=img_shape, 329 | # label_shapes=None, grad_req='null') 330 | df = np.empty((1, 1, np.shape(data_fixed)[0], np.shape(data_fixed)[1])) 331 | dm = np.empty((1, 1, np.shape(data_fixed)[0], np.shape(data_fixed)[1])) 332 | df[0][0] = data_fixed 333 | dm[0][0] = data_moving 334 | executor.forward(is_train=False, data_fixed=df, data_moving=dm) 335 | stnet = executor.outputs[1] 336 | fc3 = executor.outputs[2] 337 | hlp.saveArrayAsImg(stnet.asnumpy()[0][0], outpath) 338 | print("Applied affine transformation with parameters Theta: " + str(fc3)) 339 | 340 | 341 | def save_params(symbol, executor, path='./dirnet_params.json'): 342 | 343 | # get argument arrays 344 | arg_arrays = executor.arg_arrays 345 | # get aux_states arrays. Note: currently only BatchNorm symbol has auxiliary states, which is moving_mean and moving_var 346 | aux_arrays = executor.aux_arrays 347 | # The sequence of arrays is in same sequence of symbol arguments 348 | args = dict(zip(symbol.list_arguments(), arg_arrays)) # dict containing parameter names and values 349 | aux_states = dict(zip(symbol.list_auxiliary_states(), aux_arrays)) 350 | save_dict = {('arg:%s' % k) : v.as_in_context(mx.cpu()) for k, v in args.items()} 351 | save_dict.update({('aux:%s' % k) : v.as_in_context(mx.cpu()) for k, v in aux_states.items()}) 352 | mx.nd.save(os.path.abspath(path), save_dict) 353 | #mx.nd.save(path, [arg_arrays, aux_arrays]) 354 | 355 | 356 | def load_params_to_exec(symbol, shape, ctx=mx.gpu(), path='./dirnet_params.json'): 357 | save_dict = mx.nd.load(os.path.abspath(path)) 358 | arg_params = {} 359 | aux_params = {} 360 | for k, v in save_dict.items(): 361 | tp, name = k.split(':', 1) 362 | if tp == 'arg': 363 | arg_params[name] = v 364 | if tp == 'aux': 365 | aux_params[name] = v 366 | executor = symbol.simple_bind(ctx=ctx, data_moving=(1, 1, shape[0], shape[1]), 367 | data_fixed=(1, 1, shape[0], shape[1]), 368 | label_shapes=None, grad_req='null') 369 | executor.copy_params_from(arg_params=arg_params, aux_params=aux_params) 370 | return executor 371 | 372 | 373 | def predict(executor, iterator): 374 | avg_cor = 0 375 | i = 0 376 | fixed_img_data = iterator.next().data 377 | #hlp.printNumpyArray(fixed_img_data[0][0][0], thresh=0) 378 | for batch in iterator: 379 | i += 1 380 | executor.forward(is_train=True, data_fixed=fixed_img_data[0], data_moving=batch.data[0]) 381 | cor1 = executor.outputs[0] 382 | stnet = executor.outputs[1] 383 | loss = executor.outputs[3] 384 | fc3 = executor.outputs[2] 385 | aval = loss.asnumpy()[0] 386 | avg_cor += float(aval) 387 | print("Affine transformation parameters Theta: " + str(fc3)) 388 | 389 | 390 | def train_cardio_wrapper(): 391 | cardio_shape = (222, 247) 392 | epochs = 1 393 | if len(sys.argv) == 5: 394 | if sys.argv[1] == 'gpu': 395 | ctx = mx.gpu() 396 | elif sys.argv[1] == 'cpu': 397 | ctx = mx.cpu() 398 | else: 399 | print('first argument has to be gpu or cpu') 400 | epochs = int(sys.argv[2]) 401 | path_ed = sys.argv[3] 402 | path_es = sys.argv[4] 403 | else: 404 | path_ed = '/home/adrian/Documents/dl2/Cardiac/ED' 405 | 
path_es = '/home/adrian/Documents/dl2/Cardiac/ES'
406 |         ctx = mx.cpu()
407 |     # mnist_shape = (1, 1, 28, 28)
408 |     # net = get_symbol(mnist_shape)
409 |     # mnist = get_mnist(mnistdir='./data/')  # or use mnist = mx.test_utils.get_mnist() to download
410 |     # iterator = hlp.get_mnist_data_iterator(mnistdir='./data/', digit=0)
411 |     # trained_exec = custom_training_simple_bind(symbol=net, epochs=1, ctx=ctx, iterators=iterator)
412 |     # iter = RIter.RegistrationIter(ES_path='/home/adrian/Documents/dl2/Cardiac/ES',
413 |     #                               ED_path='/home/adrian/Documents/dl2/Cardiac/ED', shape=cardio_shape)
414 |     # iter.reset()
415 |     # for batch_fixed in iter:
416 |     #     # print(batch_fixed)
417 |     #     # print(str(np.shape(batch_fixed.data)))
418 |     #     print(str(np.shape(batch_fixed.data[0])))
419 |     net = conv_net_regressor(shape=(1, 1, cardio_shape[0], cardio_shape[1]), use_additional_pool=True)
420 |     # hlp.create_imglist('/home/adrian/Documents/dl2/Cardiac/ES')
421 |     # hlp.create_imglist('/home/adrian/Documents/dl2/Cardiac/ED')
422 |     # The following does not work with greyscale images...
423 |     # iterator_ES = mx.image.ImageIter(batch_size=1, data_shape=cardio_shape,
424 |     #                                  path_root='/home/adrian/Documents/dl2/Cardiac/ES',
425 |     #                                  path_imglist='/home/adrian/Documents/dl2/Cardiac/ES/imglist.txt')
426 |     # iterator_ED = mx.image.ImageIter(batch_size=1, data_shape=cardio_shape,
427 |     #                                  path_root='/home/adrian/Documents/dl2/Cardiac/ED',
428 |     #                                  path_imglist='/home/adrian/Documents/dl2/Cardiac/ES/imglist.txt')
429 |
430 |     data = hlp.read_cardio_dirs_to_ndarray(path_moving=path_es, path_fixed=path_ed, shape=cardio_shape)
431 |     trained_exec = cardiac_training(symbol=net, img_shape=(1, 1, cardio_shape[0], cardio_shape[1]),
432 |                                     epochs=epochs, ctx=ctx, data=data)
433 |     save_params(executor=trained_exec, symbol=net)
434 |     # loaded_exec = load_params_to_exec(net, ctx=ctx)
435 |
436 |
437 | if __name__ == '__main__':
438 |     # Set variables
439 |     cardio_shape = (222, 247)
440 |     shape_for_net = (1, 1, cardio_shape[0], cardio_shape[1])
441 |     if len(sys.argv) == 5:  # program name, gpu/cpu, epochs, path_ed, path_es
442 |         if sys.argv[1] == 'gpu':
443 |             ctx = mx.gpu()
444 |         elif sys.argv[1] == 'cpu':
445 |             ctx = mx.cpu()
446 |         else:
447 |             raise SystemExit('first argument has to be gpu or cpu')
448 |         epochs = int(sys.argv[2])
449 |         path_ed = sys.argv[3]
450 |         path_es = sys.argv[4]
451 |     else:
452 |         path_ed = '/home/adrian/Documents/dl2/Cardiac/ED_rescaled'
453 |         path_es = '/home/adrian/Documents/dl2/Cardiac/ES_rescaled'
454 |         ctx = mx.cpu()
455 |     net = conv_net_regressor(shape=shape_for_net, use_additional_pool=True)
456 |     outdir = '/home/adrian/Documents/dl2/Cardiac/ES_registered'
457 |     param_path = '/home/adrian/PycharmProjects/DIRNet/dirnet_params.json'
458 |     # Go through data
459 |     onlyfiles_fixed = [f for f in listdir(path_ed) if isfile(join(path_ed, f))]
460 |     onlyfiles_moving = [f for f in listdir(path_es) if isfile(join(path_es, f))]
461 |
462 |     executor = load_params_to_exec(symbol=net, shape=cardio_shape, ctx=ctx, path=param_path)  # expects the 2D image shape, not the 4D net shape
463 |     # out_fix = np.empty(shape=(shape[1], shape[2]))
464 |     # out_mov = np.empty(shape=(shape[1], shape[2]))
465 |     arrays_fix = []
466 |     arrays_mov = []
467 |     for i, fixed in enumerate(onlyfiles_fixed):
468 |         if fixed.endswith('.png'):
469 |             moving = hlp.find_moving_img(onlyfiles_moving, i, fixed)
470 |             assert moving is not None
471 |
472 |             abspath = join(path_ed, fixed)
473 |             pic_fix = ndimage.imread(abspath, flatten=True)
474 |             abspath = join(path_es, moving)
475 |             pic_mov = ndimage.imread(abspath, flatten=True)
476 |             cardiac_predict(executor=executor, data_fixed=pic_fix, data_moving=pic_mov, outpath=join(outdir, moving))
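
A minimal usage sketch of the training/inference functions above (the epoch
count and the './warped.png' output path are placeholder choices, and
pic_fix/pic_mov are greyscale numpy arrays loaded as in the loop above):

    cardio_shape = (222, 247)
    net = conv_net_regressor(shape=(1, 1, cardio_shape[0], cardio_shape[1]), use_additional_pool=True)
    data = hlp.read_cardio_dirs_to_ndarray(path_fixed=path_ed, path_moving=path_es, shape=cardio_shape)
    trained = cardiac_training(symbol=net, img_shape=(1, 1, cardio_shape[0], cardio_shape[1]),
                               epochs=10, ctx=mx.cpu(), data=data)
    save_params(symbol=net, executor=trained, path='./dirnet_params.json')
    executor = load_params_to_exec(symbol=net, shape=cardio_shape, ctx=mx.cpu(), path='./dirnet_params.json')
    cardiac_predict(executor=executor, data_fixed=pic_fix, data_moving=pic_mov, outpath='./warped.png')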
--------------------------------------------------------------------------------
/DIRNet-mxnet/custom_loss.py:
--------------------------------------------------------------------------------
 1 | import logging
 2 |
 3 | logging.getLogger().setLevel(logging.DEBUG)
 4 | import mxnet as mx
 5 | import numpy as np
 6 | import gzip
 7 | import struct
 8 |
 9 |
10 | def get_mnist(mnistdir='./data/'):
11 |     def read_data(label_url, image_url):
12 |         with gzip.open(label_url) as flbl:
13 |             struct.unpack(">II", flbl.read(8))
14 |             label = np.fromstring(flbl.read(), dtype=np.int8)
15 |         with gzip.open(image_url, 'rb') as fimg:
16 |             _, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
17 |             image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
18 |             image = image.reshape(image.shape[0], 1, 28, 28).astype(np.float32) / 255
19 |         return label, image
20 |
21 |     (train_lbl, train_img) = read_data(
22 |         mnistdir + 'train-labels-idx1-ubyte.gz', mnistdir + 'train-images-idx3-ubyte.gz')
23 |     (test_lbl, test_img) = read_data(
24 |         mnistdir + 't10k-labels-idx1-ubyte.gz', mnistdir + 't10k-images-idx3-ubyte.gz')
25 |     return {'train_data': train_img, 'train_label': train_lbl,
26 |             'test_data': test_img, 'test_label': test_lbl}
27 |
28 |
29 | mnist = get_mnist()
30 |
31 | batch_size = 100
32 | weighted_train_labels = np.zeros((mnist['train_label'].shape[0], np.max(mnist['train_label']) + 1))
33 | weighted_train_labels[np.arange(mnist['train_label'].shape[0]), mnist['train_label']] = 1
34 | train_iter = mx.io.NDArrayIter(mnist['train_data'], {'label': weighted_train_labels}, batch_size, shuffle=True)
35 |
36 | weighted_test_labels = np.zeros((mnist['test_label'].shape[0], np.max(mnist['test_label']) + 1))
37 | weighted_test_labels[np.arange(mnist['test_label'].shape[0]), mnist['test_label']] = 1
38 | val_iter = mx.io.NDArrayIter(mnist['test_data'], {'label': weighted_test_labels}, batch_size)
39 |
40 | data = mx.sym.var('data')
41 | # first conv layer
42 | conv1 = mx.sym.Convolution(data=data, kernel=(5, 5), num_filter=20)
43 | tanh1 = mx.sym.Activation(data=conv1, act_type="tanh")
44 | pool1 = mx.sym.Pooling(data=tanh1, pool_type="max", kernel=(2, 2), stride=(2, 2))
45 | # second conv layer
46 | conv2 = mx.sym.Convolution(data=pool1, kernel=(5, 5), num_filter=50)
47 | tanh2 = mx.sym.Activation(data=conv2, act_type="tanh")
48 | pool2 = mx.sym.Pooling(data=tanh2, pool_type="max", kernel=(2, 2), stride=(2, 2))
49 | # first fully connected layer
50 | flatten = mx.sym.flatten(data=pool2)
51 | fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
52 | tanh3 = mx.sym.Activation(data=fc1, act_type="tanh")
53 | # second fully connected layer
54 | fc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=10)
55 | # softmax loss
56 | # lenet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
57 |
58 | label = mx.sym.var('label')
59 | softmax = mx.sym.log_softmax(data=fc2)
60 | softmax_output = mx.sym.BlockGrad(data=softmax, name='softmax')
61 | ce = -mx.sym.sum(mx.sym.sum(mx.sym.broadcast_mul(softmax, label), 1))
62 | lenet = mx.symbol.MakeLoss(ce, normalization='batch')
63 |
64 | sym = mx.sym.Group([softmax_output, lenet])
65 | print(sym.list_outputs())
66 |
67 |
68 | def custom_metric(label, softmax):
69 |     return len(np.where(np.argmax(softmax, 1) == np.argmax(label, 1))[0]) / float(label.shape[0])
70 |
71 |
72 | eval_metrics = mx.metric.CustomMetric(custom_metric, name='custom-accuracy', output_names=['softmax_output'],
73 |                                       label_names=['label'])
74 |
75
| lenet_model = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=['data'], label_names=['label']) 76 | lenet_model.fit(train_iter, 77 | eval_data=val_iter, 78 | optimizer='sgd', 79 | optimizer_params={'learning_rate': 0.1}, 80 | eval_metric=mx.metric.Loss(),#'acc', 81 | # batch_end_callback = mx.callback.Speedometer(batch_size, 100), 82 | num_epoch=10) 83 | -------------------------------------------------------------------------------- /DIRNet-mxnet/evaluate.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | from os import listdir, mkdir 4 | from os.path import isfile, join 5 | import scipy.ndimage as ndimage 6 | import numpy as np 7 | import helper as hlp 8 | import similarity as sim 9 | import scipy.misc as misc 10 | 11 | def rmse(x, y): 12 | ''' 13 | calculates the root mean squared error of two arrays 14 | :param x: 15 | :param y: 16 | :return: 17 | ''' 18 | error = np.subtract(x, y) 19 | squared = np.square(error) 20 | avg = np.average(squared) 21 | rooted = np.sqrt(avg) 22 | return rooted 23 | 24 | def ncc(x,y): 25 | 26 | mean_x = np.mean(a=x, axis=(0, 1), keepdims=True) 27 | mean_y = np.mean(a=y, axis=(0, 1), keepdims=True) 28 | mean_x2 = np.mean(np.square(x), (0, 1), keepdims=True) 29 | mean_y2 = np.mean(np.square(y), (0, 1), keepdims=True) 30 | stddev_x = np.sum(np.sqrt( 31 | mean_x2 - np.square(mean_x)), (0, 1), keepdims=True) 32 | stddev_y = np.sum(np.sqrt( 33 | mean_y2 - np.square(mean_y)), (0, 1), keepdims=True) 34 | top = np.subtract(x, mean_x) * (np.subtract(y, mean_y)) 35 | # return -np.mean(np.broadcast_div(top, np.broadcast_sub((stddev_x * stddev_y), 0.1))) 36 | return np.mean(np.divide((top), ((stddev_x * stddev_y)))) 37 | 38 | def difference(x,y): 39 | dif = np.subtract(x,y) 40 | abs = np.abs(dif) 41 | return abs 42 | 43 | if __name__ == '__main__': 44 | # path_fixed = '/home/adrian/Documents/dl2/Cardiac/ED' 45 | path_es = '/home/adrian/Documents/dl2/Cardiac/ES' 46 | path_es_reg = '/home/adrian/Documents/dl2/Cardiac/ES_rescaled' 47 | path_ed_scaled = '/home/adrian/Documents/dl2/Cardiac/ED_rescaled' 48 | path_diff = '/home/adrian/Documents/dl2/Cardiac/ED_ES_reg_diff' 49 | 50 | # path_to_root = './Registration/' 51 | # path_to_root = '/home/adrian/Documents/dl2/Cardiac/' 52 | # path_fixed = join(path_to_root, 'ED') 53 | # path_moving = join(path_to_root, 'ES') 54 | # out_fixed = join(path_to_root, 'ED_rescaled') 55 | # out_mov = join(path_to_root, 'ES_rescaled') 56 | # mkdir(out_fixed) 57 | # mkdir(out_mov) 58 | shape = (222, 247) 59 | sum_rmse = 0 60 | sum_ncc = 0 61 | sum_ssim = 0 62 | i = 0 63 | onlyfiles_fixed = [f for f in listdir(path_ed_scaled) if isfile(join(path_ed_scaled, f))] 64 | onlyfiles_moving = [f for f in listdir(path_es_reg) if isfile(join(path_es_reg, f))] 65 | for fixed in onlyfiles_fixed: 66 | if fixed.endswith('.png'): 67 | moving = hlp.find_moving_img(onlyfiles_moving, i, fixed) 68 | assert moving is not None # we have to search for the correct moving, cuz not same amnt of pics in ED, ES 69 | 70 | abspath = join(path_ed_scaled, fixed) 71 | pic_fix = ndimage.imread(abspath, flatten=True) 72 | abspath = join(path_es_reg, moving) 73 | pic_mov = ndimage.imread(abspath, flatten=True) 74 | i += 1 75 | sum_rmse += rmse(pic_fix, pic_mov) 76 | nvv = ncc(pic_fix, pic_mov) 77 | sum_ncc += nvv 78 | #misc.imsave(arr=difference(pic_fix, pic_mov), name=join(path_diff, fixed)) 79 | # sum_ssim += sim.MultiScaleSSIM(pic_fix, pic_mov) 80 | print('Average RMSE: ' + str(sum_rmse / i)) 81 
| print('Average NCC: ' + str(sum_ncc / i)) 82 | print('Average SSIM: ' + str(sum_ssim / i)) -------------------------------------------------------------------------------- /DIRNet-mxnet/helper.py: -------------------------------------------------------------------------------- 1 | 2 | import logging 3 | 4 | from requests.api import post 5 | 6 | logging.getLogger().setLevel(logging.DEBUG) 7 | import mxnet as mx 8 | import numpy as np 9 | from PIL import Image 10 | import scipy.misc 11 | from os import listdir 12 | from os.path import isfile, join 13 | import gzip 14 | import struct 15 | from mxnet import nd, autograd 16 | from os import listdir 17 | from os.path import isfile, join 18 | import scipy.ndimage as ndimage 19 | import scipy.misc as misc 20 | 21 | 22 | 23 | def get_mnist(mnistdir='../data/'): 24 | def read_data(label_url, image_url): 25 | with gzip.open(label_url) as flbl: 26 | struct.unpack(">II", flbl.read(8)) 27 | label = np.fromstring(flbl.read(), dtype=np.int8) 28 | with gzip.open(image_url, 'rb') as fimg: 29 | _, _, rows, cols = struct.unpack(">IIII", fimg.read(16)) 30 | image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols) 31 | image = image.reshape(image.shape[0], 1, 28, 28).astype(np.float32) / 255 32 | return label, image 33 | 34 | (train_lbl, train_img) = read_data( 35 | mnistdir + 'train-labels-idx1-ubyte.gz', mnistdir + 'train-images-idx3-ubyte.gz') 36 | (test_lbl, test_img) = read_data( 37 | mnistdir + 't10k-labels-idx1-ubyte.gz', mnistdir + 't10k-images-idx3-ubyte.gz') 38 | return {'train_data': train_img, 'train_label': train_lbl, 39 | 'test_data': test_img, 'test_label': test_lbl} 40 | 41 | 42 | def get_mnist_data_iterator_w_labels(mnistdir='../data/', digit=1): 43 | def get_iterator_single_digit(data, label): 44 | one_digit_indices = [] # Contains all indices with images depicting the digit 45 | for index in range(len(label)): # There might be a faster way to do this 46 | if label[index] == digit: 47 | one_digit_indices.append(index) 48 | one_digit_data = data[one_digit_indices] 49 | one_digit_label = label[one_digit_indices] 50 | fixed_image = one_digit_data[np.random.randint(0, len(one_digit_label))] 51 | one_digit_fixed_image = [] # array of same length as above data array, but its the same img multiple times 52 | for _ in one_digit_data: 53 | one_digit_fixed_image.append(fixed_image) 54 | 55 | iterator = mx.io.NDArrayIter([one_digit_fixed_image, one_digit_data], 56 | [one_digit_label, one_digit_label], 57 | batch_size=1, shuffle=True) 58 | return iterator 59 | 60 | mnist = get_mnist(mnistdir) 61 | train_iter = get_iterator_single_digit(mnist['train_data'], mnist['train_label']) 62 | val_iter = get_iterator_single_digit(mnist['test_data'], mnist['test_label']) 63 | return train_iter, val_iter 64 | 65 | 66 | def get_mnist_data_iterator_two_data_sources(mnistdir='../data/', digit=1): 67 | def get_iterator_single_digit(data, label): 68 | one_digit_indices = [] # Contains all indices with images depicting the digit 69 | for index in range(len(label)): # There might be a faster way to do this 70 | if label[index] == digit: 71 | one_digit_indices.append(index) 72 | one_digit_data = data[one_digit_indices] 73 | one_digit_label = label[one_digit_indices] 74 | fixed_image = one_digit_data[np.random.randint(0, len(one_digit_label))] 75 | one_digit_fixed_image = [] # array of same length as above data array, but its the same img multiple times 76 | for _ in one_digit_data: 77 | one_digit_fixed_image.append(fixed_image) 78 | data = 
{'data_fixed': one_digit_fixed_image, 'data_moving': one_digit_data} 79 | iterator = mx.io.NDArrayIter(data, batch_size=1, shuffle=True) 80 | return iterator 81 | 82 | mnist = get_mnist(mnistdir) 83 | train_iter = get_iterator_single_digit(mnist['train_data'], mnist['train_label']) 84 | val_iter = get_iterator_single_digit(mnist['test_data'], mnist['test_label']) 85 | return train_iter, val_iter 86 | 87 | 88 | def get_mnist_data_iterator(mnistdir='./data/', digit=1): 89 | def get_iterator_single_digit(data, label): 90 | one_digit_indices = [] # Contains all indices with images depicting the digit 91 | for index in range(90): # There might be a faster way to do this 92 | #for index in range(len(label)): # There might be a faster way to do this 93 | if label[index] == digit: 94 | one_digit_indices.append(index) 95 | one_digit_data = data[one_digit_indices] 96 | #one_digit_label = label[one_digit_indices] 97 | # fixed_image = one_digit_data[np.random.randint(0, len(one_digit_label))] 98 | #data = {'data_fixed': one_digit_fixed_image, 'data_moving': one_digit_data} 99 | iterator = mx.io.NDArrayIter(one_digit_data, batch_size=1, shuffle=True) 100 | return iterator 101 | 102 | mnist = get_mnist(mnistdir) 103 | train_iter = get_iterator_single_digit(mnist['train_data'], mnist['train_label']) 104 | val_iter = get_iterator_single_digit(mnist['test_data'], mnist['test_label']) 105 | return train_iter, val_iter 106 | 107 | 108 | def find_moving_img(arr, start_idx, fixed_name): 109 | # patient035_frame01.nz.10.png 110 | patient_id = fixed_name[7:10] 111 | slice_id = fixed_name[22:24] 112 | for i in range(len(arr)): 113 | idx = (i + start_idx) % len(arr) # iterate through the whole array but dont start at 0 114 | moving_name = arr[idx] 115 | if patient_id == moving_name[7:10] and slice_id == moving_name[22:24]: 116 | return moving_name 117 | return None 118 | 119 | 120 | def read_cardio_dirs_to_ndarray(path_fixed, path_moving, shape): 121 | ''' 122 | Reads the fixed (ED) cardio images, looks for a corresponding moving image (ES) by name (same patient, same slice) 123 | and returns an array containing them 124 | :param path_fixed: path to ED 125 | :param path_moving: path to ES 126 | :param shape: target shape, will rescale all images to the same size 127 | :return: an array of shape (amount_pairs, 2, shape[0], shape[1]) fixed is in [idx][0] and mov in [idx][1] 128 | ''' 129 | 130 | 131 | onlyfiles_fixed = [f for f in listdir(path_fixed) if isfile(join(path_fixed, f))] 132 | onlyfiles_moving = [f for f in listdir(path_moving) if isfile(join(path_moving, f))] 133 | # out_fix = np.empty(shape=(shape[1], shape[2])) 134 | # out_mov = np.empty(shape=(shape[1], shape[2])) 135 | arrays_fix = [] 136 | arrays_mov = [] 137 | for i, fixed in enumerate(onlyfiles_fixed): 138 | if fixed.endswith('.png'): 139 | moving = find_moving_img(onlyfiles_moving, i, fixed) 140 | assert moving is not None 141 | 142 | abspath = join(path_fixed, fixed) 143 | pic_fix = ndimage.imread(abspath, flatten=True) 144 | pic_fix = misc.imresize(pic_fix, (shape[0], shape[1])) 145 | 146 | abspath = join(path_moving, moving) 147 | pic_mov = ndimage.imread(abspath, flatten=True) 148 | pic_mov = misc.imresize(pic_mov, (shape[0], shape[1])) 149 | arrays_fix.append(np.stack([pic_fix, pic_mov])) 150 | #arrays_mov.append(pic_mov) 151 | return arrays_fix 152 | 153 | 154 | def ncc(x, y): 155 | # mean_x = tf.reduce_mean(x, [1, 2, 3], keep_dims=True) 156 | # mean_y = tf.reduce_mean(y, [1, 2, 3], keep_dims=True) 157 | # mean_x2 = 
tf.reduce_mean(tf.square(x), [1, 2, 3], keep_dims=True) 158 | # mean_y2 = tf.reduce_mean(tf.square(y), [1, 2, 3], keep_dims=True) 159 | # stddev_x = tf.reduce_sum(tf.sqrt( 160 | # mean_x2 - tf.square(mean_x)), [1, 2, 3], keep_dims=True) 161 | # stddev_y = tf.reduce_sum(tf.sqrt( 162 | # mean_y2 - tf.square(mean_y)), [1, 2, 3], keep_dims=True) 163 | # return tf.reduce_mean((x - mean_x) * (y - mean_y) / (stddev_x * stddev_y)) 164 | mean_x = mx.symbol.mean(data=x, axis=(1, 2, 3), keepdims=True) 165 | mean_y = mx.symbol.mean(data=y, axis=(1, 2, 3), keepdims=True) 166 | mean_x2 = mx.symbol.mean(mx.symbol.square(x), (1, 2, 3), keepdims=True) 167 | mean_y2 = mx.symbol.mean(mx.symbol.square(y), (1, 2, 3), keepdims=True) 168 | stddev_x = mx.symbol.sum(mx.symbol.sqrt( 169 | mean_x2 - mx.symbol.square(mean_x)), (1, 2, 3), keepdims=True) 170 | stddev_y = mx.symbol.sum(mx.symbol.sqrt( 171 | mean_y2 - mx.symbol.square(mean_y)), (1, 2, 3), keepdims=True) 172 | top = mx.symbol.broadcast_sub(x, mean_x) * (mx.symbol.broadcast_sub(y, mean_y)) 173 | # return -mx.symbol.mean(mx.symbol.broadcast_div(top, mx.symbol.broadcast_sub((stddev_x * stddev_y), 0.1))) 174 | return -mx.symbol.mean(mx.symbol.broadcast_div((top + 0.1), ((stddev_x * stddev_y) + 0.1))) 175 | 176 | 177 | def rmse(x, y): 178 | error = mx.symbol.broadcast_sub(x, y) 179 | squared = mx.symbol.square(error) 180 | avg = mx.symbol.mean(squared) 181 | rooted = mx.symbol.sqrt(avg) 182 | return rooted 183 | 184 | 185 | def printNumpyArray(a, thresh=0.5): 186 | for i in range(len(a)): 187 | linestr = '' 188 | for k in range(len(a[0])): 189 | if a[i][k] > thresh: 190 | linestr += 'X' 191 | else: 192 | linestr += '_' 193 | print(linestr) 194 | 195 | 196 | def saveArrayAsImg(array, filepath='./imgfromarray.png'): 197 | #print(type(array)) 198 | #print(np.shape(array)) 199 | #im = Image.fromarray(array) 200 | #im.save(filepath) 201 | scipy.misc.imsave(arr=array, name=filepath) 202 | 203 | def printNontZeroGradients(grads, thresh=0): 204 | print("Gradient arrays that contain non-zero values:") 205 | for key in grads.keys(): 206 | allZero = True 207 | for v in np.nditer(grads[key].asnumpy()): 208 | if v > thresh: 209 | allZero = False 210 | break 211 | if not allZero: 212 | print('\t ' + key) 213 | 214 | def printNaNGradients(grads, thresh=0): 215 | print("Gradient arrays that contain non-zero values:") 216 | for key in grads.keys(): 217 | hasNan = False 218 | for v in np.nditer(grads[key].asnumpy()): 219 | if np.isnan(v): 220 | hasNan = True 221 | break 222 | if hasNan: 223 | print('\t ' + key + ' has NaN values!') 224 | 225 | 226 | def create_imglist(root_path, pathout=''): 227 | onlyfiles = [f for f in listdir(root_path) if isfile(join(root_path, f))] 228 | str_out = '' 229 | i = 0 230 | for filename in onlyfiles: 231 | # Format: Tab separated record of index, one or more labels and relative_path_from_root. 
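        # (A valid record for mx.image.ImageIter looks like "0\t1\timg.png":
        # index, label, then the path relative to path_root. Note that the loop
        # below writes '/' instead of the filename, so the generated imglist.txt
        # likely needs the filename appended before it can be used.)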
232 | i += 1 233 | if filename.endswith('.png'): 234 | str_out += str(i) + '\t1\t/\n' 235 | if pathout == '': 236 | pathout = root_path+'/imglist.txt' 237 | with open(pathout, 'w') as f: 238 | f.write(str_out) 239 | 240 | 241 | def pure_batch_norm(X, gamma, beta, eps = 2e-5): 242 | if len(X.shape) not in (2, 4): 243 | raise ValueError('only supports dense or 2dconv') 244 | 245 | # dense 246 | if len(X.shape) == 2: 247 | # mini-batch mean 248 | mean = nd.mean(X, axis=0) 249 | # mini-batch variance 250 | variance = nd.mean((X - mean) ** 2, axis=0) 251 | # normalize 252 | X_hat = (X - mean) * 1.0 / nd.sqrt(variance + eps) 253 | # scale and shift 254 | out = gamma * X_hat + beta 255 | 256 | # 2d conv 257 | elif len(X.shape) == 4: 258 | # extract the dimensions 259 | N, C, H, W = X.shape 260 | # mini-batch mean 261 | mean = nd.mean(X, axis=(0, 2, 3)) 262 | # mini-batch variance 263 | variance = nd.mean((X - mean.reshape((1, C, 1, 1))) ** 2, axis=(0, 2, 3)) 264 | # normalize 265 | X_hat = (X - mean.reshape((1, C, 1, 1))) * 1.0 / nd.sqrt(variance.reshape((1, C, 1, 1)) + eps) 266 | # scale and shift 267 | out = gamma.reshape((1, C, 1, 1)) * X_hat + beta.reshape((1, C, 1, 1)) 268 | 269 | return out -------------------------------------------------------------------------------- /DIRNet-mxnet/playground.py: -------------------------------------------------------------------------------- 1 | 2 | import logging 3 | import numpy as np 4 | import gzip 5 | import struct 6 | import helper as hlp 7 | 8 | logging.getLogger().setLevel(logging.DEBUG) 9 | import mxnet as mx 10 | 11 | 12 | def get_mnist(mnistdir='./data/'): 13 | def read_data(label_url, image_url): 14 | with gzip.open(label_url) as flbl: 15 | struct.unpack(">II", flbl.read(8)) 16 | label = np.fromstring(flbl.read(), dtype=np.int8) 17 | with gzip.open(image_url, 'rb') as fimg: 18 | _, _, rows, cols = struct.unpack(">IIII", fimg.read(16)) 19 | image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols) 20 | image = image.reshape(image.shape[0], 1, 28, 28).astype(np.float32) / 255 21 | return label, image 22 | 23 | (train_lbl, train_img) = read_data( 24 | mnistdir + 'train-labels-idx1-ubyte.gz', mnistdir + 'train-images-idx3-ubyte.gz') 25 | (test_lbl, test_img) = read_data( 26 | mnistdir + 't10k-labels-idx1-ubyte.gz', mnistdir + 't10k-images-idx3-ubyte.gz') 27 | return {'train_data': train_img, 'train_label': train_lbl, 28 | 'test_data': test_img, 'test_label': test_lbl} 29 | 30 | 31 | def get_mnist_data_iterator_w_labels(mnistdir='./data/', digit=1): 32 | def get_iterator_single_digit(data, label): 33 | one_digit_indices = [] # Contains all indices with images depicting the digit 34 | for index in range(len(label)): # There might be a faster way to do this 35 | if label[index] == digit: 36 | one_digit_indices.append(index) 37 | one_digit_data = data[one_digit_indices] 38 | one_digit_label = label[one_digit_indices] 39 | fixed_image = one_digit_data[np.random.randint(0, len(one_digit_label))] 40 | one_digit_fixed_image = [] # array of same length as above data array, but its the same img multiple times 41 | for _ in one_digit_data: 42 | one_digit_fixed_image.append(fixed_image) 43 | 44 | iterator = mx.io.NDArrayIter([one_digit_fixed_image, one_digit_data], 45 | [one_digit_label, one_digit_label], 46 | batch_size=1, shuffle=True) 47 | return iterator 48 | 49 | mnist = get_mnist(mnistdir) 50 | train_iter = get_iterator_single_digit(mnist['train_data'], mnist['train_label']) 51 | val_iter = 
get_iterator_single_digit(mnist['test_data'], mnist['test_label']) 52 | return train_iter, val_iter 53 | 54 | 55 | def get_mnist_data_iterator(mnistdir='./data/', digit=1): 56 | def get_iterator_single_digit(data, label): 57 | one_digit_indices = [] # Contains all indices with images depicting the digit 58 | for index in range(len(label)): # There might be a faster way to do this 59 | if label[index] == digit: 60 | one_digit_indices.append(index) 61 | one_digit_data = data[one_digit_indices] 62 | one_digit_label = label[one_digit_indices] 63 | fixed_image = one_digit_data[np.random.randint(0, len(one_digit_label))] 64 | one_digit_fixed_image = [] # array of same length as above data array, but its the same img multiple times 65 | for _ in one_digit_data: 66 | one_digit_fixed_image.append(fixed_image) 67 | data = {'data_fixed': one_digit_fixed_image, 'data_moving': one_digit_data} 68 | iterator = hlp.get_mnist_data_iterator(mnistdir='./data/', digit=1) 69 | return iterator 70 | 71 | mnist = get_mnist(mnistdir) 72 | train_iter = get_iterator_single_digit(mnist['train_data'], mnist['train_label']) 73 | val_iter = get_iterator_single_digit(mnist['test_data'], mnist['test_label']) 74 | return train_iter, val_iter 75 | 76 | 77 | def printNumpyArray(a, thresh=0.5): 78 | for i in range(len(a)): 79 | linestr = '' 80 | for k in range(len(a[0])): 81 | if a[i][k] > thresh: 82 | linestr += 'X' 83 | else: 84 | linestr += '_' 85 | print(linestr) 86 | 87 | # helper function 88 | def Init(key, arr): 89 | if "fc2_bias" in key: 90 | # initialize with identity transformation 91 | initial = np.array([[1., 1, 0], [1, 1., 0]]) 92 | initial = np.array([[ 0.91179425, 0.88957721, -0.90980798, 0.95847398, 0.24638432, 0.95731395]]) 93 | initial = initial.astype('float32').flatten() 94 | arr[:] = initial 95 | elif "weight" in key: 96 | arr[:] = mx.random.uniform(-0.07, 0.07, arr.shape) 97 | # or 98 | # arr[:] = np.random.uniform(-0.07, 0.07, arr.shape) 99 | elif "gamma" in key: 100 | # for batch norm slope 101 | arr[:] = 1.0 102 | elif "bias" in key: 103 | arr[:] = 0 104 | elif "beta" in key: 105 | # for batch norm bias 106 | arr[:] = 0 107 | 108 | 109 | data1 = mx.symbol.Variable('data1') 110 | data2 = mx.symbol.Variable('data2') 111 | net = mx.symbol.concat(data1, data2) 112 | fc1 = mx.symbol.FullyConnected(data=net, name='fc1', num_hidden=128) 113 | net = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu") 114 | net = mx.symbol.FullyConnected(data=net, name='fc2', num_hidden=6) 115 | stnet = mx.sym.SpatialTransformer(data=data1, loc=net, target_shape=(60, 60), transform_type='affine', 116 | sampler_type="bilinear", name='SpatialTransformer') 117 | #cor = mx.sym.Correlation(data1=data1, data2=stnet, kernel_size=28, stride1=2, stride2=2, pad_size=0, max_displacement=0) 118 | #loss = mx.sym.MakeLoss(cor, normalization='batch') 119 | # group fc1 and out together 120 | group = mx.symbol.Group([mx.sym.BlockGrad(net), mx.sym.BlockGrad(stnet)]) 121 | #print group.list_outputs() 122 | 123 | 124 | 125 | 126 | executor = group.simple_bind(ctx=mx.cpu(), data1=(1, 1, 28, 28), data2=(1, 1, 28, 28), label_shapes=None) 127 | # get argument arrays 128 | arg_arrays = executor.arg_arrays 129 | # get grad arrays 130 | grad_arrays = executor.grad_arrays 131 | # get aux_states arrays. 
141 | # initialize parameters by uniform random numbers 142 | for key, arr in args.items(): 143 | Init(key, arr) 144 | keys = group.list_arguments() 145 | train_iter = hlp.get_mnist_data_iterator()[0] 146 | for epoch in range(5): 147 | train_iter.reset() 148 | fixed_img_data = train_iter.next().data 149 | for batch in train_iter: 150 | outs = executor.forward(is_train=True, data1=fixed_img_data[0], data2=batch.data[0]) 151 | cor1 = executor.outputs[0] 152 | theta = executor.outputs[0] 153 | transformed = executor.outputs[1] 154 | transformed = transformed[0][0] # first image in the batch, first channel 155 | printNumpyArray(batch.data[0][0][0]) 156 | print('-------------------') 157 | printNumpyArray(transformed) 158 | # loss = executor.outputs[3] # the group defines only two outputs, so there is no loss to read here 159 | executor.backward() # compute gradients 160 | #for key in keys: # update parameters 161 | # customSGD(key, args[key], grads[key]) 162 | #print('Epoch %d, Training cor %s grad %s' % (epoch, cor1, grad1)) 163 | -------------------------------------------------------------------------------- /DIRNet-mxnet/playground2.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Implementation of the following paper: 3 | https://arxiv.org/abs/1704.06065 4 | ''' 5 | import logging 6 | 7 | 8 | 9 | logging.getLogger().setLevel(logging.DEBUG) 10 | import mxnet as mx 11 | import numpy as np 12 | import helper as hlp 13 | #import CustomNDArrayIter as customIter 14 | 15 | def conv_net_regressor(image_shape, bn_mom=0.9): 16 | # We have 2 data sources and concatenate them 17 | data_fixed = mx.sym.Variable(name='data') 18 | #data_moving = mx.sym.Variable(name='data_moving') 19 | #concat_data = mx.sym.concat(*[data_fixed, data_moving]) 20 | batched = mx.sym.BatchNorm(data=data_fixed, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data') 21 | # The number of kernels per layer can be of arbitrary size, but the number of kernels of the output layer is 22 | # determined by the dimensionality of the input images 23 | filter_list = [16, 32, 64, 128] 24 | # four alternating layers of 3 × 3 convolutions with 0-padding and 2 × 2 downsampling layers 25 | for i in range(4): 26 | if i == 0: 27 | body = mx.sym.Convolution(data=batched, num_filter=filter_list[i], kernel=(3, 3), stride=(1, 1), pad=(0, 0), 28 | no_bias=True, name="conv" + str(i)) 29 | else: 30 | body = mx.sym.Convolution(data=body, num_filter=filter_list[i], kernel=(3, 3), stride=(1, 1), pad=(0, 0), 31 | no_bias=True, name="conv" + str(i)) 32 | if i == 3: 33 | body_out = body 34 | body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn' + str(i)) 35 | if i == 3: 36 | body_out2 = body 37 | # TODO: the original authors use exponential linear units as activation 38 | body = mx.sym.LeakyReLU(data=body, act_type='leaky', name='relu' + str(i)) 39 | body = mx.sym.Pooling(data=body, kernel=(2, 2), stride=(1, 1), pad=(1, 1), pool_type='avg')
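# Shape sketch for the loop above (illustrative, assuming a 28 × 28 MNIST-sized
# input): each 3 × 3 stride-1 conv without padding shrinks the map by 2, and
# each 2 × 2 stride-1 avg-pool with pad 1 grows it by 1, so the spatial size
# evolves as
#   28 -> conv 26 -> pool 27 -> conv 25 -> pool 26 -> conv 24 -> pool 25 -> conv 23 -> pool 24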
40 | # Subsequently, three 1 × 1 convolutional layers are applied to make the ConvNet regressor fully convolutional 41 | #for k in range(3): 42 | i = i + 4 43 | a_conv = mx.sym.Convolution(data=body, num_filter=256, kernel=(1, 1), stride=(1, 1), pad=(0, 0), 44 | no_bias=True, name="conv" + str(i)) 45 | body = mx.sym.BatchNorm(data=a_conv, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn' + str(i)) 46 | # TODO: the original authors use exponential linear units as activation 47 | body = mx.sym.LeakyReLU(data=body, act_type='leaky', name='relu' + str(i)) 48 | # body = mx.sym.Pooling(data=body, kernel=(2, 2), stride=(2, 2), pad=(1, 1), pool_type='avg') 49 | 50 | flatten = mx.sym.flatten(data=body) 51 | fc3 = mx.sym.FullyConnected(data=flatten, num_hidden=20) 52 | net = mx.sym.SoftmaxOutput(data=fc3, name='softmax') 53 | output = mx.sym.Group([mx.sym.BlockGrad(body_out), mx.sym.BlockGrad(body_out2), net]) 54 | return output 55 | 56 | 57 | def get_symbol(image_shape): 58 | return conv_net_regressor(image_shape) 59 | 60 | 61 | def custom_training_simple_bind(symbol, iterators): 62 | ''' 63 | Our own training method for the network, using the low-level simple_bind API. 64 | Many code snippets are from https://github.com/apache/incubator-mxnet/blob/5ff545f2345f9b607b81546a168665bd63d02d9f/example/notebooks/simple_bind.ipynb 65 | :param symbol: the symbol (or symbol group) to bind and train 66 | :param iterators: the training data iterator 67 | :return: None; prints the training accuracy per epoch 68 | ''' 69 | 70 | # helper function 71 | def Init(key, arr): 72 | # if "fullyconnected0_bias" in key: 73 | # # initialize with identity transformation 74 | # initial = np.array([[1., 0, 0], [0, 1., 0]]) 75 | # initial = initial.astype('float32').flatten() 76 | # arr[:] = initial 77 | if "weight" in key: 78 | arr[:] = mx.random.uniform(-0.07, 0.07, arr.shape) 79 | # or 80 | # arr[:] = np.random.uniform(-0.07, 0.07, arr.shape) 81 | elif "gamma" in key: 82 | # for batch norm slope 83 | arr[:] = 1.0 84 | elif "bias" in key: 85 | arr[:] = 0 86 | elif "beta" in key: 87 | # for batch norm bias 88 | arr[:] = 0 89 | 90 | def customSGD(key, weight, grad, lr=0.01, grad_norm=1): 91 | # key is key for weight, we can customize update rule 92 | # weight is weight array 93 | # grad is grad array 94 | # lr is learning rate 95 | # grad_norm is scalar to norm gradient, usually it is batch_size 96 | norm = 1.0 / grad_norm 97 | # here we make the biases' learning rate 2 times larger than the weights' 98 | if "weight" in key or "gamma" in key: 99 | weight[:] -= lr * (grad * norm) 100 | elif "bias" in key or "beta" in key: 101 | weight[:] -= 2.0 * lr * (grad * norm) 102 | else: 103 | pass 104 | 105 | def Accuracy(label, pred_prob): 106 | pred = np.argmax(pred_prob, axis=1) 107 | return np.sum(label == pred) * 1.0 / label.shape[0] 108 | 109 | executor = symbol.simple_bind(ctx=mx.cpu(), data=(1, 1, 28, 28), 110 | label_shapes=None, grad_req='write') 111 | 112 | # get argument arrays 113 | arg_arrays = executor.arg_arrays 114 | # get grad arrays 115 | grad_arrays = executor.grad_arrays 116 | # get aux_states arrays. 
Note: currently only BatchNorm symbol has auxiliary states, which is moving_mean and moving_var 117 | aux_arrays = executor.aux_arrays 118 | # get outputs from executor 119 | output_arrays = executor.outputs 120 | # The sequence of arrays is in same sequence of symbol arguments 121 | args = dict(zip(symbol.list_arguments(), arg_arrays)) # dict containing parameter names and values (i think) 122 | grads = dict(zip(symbol.list_arguments(), grad_arrays)) 123 | outputs = dict(zip(symbol.list_outputs(), output_arrays)) 124 | aux_states = dict(zip(symbol.list_auxiliary_states(), aux_arrays)) 125 | 126 | # initialize parameters by uniform random numbers 127 | for key, arr in args.items(): 128 | Init(key, arr) 129 | keys = symbol.list_arguments() 130 | train_iter = iterators 131 | pred_prob = mx.nd.zeros(executor.outputs[2].shape) 132 | # train 5 epochs, i.e. going over the data iter one pass 133 | for epoch in range(50): 134 | train_iter.reset() 135 | avg_cor = 0 136 | i = 0 137 | train_acc = 0. 138 | fc3 = None 139 | #fixed_img_data = train_iter.next().data 140 | for batch in train_iter: 141 | i += 1 142 | # printNumpyArray(batch.data[0][0][0]) 143 | label = batch.label[0] 144 | outs = executor.forward(is_train=True, data=batch.data[0], softmax_label=label) 145 | # pre_bn = executor.outputs[0] 146 | # post_bn = executor.outputs[1] 147 | # shape_pre = pre_bn.shape 148 | # shape_post = post_bn.shape 149 | # summed_pre = np.sum(pre_bn.asnumpy()) 150 | # summed_post = np.sum(post_bn.asnumpy()) 151 | # pure_bn = hlp.pure_batch_norm(X=pre_bn, gamma=args['bn3_gamma'], beta=args['bn3_beta']) 152 | # fc3 = executor.outputs[1] 153 | pred_prob[:] = executor.outputs[2] 154 | train_acc += Accuracy(label.asnumpy(), pred_prob.asnumpy()) 155 | # stnet = executor.outputs[1] 156 | # fc3 = executor.outputs[2] 157 | # # print("Affine transformation parameters Theta: " + str(fc3)) 158 | # loss = executor.outputs[3] 159 | 160 | executor.backward() # compute gradients 161 | if i%1800 == 0: 162 | print("batch " + str(i)) 163 | hlp.printNontZeroGradients(grads) 164 | #print(grads['conv3_weight']) 165 | #print(args['conv3_weight'][0]) 166 | for key in keys: # update parameters 167 | customSGD(key, args[key], grads[key]) 168 | # aval = cor1[0][0][0][0].asnumpy()[0] 169 | # avg_cor += float(aval) 170 | # print("Affine transformation parameters Theta: " + str(fc3)) 171 | print('Epoch %d, Training acc %s ' % (epoch, train_acc/i)) 172 | 173 | 174 | if __name__ == '__main__': 175 | mnist_shape = (1, 1, 28, 28) 176 | mnist = hlp.get_mnist(mnistdir='/home/adrian/PycharmProjects/DIRNet/data/') # or use mnist = mx.test_utils.get_mnist() to download 177 | #mnist = mx.test_utils.get_mnist() 178 | standard_iter = mx.io.NDArrayIter(mnist['train_data'], mnist['train_label'], 1, shuffle=True) 179 | batch_size = 1 180 | iterator = hlp.get_mnist_data_iterator(mnistdir='/home/adrian/PycharmProjects/DIRNet/data/', digit=1) 181 | net = get_symbol(mnist_shape) 182 | custom_training_simple_bind(net, standard_iter) 183 | -------------------------------------------------------------------------------- /DIRNet-mxnet/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow 2 | mxnet 3 | numpy -------------------------------------------------------------------------------- /DIRNet-mxnet/similarity.py: -------------------------------------------------------------------------------- 1 | from skimage.measure import compare_ssim as ssim 2 | import numpy as np 3 | from scipy import signal 4 | 
import cv2 5 | from scipy.ndimage.filters import convolve 6 | 7 | # #opencv is only used for loading right now 8 | # image1 = cv2.imread(image1..) 9 | # image2 = cv2.imread(image2...) 10 | # #call to MultiScaleSSIM needs to have a batch dimension 11 | # image1_1=np.expand_dims(image1,axis=0) 12 | # image2_1=np.expand_dims(image2,axis=0) 13 | # #mse and ssim need to get grayscale images 14 | # image1=cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY) 15 | # image2=cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY) 16 | 17 | 18 | def mse(imageA, imageB): 19 | # the 'Mean Squared Error' between the two images is the 20 | # sum of the squared differences between the two images; 21 | # NOTE: the two images must have the same dimensions 22 | err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2) 23 | err /= float(imageA.shape[0] * imageA.shape[1]) 24 | 25 | # return the MSE; the lower the error, the more "similar" 26 | # the two images are 27 | return err/255 28 | 29 | def MultiScaleSSIM(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, 30 | k1=0.01, k2=0.03, weights=None): 31 | if img1.shape != img2.shape: 32 | raise RuntimeError('Input images must have the same shape (%s vs. %s).' 33 | % (img1.shape, img2.shape)) 34 | if img1.ndim != 4: 35 | raise RuntimeError('Input images must have four dimensions, not %d' 36 | % img1.ndim) 37 | 38 | # Note: default weights don't sum to 1.0 but do match the paper / matlab code. 39 | weights = np.array(weights if weights else 40 | [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]) 41 | levels = weights.size 42 | downsample_filter = np.ones((1, 2, 2, 1)) / 4.0 43 | im1, im2 = [x.astype(np.float64) for x in [img1, img2]] 44 | mssim = np.array([]) 45 | mcs = np.array([]) 46 | for _ in range(levels): 47 | ssim, cs = _SSIMForMultiScale( 48 | im1, im2, max_val=max_val, filter_size=filter_size, 49 | filter_sigma=filter_sigma, k1=k1, k2=k2) 50 | mssim = np.append(mssim, ssim) 51 | mcs = np.append(mcs, cs) 52 | filtered = [convolve(im, downsample_filter, mode='reflect') 53 | for im in [im1, im2]] 54 | im1, im2 = [x[:, ::2, ::2, :] for x in filtered] 55 | return 1-(np.prod(mcs[0:levels-1] ** weights[0:levels-1]) * 56 | (mssim[levels-1] ** weights[levels-1])) 57 |
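# A small usage sketch for the metrics in this module (illustrative only;
# random arrays stand in for loaded images). MultiScaleSSIM expects a batched
# NHWC tensor, while mse works on plain 2-D grayscale arrays, mirroring the
# commented loading code at the top of the file. Note MultiScaleSSIM returns a
# dissimilarity (1 - MS-SSIM), so 0 means identical inputs:
#
#   a = np.random.rand(1, 64, 64, 1) * 255
#   b = np.random.rand(1, 64, 64, 1) * 255
#   print(MultiScaleSSIM(a, b))
#   print(mse(a[0, :, :, 0], b[0, :, :, 0]))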
58 | def _FSpecialGauss(size, sigma): 59 | """Function to mimic the 'fspecial' gaussian MATLAB function.""" 60 | radius = size // 2 61 | offset = 0.0 62 | start, stop = -radius, radius + 1 63 | if size % 2 == 0: 64 | offset = 0.5 65 | stop -= 1 66 | x, y = np.mgrid[offset + start:stop, offset + start:stop] 67 | assert len(x) == size 68 | g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2))) 69 | return g / g.sum() 70 | 71 | def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, 72 | filter_sigma=1.5, k1=0.01, k2=0.03): 73 | if img1.shape != img2.shape: 74 | raise RuntimeError('Input images must have the same shape (%s vs. %s).' 75 | % (img1.shape, img2.shape)) 76 | if img1.ndim != 4: 77 | raise RuntimeError('Input images must have four dimensions, not %d' 78 | % img1.ndim) 79 | 80 | img1 = img1.astype(np.float64) 81 | img2 = img2.astype(np.float64) 82 | _, height, width, _ = img1.shape 83 | 84 | # Filter size can't be larger than height or width of images. 85 | size = min(filter_size, height, width) 86 | 87 | # Scale down sigma if a smaller filter size is used. 88 | sigma = size * filter_sigma / filter_size if filter_size else 0 89 | 90 | if filter_size: 91 | window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1)) 92 | mu1 = signal.fftconvolve(img1, window, mode='valid') 93 | mu2 = signal.fftconvolve(img2, window, mode='valid') 94 | sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid') 95 | sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid') 96 | sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid') 97 | else: 98 | # Empty blur kernel so no need to convolve. 99 | mu1, mu2 = img1, img2 100 | sigma11 = img1 * img1 101 | sigma22 = img2 * img2 102 | sigma12 = img1 * img2 103 | 104 | mu11 = mu1 * mu1 105 | mu22 = mu2 * mu2 106 | mu12 = mu1 * mu2 107 | sigma11 -= mu11 108 | sigma22 -= mu22 109 | sigma12 -= mu12 110 | 111 | # Calculate intermediate values used by both ssim and cs_map. 112 | c1 = (k1 * max_val) ** 2 113 | c2 = (k2 * max_val) ** 2 114 | v1 = 2.0 * sigma12 + c2 115 | v2 = sigma11 + sigma22 + c2 116 | ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2))) 117 | cs = np.mean(v1 / v2) 118 | return ssim, cs 119 | 120 | 121 | # print('ms-ssim: {}'.format(MultiScaleSSIM(image1_1,image2_1))) 122 | # print('ssim: {}'.format(1-ssim(image1, image2))) 123 | # print('mse: {}'.format(mse(image1,image2))) 124 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/AffineST.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | import tensorflow as tf 16 | 17 | def AffineST(U, V, out_size, name='DeformableTransformer', **kwargs): 18 | def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs): 19 | """Spatial Transformer Layer 20 | Implements a spatial transformer layer as described in [1]_. 21 | Based on [2]_ and edited by David Dao for Tensorflow. 22 | Parameters 23 | ---------- 24 | U : float 25 | The output of a convolutional net should have the 26 | shape [num_batch, height, width, num_channels]. 27 | theta: float 28 | The output of the 29 | localisation network should be [num_batch, 6]. 30 | out_size: tuple of two ints 31 | The size of the output of the network (height, width) 32 | References 33 | ---------- 34 | .. [1] Spatial Transformer Networks 35 | Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu 36 | Submitted on 5 Jun 2015 37 | .. 
[2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py 38 | Notes 39 | ----- 40 | To initialize the network to the identity transform init 41 | ``theta`` to : 42 | identity = np.array([[1., 0., 0.], 43 | [0., 1., 0.]]) 44 | identity = identity.flatten() 45 | theta = tf.Variable(initial_value=identity) 46 | """ 47 | 48 | def _repeat(x, n_repeats): 49 | with tf.variable_scope('_repeat'): 50 | rep = tf.transpose( 51 | tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0]) 52 | rep = tf.cast(rep, 'int32') 53 | x = tf.matmul(tf.reshape(x, (-1, 1)), rep) 54 | return tf.reshape(x, [-1]) 55 | 56 | def _interpolate(im, x, y, out_size): 57 | with tf.variable_scope('_interpolate'): 58 | # constants 59 | num_batch = tf.shape(im)[0] 60 | height = tf.shape(im)[1] 61 | width = tf.shape(im)[2] 62 | channels = tf.shape(im)[3] 63 | 64 | x = tf.cast(x, 'float32') 65 | y = tf.cast(y, 'float32') 66 | height_f = tf.cast(height, 'float32') 67 | width_f = tf.cast(width, 'float32') 68 | out_height = out_size[0] 69 | out_width = out_size[1] 70 | zero = tf.zeros([], dtype='int32') 71 | max_y = tf.cast(tf.shape(im)[1] - 1, 'int32') 72 | max_x = tf.cast(tf.shape(im)[2] - 1, 'int32') 73 | 74 | # scale indices from [-1, 1] to [0, width/height] 75 | x = (x + 1.0)*(width_f) / 2.0 76 | y = (y + 1.0)*(height_f) / 2.0 77 | 78 | # do sampling 79 | x0 = tf.cast(tf.floor(x), 'int32') 80 | x1 = x0 + 1 81 | y0 = tf.cast(tf.floor(y), 'int32') 82 | y1 = y0 + 1 83 | 84 | x0 = tf.clip_by_value(x0, zero, max_x) 85 | x1 = tf.clip_by_value(x1, zero, max_x) 86 | y0 = tf.clip_by_value(y0, zero, max_y) 87 | y1 = tf.clip_by_value(y1, zero, max_y) 88 | dim2 = width 89 | dim1 = width*height 90 | base = _repeat(tf.range(num_batch)*dim1, out_height*out_width) 91 | base_y0 = base + y0*dim2 92 | base_y1 = base + y1*dim2 93 | idx_a = base_y0 + x0 94 | idx_b = base_y1 + x0 95 | idx_c = base_y0 + x1 96 | idx_d = base_y1 + x1 97 | 98 | # use indices to lookup pixels in the flat image and restore 99 | # channels dim 100 | im_flat = tf.reshape(im, tf.stack([-1, channels])) 101 | im_flat = tf.cast(im_flat, 'float32') 102 | Ia = tf.gather(im_flat, idx_a) 103 | Ib = tf.gather(im_flat, idx_b) 104 | Ic = tf.gather(im_flat, idx_c) 105 | Id = tf.gather(im_flat, idx_d) 106 | 107 | # and finally calculate interpolated values 108 | x0_f = tf.cast(x0, 'float32') 109 | x1_f = tf.cast(x1, 'float32') 110 | y0_f = tf.cast(y0, 'float32') 111 | y1_f = tf.cast(y1, 'float32') 112 | wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1) 113 | wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1) 114 | wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1) 115 | wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1) 116 | output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id]) 117 | return output 118 | 119 | def _meshgrid(height, width): 120 | with tf.variable_scope('_meshgrid'): 121 | # This should be equivalent to: 122 | # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width), 123 | # np.linspace(-1, 1, height)) 124 | # ones = np.ones(np.prod(x_t.shape)) 125 | # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones]) 126 | x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), 127 | tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0])) 128 | y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), 129 | tf.ones(shape=tf.stack([1, width]))) 130 | 131 | x_t_flat = tf.reshape(x_t, (1, -1)) 132 | y_t_flat = tf.reshape(y_t, (1, -1)) 133 | 134 | ones = tf.ones_like(x_t_flat) 135 | grid = tf.concat([x_t_flat, y_t_flat, ones], 0) 136 | return grid 137 | 
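# Worked example of the bilinear weights computed in _interpolate above: for a
# sample point (x, y) = (2.3, 4.8) the four neighbours are x0=2, x1=3, y0=4,
# y1=5, and
#   wa = (x1-x)*(y1-y) = 0.7*0.2 = 0.14   # weight of the (x0, y0) pixel
#   wb = (x1-x)*(y-y0) = 0.7*0.8 = 0.56   # weight of the (x0, y1) pixel
#   wc = (x-x0)*(y1-y) = 0.3*0.2 = 0.06   # weight of the (x1, y0) pixel
#   wd = (x-x0)*(y-y0) = 0.3*0.8 = 0.24   # weight of the (x1, y1) pixel
# The four weights sum to 1; each corner is weighted by the area of the
# rectangle opposite to it.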
138 | def _transform(theta, input_dim, out_size): 139 | with tf.variable_scope('_transform'): 140 | num_batch = tf.shape(input_dim)[0] 141 | height = tf.shape(input_dim)[1] 142 | width = tf.shape(input_dim)[2] 143 | num_channels = tf.shape(input_dim)[3] 144 | theta = tf.reshape(theta, (-1, 2, 3)) 145 | theta = tf.cast(theta, 'float32') 146 | 147 | # grid of (x_t, y_t, 1), eq (1) in ref [1] 148 | height_f = tf.cast(height, 'float32') 149 | width_f = tf.cast(width, 'float32') 150 | out_height = out_size[0] 151 | out_width = out_size[1] 152 | grid = _meshgrid(out_height, out_width) 153 | grid = tf.expand_dims(grid, 0) 154 | grid = tf.reshape(grid, [-1]) 155 | grid = tf.tile(grid, tf.stack([num_batch])) 156 | grid = tf.reshape(grid, tf.stack([num_batch, 3, -1])) 157 | 158 | # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s) 159 | T_g = tf.matmul(theta, grid) 160 | x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1]) 161 | y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1]) 162 | x_s_flat = tf.reshape(x_s, [-1]) 163 | y_s_flat = tf.reshape(y_s, [-1]) 164 | 165 | input_transformed = _interpolate( 166 | input_dim, x_s_flat, y_s_flat, 167 | out_size) 168 | 169 | output = tf.reshape( 170 | input_transformed, tf.stack([num_batch, out_height, out_width, num_channels])) 171 | return output 172 | 173 | with tf.variable_scope(name): 174 | output = _transform(theta, U, out_size) 175 | return output 176 | 177 | 178 | def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'): 179 | """Batch Spatial Transformer Layer 180 | Parameters 181 | ---------- 182 | U : float 183 | tensor of inputs [num_batch,height,width,num_channels] 184 | thetas : float 185 | a set of transformations for each input [num_batch,num_transforms,6] 186 | out_size : int 187 | the size of the output [out_height,out_width] 188 | Returns: float 189 | Tensor of size [num_batch*num_transforms,out_height,out_width,num_channels] 190 | """ 191 | with tf.variable_scope(name): 192 | num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2]) 193 | indices = [[i]*num_transforms for i in range(num_batch)] 194 | input_repeated = tf.gather(U, tf.reshape(indices, [-1])) 195 | return transformer(input_repeated, thetas, out_size)
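# A minimal usage sketch for batch_transformer (illustrative; note that it
# assumes `transformer` is visible at module scope, and the shapes below are
# placeholders):
#
#   U = tf.placeholder(tf.float32, [2, 28, 28, 1])   # two input images
#   thetas = tf.placeholder(tf.float32, [2, 3, 6])   # three affine transforms each
#   out = batch_transformer(U, thetas, [28, 28])     # -> [6, 28, 28, 1]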
-------------------------------------------------------------------------------- /DIRNet-tensorflow/PlotLogs.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | # import sys 3 | import os 4 | 5 | path = '/home/adrian/Documents/dl2/final_results/' 6 | dic1 = {} 7 | dic2 = {} 8 | files = [] 9 | # for i in range(len(sys.argv)): 10 | filelist = os.listdir(path) 11 | for i in sorted(filelist): 12 | if "small_12" in i: 13 | # if "_6" not in i: 14 | files.append(open(path+i, 'r')) 15 | for inpfile in files: 16 | s = inpfile.name.split('/') 17 | fname = s[len(s)-1] 18 | dic1[fname] = [] 19 | for line in inpfile: 20 | if 'epoch' in line: 21 | split = line.split(' Acc: ') 22 | print(line) 23 | acc = float(split[1]) 24 | loss = split[0][15:22] 25 | # print(str(loss) + ' ' + acc) 26 | dic1[fname].append(acc) 27 | elif 'Eval' in line: 28 | evacc = line.split(' ')[3] 29 | print(evacc) 30 | evacc = float(evacc.strip()) 31 | dic2[fname] = evacc 32 | 33 | 34 | # Plot training curves together 35 | for logfile in dic1: 36 | plt.plot(dic1[logfile], label=logfile) 37 | 38 | plt.legend(loc='best') 39 | plt.ylabel("Acc") 40 | plt.xlabel("Epoch") 41 | plt.title("Comparison of Multitask DIRNets for classification") 42 | plt.show() 43 | 44 | ## Barplot of eval accs: 45 | # x = [] 46 | # y = [] 47 | # for key in dic2.keys(): 48 | # x.append(key) 49 | # y.append(dic2[key]) 50 | # plt.bar(x,y) 51 | # # plt.xticks(x, ('Bill', 'Fred', 'Mary', 'Sue')) 52 | # plt.title("Comparison of Multitask DIRNets for classification") 53 | # plt.ylabel("Acc on Evaluation Set") 54 | # plt.show() 55 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/README.md: -------------------------------------------------------------------------------- 1 | # DIRNet-tensorflow 2 | Tensorflow implementation of DIRNet 3 | 4 | ![alt tag](misc/DIRNet.png) 5 | 6 | ## Usage 7 | ``` 8 | # Training 9 | python train.py 10 | ``` 11 | Intermediate results and model checkpoints can be found in ```tmp``` and ```ckpt```, respectively. 12 | 13 | ``` 14 | # Evaluation 15 | python deploy.py 16 | ``` 17 | Evaluation results can be found in ```result```. 18 | 19 | ## References 20 | - [End-to-End Unsupervised Deformable Image Registration with a Convolutional Neural Network](https://arxiv.org/abs/1704.06065) 21 | - [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025) 22 | - [Tensorflow implementation of STN](https://github.com/daviddao/spatial-transformer-tensorflow/blob/master/spatial_transformer.py) 23 | - [Tensorflow implementation of bicubic interpolation](https://github.com/iwyoo/bicubic_interp-tensorflow) 24 | 25 | ## Author 26 | Inwan Yoo / iwyoo@unist.ac.kr 27 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/Resnet_model.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Contains definitions for the preactivation form of Residual Networks. 16 | Residual networks (ResNets) were originally proposed in: 17 | [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun 18 | Deep Residual Learning for Image Recognition. arXiv:1512.03385 19 | The full preactivation 'v2' ResNet variant implemented in this module was 20 | introduced by: 21 | [2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun 22 | Identity Mappings in Deep Residual Networks. arXiv: 1603.05027 23 | The key difference of the full preactivation 'v2' variant compared to the 24 | 'v1' variant in [1] is the use of batch normalization before every weight layer 25 | rather than after. 26 | """ 27 | 28 | from __future__ import absolute_import 29 | from __future__ import division 30 | from __future__ import print_function 31 | 32 | import tensorflow as tf 33 | 34 | _BATCH_NORM_DECAY = 0.997 35 | _BATCH_NORM_EPSILON = 1e-5 36 | 37 | 38 | def batch_norm_relu(inputs, is_training, data_format): 39 | """Performs a batch normalization followed by a ReLU.""" 40 | # We set fused=True for a significant performance boost.
See 41 | # https://www.tensorflow.org/performance/performance_guide#common_fused_ops 42 | inputs = tf.layers.batch_normalization( 43 | inputs=inputs, axis=1 if data_format == 'channels_first' else 3, 44 | momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, 45 | scale=True, training=is_training, fused=True) 46 | inputs = tf.nn.relu(inputs) 47 | return inputs 48 | 49 | 50 | def fixed_padding(inputs, kernel_size, data_format): 51 | """Pads the input along the spatial dimensions independently of input size. 52 | Args: 53 | inputs: A tensor of size [batch, channels, height_in, width_in] or 54 | [batch, height_in, width_in, channels] depending on data_format. 55 | kernel_size: The kernel to be used in the conv2d or max_pool2d operation. 56 | Should be a positive integer. 57 | data_format: The input format ('channels_last' or 'channels_first'). 58 | Returns: 59 | A tensor with the same format as the input with the data either intact 60 | (if kernel_size == 1) or padded (if kernel_size > 1). 61 | """ 62 | pad_total = kernel_size - 1 63 | pad_beg = pad_total // 2 64 | pad_end = pad_total - pad_beg 65 | 66 | if data_format == 'channels_first': 67 | padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], 68 | [pad_beg, pad_end], [pad_beg, pad_end]]) 69 | else: 70 | padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], 71 | [pad_beg, pad_end], [0, 0]]) 72 | return padded_inputs 73 | 74 | 75 | def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format): 76 | """Strided 2-D convolution with explicit padding.""" 77 | # The padding is consistent and is based only on `kernel_size`, not on the 78 | # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). 79 | if strides > 1: 80 | inputs = fixed_padding(inputs, kernel_size, data_format) 81 | 82 | return tf.layers.conv2d( 83 | inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides, 84 | padding=('SAME' if strides == 1 else 'VALID'), use_bias=False, 85 | kernel_initializer=tf.variance_scaling_initializer(), 86 | data_format=data_format) 87 | 88 | 89 | def building_block(inputs, filters, is_training, projection_shortcut, strides, 90 | data_format): 91 | """Standard building block for residual networks with BN before convolutions. 92 | Args: 93 | inputs: A tensor of size [batch, channels, height_in, width_in] or 94 | [batch, height_in, width_in, channels] depending on data_format. 95 | filters: The number of filters for the convolutions. 96 | is_training: A Boolean for whether the model is in training or inference 97 | mode. Needed for batch normalization. 98 | projection_shortcut: The function to use for projection shortcuts (typically 99 | a 1x1 convolution when downsampling the input). 100 | strides: The block's stride. If greater than 1, this block will ultimately 101 | downsample the input. 102 | data_format: The input format ('channels_last' or 'channels_first'). 103 | Returns: 104 | The output tensor of the block. 105 | """ 106 | shortcut = inputs 107 | inputs = batch_norm_relu(inputs, is_training, data_format) 108 | 109 | # The projection shortcut should come after the first batch norm and ReLU 110 | # since it performs a 1x1 convolution. 
111 | if projection_shortcut is not None: 112 | shortcut = projection_shortcut(inputs) 113 | 114 | inputs = conv2d_fixed_padding( 115 | inputs=inputs, filters=filters, kernel_size=3, strides=strides, 116 | data_format=data_format) 117 | 118 | inputs = batch_norm_relu(inputs, is_training, data_format) 119 | inputs = conv2d_fixed_padding( 120 | inputs=inputs, filters=filters, kernel_size=3, strides=1, 121 | data_format=data_format) 122 | 123 | return inputs + shortcut 124 | 125 | 126 | def bottleneck_block(inputs, filters, is_training, projection_shortcut, 127 | strides, data_format): 128 | """Bottleneck block variant for residual networks with BN before convolutions. 129 | Args: 130 | inputs: A tensor of size [batch, channels, height_in, width_in] or 131 | [batch, height_in, width_in, channels] depending on data_format. 132 | filters: The number of filters for the first two convolutions. Note that the 133 | third and final convolution will use 4 times as many filters. 134 | is_training: A Boolean for whether the model is in training or inference 135 | mode. Needed for batch normalization. 136 | projection_shortcut: The function to use for projection shortcuts (typically 137 | a 1x1 convolution when downsampling the input). 138 | strides: The block's stride. If greater than 1, this block will ultimately 139 | downsample the input. 140 | data_format: The input format ('channels_last' or 'channels_first'). 141 | Returns: 142 | The output tensor of the block. 143 | """ 144 | shortcut = inputs 145 | inputs = batch_norm_relu(inputs, is_training, data_format) 146 | 147 | # The projection shortcut should come after the first batch norm and ReLU 148 | # since it performs a 1x1 convolution. 149 | if projection_shortcut is not None: 150 | shortcut = projection_shortcut(inputs) 151 | 152 | inputs = conv2d_fixed_padding( 153 | inputs=inputs, filters=filters, kernel_size=1, strides=1, 154 | data_format=data_format) 155 | 156 | inputs = batch_norm_relu(inputs, is_training, data_format) 157 | inputs = conv2d_fixed_padding( 158 | inputs=inputs, filters=filters, kernel_size=3, strides=strides, 159 | data_format=data_format) 160 | 161 | inputs = batch_norm_relu(inputs, is_training, data_format) 162 | inputs = conv2d_fixed_padding( 163 | inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, 164 | data_format=data_format) 165 | 166 | return inputs + shortcut 167 | 168 | 169 | def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name, 170 | data_format): 171 | """Creates one layer of blocks for the ResNet model. 172 | Args: 173 | inputs: A tensor of size [batch, channels, height_in, width_in] or 174 | [batch, height_in, width_in, channels] depending on data_format. 175 | filters: The number of filters for the first convolution of the layer. 176 | block_fn: The block to use within the model, either `building_block` or 177 | `bottleneck_block`. 178 | blocks: The number of blocks contained in the layer. 179 | strides: The stride to use for the first convolution of the layer. If 180 | greater than 1, this layer will ultimately downsample the input. 181 | is_training: Either True or False, whether we are currently training the 182 | model. Needed for batch norm. 183 | name: A string name for the tensor output of the block layer. 184 | data_format: The input format ('channels_last' or 'channels_first'). 185 | Returns: 186 | The output tensor of the block layer. 
187 | """ 188 | # Bottleneck blocks end with 4x the number of filters as they start with 189 | filters_out = 4 * filters if block_fn is bottleneck_block else filters 190 | 191 | def projection_shortcut(inputs): 192 | return conv2d_fixed_padding( 193 | inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, 194 | data_format=data_format) 195 | 196 | # Only the first block per block_layer uses projection_shortcut and strides 197 | inputs = block_fn(inputs, filters, is_training, projection_shortcut, strides, 198 | data_format) 199 | 200 | for _ in range(1, blocks): 201 | inputs = block_fn(inputs, filters, is_training, None, 1, data_format) 202 | 203 | return tf.identity(inputs, name) 204 | 205 | 206 | def cifar10_resnet_v2_generator(resnet_size, num_classes, data_format=None): 207 | """Generator for CIFAR-10 ResNet v2 models. 208 | Args: 209 | resnet_size: A single integer for the size of the ResNet model. 210 | num_classes: The number of possible classes for image classification. 211 | data_format: The input format ('channels_last', 'channels_first', or None). 212 | If set to None, the format is dependent on whether a GPU is available. 213 | Returns: 214 | The model function that takes in `inputs` and `is_training` and 215 | returns the output tensor of the ResNet model. 216 | Raises: 217 | ValueError: If `resnet_size` is invalid. 218 | """ 219 | if resnet_size % 6 != 2: 220 | raise ValueError('resnet_size must be 6n + 2:', resnet_size) 221 | 222 | num_blocks = (resnet_size - 2) // 6 223 | 224 | if data_format is None: 225 | data_format = ( 226 | 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last') 227 | 228 | def model(inputs, is_training): 229 | """Constructs the ResNet model given the inputs.""" 230 | if data_format == 'channels_first': 231 | # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). 232 | # This provides a large performance boost on GPU. See 233 | # https://www.tensorflow.org/performance/performance_guide#data_formats 234 | inputs = tf.transpose(inputs, [0, 3, 1, 2]) 235 | 236 | inputs = conv2d_fixed_padding( 237 | inputs=inputs, filters=16, kernel_size=3, strides=1, 238 | data_format=data_format) 239 | inputs = tf.identity(inputs, 'initial_conv') 240 | 241 | inputs = block_layer( 242 | inputs=inputs, filters=16, block_fn=building_block, blocks=num_blocks, 243 | strides=1, is_training=is_training, name='block_layer1', 244 | data_format=data_format) 245 | inputs = block_layer( 246 | inputs=inputs, filters=32, block_fn=building_block, blocks=num_blocks, 247 | strides=2, is_training=is_training, name='block_layer2', 248 | data_format=data_format) 249 | inputs = block_layer( 250 | inputs=inputs, filters=64, block_fn=building_block, blocks=num_blocks, 251 | strides=2, is_training=is_training, name='block_layer3', 252 | data_format=data_format) 253 | 254 | inputs = batch_norm_relu(inputs, is_training, data_format) 255 | inputs = tf.layers.average_pooling2d( 256 | inputs=inputs, pool_size=8, strides=1, padding='VALID', 257 | data_format=data_format) 258 | inputs = tf.identity(inputs, 'final_avg_pool') 259 | inputs = tf.reshape(inputs, [-1, 64]) 260 | inputs = tf.layers.dense(inputs=inputs, units=num_classes) 261 | inputs = tf.identity(inputs, 'final_dense') 262 | return inputs 263 | 264 | return model 265 | 266 | 267 | def imagenet_resnet_v2_generator(block_fn, layers, num_classes, use_as_loc, 268 | data_format=None): 269 | """Generator for ImageNet ResNet v2 models. 
270 | Args: 271 | block_fn: The block to use within the model, either `building_block` or 272 | `bottleneck_block`. 273 | layers: A length-4 array denoting the number of blocks to include in each 274 | layer. Each layer consists of blocks that take inputs of the same size. 275 | num_classes: The number of possible classes for image classification. 276 | data_format: The input format ('channels_last', 'channels_first', or None). 277 | If set to None, the format is dependent on whether a GPU is available. 278 | Returns: 279 | The model function that takes in `inputs` and `is_training` and 280 | returns the output tensor of the ResNet model. 281 | """ 282 | if data_format is None: 283 | data_format = ( 284 | 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last') 285 | 286 | def model(inputs, is_training): 287 | """Constructs the ResNet model given the inputs.""" 288 | if data_format == 'channels_first': 289 | # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). 290 | # This provides a large performance boost on GPU. See 291 | # https://www.tensorflow.org/performance/performance_guide#data_formats 292 | inputs = tf.transpose(inputs, [0, 3, 1, 2]) 293 | 294 | inputs = conv2d_fixed_padding( 295 | inputs=inputs, filters=64, kernel_size=7, strides=2, 296 | data_format=data_format) 297 | inputs = tf.identity(inputs, 'initial_conv') 298 | inputs = tf.layers.max_pooling2d( 299 | inputs=inputs, pool_size=3, strides=2, padding='SAME', 300 | data_format=data_format) 301 | inputs = tf.identity(inputs, 'initial_max_pool') 302 | 303 | inputs = block_layer( 304 | inputs=inputs, filters=64, block_fn=block_fn, blocks=layers[0], 305 | strides=1, is_training=is_training, name='block_layer1', 306 | data_format=data_format) 307 | inputs = block_layer( 308 | inputs=inputs, filters=128, block_fn=block_fn, blocks=layers[1], 309 | strides=2, is_training=is_training, name='block_layer2', 310 | data_format=data_format) 311 | inputs = block_layer( 312 | inputs=inputs, filters=256, block_fn=block_fn, blocks=layers[2], 313 | strides=2, is_training=is_training, name='block_layer3', 314 | data_format=data_format) 315 | inputs = block_layer( 316 | inputs=inputs, filters=512, block_fn=block_fn, blocks=layers[3], 317 | strides=2, is_training=is_training, name='block_layer4', 318 | data_format=data_format) 319 | if use_as_loc: 320 | return inputs 321 | 322 | inputs = batch_norm_relu(inputs, is_training, data_format) 323 | inputs = tf.layers.average_pooling2d( 324 | inputs=inputs, pool_size=7, strides=1, padding='VALID', 325 | data_format=data_format) 326 | inputs = tf.identity(inputs, 'final_avg_pool') 327 | inputs = tf.reshape(inputs, 328 | [-1, 1024 if block_fn is building_block else 2048]) 329 | inputs = tf.layers.dense(inputs=inputs, units=num_classes) 330 | inputs = tf.identity(inputs, 'final_dense') 331 | return inputs 332 | 333 | return model 334 | 335 | 336 | def imagenet_resnet_v2(resnet_size, num_classes, use_as_loc=False, data_format=None): 337 | """Returns the ResNet model for a given size and number of output classes.""" 338 | model_params = { 339 | 18: {'block': building_block, 'layers': [2, 2, 2, 2]}, 340 | 34: {'block': building_block, 'layers': [3, 4, 6, 3]}, 341 | 50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]}, 342 | 101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]}, 343 | 152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]}, 344 | 200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]} 345 | } 346 | 347 | if resnet_size not in model_params: 348 | 
raise ValueError('Not a valid resnet_size:', resnet_size) 349 | 350 | params = model_params[resnet_size] 351 | return imagenet_resnet_v2_generator( 352 | params['block'], params['layers'], num_classes, data_format=data_format, use_as_loc=use_as_loc) -------------------------------------------------------------------------------- /DIRNet-tensorflow/WarpST.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from bicubic_interp import bicubic_interp_2d 3 | 4 | def WarpST(U, V, out_size, name='DeformableTransformer', **kwargs): 5 | """Deformable Transformer Layer with bicubic interpolation 6 | U : tf.float, [num_batch, height, width, num_channels]. 7 | Input tensor to warp 8 | V : tf.float, [num_batch, height, width, 2] 9 | Warp map. It is interpolated to out_size. 10 | out_size: a tuple of two ints 11 | The size of the output of the network (height, width) 12 | ---------- 13 | References : 14 | https://github.com/daviddao/spatial-transformer-tensorflow/blob/master/spatial_transformer.py 15 | """ 16 | 17 | def _repeat(x, n_repeats): 18 | with tf.variable_scope('_repeat'): 19 | rep = tf.transpose( 20 | tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0]) 21 | rep = tf.cast(rep, 'int32') 22 | x = tf.matmul(tf.reshape(x, (-1, 1)), rep) 23 | return tf.reshape(x, [-1]) 24 | 25 | def _interpolate(im, x, y, out_size): 26 | with tf.variable_scope('_interpolate'): 27 | # constants 28 | num_batch = tf.shape(im)[0] 29 | height = tf.shape(im)[1] 30 | width = tf.shape(im)[2] 31 | channels = tf.shape(im)[3] 32 | 33 | x = tf.cast(x, 'float32') 34 | y = tf.cast(y, 'float32') 35 | height_f = tf.cast(height, 'float32') 36 | width_f = tf.cast(width, 'float32') 37 | out_height = out_size[0] 38 | out_width = out_size[1] 39 | zero = tf.zeros([], dtype='int32') 40 | max_y = tf.cast(tf.shape(im)[1] - 1, 'int32') 41 | max_x = tf.cast(tf.shape(im)[2] - 1, 'int32') 42 | 43 | # scale indices from [-1, 1] to [0, width/height] 44 | x = (x + 1.0)*(width_f) / 2.0 45 | y = (y + 1.0)*(height_f) / 2.0 46 | 47 | # do sampling 48 | x0 = tf.cast(tf.floor(x), 'int32') 49 | x1 = x0 + 1 50 | y0 = tf.cast(tf.floor(y), 'int32') 51 | y1 = y0 + 1 52 | 53 | x0 = tf.clip_by_value(x0, zero, max_x) 54 | x1 = tf.clip_by_value(x1, zero, max_x) 55 | y0 = tf.clip_by_value(y0, zero, max_y) 56 | y1 = tf.clip_by_value(y1, zero, max_y) 57 | dim2 = width 58 | dim1 = width*height 59 | base = _repeat(tf.range(num_batch)*dim1, out_height*out_width) 60 | base_y0 = base + y0*dim2 61 | base_y1 = base + y1*dim2 62 | idx_a = base_y0 + x0 63 | idx_b = base_y1 + x0 64 | idx_c = base_y0 + x1 65 | idx_d = base_y1 + x1 66 | 67 | # use indices to lookup pixels in the flat image and restore 68 | # channels dim 69 | im_flat = tf.reshape(im, tf.stack([-1, channels])) 70 | im_flat = tf.cast(im_flat, 'float32') 71 | Ia = tf.gather(im_flat, idx_a) 72 | Ib = tf.gather(im_flat, idx_b) 73 | Ic = tf.gather(im_flat, idx_c) 74 | Id = tf.gather(im_flat, idx_d) 75 | 76 | # and finally calculate interpolated values 77 | x0_f = tf.cast(x0, 'float32') 78 | x1_f = tf.cast(x1, 'float32') 79 | y0_f = tf.cast(y0, 'float32') 80 | y1_f = tf.cast(y1, 'float32') 81 | wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1) 82 | wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1) 83 | wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1) 84 | wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1) 85 | output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id]) 86 | return output 87 | 88 | def _meshgrid(height, width): 89 | with 
tf.variable_scope('_meshgrid'): 90 | # This should be equivalent to: 91 | # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width), 92 | # np.linspace(-1, 1, height)) 93 | # ones = np.ones(np.prod(x_t.shape)) 94 | # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones]) 95 | x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), 96 | tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0])) 97 | y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), 98 | tf.ones(shape=tf.stack([1, width]))) 99 | 100 | x_t_flat = tf.reshape(x_t, (1, -1)) 101 | y_t_flat = tf.reshape(y_t, (1, -1)) 102 | 103 | grid = tf.concat([x_t_flat, y_t_flat], 0) 104 | return grid 105 | 106 | def _transform(V, U, out_size): 107 | with tf.variable_scope('_transform'): 108 | num_batch = tf.shape(U)[0] 109 | height = tf.shape(U)[1] 110 | width = tf.shape(U)[2] 111 | num_channels = tf.shape(U)[3] 112 | 113 | # grid of (x_t, y_t, 1), eq (1) in ref [1] 114 | height_f = tf.cast(height, 'float32') 115 | width_f = tf.cast(width, 'float32') 116 | out_height = out_size[0] 117 | out_width = out_size[1] 118 | grid = _meshgrid(out_height, out_width) # [2, h*w] 119 | grid = tf.reshape(grid, [-1]) # [2*h*w] 120 | grid = tf.tile(grid, tf.stack([num_batch])) # [n*2*h*w] 121 | grid = tf.reshape(grid, tf.stack([num_batch, 2, -1])) # [n, 2, h*w] 122 | 123 | # transform (x, y)^T -> (x+vx, x+vy)^T 124 | V = bicubic_interp_2d(V, out_size) 125 | V = tf.transpose(V, [0, 3, 1, 2]) # [n, 2, h, w] 126 | V = tf.reshape(V, [num_batch, 2, -1]) # [n, 2, h*w] 127 | T_g = tf.add(V, grid) # [n, 2, h*w] 128 | 129 | x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1]) 130 | y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1]) 131 | x_s_flat = tf.reshape(x_s, [-1]) 132 | y_s_flat = tf.reshape(y_s, [-1]) 133 | 134 | input_transformed = _interpolate( 135 | U, x_s_flat, y_s_flat, out_size) 136 | 137 | output = tf.reshape( 138 | input_transformed, 139 | tf.stack([num_batch, out_height, out_width, num_channels])) 140 | return output 141 | 142 | with tf.variable_scope(name): 143 | output = _transform(V, U, out_size) 144 | return output 145 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/__pycache__/WarpST.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/__pycache__/WarpST.cpython-36.pyc -------------------------------------------------------------------------------- /DIRNet-tensorflow/__pycache__/bicubic_interp.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/__pycache__/bicubic_interp.cpython-36.pyc -------------------------------------------------------------------------------- /DIRNet-tensorflow/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /DIRNet-tensorflow/__pycache__/data.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/__pycache__/data.cpython-36.pyc -------------------------------------------------------------------------------- /DIRNet-tensorflow/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /DIRNet-tensorflow/__pycache__/ops.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/__pycache__/ops.cpython-36.pyc -------------------------------------------------------------------------------- /DIRNet-tensorflow/bicubic_interp.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | def bicubic_interp_2d(input_, new_size): 5 | """ 6 | Args : 7 | input_ : Input tensor. Its shape should be 8 | [batch_size, height, width, channel]. 9 | In this implementation, the shape should be fixed for speed. 10 | new_size : The output size [new_height, new_width] 11 | ref : http://blog.demofox.org/2015/08/15/resizing-images-with-bicubic-interpolation/ 12 | """ 13 | 14 | shape = input_.get_shape().as_list() 15 | batch_size = shape[0] 16 | height = shape[1] 17 | width = shape[2] 18 | channel = shape[3] 19 | 20 | def _hermite(A, B, C, D, t): 21 | a = A * -0.5 + B * 1.5 + C * -1.5 + D * 0.5 22 | b = A + B * -2.5 + C * 2.0 + D * -0.5 23 | c = A * -0.5 + C * 0.5 24 | d = B 25 | 26 | return a*t*t*t + b*t*t + c*t + d 27 | 28 | def _get_grid_array(n_i, y_i, x_i, c_i): 29 | n, y, x, c = np.meshgrid(n_i, y_i, x_i, c_i, indexing='ij') 30 | n = np.expand_dims(n, axis=4) 31 | y = np.expand_dims(y, axis=4) 32 | x = np.expand_dims(x, axis=4) 33 | c = np.expand_dims(c, axis=4) 34 | return np.concatenate([n,y,x,c], axis=4) 35 | 36 | def _get_frac_array(x_d, y_d, n, c): 37 | x = x_d.shape[0] 38 | y = y_d.shape[0] 39 | x_t = x_d.reshape([1, 1, -1, 1]) 40 | y_t = y_d.reshape([1, -1, 1, 1]) 41 | y_t = np.tile(y_t, (n,1,x,c)) 42 | x_t = np.tile(x_t, (n,y,1,c)) 43 | return x_t, y_t 44 | 45 | def _get_index_tensor(grid, x, y): 46 | new_grid = np.array(grid) 47 | 48 | grid_y = grid[:,:,:,:,1] + y 49 | grid_x = grid[:,:,:,:,2] + x 50 | grid_y = np.clip(grid_y, 0, height-1) 51 | grid_x = np.clip(grid_x, 0, width-1) 52 | 53 | new_grid[:,:,:,:,1] = grid_y 54 | new_grid[:,:,:,:,2] = grid_x 55 | 56 | return tf.constant(new_grid, dtype=tf.int32) 57 | 58 | new_height = new_size[0] 59 | new_width = new_size[1] 60 | 61 | n_i = np.arange(batch_size) 62 | c_i = np.arange(channel) 63 | 64 | y_f = np.linspace(0., height-1, new_height) 65 | y_i = y_f.astype(np.int32) 66 | y_d = y_f - np.floor(y_f) 67 | 68 | x_f = np.linspace(0., width-1, new_width) 69 | x_i = x_f.astype(np.int32) 70 | x_d = x_f - np.floor(x_f) 71 | 72 | grid = _get_grid_array(n_i, y_i, x_i, c_i) 73 | x_t, y_t = _get_frac_array(x_d, y_d, batch_size, channel) 74 | 75 | i_00 = _get_index_tensor(grid, -1, -1) 76 | i_10 = _get_index_tensor(grid, +0, -1) 77 | i_20 = _get_index_tensor(grid, +1, -1) 78 | i_30 = _get_index_tensor(grid, +2, -1) 79 | 80 | i_01 = _get_index_tensor(grid, -1, +0) 81 | i_11 = _get_index_tensor(grid, +0, +0) 82 | i_21 = 
_get_index_tensor(grid, +1, +0) 83 | i_31 = _get_index_tensor(grid, +2, +0) 84 | 85 | i_02 = _get_index_tensor(grid, -1, +1) 86 | i_12 = _get_index_tensor(grid, +0, +1) 87 | i_22 = _get_index_tensor(grid, +1, +1) 88 | i_32 = _get_index_tensor(grid, +2, +1) 89 | 90 | i_03 = _get_index_tensor(grid, -1, +2) 91 | i_13 = _get_index_tensor(grid, +0, +2) 92 | i_23 = _get_index_tensor(grid, +1, +2) 93 | i_33 = _get_index_tensor(grid, +2, +2) 94 | 95 | p_00 = tf.gather_nd(input_, i_00) 96 | p_10 = tf.gather_nd(input_, i_10) 97 | p_20 = tf.gather_nd(input_, i_20) 98 | p_30 = tf.gather_nd(input_, i_30) 99 | 100 | p_01 = tf.gather_nd(input_, i_01) 101 | p_11 = tf.gather_nd(input_, i_11) 102 | p_21 = tf.gather_nd(input_, i_21) 103 | p_31 = tf.gather_nd(input_, i_31) 104 | 105 | p_02 = tf.gather_nd(input_, i_02) 106 | p_12 = tf.gather_nd(input_, i_12) 107 | p_22 = tf.gather_nd(input_, i_22) 108 | p_32 = tf.gather_nd(input_, i_32) 109 | 110 | p_03 = tf.gather_nd(input_, i_03) 111 | p_13 = tf.gather_nd(input_, i_13) 112 | p_23 = tf.gather_nd(input_, i_23) 113 | p_33 = tf.gather_nd(input_, i_33) 114 | 115 | col0 = _hermite(p_00, p_10, p_20, p_30, x_t) 116 | col1 = _hermite(p_01, p_11, p_21, p_31, x_t) 117 | col2 = _hermite(p_02, p_12, p_22, p_32, x_t) 118 | col3 = _hermite(p_03, p_13, p_23, p_33, x_t) 119 | value = _hermite(col0, col1, col2, col3, y_t) 120 | 121 | return value 122 | 123 | 124 | # Future : bicubic_interp_3d 125 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/ckpt/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "model.ckpt" 2 | all_model_checkpoint_paths: "model.ckpt" 3 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/ckpt/model.ckpt.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/ckpt/model.ckpt.data-00000-of-00001 -------------------------------------------------------------------------------- /DIRNet-tensorflow/ckpt/model.ckpt.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/ckpt/model.ckpt.index -------------------------------------------------------------------------------- /DIRNet-tensorflow/ckpt/model.ckpt.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/ckpt/model.ckpt.meta -------------------------------------------------------------------------------- /DIRNet-tensorflow/config.py: -------------------------------------------------------------------------------- 1 | class Config(object): 2 | pass 3 | 4 | def get_config(is_train): 5 | config = Config() 6 | config.os_is_windows = False 7 | if is_train: 8 | config.use_saved_data = False 9 | config.batch_size = 1 10 | config.im_size = [222, 247] 11 | config.lr = 1e-4 12 | config.iteration = 170000 # ca 20 epochs 13 | config.s_dir="/home/adrian/Documents/dl2/Cardiac/ES_rescaled/" 14 | config.d_dir="/home/adrian/Documents/dl2/Cardiac/ED_rescaled/" 15 | config.label_path="./label.txt" 16 | config.s_data_filename="./s_data_save" 17 | config.d_data_filename="./d_data_save" 18 | 
config.label_filename="./label_save" 19 | config.s_data_eval_filename="./s_data_eval_save" 20 | config.d_data_eval_filename="./d_data_eval_save" 21 | config.label_eval_filename="./label_eval_save" 22 | config.save=True 23 | config.tmp_dir = "tmp" 24 | config.ckpt_dir = "ckpt" 25 | config.use_AffineST=False 26 | config.checkpoint_distance=50000 27 | config.result_dir = "result" 28 | config.eval_split_fraction = 0.2 29 | # else: 30 | # config.use_saved_data = True 31 | # config.s_dir="../Cardiac/ES" 32 | # config.d_dir="../Cardiac/ED" 33 | # config.s_data_filename="./s_data_save" 34 | # config.d_data_filename="./d_data_save" 35 | # config.save=True 36 | # config.batch_size = 25 37 | # config.im_size = [105, 128] 38 | # config.use_AffineST=False 39 | # 40 | # config.result_dir = "result" 41 | # config.ckpt_dir = "ckpt" 42 | return config 43 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/create_file_structure.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import os 3 | import re 4 | import shutil 5 | 6 | path_src="./Medic_data/Alzeimer_small" 7 | path_dest="./Medic_data/Alzeimer_ordered" 8 | 9 | # substring that selects the desired slice dimension in the file names; 10 | dim_string = ""  # placeholder filter: an empty string keeps every .png 11 | pathlist = Path(path_src).glob('**/*.png') 12 | for image_path in pathlist: 13 | if dim_string not in str(image_path): 14 | continue 15 | dest = str(image_path).split(".")[2] 16 | if not os.path.isdir(path_dest+"/"+dest): 17 | os.makedirs(path_dest+"/"+dest) 18 | shutil.copy(image_path, path_dest+"/"+dest) 19 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gzip 3 | from pathlib import Path 4 | #import imageio 5 | import scipy.ndimage as ndimage 6 | from skimage.transform import resize 7 | from skimage import color 8 | import scipy.misc 9 | from config import get_config 10 | import random 11 | import h5py 12 | 13 | 14 | 15 | class DIRNetDatahandler(object): 16 | 17 | ''' 18 | reads the data 19 | :param config: config object 20 | ''' 21 | def __init__(self, config): 22 | self.s_data = [] 23 | self.d_data = [] 24 | self.config = config 25 | self.labels = [] 26 | # read data from folder 27 | if not config.use_saved_data: 28 | print("getting data") 29 | self.s_data, s_data_names = self.get_data(self.config.s_dir) 30 | self.d_data, d_data_names = self.get_data(self.config.d_dir) 31 | label_names, labels_raw = self.load_labels(self.config.label_path) 32 | index = 0 33 | 34 | # delete files that are only contained in one of the folders 35 | to_be_deleted = [] 36 | for i in s_data_names: 37 | if i in d_data_names and i in label_names: 38 | index += 1 39 | 40 | self.labels.append(int(labels_raw[label_names.index(i)])) 41 | else: 42 | to_be_deleted.append(index) 43 | index += 1 44 | self.s_data = np.delete(self.s_data, to_be_deleted, 0) 45 | self.labels = np.asarray(self.labels) 46 | 47 | # delete files that are only contained in one of the folders 48 | to_be_deleted = [] 49 | index = 0 50 | for i in d_data_names: 51 | if i in s_data_names: 52 | index += 1 53 | else: 54 | to_be_deleted.append(index) 55 | index += 1 56 | self.d_data = np.delete(self.d_data, to_be_deleted, 0) 57 |
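# Worked example of the split below: with eval_split_fraction = 0.2 and 20
# patients per disease class, round(0.2 * 20) = 4 patients per class are held
# out, i.e. 5 * 4 = 20 evaluation patients in total; every slice pair that
# belongs to one of those patients goes to the evaluation set, the rest to
# training.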
x not in eval_idx] 62 | 63 | # There are 20 patients for each of the 5 disease classes 64 | # we want to get so many patients for eval from each class 65 | amnt_pat_for_eval = int(np.round(config.eval_split_fraction * 20)) 66 | eval_patients_ids = np.empty((5, amnt_pat_for_eval)) # stores the indices for the evaluation date for each disease 67 | # eval_idx = np.empty(config.eval_split_fraction * len(self.d_data)) 68 | # train_idx = np.empty((1-config.eval_split_fraction) * len(self.d_data)) 69 | eval_idx = [] 70 | train_idx = [] 71 | for i in range(5): 72 | eval_patients_ids[i] =random.sample(range(1,20), amnt_pat_for_eval) 73 | # print(eval_patients_ids[i]) 74 | for i in range(len(self.s_data)-1): 75 | # label_names, labels_raw 76 | image_name = label_names[i] 77 | num = image_name.split("_")[0][-3:] 78 | pat_id = num.lstrip("0") 79 | pat_label = int(labels_raw[i]) 80 | # if this patient 81 | if (int(pat_id)%20)+1 in eval_patients_ids[pat_label]: 82 | eval_idx.append(i) 83 | else: 84 | train_idx.append(i) 85 | print('eval pairs size ' + str(len(eval_idx))) 86 | print('train pairs size ' + str(len(train_idx))) 87 | s_train = self.s_data[train_idx] 88 | s_eval = self.s_data[eval_idx] 89 | d_train = self.d_data[train_idx] 90 | d_eval = self.d_data[eval_idx] 91 | labels_train = self.labels[train_idx] 92 | labels_eval = self.labels[eval_idx] 93 | 94 | a = [0, 0, 0, 0, 0] 95 | for i in labels_eval: 96 | a[int(i)] += 1 97 | print('distribution of labels in eval set:') 98 | print(a) 99 | b = [0, 0, 0, 0, 0] 100 | for i in labels_train: 101 | b[int(i)] += 1 102 | print('distribution of labels in train set:') 103 | print(b) 104 | self.s_data = s_train 105 | self.d_data = d_train 106 | self.labels = labels_train 107 | 108 | self.s_data_eval = s_eval 109 | self.d_data_eval = d_eval 110 | self.labels_eval = labels_eval 111 | 112 | # save numpy arrays as .h5 if config.save is true 113 | if self.config.save: 114 | with h5py.File('{}.h5'.format(self.config.s_data_filename), 'w') as hf: 115 | hf.create_dataset(self.config.s_data_filename, data=self.s_data) 116 | with h5py.File('{}.h5'.format(self.config.d_data_filename), 'w') as hf: 117 | hf.create_dataset(self.config.d_data_filename, data=self.d_data) 118 | with h5py.File('{}.h5'.format(self.config.label_filename), 'w') as hf: 119 | hf.create_dataset(self.config.label_filename, data=self.labels) 120 | 121 | with h5py.File('{}.h5'.format(self.config.s_data_eval_filename), 'w') as hf: 122 | hf.create_dataset(self.config.s_data_eval_filename, data=self.s_data_eval) 123 | with h5py.File('{}.h5'.format(self.config.d_data_eval_filename), 'w') as hf: 124 | hf.create_dataset(self.config.d_data_eval_filename, data=self.d_data_eval) 125 | with h5py.File('{}.h5'.format(self.config.label_eval_filename), 'w') as hf: 126 | hf.create_dataset(self.config.label_eval_filename, data=self.labels_eval) 127 | else: 128 | 129 | # load numpy arrays from .h5 files 130 | def h5py_dataset_iterator(g, prefix=''): 131 | for key in g.keys(): 132 | item = g[key] 133 | path = '{}/{}'.format(prefix, key) 134 | if isinstance(item, h5py.Dataset): # test for dataset 135 | yield (path, item) 136 | elif isinstance(item, h5py.Group): # test for group (go down) 137 | yield from h5py_dataset_iterator(item, path) 138 | 139 | # load s_data 140 | with h5py.File(self.config.s_data_filename + '.h5', 'r') as hf: 141 | for (path, dset) in h5py_dataset_iterator(hf): 142 | self.s_data = hf[dset.name][:] 143 | 144 | # load d_data 145 | with h5py.File(self.config.d_data_filename + '.h5', 'r') as hf: 146 | 
for (path, dset) in h5py_dataset_iterator(hf): 147 | self.d_data = hf[dset.name][:] 148 | 149 | with h5py.File(self.config.label_filename + '.h5', 'r') as hf: 150 | for (path, dset) in h5py_dataset_iterator(hf): 151 | self.labels = hf[dset.name][:] 152 | 153 | # load s_data 154 | with h5py.File(self.config.s_data_eval_filename + '.h5', 'r') as hf: 155 | for (path, dset) in h5py_dataset_iterator(hf): 156 | self.s_data_eval = hf[dset.name][:] 157 | 158 | # load d_data 159 | with h5py.File(self.config.d_data_eval_filename + '.h5', 'r') as hf: 160 | for (path, dset) in h5py_dataset_iterator(hf): 161 | self.d_data_eval = hf[dset.name][:] 162 | 163 | with h5py.File(self.config.label_eval_filename + '.h5', 'r') as hf: 164 | for (path, dset) in h5py_dataset_iterator(hf): 165 | self.labels_eval = hf[dset.name][:] 166 | 167 | 168 | def load_labels(self, path): 169 | """ 170 | 171 | :param path: path to label file 172 | :type path: string 173 | :return: pathnames and labels 174 | :rtype: list,list 175 | """ 176 | pathnames = [] 177 | labels = [] 178 | 179 | with open(path) as label_f: 180 | label_data = label_f.readlines() 181 | for line in label_data: 182 | if line is not "": 183 | line = line.split(',') 184 | slice_number = line[0].split('.') 185 | slice_number = slice_number[len(slice_number) - 2] 186 | # print((line[0].split(".")[0].split('_')[0]) + "_" + slice_number) 187 | pathnames.append((line[0].split(".")[0].split('_')[0]) + "_" + slice_number) 188 | labels.append(line[1]) 189 | return pathnames, labels 190 | 191 | def extract_patientnumber(self, filepath): 192 | ''' 193 | extract patient number from filename 194 | :param filepath: path to file 195 | return: patientnumber 196 | ''' 197 | if self.config.os_is_windows: 198 | image_name = str(filepath).split("\\") 199 | else: 200 | image_name = str(filepath).split("/") 201 | image_name = image_name[len(image_name) - 1] 202 | num = image_name.split("_")[0][-3:] 203 | return num.lstrip("0") 204 | 205 | def get_data(self, path): 206 | ''' 207 | load images from path into numpy array 208 | :param path: path to folder 209 | return: numpy array with images and list with pathnames (images,pathnames) 210 | ''' 211 | 212 | pathlist = Path(path).glob('**/*.png') 213 | imagelist = [] 214 | pathnames = [] 215 | if self.config.os_is_windows: 216 | splitchar = '\\' 217 | else: 218 | splitchar = '/' 219 | for image_path in pathlist: 220 | print(image_path) 221 | # maybe interesting at some point 222 | slice_number = str(image_path).split(".") 223 | slice_number = slice_number[len(slice_number) - 2] 224 | if str(image_path).split(".")[len(slice_number) - 5].split(splitchar)[-1].split('_')[0].startswith("nz"): 225 | # print(str(image_path).split(".")[len(slice_number) - 6].split(splitchar)[-1].split('_')[ 226 | # 0] + "_" + slice_number) 227 | pathnames.append( 228 | str(image_path).split(".")[len(slice_number) - 6].split(splitchar)[-1].split('_')[ 229 | 0] + "_" + slice_number) 230 | else: 231 | # print(str(image_path).split(".")[len(slice_number) - 5].split(splitchar)[-1].split('_')[ 232 | # 0] + "_" + slice_number) 233 | pathnames.append( 234 | str(image_path).split(".")[len(slice_number) - 5].split(splitchar)[-1].split('_')[ 235 | 0] + "_" + slice_number) 236 | num = str(image_path).split(".")[2] 237 | 238 | # load images from file; rgb-> grayscale; resize to size defined in config.im_size 239 | # res_im = resize(color.rgb2gray(imageio.imread(str(image_path))), self.config.im_size, mode='constant') 240 | res_im = ndimage.imread(str(image_path), 
flatten=True) 241 | imagelist.append(res_im) 242 | 243 | # list to numpy array 244 | imagelist = np.asarray(imagelist) 245 | 246 | # create colorchannel for grayscaled images 247 | # not needed if rgb is used -> comment this line out 248 | imagelist = np.expand_dims(imagelist, axis=4) 249 | 250 | return imagelist, pathnames 251 | 252 | def sample_pair(self, batch_size): 253 | ''' 254 | sample random pairs of moving and fixed images 255 | :param batch_size: number of moving/fixed images to be retrieved 256 | return: numpy arrays x and y with shape [batch_size, height, width,color_channels] and numpy array with all labels 257 | ''' 258 | choice = np.random.choice(len(self.d_data) - 1, batch_size) 259 | 260 | x = self.s_data[choice] 261 | y = self.d_data[choice] 262 | labels = self.labels[choice] 263 | 264 | return x, y, labels 265 | 266 | 267 | def get_pair_by_idx(self, idx, batch_size=1): 268 | ''' 269 | sample a batch of pairs of moving and fixed images and label, starting by the pair at the index. 270 | If index+batch_size is not a valid index, the missing ones are sampled starting at index 0 271 | :param batch_size: number of moving/fixed images images to be retrieved 272 | :param idx: index in the data from where the samples should be retrived. 273 | return: numpy arrays x and y with shape [batch_size, height, width,color_channels] and numpy array with all labels 274 | ''' 275 | x = self.s_data[np.expand_dims(idx, 0)] 276 | y = self.d_data[np.expand_dims(idx, 0)] 277 | labels = self.labels[np.expand_dims(idx, 0)] 278 | # :TODO: adjust for batchsize other than 1 279 | return x, y, labels 280 | 281 | 282 | def get_eval_pair_by_idx(self, idx, batch_size=1): 283 | ''' 284 | sample a batch of pairs of moving and fixed images and label, starting by the pair at the index. 285 | If index+batch_size is not a valid index, the missing ones are sampled starting at index 0 286 | :param batch_size: number of moving/fixed images images to be retrieved 287 | :param idx: index in the data from where the samples should be retrived. 
288 | return: numpy arrays x and y with shape [batch_size, height, width,color_channels] and numpy array with all labels 289 | ''' 290 | x = self.s_data_eval[np.expand_dims(idx, 0)] 291 | y = self.d_data_eval[np.expand_dims(idx, 0)] 292 | labels = self.labels_eval[np.expand_dims(idx, 0)] 293 | # :TODO: adjust for batchsize other than 1 294 | return x, y, labels -------------------------------------------------------------------------------- /DIRNet-tensorflow/data_org.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class MNISTDataHandler(object): 4 | """ 5 | Members : 6 | is_train - Options for sampling 7 | path - MNIST data path 8 | data - a list of np.array w/ shape [batch_size, 28, 28, 1] 9 | """ 10 | def __init__(self, path, is_train): 11 | self.is_train = is_train 12 | self.path = path 13 | self.data = self._get_data() 14 | 15 | def _get_data(self): 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | from tensorflow.contrib.learn.python.learn.datasets.base \ 18 | import maybe_download 19 | from tensorflow.contrib.learn.python.learn.datasets.mnist \ 20 | import extract_images, extract_labels 21 | 22 | if self.is_train: 23 | IMAGES = 'train-images-idx3-ubyte.gz' 24 | LABELS = 'train-labels-idx1-ubyte.gz' 25 | else : 26 | IMAGES = 't10k-images-idx3-ubyte.gz' 27 | LABELS = 't10k-labels-idx1-ubyte.gz' 28 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' 29 | 30 | 31 | # local_file = maybe_download(IMAGES, self.path, SOURCE_URL) 32 | with open("MNIST_data/"+IMAGES, 'rb') as f: 33 | images = extract_images(f) 34 | # local_file = maybe_download(LABELS, self.path, SOURCE_URL) 35 | with open("MNIST_data/"+LABELS, 'rb') as f: 36 | labels = extract_labels(f, one_hot=False) 37 | 38 | values, counts = np.unique(labels, return_counts=True) 39 | 40 | data = [] 41 | for i in range(10): 42 | label = values[i] 43 | count = counts[i] 44 | arr = np.empty([count, 28, 28, 1], dtype=np.float32) 45 | data.append(arr) 46 | 47 | l_iter = [0]*10 48 | for i in range(labels.shape[0]): 49 | label = labels[i] 50 | data[label][l_iter[label]] = images[i] / 255. 
51 | l_iter[label] += 1 52 | 53 | return data 54 | 55 | def sample_pair(self, batch_size, label=None): 56 | label = np.random.randint(10) if label is None else label 57 | images = self.data[label] 58 | 59 | choice1 = np.random.choice(images.shape[0], batch_size) 60 | choice2 = np.random.choice(images.shape[0], batch_size) 61 | x = images[choice1] 62 | y = images[choice2] 63 | 64 | return x, y 65 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/deploy.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from models import DIRNet,ResNet 3 | from config import get_config 4 | from data import DIRNetDatahandler 5 | import numpy as np 6 | from ops import mkdir 7 | 8 | 9 | def main(): 10 | tf.reset_default_graph() 11 | eval_resnet() 12 | # sess_config = tf.ConfigProto() 13 | # sess_config.gpu_options.allow_growth = True 14 | # sess = tf.Session(config=sess_config) 15 | # config = get_config(is_train=True) 16 | # mkdir(config.result_dir) 17 | # 18 | # reg = DIRNet(sess, config, "DIRNet", is_train=False) 19 | # reg.restore(config.ckpt_dir) 20 | # dh = DIRNetDatahandler(config=config) 21 | # 22 | # # print(reg.calc_rmse_all(dh.s_data, dh.d_data, config.result_dir + "/",save_images=False)) 23 | # batch_x, batch_y, batch_labels = dh.sample_pair(config.batch_size) 24 | # # prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels) 25 | # # print(str(prediction)) 26 | # 27 | # amnt_pics = np.shape(dh.d_data)[0] 28 | # acc = 0 29 | # prev_x = np.empty(shape=(1, 222, 247)) 30 | # amnt_eva = np.shape(dh.d_data_eval)[0] 31 | # for i in range(amnt_eva): 32 | # batch_x, batch_y, batch_labels = dh.get_eval_pair_by_idx(i) 33 | # if np.array_equal(prev_x, batch_x): 34 | # print('weird') 35 | # prev_x = batch_x 36 | # # loss = reg.fit((1, batch_x[0], batch_x[1], batch_x[2]), 37 | # # (1, batch_y[0], batch_y[1], batch_y[2])) 38 | # prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels) 39 | # truth = int(batch_labels[0]) 40 | # # print("pred {} truth {}".format(prediction, truth)) 41 | # if prediction == truth: 42 | # acc += 1 43 | # print("Acc: {0:.4f}".format( acc / amnt_eva)) 44 | # # to use the deploy func from models 45 | # 46 | # # for i in range(10): 47 | # # result_i_dir = config.result_dir+"/{}".format(i) 48 | # # mkdir(result_i_dir) 49 | # # 50 | # # batch_x, batch_y = dh.sample_pair(config.batch_size, i) 51 | # # reg.deploy(result_i_dir, batch_x, batch_y) 52 | 53 | def eval_resnet(): 54 | sess_config = tf.ConfigProto() 55 | sess_config.gpu_options.allow_growth = True 56 | sess = tf.Session(config=sess_config) 57 | config = get_config(is_train=True) 58 | mkdir(config.result_dir) 59 | 60 | reg = ResNet(sess, config, "DIRNet", is_train=False) 61 | reg.restore(config.ckpt_dir) 62 | dh = DIRNetDatahandler(config=config) 63 | 64 | # print(reg.calc_rmse_all(dh.s_data, dh.d_data, config.result_dir + "/",save_images=False)) 65 | batch_x, batch_y, batch_labels = dh.sample_pair(config.batch_size) 66 | # prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels) 67 | # print(str(prediction)) 68 | 69 | amnt_pics = np.shape(dh.d_data)[0] 70 | acc = 0 71 | prev_x = np.empty(shape=(1, 222, 247)) 72 | amnt_eva = np.shape(dh.d_data_eval)[0] 73 | for i in range(amnt_eva): 74 | batch_x, batch_y, batch_labels = dh.get_eval_pair_by_idx(i) 75 | if np.array_equal(prev_x, batch_x): 76 | print('weird') 77 | prev_x = batch_x 78 | # loss = reg.fit((1, batch_x[0], batch_x[1], 
batch_x[2]), 79 | # (1, batch_y[0], batch_y[1], batch_y[2])) 80 | prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels) 81 | print(prediction,"::", batch_labels[0]) 82 | truth = int(batch_labels[0]) 83 | # print("pred {} truth {}".format(prediction, truth)) 84 | if prediction == truth: 85 | acc += 1 86 | print("Acc: {0:.4f}".format(acc / amnt_eva)) 87 | # to use the deploy func from models 88 | 89 | # for i in range(10): 90 | # result_i_dir = config.result_dir+"/{}".format(i) 91 | # mkdir(result_i_dir) 92 | # 93 | # batch_x, batch_y = dh.sample_pair(config.batch_size, i) 94 | # reg.deploy(result_i_dir, batch_x, batch_y) 95 | 96 | if __name__ == "__main__": 97 | main() 98 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/label.txt: -------------------------------------------------------------------------------- 1 | patient001_frame01.nz.0.png,0 2 | patient001_frame01.nz.1.png,0 3 | patient001_frame01.nz.2.png,0 4 | patient001_frame01.nz.3.png,0 5 | patient001_frame01.nz.4.png,0 6 | patient001_frame01.nz.5.png,0 7 | patient001_frame01.nz.6.png,0 8 | patient001_frame01.nz.7.png,0 9 | patient001_frame01.nz.8.png,0 10 | patient001_frame01.nz.9.png,0 11 | patient001_frame12.nz.0.png,0 12 | patient001_frame12.nz.1.png,0 13 | patient001_frame12.nz.2.png,0 14 | patient001_frame12.nz.3.png,0 15 | patient001_frame12.nz.4.png,0 16 | patient001_frame12.nz.5.png,0 17 | patient001_frame12.nz.6.png,0 18 | patient001_frame12.nz.7.png,0 19 | patient001_frame12.nz.8.png,0 20 | patient001_frame12.nz.9.png,0 21 | patient002_frame01.nz.0.png,0 22 | patient002_frame01.nz.1.png,0 23 | patient002_frame01.nz.2.png,0 24 | patient002_frame01.nz.3.png,0 25 | patient002_frame01.nz.4.png,0 26 | patient002_frame01.nz.5.png,0 27 | patient002_frame01.nz.6.png,0 28 | patient002_frame01.nz.7.png,0 29 | patient002_frame01.nz.8.png,0 30 | patient002_frame01.nz.9.png,0 31 | patient002_frame12.nz.0.png,0 32 | patient002_frame12.nz.1.png,0 33 | patient002_frame12.nz.2.png,0 34 | patient002_frame12.nz.3.png,0 35 | patient002_frame12.nz.4.png,0 36 | patient002_frame12.nz.5.png,0 37 | patient002_frame12.nz.6.png,0 38 | patient002_frame12.nz.7.png,0 39 | patient002_frame12.nz.8.png,0 40 | patient002_frame12.nz.9.png,0 41 | patient003_frame01.nz.0.png,0 42 | patient003_frame01.nz.1.png,0 43 | patient003_frame01.nz.2.png,0 44 | patient003_frame01.nz.3.png,0 45 | patient003_frame01.nz.4.png,0 46 | patient003_frame01.nz.5.png,0 47 | patient003_frame01.nz.6.png,0 48 | patient003_frame01.nz.7.png,0 49 | patient003_frame01.nz.8.png,0 50 | patient003_frame01.nz.9.png,0 51 | patient003_frame15.nz.0.png,0 52 | patient003_frame15.nz.1.png,0 53 | patient003_frame15.nz.2.png,0 54 | patient003_frame15.nz.3.png,0 55 | patient003_frame15.nz.4.png,0 56 | patient003_frame15.nz.5.png,0 57 | patient003_frame15.nz.6.png,0 58 | patient003_frame15.nz.7.png,0 59 | patient003_frame15.nz.8.png,0 60 | patient003_frame15.nz.9.png,0 61 | patient004_frame01.nz.0.png,0 62 | patient004_frame01.nz.1.png,0 63 | patient004_frame01.nz.2.png,0 64 | patient004_frame01.nz.3.png,0 65 | patient004_frame01.nz.4.png,0 66 | patient004_frame01.nz.5.png,0 67 | patient004_frame01.nz.6.png,0 68 | patient004_frame01.nz.7.png,0 69 | patient004_frame01.nz.8.png,0 70 | patient004_frame01.nz.9.png,0 71 | patient004_frame15.nz.0.png,0 72 | patient004_frame15.nz.1.png,0 73 | patient004_frame15.nz.2.png,0 74 | patient004_frame15.nz.3.png,0 75 | patient004_frame15.nz.4.png,0 76 | patient004_frame15.nz.5.png,0 77 | 
patient004_frame15.nz.6.png,0 78 | patient004_frame15.nz.7.png,0 79 | patient004_frame15.nz.8.png,0 80 | patient004_frame15.nz.9.png,0 81 | patient005_frame01.nz.0.png,0 82 | patient005_frame01.nz.1.png,0 83 | patient005_frame01.nz.2.png,0 84 | patient005_frame01.nz.3.png,0 85 | patient005_frame01.nz.4.png,0 86 | patient005_frame01.nz.5.png,0 87 | patient005_frame01.nz.6.png,0 88 | patient005_frame01.nz.7.png,0 89 | patient005_frame01.nz.8.png,0 90 | patient005_frame01.nz.9.png,0 91 | patient005_frame13.nz.0.png,0 92 | patient005_frame13.nz.1.png,0 93 | patient005_frame13.nz.2.png,0 94 | patient005_frame13.nz.3.png,0 95 | patient005_frame13.nz.4.png,0 96 | patient005_frame13.nz.5.png,0 97 | patient005_frame13.nz.6.png,0 98 | patient005_frame13.nz.7.png,0 99 | patient005_frame13.nz.8.png,0 100 | patient005_frame13.nz.9.png,0 101 | patient006_frame01.nz.0.png,0 102 | patient006_frame01.nz.1.png,0 103 | patient006_frame01.nz.10.png,0 104 | patient006_frame01.nz.2.png,0 105 | patient006_frame01.nz.3.png,0 106 | patient006_frame01.nz.4.png,0 107 | patient006_frame01.nz.5.png,0 108 | patient006_frame01.nz.6.png,0 109 | patient006_frame01.nz.7.png,0 110 | patient006_frame01.nz.8.png,0 111 | patient006_frame01.nz.9.png,0 112 | patient006_frame16.nz.0.png,0 113 | patient006_frame16.nz.1.png,0 114 | patient006_frame16.nz.10.png,0 115 | patient006_frame16.nz.2.png,0 116 | patient006_frame16.nz.3.png,0 117 | patient006_frame16.nz.4.png,0 118 | patient006_frame16.nz.5.png,0 119 | patient006_frame16.nz.6.png,0 120 | patient006_frame16.nz.7.png,0 121 | patient006_frame16.nz.8.png,0 122 | patient006_frame16.nz.9.png,0 123 | patient008_frame01.nz.0.png,0 124 | patient008_frame01.nz.1.png,0 125 | patient008_frame01.nz.2.png,0 126 | patient008_frame01.nz.3.png,0 127 | patient008_frame01.nz.4.png,0 128 | patient008_frame01.nz.5.png,0 129 | patient008_frame01.nz.6.png,0 130 | patient008_frame01.nz.7.png,0 131 | patient008_frame01.nz.8.png,0 132 | patient008_frame01.nz.9.png,0 133 | patient008_frame13.nz.0.png,0 134 | patient008_frame13.nz.1.png,0 135 | patient008_frame13.nz.2.png,0 136 | patient008_frame13.nz.3.png,0 137 | patient008_frame13.nz.4.png,0 138 | patient008_frame13.nz.5.png,0 139 | patient008_frame13.nz.6.png,0 140 | patient008_frame13.nz.7.png,0 141 | patient008_frame13.nz.8.png,0 142 | patient008_frame13.nz.9.png,0 143 | patient009_frame01.nz.0.png,0 144 | patient009_frame01.nz.1.png,0 145 | patient009_frame01.nz.2.png,0 146 | patient009_frame01.nz.3.png,0 147 | patient009_frame01.nz.4.png,0 148 | patient009_frame01.nz.5.png,0 149 | patient009_frame01.nz.6.png,0 150 | patient009_frame01.nz.7.png,0 151 | patient009_frame01.nz.8.png,0 152 | patient009_frame01.nz.9.png,0 153 | patient009_frame13.nz.0.png,0 154 | patient009_frame13.nz.1.png,0 155 | patient009_frame13.nz.2.png,0 156 | patient009_frame13.nz.3.png,0 157 | patient009_frame13.nz.4.png,0 158 | patient009_frame13.nz.5.png,0 159 | patient009_frame13.nz.6.png,0 160 | patient009_frame13.nz.7.png,0 161 | patient009_frame13.nz.8.png,0 162 | patient009_frame13.nz.9.png,0 163 | patient010_frame01.nz.0.png,0 164 | patient010_frame01.nz.1.png,0 165 | patient010_frame01.nz.2.png,0 166 | patient010_frame01.nz.3.png,0 167 | patient010_frame01.nz.4.png,0 168 | patient010_frame01.nz.5.png,0 169 | patient010_frame01.nz.6.png,0 170 | patient010_frame01.nz.7.png,0 171 | patient010_frame01.nz.8.png,0 172 | patient010_frame01.nz.9.png,0 173 | patient010_frame13.nz.0.png,0 174 | patient010_frame13.nz.1.png,0 175 | patient010_frame13.nz.2.png,0 176 | 
patient010_frame13.nz.3.png,0 177 | patient010_frame13.nz.4.png,0 178 | patient010_frame13.nz.5.png,0 179 | patient010_frame13.nz.6.png,0 180 | patient010_frame13.nz.7.png,0 181 | patient010_frame13.nz.8.png,0 182 | patient010_frame13.nz.9.png,0 183 | patient011_frame01.nz.0.png,0 184 | patient011_frame01.nz.1.png,0 185 | patient011_frame01.nz.2.png,0 186 | patient011_frame01.nz.3.png,0 187 | patient011_frame01.nz.4.png,0 188 | patient011_frame01.nz.5.png,0 189 | patient011_frame01.nz.6.png,0 190 | patient011_frame01.nz.7.png,0 191 | patient011_frame01.nz.8.png,0 192 | patient011_frame08.nz.0.png,0 193 | patient011_frame08.nz.1.png,0 194 | patient011_frame08.nz.2.png,0 195 | patient011_frame08.nz.3.png,0 196 | patient011_frame08.nz.4.png,0 197 | patient011_frame08.nz.5.png,0 198 | patient011_frame08.nz.6.png,0 199 | patient011_frame08.nz.7.png,0 200 | patient011_frame08.nz.8.png,0 201 | patient012_frame01.nz.0.png,0 202 | patient012_frame01.nz.1.png,0 203 | patient012_frame01.nz.2.png,0 204 | patient012_frame01.nz.3.png,0 205 | patient012_frame01.nz.4.png,0 206 | patient012_frame01.nz.5.png,0 207 | patient012_frame01.nz.6.png,0 208 | patient012_frame01.nz.7.png,0 209 | patient012_frame01.nz.8.png,0 210 | patient012_frame01.nz.9.png,0 211 | patient012_frame13.nz.0.png,0 212 | patient012_frame13.nz.1.png,0 213 | patient012_frame13.nz.2.png,0 214 | patient012_frame13.nz.3.png,0 215 | patient012_frame13.nz.4.png,0 216 | patient012_frame13.nz.5.png,0 217 | patient012_frame13.nz.6.png,0 218 | patient012_frame13.nz.7.png,0 219 | patient012_frame13.nz.8.png,0 220 | patient012_frame13.nz.9.png,0 221 | patient013_frame01.nz.0.png,0 222 | patient013_frame01.nz.1.png,0 223 | patient013_frame01.nz.2.png,0 224 | patient013_frame01.nz.3.png,0 225 | patient013_frame01.nz.4.png,0 226 | patient013_frame01.nz.5.png,0 227 | patient013_frame01.nz.6.png,0 228 | patient013_frame01.nz.7.png,0 229 | patient013_frame01.nz.8.png,0 230 | patient013_frame01.nz.9.png,0 231 | patient013_frame14.nz.0.png,0 232 | patient013_frame14.nz.1.png,0 233 | patient013_frame14.nz.2.png,0 234 | patient013_frame14.nz.3.png,0 235 | patient013_frame14.nz.4.png,0 236 | patient013_frame14.nz.5.png,0 237 | patient013_frame14.nz.6.png,0 238 | patient013_frame14.nz.7.png,0 239 | patient013_frame14.nz.8.png,0 240 | patient013_frame14.nz.9.png,0 241 | patient014_frame01.nz.0.png,0 242 | patient014_frame01.nz.1.png,0 243 | patient014_frame01.nz.2.png,0 244 | patient014_frame01.nz.3.png,0 245 | patient014_frame01.nz.4.png,0 246 | patient014_frame01.nz.5.png,0 247 | patient014_frame01.nz.6.png,0 248 | patient014_frame01.nz.7.png,0 249 | patient014_frame01.nz.8.png,0 250 | patient014_frame01.nz.9.png,0 251 | patient014_frame13.nz.0.png,0 252 | patient014_frame13.nz.1.png,0 253 | patient014_frame13.nz.2.png,0 254 | patient014_frame13.nz.3.png,0 255 | patient014_frame13.nz.4.png,0 256 | patient014_frame13.nz.5.png,0 257 | patient014_frame13.nz.6.png,0 258 | patient014_frame13.nz.7.png,0 259 | patient014_frame13.nz.8.png,0 260 | patient014_frame13.nz.9.png,0 261 | patient015_frame01.nz.0.png,0 262 | patient015_frame01.nz.1.png,0 263 | patient015_frame01.nz.2.png,0 264 | patient015_frame01.nz.3.png,0 265 | patient015_frame01.nz.4.png,0 266 | patient015_frame01.nz.5.png,0 267 | patient015_frame01.nz.6.png,0 268 | patient015_frame01.nz.7.png,0 269 | patient015_frame01.nz.8.png,0 270 | patient015_frame10.nz.0.png,0 271 | patient015_frame10.nz.1.png,0 272 | patient015_frame10.nz.2.png,0 273 | patient015_frame10.nz.3.png,0 274 | 
patient015_frame10.nz.4.png,0 275 | patient015_frame10.nz.5.png,0 276 | patient015_frame10.nz.6.png,0 277 | patient015_frame10.nz.7.png,0 278 | patient015_frame10.nz.8.png,0 279 | patient016_frame01.nz.0.png,0 280 | patient016_frame01.nz.1.png,0 281 | patient016_frame01.nz.2.png,0 282 | patient016_frame01.nz.3.png,0 283 | patient016_frame01.nz.4.png,0 284 | patient016_frame01.nz.5.png,0 285 | patient016_frame01.nz.6.png,0 286 | patient016_frame01.nz.7.png,0 287 | patient016_frame01.nz.8.png,0 288 | patient016_frame01.nz.9.png,0 289 | patient016_frame12.nz.0.png,0 290 | patient016_frame12.nz.1.png,0 291 | patient016_frame12.nz.2.png,0 292 | patient016_frame12.nz.3.png,0 293 | patient016_frame12.nz.4.png,0 294 | patient016_frame12.nz.5.png,0 295 | patient016_frame12.nz.6.png,0 296 | patient016_frame12.nz.7.png,0 297 | patient016_frame12.nz.8.png,0 298 | patient016_frame12.nz.9.png,0 299 | patient017_frame01.nz.0.png,0 300 | patient017_frame01.nz.1.png,0 301 | patient017_frame01.nz.2.png,0 302 | patient017_frame01.nz.3.png,0 303 | patient017_frame01.nz.4.png,0 304 | patient017_frame01.nz.5.png,0 305 | patient017_frame01.nz.6.png,0 306 | patient017_frame01.nz.7.png,0 307 | patient017_frame01.nz.8.png,0 308 | patient017_frame09.nz.0.png,0 309 | patient017_frame09.nz.1.png,0 310 | patient017_frame09.nz.2.png,0 311 | patient017_frame09.nz.3.png,0 312 | patient017_frame09.nz.4.png,0 313 | patient017_frame09.nz.5.png,0 314 | patient017_frame09.nz.6.png,0 315 | patient017_frame09.nz.7.png,0 316 | patient017_frame09.nz.8.png,0 317 | patient018_frame01.nz.0.png,0 318 | patient018_frame01.nz.1.png,0 319 | patient018_frame01.nz.2.png,0 320 | patient018_frame01.nz.3.png,0 321 | patient018_frame01.nz.4.png,0 322 | patient018_frame01.nz.5.png,0 323 | patient018_frame01.nz.6.png,0 324 | patient018_frame01.nz.7.png,0 325 | patient018_frame10.nz.0.png,0 326 | patient018_frame10.nz.1.png,0 327 | patient018_frame10.nz.2.png,0 328 | patient018_frame10.nz.3.png,0 329 | patient018_frame10.nz.4.png,0 330 | patient018_frame10.nz.5.png,0 331 | patient018_frame10.nz.6.png,0 332 | patient018_frame10.nz.7.png,0 333 | patient019_frame01.nz.0.png,0 334 | patient019_frame01.nz.1.png,0 335 | patient019_frame01.nz.10.png,0 336 | patient019_frame01.nz.2.png,0 337 | patient019_frame01.nz.3.png,0 338 | patient019_frame01.nz.4.png,0 339 | patient019_frame01.nz.5.png,0 340 | patient019_frame01.nz.6.png,0 341 | patient019_frame01.nz.7.png,0 342 | patient019_frame01.nz.8.png,0 343 | patient019_frame01.nz.9.png,0 344 | patient019_frame11.nz.0.png,0 345 | patient019_frame11.nz.1.png,0 346 | patient019_frame11.nz.10.png,0 347 | patient019_frame11.nz.2.png,0 348 | patient019_frame11.nz.3.png,0 349 | patient019_frame11.nz.4.png,0 350 | patient019_frame11.nz.5.png,0 351 | patient019_frame11.nz.6.png,0 352 | patient019_frame11.nz.7.png,0 353 | patient019_frame11.nz.8.png,0 354 | patient019_frame11.nz.9.png,0 355 | patient020_frame01.nz.0.png,0 356 | patient020_frame01.nz.1.png,0 357 | patient020_frame01.nz.2.png,0 358 | patient020_frame01.nz.3.png,0 359 | patient020_frame01.nz.4.png,0 360 | patient020_frame01.nz.5.png,0 361 | patient020_frame01.nz.6.png,0 362 | patient020_frame01.nz.7.png,0 363 | patient020_frame11.nz.0.png,0 364 | patient020_frame11.nz.1.png,0 365 | patient020_frame11.nz.2.png,0 366 | patient020_frame11.nz.3.png,0 367 | patient020_frame11.nz.4.png,0 368 | patient020_frame11.nz.5.png,0 369 | patient020_frame11.nz.6.png,0 370 | patient020_frame11.nz.7.png,0 371 | patient021_frame01.nz.0.png,1 372 | 
patient021_frame01.nz.1.png,1 373 | patient021_frame01.nz.2.png,1 374 | patient021_frame01.nz.3.png,1 375 | patient021_frame01.nz.4.png,1 376 | patient021_frame01.nz.5.png,1 377 | patient021_frame01.nz.6.png,1 378 | patient021_frame01.nz.7.png,1 379 | patient021_frame01.nz.8.png,1 380 | patient021_frame01.nz.9.png,1 381 | patient021_frame13.nz.0.png,1 382 | patient021_frame13.nz.1.png,1 383 | patient021_frame13.nz.2.png,1 384 | patient021_frame13.nz.3.png,1 385 | patient021_frame13.nz.4.png,1 386 | patient021_frame13.nz.5.png,1 387 | patient021_frame13.nz.6.png,1 388 | patient021_frame13.nz.7.png,1 389 | patient021_frame13.nz.8.png,1 390 | patient021_frame13.nz.9.png,1 391 | patient022_frame01.nz.0.png,1 392 | patient022_frame01.nz.1.png,1 393 | patient022_frame01.nz.2.png,1 394 | patient022_frame01.nz.3.png,1 395 | patient022_frame01.nz.4.png,1 396 | patient022_frame01.nz.5.png,1 397 | patient022_frame01.nz.6.png,1 398 | patient022_frame11.nz.0.png,1 399 | patient022_frame11.nz.1.png,1 400 | patient022_frame11.nz.2.png,1 401 | patient022_frame11.nz.3.png,1 402 | patient022_frame11.nz.4.png,1 403 | patient022_frame11.nz.5.png,1 404 | patient022_frame11.nz.6.png,1 405 | patient023_frame01.nz.0.png,1 406 | patient023_frame01.nz.1.png,1 407 | patient023_frame01.nz.2.png,1 408 | patient023_frame01.nz.3.png,1 409 | patient023_frame01.nz.4.png,1 410 | patient023_frame01.nz.5.png,1 411 | patient023_frame01.nz.6.png,1 412 | patient023_frame01.nz.7.png,1 413 | patient023_frame01.nz.8.png,1 414 | patient023_frame09.nz.0.png,1 415 | patient023_frame09.nz.1.png,1 416 | patient023_frame09.nz.2.png,1 417 | patient023_frame09.nz.3.png,1 418 | patient023_frame09.nz.4.png,1 419 | patient023_frame09.nz.5.png,1 420 | patient023_frame09.nz.6.png,1 421 | patient023_frame09.nz.7.png,1 422 | patient023_frame09.nz.8.png,1 423 | patient024_frame01.nz.0.png,1 424 | patient024_frame01.nz.1.png,1 425 | patient024_frame01.nz.2.png,1 426 | patient024_frame01.nz.3.png,1 427 | patient024_frame01.nz.4.png,1 428 | patient024_frame01.nz.5.png,1 429 | patient024_frame01.nz.6.png,1 430 | patient024_frame01.nz.7.png,1 431 | patient024_frame09.nz.0.png,1 432 | patient024_frame09.nz.1.png,1 433 | patient024_frame09.nz.2.png,1 434 | patient024_frame09.nz.3.png,1 435 | patient024_frame09.nz.4.png,1 436 | patient024_frame09.nz.5.png,1 437 | patient024_frame09.nz.6.png,1 438 | patient024_frame09.nz.7.png,1 439 | patient025_frame01.nz.0.png,1 440 | patient025_frame01.nz.1.png,1 441 | patient025_frame01.nz.2.png,1 442 | patient025_frame01.nz.3.png,1 443 | patient025_frame01.nz.4.png,1 444 | patient025_frame01.nz.5.png,1 445 | patient025_frame01.nz.6.png,1 446 | patient025_frame01.nz.7.png,1 447 | patient025_frame01.nz.8.png,1 448 | patient025_frame09.nz.0.png,1 449 | patient025_frame09.nz.1.png,1 450 | patient025_frame09.nz.2.png,1 451 | patient025_frame09.nz.3.png,1 452 | patient025_frame09.nz.4.png,1 453 | patient025_frame09.nz.5.png,1 454 | patient025_frame09.nz.6.png,1 455 | patient025_frame09.nz.7.png,1 456 | patient025_frame09.nz.8.png,1 457 | patient026_frame01.nz.0.png,1 458 | patient026_frame01.nz.1.png,1 459 | patient026_frame01.nz.2.png,1 460 | patient026_frame01.nz.3.png,1 461 | patient026_frame01.nz.4.png,1 462 | patient026_frame01.nz.5.png,1 463 | patient026_frame01.nz.6.png,1 464 | patient026_frame01.nz.7.png,1 465 | patient026_frame01.nz.8.png,1 466 | patient026_frame01.nz.9.png,1 467 | patient026_frame12.nz.0.png,1 468 | patient026_frame12.nz.1.png,1 469 | patient026_frame12.nz.2.png,1 470 | 
patient026_frame12.nz.3.png,1 471 | patient026_frame12.nz.4.png,1 472 | patient026_frame12.nz.5.png,1 473 | patient026_frame12.nz.6.png,1 474 | patient026_frame12.nz.7.png,1 475 | patient026_frame12.nz.8.png,1 476 | patient026_frame12.nz.9.png,1 477 | patient027_frame01.nz.0.png,1 478 | patient027_frame01.nz.1.png,1 479 | patient027_frame01.nz.2.png,1 480 | patient027_frame01.nz.3.png,1 481 | patient027_frame01.nz.4.png,1 482 | patient027_frame01.nz.5.png,1 483 | patient027_frame01.nz.6.png,1 484 | patient027_frame01.nz.7.png,1 485 | patient027_frame01.nz.8.png,1 486 | patient027_frame01.nz.9.png,1 487 | patient027_frame11.nz.0.png,1 488 | patient027_frame11.nz.1.png,1 489 | patient027_frame11.nz.2.png,1 490 | patient027_frame11.nz.3.png,1 491 | patient027_frame11.nz.4.png,1 492 | patient027_frame11.nz.5.png,1 493 | patient027_frame11.nz.6.png,1 494 | patient027_frame11.nz.7.png,1 495 | patient027_frame11.nz.8.png,1 496 | patient027_frame11.nz.9.png,1 497 | patient028_frame01.nz.0.png,1 498 | patient028_frame01.nz.1.png,1 499 | patient028_frame01.nz.2.png,1 500 | patient028_frame01.nz.3.png,1 501 | patient028_frame01.nz.4.png,1 502 | patient028_frame01.nz.5.png,1 503 | patient028_frame01.nz.6.png,1 504 | patient028_frame01.nz.7.png,1 505 | patient028_frame01.nz.8.png,1 506 | patient028_frame01.nz.9.png,1 507 | patient028_frame09.nz.0.png,1 508 | patient028_frame09.nz.1.png,1 509 | patient028_frame09.nz.2.png,1 510 | patient028_frame09.nz.3.png,1 511 | patient028_frame09.nz.4.png,1 512 | patient028_frame09.nz.5.png,1 513 | patient028_frame09.nz.6.png,1 514 | patient028_frame09.nz.7.png,1 515 | patient028_frame09.nz.8.png,1 516 | patient028_frame09.nz.9.png,1 517 | patient029_frame01.nz.0.png,1 518 | patient029_frame01.nz.1.png,1 519 | patient029_frame01.nz.10.png,1 520 | patient029_frame01.nz.2.png,1 521 | patient029_frame01.nz.3.png,1 522 | patient029_frame01.nz.4.png,1 523 | patient029_frame01.nz.5.png,1 524 | patient029_frame01.nz.6.png,1 525 | patient029_frame01.nz.7.png,1 526 | patient029_frame01.nz.8.png,1 527 | patient029_frame01.nz.9.png,1 528 | patient029_frame12.nz.0.png,1 529 | patient029_frame12.nz.1.png,1 530 | patient029_frame12.nz.10.png,1 531 | patient029_frame12.nz.2.png,1 532 | patient029_frame12.nz.3.png,1 533 | patient029_frame12.nz.4.png,1 534 | patient029_frame12.nz.5.png,1 535 | patient029_frame12.nz.6.png,1 536 | patient029_frame12.nz.7.png,1 537 | patient029_frame12.nz.8.png,1 538 | patient029_frame12.nz.9.png,1 539 | patient030_frame01.nz.0.png,1 540 | patient030_frame01.nz.1.png,1 541 | patient030_frame01.nz.2.png,1 542 | patient030_frame01.nz.3.png,1 543 | patient030_frame01.nz.4.png,1 544 | patient030_frame01.nz.5.png,1 545 | patient030_frame01.nz.6.png,1 546 | patient030_frame01.nz.7.png,1 547 | patient030_frame01.nz.8.png,1 548 | patient030_frame01.nz.9.png,1 549 | patient030_frame12.nz.0.png,1 550 | patient030_frame12.nz.1.png,1 551 | patient030_frame12.nz.2.png,1 552 | patient030_frame12.nz.3.png,1 553 | patient030_frame12.nz.4.png,1 554 | patient030_frame12.nz.5.png,1 555 | patient030_frame12.nz.6.png,1 556 | patient030_frame12.nz.7.png,1 557 | patient030_frame12.nz.8.png,1 558 | patient030_frame12.nz.9.png,1 559 | patient031_frame01.nz.0.png,1 560 | patient031_frame01.nz.1.png,1 561 | patient031_frame01.nz.2.png,1 562 | patient031_frame01.nz.3.png,1 563 | patient031_frame01.nz.4.png,1 564 | patient031_frame01.nz.5.png,1 565 | patient031_frame01.nz.6.png,1 566 | patient031_frame01.nz.7.png,1 567 | patient031_frame01.nz.8.png,1 568 | 
patient031_frame01.nz.9.png,1 569 | patient031_frame10.nz.0.png,1 570 | patient031_frame10.nz.1.png,1 571 | patient031_frame10.nz.2.png,1 572 | patient031_frame10.nz.3.png,1 573 | patient031_frame10.nz.4.png,1 574 | patient031_frame10.nz.5.png,1 575 | patient031_frame10.nz.6.png,1 576 | patient031_frame10.nz.7.png,1 577 | patient031_frame10.nz.8.png,1 578 | patient031_frame10.nz.9.png,1 579 | patient032_frame01.nz.0.png,1 580 | patient032_frame01.nz.1.png,1 581 | patient032_frame01.nz.2.png,1 582 | patient032_frame01.nz.3.png,1 583 | patient032_frame01.nz.4.png,1 584 | patient032_frame01.nz.5.png,1 585 | patient032_frame01.nz.6.png,1 586 | patient032_frame01.nz.7.png,1 587 | patient032_frame01.nz.8.png,1 588 | patient032_frame01.nz.9.png,1 589 | patient032_frame12.nz.0.png,1 590 | patient032_frame12.nz.1.png,1 591 | patient032_frame12.nz.2.png,1 592 | patient032_frame12.nz.3.png,1 593 | patient032_frame12.nz.4.png,1 594 | patient032_frame12.nz.5.png,1 595 | patient032_frame12.nz.6.png,1 596 | patient032_frame12.nz.7.png,1 597 | patient032_frame12.nz.8.png,1 598 | patient032_frame12.nz.9.png,1 599 | patient033_frame01.nz.0.png,1 600 | patient033_frame01.nz.1.png,1 601 | patient033_frame01.nz.2.png,1 602 | patient033_frame01.nz.3.png,1 603 | patient033_frame01.nz.4.png,1 604 | patient033_frame01.nz.5.png,1 605 | patient033_frame01.nz.6.png,1 606 | patient033_frame01.nz.7.png,1 607 | patient033_frame01.nz.8.png,1 608 | patient033_frame01.nz.9.png,1 609 | patient033_frame14.nz.0.png,1 610 | patient033_frame14.nz.1.png,1 611 | patient033_frame14.nz.2.png,1 612 | patient033_frame14.nz.3.png,1 613 | patient033_frame14.nz.4.png,1 614 | patient033_frame14.nz.5.png,1 615 | patient033_frame14.nz.6.png,1 616 | patient033_frame14.nz.7.png,1 617 | patient033_frame14.nz.8.png,1 618 | patient033_frame14.nz.9.png,1 619 | patient034_frame01.nz.0.png,1 620 | patient034_frame01.nz.1.png,1 621 | patient034_frame01.nz.2.png,1 622 | patient034_frame01.nz.3.png,1 623 | patient034_frame01.nz.4.png,1 624 | patient034_frame01.nz.5.png,1 625 | patient034_frame01.nz.6.png,1 626 | patient034_frame01.nz.7.png,1 627 | patient034_frame01.nz.8.png,1 628 | patient034_frame01.nz.9.png,1 629 | patient034_frame16.nz.0.png,1 630 | patient034_frame16.nz.1.png,1 631 | patient034_frame16.nz.2.png,1 632 | patient034_frame16.nz.3.png,1 633 | patient034_frame16.nz.4.png,1 634 | patient034_frame16.nz.5.png,1 635 | patient034_frame16.nz.6.png,1 636 | patient034_frame16.nz.7.png,1 637 | patient034_frame16.nz.8.png,1 638 | patient034_frame16.nz.9.png,1 639 | patient035_frame01.nz.0.png,1 640 | patient035_frame01.nz.1.png,1 641 | patient035_frame01.nz.10.png,1 642 | patient035_frame01.nz.11.png,1 643 | patient035_frame01.nz.12.png,1 644 | patient035_frame01.nz.2.png,1 645 | patient035_frame01.nz.3.png,1 646 | patient035_frame01.nz.4.png,1 647 | patient035_frame01.nz.5.png,1 648 | patient035_frame01.nz.6.png,1 649 | patient035_frame01.nz.7.png,1 650 | patient035_frame01.nz.8.png,1 651 | patient035_frame01.nz.9.png,1 652 | patient035_frame11.nz.0.png,1 653 | patient035_frame11.nz.1.png,1 654 | patient035_frame11.nz.10.png,1 655 | patient035_frame11.nz.11.png,1 656 | patient035_frame11.nz.12.png,1 657 | patient035_frame11.nz.2.png,1 658 | patient035_frame11.nz.3.png,1 659 | patient035_frame11.nz.4.png,1 660 | patient035_frame11.nz.5.png,1 661 | patient035_frame11.nz.6.png,1 662 | patient035_frame11.nz.7.png,1 663 | patient035_frame11.nz.8.png,1 664 | patient035_frame11.nz.9.png,1 665 | patient036_frame01.nz.0.png,1 666 | 
patient036_frame01.nz.1.png,1 667 | patient036_frame01.nz.2.png,1 668 | patient036_frame01.nz.3.png,1 669 | patient036_frame01.nz.4.png,1 670 | patient036_frame01.nz.5.png,1 671 | patient036_frame01.nz.6.png,1 672 | patient036_frame01.nz.7.png,1 673 | patient036_frame12.nz.0.png,1 674 | patient036_frame12.nz.1.png,1 675 | patient036_frame12.nz.2.png,1 676 | patient036_frame12.nz.3.png,1 677 | patient036_frame12.nz.4.png,1 678 | patient036_frame12.nz.5.png,1 679 | patient036_frame12.nz.6.png,1 680 | patient036_frame12.nz.7.png,1 681 | patient037_frame01.nz.0.png,1 682 | patient037_frame01.nz.1.png,1 683 | patient037_frame01.nz.2.png,1 684 | patient037_frame01.nz.3.png,1 685 | patient037_frame01.nz.4.png,1 686 | patient037_frame01.nz.5.png,1 687 | patient037_frame01.nz.6.png,1 688 | patient037_frame12.nz.0.png,1 689 | patient037_frame12.nz.1.png,1 690 | patient037_frame12.nz.2.png,1 691 | patient037_frame12.nz.3.png,1 692 | patient037_frame12.nz.4.png,1 693 | patient037_frame12.nz.5.png,1 694 | patient037_frame12.nz.6.png,1 695 | patient038_frame01.nz.0.png,1 696 | patient038_frame01.nz.1.png,1 697 | patient038_frame01.nz.2.png,1 698 | patient038_frame01.nz.3.png,1 699 | patient038_frame01.nz.4.png,1 700 | patient038_frame01.nz.5.png,1 701 | patient038_frame01.nz.6.png,1 702 | patient038_frame01.nz.7.png,1 703 | patient038_frame11.nz.0.png,1 704 | patient038_frame11.nz.1.png,1 705 | patient038_frame11.nz.2.png,1 706 | patient038_frame11.nz.3.png,1 707 | patient038_frame11.nz.4.png,1 708 | patient038_frame11.nz.5.png,1 709 | patient038_frame11.nz.6.png,1 710 | patient038_frame11.nz.7.png,1 711 | patient039_frame01.nz.0.png,1 712 | patient039_frame01.nz.1.png,1 713 | patient039_frame01.nz.2.png,1 714 | patient039_frame01.nz.3.png,1 715 | patient039_frame01.nz.4.png,1 716 | patient039_frame01.nz.5.png,1 717 | patient039_frame01.nz.6.png,1 718 | patient039_frame01.nz.7.png,1 719 | patient039_frame01.nz.8.png,1 720 | patient039_frame10.nz.0.png,1 721 | patient039_frame10.nz.1.png,1 722 | patient039_frame10.nz.2.png,1 723 | patient039_frame10.nz.3.png,1 724 | patient039_frame10.nz.4.png,1 725 | patient039_frame10.nz.5.png,1 726 | patient039_frame10.nz.6.png,1 727 | patient039_frame10.nz.7.png,1 728 | patient039_frame10.nz.8.png,1 729 | patient040_frame01.nz.0.png,1 730 | patient040_frame01.nz.1.png,1 731 | patient040_frame01.nz.2.png,1 732 | patient040_frame01.nz.3.png,1 733 | patient040_frame01.nz.4.png,1 734 | patient040_frame01.nz.5.png,1 735 | patient040_frame01.nz.6.png,1 736 | patient040_frame01.nz.7.png,1 737 | patient040_frame01.nz.8.png,1 738 | patient040_frame01.nz.9.png,1 739 | patient040_frame13.nz.0.png,1 740 | patient040_frame13.nz.1.png,1 741 | patient040_frame13.nz.2.png,1 742 | patient040_frame13.nz.3.png,1 743 | patient040_frame13.nz.4.png,1 744 | patient040_frame13.nz.5.png,1 745 | patient040_frame13.nz.6.png,1 746 | patient040_frame13.nz.7.png,1 747 | patient040_frame13.nz.8.png,1 748 | patient040_frame13.nz.9.png,1 749 | patient042_frame01.nz.0.png,2 750 | patient042_frame01.nz.1.png,2 751 | patient042_frame01.nz.2.png,2 752 | patient042_frame01.nz.3.png,2 753 | patient042_frame01.nz.4.png,2 754 | patient042_frame01.nz.5.png,2 755 | patient042_frame01.nz.6.png,2 756 | patient042_frame01.nz.7.png,2 757 | patient042_frame01.nz.8.png,2 758 | patient042_frame16.nz.0.png,2 759 | patient042_frame16.nz.1.png,2 760 | patient042_frame16.nz.2.png,2 761 | patient042_frame16.nz.3.png,2 762 | patient042_frame16.nz.4.png,2 763 | patient042_frame16.nz.5.png,2 764 | 
patient042_frame16.nz.6.png,2 765 | patient042_frame16.nz.7.png,2 766 | patient042_frame16.nz.8.png,2 767 | patient044_frame01.nz.0.png,2 768 | patient044_frame01.nz.1.png,2 769 | patient044_frame01.nz.2.png,2 770 | patient044_frame01.nz.3.png,2 771 | patient044_frame01.nz.4.png,2 772 | patient044_frame01.nz.5.png,2 773 | patient044_frame01.nz.6.png,2 774 | patient044_frame01.nz.7.png,2 775 | patient044_frame01.nz.8.png,2 776 | patient044_frame11.nz.0.png,2 777 | patient044_frame11.nz.1.png,2 778 | patient044_frame11.nz.2.png,2 779 | patient044_frame11.nz.3.png,2 780 | patient044_frame11.nz.4.png,2 781 | patient044_frame11.nz.5.png,2 782 | patient044_frame11.nz.6.png,2 783 | patient044_frame11.nz.7.png,2 784 | patient044_frame11.nz.8.png,2 785 | patient045_frame01.nz.0.png,2 786 | patient045_frame01.nz.1.png,2 787 | patient045_frame01.nz.2.png,2 788 | patient045_frame01.nz.3.png,2 789 | patient045_frame01.nz.4.png,2 790 | patient045_frame01.nz.5.png,2 791 | patient045_frame01.nz.6.png,2 792 | patient045_frame01.nz.7.png,2 793 | patient045_frame13.nz.0.png,2 794 | patient045_frame13.nz.1.png,2 795 | patient045_frame13.nz.2.png,2 796 | patient045_frame13.nz.3.png,2 797 | patient045_frame13.nz.4.png,2 798 | patient045_frame13.nz.5.png,2 799 | patient045_frame13.nz.6.png,2 800 | patient045_frame13.nz.7.png,2 801 | patient046_frame01.nz.0.png,2 802 | patient046_frame01.nz.1.png,2 803 | patient046_frame01.nz.2.png,2 804 | patient046_frame01.nz.3.png,2 805 | patient046_frame01.nz.4.png,2 806 | patient046_frame01.nz.5.png,2 807 | patient046_frame01.nz.6.png,2 808 | patient046_frame01.nz.7.png,2 809 | patient046_frame01.nz.8.png,2 810 | patient046_frame10.nz.0.png,2 811 | patient046_frame10.nz.1.png,2 812 | patient046_frame10.nz.2.png,2 813 | patient046_frame10.nz.3.png,2 814 | patient046_frame10.nz.4.png,2 815 | patient046_frame10.nz.5.png,2 816 | patient046_frame10.nz.6.png,2 817 | patient046_frame10.nz.7.png,2 818 | patient046_frame10.nz.8.png,2 819 | patient047_frame01.nz.0.png,2 820 | patient047_frame01.nz.1.png,2 821 | patient047_frame01.nz.2.png,2 822 | patient047_frame01.nz.3.png,2 823 | patient047_frame01.nz.4.png,2 824 | patient047_frame01.nz.5.png,2 825 | patient047_frame01.nz.6.png,2 826 | patient047_frame01.nz.7.png,2 827 | patient047_frame01.nz.8.png,2 828 | patient047_frame09.nz.0.png,2 829 | patient047_frame09.nz.1.png,2 830 | patient047_frame09.nz.2.png,2 831 | patient047_frame09.nz.3.png,2 832 | patient047_frame09.nz.4.png,2 833 | patient047_frame09.nz.5.png,2 834 | patient047_frame09.nz.6.png,2 835 | patient047_frame09.nz.7.png,2 836 | patient047_frame09.nz.8.png,2 837 | patient048_frame01.nz.0.png,2 838 | patient048_frame01.nz.1.png,2 839 | patient048_frame01.nz.2.png,2 840 | patient048_frame01.nz.3.png,2 841 | patient048_frame01.nz.4.png,2 842 | patient048_frame01.nz.5.png,2 843 | patient048_frame01.nz.6.png,2 844 | patient048_frame01.nz.7.png,2 845 | patient048_frame08.nz.0.png,2 846 | patient048_frame08.nz.1.png,2 847 | patient048_frame08.nz.2.png,2 848 | patient048_frame08.nz.3.png,2 849 | patient048_frame08.nz.4.png,2 850 | patient048_frame08.nz.5.png,2 851 | patient048_frame08.nz.6.png,2 852 | patient048_frame08.nz.7.png,2 853 | patient049_frame01.nz.0.png,2 854 | patient049_frame01.nz.1.png,2 855 | patient049_frame01.nz.2.png,2 856 | patient049_frame01.nz.3.png,2 857 | patient049_frame01.nz.4.png,2 858 | patient049_frame01.nz.5.png,2 859 | patient049_frame01.nz.6.png,2 860 | patient049_frame11.nz.0.png,2 861 | patient049_frame11.nz.1.png,2 862 | 
patient049_frame11.nz.2.png,2 863 | patient049_frame11.nz.3.png,2 864 | patient049_frame11.nz.4.png,2 865 | patient049_frame11.nz.5.png,2 866 | patient049_frame11.nz.6.png,2 867 | patient050_frame01.nz.0.png,2 868 | patient050_frame01.nz.1.png,2 869 | patient050_frame01.nz.2.png,2 870 | patient050_frame01.nz.3.png,2 871 | patient050_frame01.nz.4.png,2 872 | patient050_frame01.nz.5.png,2 873 | patient050_frame01.nz.6.png,2 874 | patient050_frame01.nz.7.png,2 875 | patient050_frame01.nz.8.png,2 876 | patient050_frame01.nz.9.png,2 877 | patient050_frame12.nz.0.png,2 878 | patient050_frame12.nz.1.png,2 879 | patient050_frame12.nz.2.png,2 880 | patient050_frame12.nz.3.png,2 881 | patient050_frame12.nz.4.png,2 882 | patient050_frame12.nz.5.png,2 883 | patient050_frame12.nz.6.png,2 884 | patient050_frame12.nz.7.png,2 885 | patient050_frame12.nz.8.png,2 886 | patient050_frame12.nz.9.png,2 887 | patient051_frame01.nz.0.png,2 888 | patient051_frame01.nz.1.png,2 889 | patient051_frame01.nz.2.png,2 890 | patient051_frame01.nz.3.png,2 891 | patient051_frame01.nz.4.png,2 892 | patient051_frame01.nz.5.png,2 893 | patient051_frame01.nz.6.png,2 894 | patient051_frame01.nz.7.png,2 895 | patient051_frame01.nz.8.png,2 896 | patient051_frame01.nz.9.png,2 897 | patient051_frame11.nz.0.png,2 898 | patient051_frame11.nz.1.png,2 899 | patient051_frame11.nz.2.png,2 900 | patient051_frame11.nz.3.png,2 901 | patient051_frame11.nz.4.png,2 902 | patient051_frame11.nz.5.png,2 903 | patient051_frame11.nz.6.png,2 904 | patient051_frame11.nz.7.png,2 905 | patient051_frame11.nz.8.png,2 906 | patient051_frame11.nz.9.png,2 907 | patient052_frame01.nz.0.png,2 908 | patient052_frame01.nz.1.png,2 909 | patient052_frame01.nz.2.png,2 910 | patient052_frame01.nz.3.png,2 911 | patient052_frame01.nz.4.png,2 912 | patient052_frame01.nz.5.png,2 913 | patient052_frame01.nz.6.png,2 914 | patient052_frame01.nz.7.png,2 915 | patient052_frame09.nz.0.png,2 916 | patient052_frame09.nz.1.png,2 917 | patient052_frame09.nz.2.png,2 918 | patient052_frame09.nz.3.png,2 919 | patient052_frame09.nz.4.png,2 920 | patient052_frame09.nz.5.png,2 921 | patient052_frame09.nz.6.png,2 922 | patient052_frame09.nz.7.png,2 923 | patient053_frame01.nz.0.png,2 924 | patient053_frame01.nz.1.png,2 925 | patient053_frame01.nz.2.png,2 926 | patient053_frame01.nz.3.png,2 927 | patient053_frame01.nz.4.png,2 928 | patient053_frame01.nz.5.png,2 929 | patient053_frame01.nz.6.png,2 930 | patient053_frame12.nz.0.png,2 931 | patient053_frame12.nz.1.png,2 932 | patient053_frame12.nz.2.png,2 933 | patient053_frame12.nz.3.png,2 934 | patient053_frame12.nz.4.png,2 935 | patient053_frame12.nz.5.png,2 936 | patient053_frame12.nz.6.png,2 937 | patient054_frame01.nz.0.png,2 938 | patient054_frame01.nz.1.png,2 939 | patient054_frame01.nz.2.png,2 940 | patient054_frame01.nz.3.png,2 941 | patient054_frame01.nz.4.png,2 942 | patient054_frame01.nz.5.png,2 943 | patient054_frame01.nz.6.png,2 944 | patient054_frame01.nz.7.png,2 945 | patient054_frame12.nz.0.png,2 946 | patient054_frame12.nz.1.png,2 947 | patient054_frame12.nz.2.png,2 948 | patient054_frame12.nz.3.png,2 949 | patient054_frame12.nz.4.png,2 950 | patient054_frame12.nz.5.png,2 951 | patient054_frame12.nz.6.png,2 952 | patient054_frame12.nz.7.png,2 953 | patient055_frame01.nz.0.png,2 954 | patient055_frame01.nz.1.png,2 955 | patient055_frame01.nz.2.png,2 956 | patient055_frame01.nz.3.png,2 957 | patient055_frame01.nz.4.png,2 958 | patient055_frame01.nz.5.png,2 959 | patient055_frame01.nz.6.png,2 960 | 
patient055_frame01.nz.7.png,2 961 | patient055_frame01.nz.8.png,2 962 | patient055_frame10.nz.0.png,2 963 | patient055_frame10.nz.1.png,2 964 | patient055_frame10.nz.2.png,2 965 | patient055_frame10.nz.3.png,2 966 | patient055_frame10.nz.4.png,2 967 | patient055_frame10.nz.5.png,2 968 | patient055_frame10.nz.6.png,2 969 | patient055_frame10.nz.7.png,2 970 | patient055_frame10.nz.8.png,2 971 | patient056_frame01.nz.0.png,2 972 | patient056_frame01.nz.1.png,2 973 | patient056_frame01.nz.2.png,2 974 | patient056_frame01.nz.3.png,2 975 | patient056_frame01.nz.4.png,2 976 | patient056_frame01.nz.5.png,2 977 | patient056_frame01.nz.6.png,2 978 | patient056_frame01.nz.7.png,2 979 | patient056_frame01.nz.8.png,2 980 | patient056_frame12.nz.0.png,2 981 | patient056_frame12.nz.1.png,2 982 | patient056_frame12.nz.2.png,2 983 | patient056_frame12.nz.3.png,2 984 | patient056_frame12.nz.4.png,2 985 | patient056_frame12.nz.5.png,2 986 | patient056_frame12.nz.6.png,2 987 | patient056_frame12.nz.7.png,2 988 | patient056_frame12.nz.8.png,2 989 | patient057_frame01.nz.0.png,2 990 | patient057_frame01.nz.1.png,2 991 | patient057_frame01.nz.2.png,2 992 | patient057_frame01.nz.3.png,2 993 | patient057_frame01.nz.4.png,2 994 | patient057_frame01.nz.5.png,2 995 | patient057_frame01.nz.6.png,2 996 | patient057_frame01.nz.7.png,2 997 | patient057_frame09.nz.0.png,2 998 | patient057_frame09.nz.1.png,2 999 | patient057_frame09.nz.2.png,2 1000 | patient057_frame09.nz.3.png,2 1001 | patient057_frame09.nz.4.png,2 1002 | patient057_frame09.nz.5.png,2 1003 | patient057_frame09.nz.6.png,2 1004 | patient057_frame09.nz.7.png,2 1005 | patient058_frame01.nz.0.png,2 1006 | patient058_frame01.nz.1.png,2 1007 | patient058_frame01.nz.2.png,2 1008 | patient058_frame01.nz.3.png,2 1009 | patient058_frame01.nz.4.png,2 1010 | patient058_frame01.nz.5.png,2 1011 | patient058_frame01.nz.6.png,2 1012 | patient058_frame01.nz.7.png,2 1013 | patient058_frame01.nz.8.png,2 1014 | patient058_frame14.nz.0.png,2 1015 | patient058_frame14.nz.1.png,2 1016 | patient058_frame14.nz.2.png,2 1017 | patient058_frame14.nz.3.png,2 1018 | patient058_frame14.nz.4.png,2 1019 | patient058_frame14.nz.5.png,2 1020 | patient058_frame14.nz.6.png,2 1021 | patient058_frame14.nz.7.png,2 1022 | patient058_frame14.nz.8.png,2 1023 | patient059_frame01.nz.0.png,2 1024 | patient059_frame01.nz.1.png,2 1025 | patient059_frame01.nz.2.png,2 1026 | patient059_frame01.nz.3.png,2 1027 | patient059_frame01.nz.4.png,2 1028 | patient059_frame01.nz.5.png,2 1029 | patient059_frame01.nz.6.png,2 1030 | patient059_frame01.nz.7.png,2 1031 | patient059_frame01.nz.8.png,2 1032 | patient059_frame09.nz.0.png,2 1033 | patient059_frame09.nz.1.png,2 1034 | patient059_frame09.nz.2.png,2 1035 | patient059_frame09.nz.3.png,2 1036 | patient059_frame09.nz.4.png,2 1037 | patient059_frame09.nz.5.png,2 1038 | patient059_frame09.nz.6.png,2 1039 | patient059_frame09.nz.7.png,2 1040 | patient059_frame09.nz.8.png,2 1041 | patient060_frame01.nz.0.png,2 1042 | patient060_frame01.nz.1.png,2 1043 | patient060_frame01.nz.2.png,2 1044 | patient060_frame01.nz.3.png,2 1045 | patient060_frame01.nz.4.png,2 1046 | patient060_frame01.nz.5.png,2 1047 | patient060_frame01.nz.6.png,2 1048 | patient060_frame01.nz.7.png,2 1049 | patient060_frame01.nz.8.png,2 1050 | patient060_frame14.nz.0.png,2 1051 | patient060_frame14.nz.1.png,2 1052 | patient060_frame14.nz.2.png,2 1053 | patient060_frame14.nz.3.png,2 1054 | patient060_frame14.nz.4.png,2 1055 | patient060_frame14.nz.5.png,2 1056 | patient060_frame14.nz.6.png,2 1057 | 
patient060_frame14.nz.7.png,2 1058 | patient060_frame14.nz.8.png,2 1059 | patient061_frame01.nz.0.png,3 1060 | patient061_frame01.nz.1.png,3 1061 | patient061_frame01.nz.2.png,3 1062 | patient061_frame01.nz.3.png,3 1063 | patient061_frame01.nz.4.png,3 1064 | patient061_frame01.nz.5.png,3 1065 | patient061_frame01.nz.6.png,3 1066 | patient061_frame01.nz.7.png,3 1067 | patient061_frame01.nz.8.png,3 1068 | patient061_frame10.nz.0.png,3 1069 | patient061_frame10.nz.1.png,3 1070 | patient061_frame10.nz.2.png,3 1071 | patient061_frame10.nz.3.png,3 1072 | patient061_frame10.nz.4.png,3 1073 | patient061_frame10.nz.5.png,3 1074 | patient061_frame10.nz.6.png,3 1075 | patient061_frame10.nz.7.png,3 1076 | patient061_frame10.nz.8.png,3 1077 | patient062_frame01.nz.0.png,3 1078 | patient062_frame01.nz.1.png,3 1079 | patient062_frame01.nz.2.png,3 1080 | patient062_frame01.nz.3.png,3 1081 | patient062_frame01.nz.4.png,3 1082 | patient062_frame01.nz.5.png,3 1083 | patient062_frame01.nz.6.png,3 1084 | patient062_frame01.nz.7.png,3 1085 | patient062_frame01.nz.8.png,3 1086 | patient062_frame01.nz.9.png,3 1087 | patient062_frame09.nz.0.png,3 1088 | patient062_frame09.nz.1.png,3 1089 | patient062_frame09.nz.2.png,3 1090 | patient062_frame09.nz.3.png,3 1091 | patient062_frame09.nz.4.png,3 1092 | patient062_frame09.nz.5.png,3 1093 | patient062_frame09.nz.6.png,3 1094 | patient062_frame09.nz.7.png,3 1095 | patient062_frame09.nz.8.png,3 1096 | patient062_frame09.nz.9.png,3 1097 | patient063_frame01.nz.0.png,3 1098 | patient063_frame01.nz.1.png,3 1099 | patient063_frame01.nz.2.png,3 1100 | patient063_frame01.nz.3.png,3 1101 | patient063_frame01.nz.4.png,3 1102 | patient063_frame01.nz.5.png,3 1103 | patient063_frame01.nz.6.png,3 1104 | patient063_frame01.nz.7.png,3 1105 | patient063_frame16.nz.0.png,3 1106 | patient063_frame16.nz.1.png,3 1107 | patient063_frame16.nz.2.png,3 1108 | patient063_frame16.nz.3.png,3 1109 | patient063_frame16.nz.4.png,3 1110 | patient063_frame16.nz.5.png,3 1111 | patient063_frame16.nz.6.png,3 1112 | patient063_frame16.nz.7.png,3 1113 | patient064_frame01.nz.0.png,3 1114 | patient064_frame01.nz.1.png,3 1115 | patient064_frame01.nz.2.png,3 1116 | patient064_frame01.nz.3.png,3 1117 | patient064_frame01.nz.4.png,3 1118 | patient064_frame01.nz.5.png,3 1119 | patient064_frame01.nz.6.png,3 1120 | patient064_frame01.nz.7.png,3 1121 | patient064_frame01.nz.8.png,3 1122 | patient064_frame01.nz.9.png,3 1123 | patient064_frame12.nz.0.png,3 1124 | patient064_frame12.nz.1.png,3 1125 | patient064_frame12.nz.2.png,3 1126 | patient064_frame12.nz.3.png,3 1127 | patient064_frame12.nz.4.png,3 1128 | patient064_frame12.nz.5.png,3 1129 | patient064_frame12.nz.6.png,3 1130 | patient064_frame12.nz.7.png,3 1131 | patient064_frame12.nz.8.png,3 1132 | patient064_frame12.nz.9.png,3 1133 | patient066_frame01.nz.0.png,3 1134 | patient066_frame01.nz.1.png,3 1135 | patient066_frame01.nz.2.png,3 1136 | patient066_frame01.nz.3.png,3 1137 | patient066_frame01.nz.4.png,3 1138 | patient066_frame01.nz.5.png,3 1139 | patient066_frame01.nz.6.png,3 1140 | patient066_frame01.nz.7.png,3 1141 | patient066_frame01.nz.8.png,3 1142 | patient066_frame11.nz.0.png,3 1143 | patient066_frame11.nz.1.png,3 1144 | patient066_frame11.nz.2.png,3 1145 | patient066_frame11.nz.3.png,3 1146 | patient066_frame11.nz.4.png,3 1147 | patient066_frame11.nz.5.png,3 1148 | patient066_frame11.nz.6.png,3 1149 | patient066_frame11.nz.7.png,3 1150 | patient066_frame11.nz.8.png,3 1151 | patient067_frame01.nz.0.png,3 1152 | patient067_frame01.nz.1.png,3 1153 | 
patient067_frame01.nz.2.png,3 1154 | patient067_frame01.nz.3.png,3 1155 | patient067_frame01.nz.4.png,3 1156 | patient067_frame01.nz.5.png,3 1157 | patient067_frame01.nz.6.png,3 1158 | patient067_frame01.nz.7.png,3 1159 | patient067_frame01.nz.8.png,3 1160 | patient067_frame01.nz.9.png,3 1161 | patient067_frame10.nz.0.png,3 1162 | patient067_frame10.nz.1.png,3 1163 | patient067_frame10.nz.2.png,3 1164 | patient067_frame10.nz.3.png,3 1165 | patient067_frame10.nz.4.png,3 1166 | patient067_frame10.nz.5.png,3 1167 | patient067_frame10.nz.6.png,3 1168 | patient067_frame10.nz.7.png,3 1169 | patient067_frame10.nz.8.png,3 1170 | patient067_frame10.nz.9.png,3 1171 | patient068_frame01.nz.0.png,3 1172 | patient068_frame01.nz.1.png,3 1173 | patient068_frame01.nz.2.png,3 1174 | patient068_frame01.nz.3.png,3 1175 | patient068_frame01.nz.4.png,3 1176 | patient068_frame01.nz.5.png,3 1177 | patient068_frame01.nz.6.png,3 1178 | patient068_frame12.nz.0.png,3 1179 | patient068_frame12.nz.1.png,3 1180 | patient068_frame12.nz.2.png,3 1181 | patient068_frame12.nz.3.png,3 1182 | patient068_frame12.nz.4.png,3 1183 | patient068_frame12.nz.5.png,3 1184 | patient068_frame12.nz.6.png,3 1185 | patient069_frame01.nz.0.png,3 1186 | patient069_frame01.nz.1.png,3 1187 | patient069_frame01.nz.2.png,3 1188 | patient069_frame01.nz.3.png,3 1189 | patient069_frame01.nz.4.png,3 1190 | patient069_frame01.nz.5.png,3 1191 | patient069_frame01.nz.6.png,3 1192 | patient069_frame12.nz.0.png,3 1193 | patient069_frame12.nz.1.png,3 1194 | patient069_frame12.nz.2.png,3 1195 | patient069_frame12.nz.3.png,3 1196 | patient069_frame12.nz.4.png,3 1197 | patient069_frame12.nz.5.png,3 1198 | patient069_frame12.nz.6.png,3 1199 | patient070_frame01.nz.0.png,3 1200 | patient070_frame01.nz.1.png,3 1201 | patient070_frame01.nz.2.png,3 1202 | patient070_frame01.nz.3.png,3 1203 | patient070_frame01.nz.4.png,3 1204 | patient070_frame01.nz.5.png,3 1205 | patient070_frame10.nz.0.png,3 1206 | patient070_frame10.nz.1.png,3 1207 | patient070_frame10.nz.2.png,3 1208 | patient070_frame10.nz.3.png,3 1209 | patient070_frame10.nz.4.png,3 1210 | patient070_frame10.nz.5.png,3 1211 | patient071_frame01.nz.0.png,3 1212 | patient071_frame01.nz.1.png,3 1213 | patient071_frame01.nz.2.png,3 1214 | patient071_frame01.nz.3.png,3 1215 | patient071_frame01.nz.4.png,3 1216 | patient071_frame01.nz.5.png,3 1217 | patient071_frame01.nz.6.png,3 1218 | patient071_frame01.nz.7.png,3 1219 | patient071_frame01.nz.8.png,3 1220 | patient071_frame01.nz.9.png,3 1221 | patient071_frame09.nz.0.png,3 1222 | patient071_frame09.nz.1.png,3 1223 | patient071_frame09.nz.2.png,3 1224 | patient071_frame09.nz.3.png,3 1225 | patient071_frame09.nz.4.png,3 1226 | patient071_frame09.nz.5.png,3 1227 | patient071_frame09.nz.6.png,3 1228 | patient071_frame09.nz.7.png,3 1229 | patient071_frame09.nz.8.png,3 1230 | patient071_frame09.nz.9.png,3 1231 | patient072_frame01.nz.0.png,3 1232 | patient072_frame01.nz.1.png,3 1233 | patient072_frame01.nz.2.png,3 1234 | patient072_frame01.nz.3.png,3 1235 | patient072_frame01.nz.4.png,3 1236 | patient072_frame01.nz.5.png,3 1237 | patient072_frame01.nz.6.png,3 1238 | patient072_frame01.nz.7.png,3 1239 | patient072_frame11.nz.0.png,3 1240 | patient072_frame11.nz.1.png,3 1241 | patient072_frame11.nz.2.png,3 1242 | patient072_frame11.nz.3.png,3 1243 | patient072_frame11.nz.4.png,3 1244 | patient072_frame11.nz.5.png,3 1245 | patient072_frame11.nz.6.png,3 1246 | patient072_frame11.nz.7.png,3 1247 | patient073_frame01.nz.0.png,3 1248 | patient073_frame01.nz.1.png,3 1249 | 
patient073_frame01.nz.2.png,3 1250 | patient073_frame01.nz.3.png,3 1251 | patient073_frame01.nz.4.png,3 1252 | patient073_frame01.nz.5.png,3 1253 | patient073_frame01.nz.6.png,3 1254 | patient073_frame10.nz.0.png,3 1255 | patient073_frame10.nz.1.png,3 1256 | patient073_frame10.nz.2.png,3 1257 | patient073_frame10.nz.3.png,3 1258 | patient073_frame10.nz.4.png,3 1259 | patient073_frame10.nz.5.png,3 1260 | patient073_frame10.nz.6.png,3 1261 | patient074_frame01.nz.0.png,3 1262 | patient074_frame01.nz.1.png,3 1263 | patient074_frame01.nz.2.png,3 1264 | patient074_frame01.nz.3.png,3 1265 | patient074_frame01.nz.4.png,3 1266 | patient074_frame01.nz.5.png,3 1267 | patient074_frame01.nz.6.png,3 1268 | patient074_frame01.nz.7.png,3 1269 | patient074_frame12.nz.0.png,3 1270 | patient074_frame12.nz.1.png,3 1271 | patient074_frame12.nz.2.png,3 1272 | patient074_frame12.nz.3.png,3 1273 | patient074_frame12.nz.4.png,3 1274 | patient074_frame12.nz.5.png,3 1275 | patient074_frame12.nz.6.png,3 1276 | patient074_frame12.nz.7.png,3 1277 | patient075_frame01.nz.0.png,3 1278 | patient075_frame01.nz.1.png,3 1279 | patient075_frame01.nz.10.png,3 1280 | patient075_frame01.nz.11.png,3 1281 | patient075_frame01.nz.12.png,3 1282 | patient075_frame01.nz.13.png,3 1283 | patient075_frame01.nz.2.png,3 1284 | patient075_frame01.nz.3.png,3 1285 | patient075_frame01.nz.4.png,3 1286 | patient075_frame01.nz.5.png,3 1287 | patient075_frame01.nz.6.png,3 1288 | patient075_frame01.nz.7.png,3 1289 | patient075_frame01.nz.8.png,3 1290 | patient075_frame01.nz.9.png,3 1291 | patient075_frame06.nz.0.png,3 1292 | patient075_frame06.nz.1.png,3 1293 | patient075_frame06.nz.10.png,3 1294 | patient075_frame06.nz.11.png,3 1295 | patient075_frame06.nz.12.png,3 1296 | patient075_frame06.nz.13.png,3 1297 | patient075_frame06.nz.2.png,3 1298 | patient075_frame06.nz.3.png,3 1299 | patient075_frame06.nz.4.png,3 1300 | patient075_frame06.nz.5.png,3 1301 | patient075_frame06.nz.6.png,3 1302 | patient075_frame06.nz.7.png,3 1303 | patient075_frame06.nz.8.png,3 1304 | patient075_frame06.nz.9.png,3 1305 | patient076_frame01.nz.0.png,3 1306 | patient076_frame01.nz.1.png,3 1307 | patient076_frame01.nz.2.png,3 1308 | patient076_frame01.nz.3.png,3 1309 | patient076_frame01.nz.4.png,3 1310 | patient076_frame01.nz.5.png,3 1311 | patient076_frame01.nz.6.png,3 1312 | patient076_frame01.nz.7.png,3 1313 | patient076_frame12.nz.0.png,3 1314 | patient076_frame12.nz.1.png,3 1315 | patient076_frame12.nz.2.png,3 1316 | patient076_frame12.nz.3.png,3 1317 | patient076_frame12.nz.4.png,3 1318 | patient076_frame12.nz.5.png,3 1319 | patient076_frame12.nz.6.png,3 1320 | patient076_frame12.nz.7.png,3 1321 | patient078_frame01.nz.0.png,3 1322 | patient078_frame01.nz.1.png,3 1323 | patient078_frame01.nz.2.png,3 1324 | patient078_frame01.nz.3.png,3 1325 | patient078_frame01.nz.4.png,3 1326 | patient078_frame01.nz.5.png,3 1327 | patient078_frame01.nz.6.png,3 1328 | patient078_frame01.nz.7.png,3 1329 | patient078_frame09.nz.0.png,3 1330 | patient078_frame09.nz.1.png,3 1331 | patient078_frame09.nz.2.png,3 1332 | patient078_frame09.nz.3.png,3 1333 | patient078_frame09.nz.4.png,3 1334 | patient078_frame09.nz.5.png,3 1335 | patient078_frame09.nz.6.png,3 1336 | patient078_frame09.nz.7.png,3 1337 | patient079_frame01.nz.0.png,3 1338 | patient079_frame01.nz.1.png,3 1339 | patient079_frame01.nz.2.png,3 1340 | patient079_frame01.nz.3.png,3 1341 | patient079_frame01.nz.4.png,3 1342 | patient079_frame01.nz.5.png,3 1343 | patient079_frame01.nz.6.png,3 1344 | patient079_frame01.nz.7.png,3 
1345 | patient079_frame01.nz.8.png,3 1346 | patient079_frame11.nz.0.png,3 1347 | patient079_frame11.nz.1.png,3 1348 | patient079_frame11.nz.2.png,3 1349 | patient079_frame11.nz.3.png,3 1350 | patient079_frame11.nz.4.png,3 1351 | patient079_frame11.nz.5.png,3 1352 | patient079_frame11.nz.6.png,3 1353 | patient079_frame11.nz.7.png,3 1354 | patient079_frame11.nz.8.png,3 1355 | patient080_frame01.nz.0.png,3 1356 | patient080_frame01.nz.1.png,3 1357 | patient080_frame01.nz.2.png,3 1358 | patient080_frame01.nz.3.png,3 1359 | patient080_frame01.nz.4.png,3 1360 | patient080_frame01.nz.5.png,3 1361 | patient080_frame10.nz.0.png,3 1362 | patient080_frame10.nz.1.png,3 1363 | patient080_frame10.nz.2.png,3 1364 | patient080_frame10.nz.3.png,3 1365 | patient080_frame10.nz.4.png,3 1366 | patient080_frame10.nz.5.png,3 1367 | patient082_frame01.nz.0.png,4 1368 | patient082_frame01.nz.1.png,4 1369 | patient082_frame01.nz.10.png,4 1370 | patient082_frame01.nz.11.png,4 1371 | patient082_frame01.nz.12.png,4 1372 | patient082_frame01.nz.13.png,4 1373 | patient082_frame01.nz.14.png,4 1374 | patient082_frame01.nz.15.png,4 1375 | patient082_frame01.nz.2.png,4 1376 | patient082_frame01.nz.3.png,4 1377 | patient082_frame01.nz.4.png,4 1378 | patient082_frame01.nz.5.png,4 1379 | patient082_frame01.nz.6.png,4 1380 | patient082_frame01.nz.7.png,4 1381 | patient082_frame01.nz.8.png,4 1382 | patient082_frame01.nz.9.png,4 1383 | patient082_frame07.nz.0.png,4 1384 | patient082_frame07.nz.1.png,4 1385 | patient082_frame07.nz.10.png,4 1386 | patient082_frame07.nz.11.png,4 1387 | patient082_frame07.nz.12.png,4 1388 | patient082_frame07.nz.13.png,4 1389 | patient082_frame07.nz.14.png,4 1390 | patient082_frame07.nz.15.png,4 1391 | patient082_frame07.nz.2.png,4 1392 | patient082_frame07.nz.3.png,4 1393 | patient082_frame07.nz.4.png,4 1394 | patient082_frame07.nz.5.png,4 1395 | patient082_frame07.nz.6.png,4 1396 | patient082_frame07.nz.7.png,4 1397 | patient082_frame07.nz.8.png,4 1398 | patient082_frame07.nz.9.png,4 1399 | patient083_frame01.nz.0.png,4 1400 | patient083_frame01.nz.1.png,4 1401 | patient083_frame01.nz.2.png,4 1402 | patient083_frame01.nz.3.png,4 1403 | patient083_frame01.nz.4.png,4 1404 | patient083_frame01.nz.5.png,4 1405 | patient083_frame08.nz.0.png,4 1406 | patient083_frame08.nz.1.png,4 1407 | patient083_frame08.nz.2.png,4 1408 | patient083_frame08.nz.3.png,4 1409 | patient083_frame08.nz.4.png,4 1410 | patient083_frame08.nz.5.png,4 1411 | patient084_frame01.nz.0.png,4 1412 | patient084_frame01.nz.1.png,4 1413 | patient084_frame01.nz.10.png,4 1414 | patient084_frame01.nz.11.png,4 1415 | patient084_frame01.nz.2.png,4 1416 | patient084_frame01.nz.3.png,4 1417 | patient084_frame01.nz.4.png,4 1418 | patient084_frame01.nz.5.png,4 1419 | patient084_frame01.nz.6.png,4 1420 | patient084_frame01.nz.7.png,4 1421 | patient084_frame01.nz.8.png,4 1422 | patient084_frame01.nz.9.png,4 1423 | patient084_frame10.nz.0.png,4 1424 | patient084_frame10.nz.1.png,4 1425 | patient084_frame10.nz.10.png,4 1426 | patient084_frame10.nz.11.png,4 1427 | patient084_frame10.nz.2.png,4 1428 | patient084_frame10.nz.3.png,4 1429 | patient084_frame10.nz.4.png,4 1430 | patient084_frame10.nz.5.png,4 1431 | patient084_frame10.nz.6.png,4 1432 | patient084_frame10.nz.7.png,4 1433 | patient084_frame10.nz.8.png,4 1434 | patient084_frame10.nz.9.png,4 1435 | patient085_frame01.nz.0.png,4 1436 | patient085_frame01.nz.1.png,4 1437 | patient085_frame01.nz.10.png,4 1438 | patient085_frame01.nz.11.png,4 1439 | patient085_frame01.nz.12.png,4 1440 | 
patient085_frame01.nz.13.png,4 1441 | patient085_frame01.nz.14.png,4 1442 | patient085_frame01.nz.2.png,4 1443 | patient085_frame01.nz.3.png,4 1444 | patient085_frame01.nz.4.png,4 1445 | patient085_frame01.nz.5.png,4 1446 | patient085_frame01.nz.6.png,4 1447 | patient085_frame01.nz.7.png,4 1448 | patient085_frame01.nz.8.png,4 1449 | patient085_frame01.nz.9.png,4 1450 | patient085_frame09.nz.0.png,4 1451 | patient085_frame09.nz.1.png,4 1452 | patient085_frame09.nz.10.png,4 1453 | patient085_frame09.nz.11.png,4 1454 | patient085_frame09.nz.12.png,4 1455 | patient085_frame09.nz.13.png,4 1456 | patient085_frame09.nz.14.png,4 1457 | patient085_frame09.nz.2.png,4 1458 | patient085_frame09.nz.3.png,4 1459 | patient085_frame09.nz.4.png,4 1460 | patient085_frame09.nz.5.png,4 1461 | patient085_frame09.nz.6.png,4 1462 | patient085_frame09.nz.7.png,4 1463 | patient085_frame09.nz.8.png,4 1464 | patient085_frame09.nz.9.png,4 1465 | patient086_frame01.nz.0.png,4 1466 | patient086_frame01.nz.1.png,4 1467 | patient086_frame01.nz.2.png,4 1468 | patient086_frame01.nz.3.png,4 1469 | patient086_frame01.nz.4.png,4 1470 | patient086_frame01.nz.5.png,4 1471 | patient086_frame01.nz.6.png,4 1472 | patient086_frame08.nz.0.png,4 1473 | patient086_frame08.nz.1.png,4 1474 | patient086_frame08.nz.2.png,4 1475 | patient086_frame08.nz.3.png,4 1476 | patient086_frame08.nz.4.png,4 1477 | patient086_frame08.nz.5.png,4 1478 | patient086_frame08.nz.6.png,4 1479 | patient087_frame01.nz.0.png,4 1480 | patient087_frame01.nz.1.png,4 1481 | patient087_frame01.nz.2.png,4 1482 | patient087_frame01.nz.3.png,4 1483 | patient087_frame01.nz.4.png,4 1484 | patient087_frame01.nz.5.png,4 1485 | patient087_frame01.nz.6.png,4 1486 | patient087_frame01.nz.7.png,4 1487 | patient087_frame10.nz.0.png,4 1488 | patient087_frame10.nz.1.png,4 1489 | patient087_frame10.nz.2.png,4 1490 | patient087_frame10.nz.3.png,4 1491 | patient087_frame10.nz.4.png,4 1492 | patient087_frame10.nz.5.png,4 1493 | patient087_frame10.nz.6.png,4 1494 | patient087_frame10.nz.7.png,4 1495 | patient088_frame01.nz.0.png,4 1496 | patient088_frame01.nz.1.png,4 1497 | patient088_frame01.nz.10.png,4 1498 | patient088_frame01.nz.11.png,4 1499 | patient088_frame01.nz.12.png,4 1500 | patient088_frame01.nz.13.png,4 1501 | patient088_frame01.nz.14.png,4 1502 | patient088_frame01.nz.15.png,4 1503 | patient088_frame01.nz.2.png,4 1504 | patient088_frame01.nz.3.png,4 1505 | patient088_frame01.nz.4.png,4 1506 | patient088_frame01.nz.5.png,4 1507 | patient088_frame01.nz.6.png,4 1508 | patient088_frame01.nz.7.png,4 1509 | patient088_frame01.nz.8.png,4 1510 | patient088_frame01.nz.9.png,4 1511 | patient088_frame12.nz.0.png,4 1512 | patient088_frame12.nz.1.png,4 1513 | patient088_frame12.nz.10.png,4 1514 | patient088_frame12.nz.11.png,4 1515 | patient088_frame12.nz.12.png,4 1516 | patient088_frame12.nz.13.png,4 1517 | patient088_frame12.nz.14.png,4 1518 | patient088_frame12.nz.15.png,4 1519 | patient088_frame12.nz.2.png,4 1520 | patient088_frame12.nz.3.png,4 1521 | patient088_frame12.nz.4.png,4 1522 | patient088_frame12.nz.5.png,4 1523 | patient088_frame12.nz.6.png,4 1524 | patient088_frame12.nz.7.png,4 1525 | patient088_frame12.nz.8.png,4 1526 | patient088_frame12.nz.9.png,4 1527 | patient089_frame01.nz.0.png,4 1528 | patient089_frame01.nz.1.png,4 1529 | patient089_frame01.nz.2.png,4 1530 | patient089_frame01.nz.3.png,4 1531 | patient089_frame01.nz.4.png,4 1532 | patient089_frame01.nz.5.png,4 1533 | patient089_frame10.nz.0.png,4 1534 | patient089_frame10.nz.1.png,4 1535 | 
patient089_frame10.nz.2.png,4 1536 | patient089_frame10.nz.3.png,4 1537 | patient089_frame10.nz.4.png,4 1538 | patient089_frame10.nz.5.png,4 1539 | patient090_frame04.nz.0.png,4 1540 | patient090_frame04.nz.1.png,4 1541 | patient090_frame04.nz.2.png,4 1542 | patient090_frame04.nz.3.png,4 1543 | patient090_frame04.nz.4.png,4 1544 | patient090_frame04.nz.5.png,4 1545 | patient090_frame04.nz.6.png,4 1546 | patient091_frame01.nz.0.png,4 1547 | patient091_frame01.nz.1.png,4 1548 | patient091_frame01.nz.2.png,4 1549 | patient091_frame01.nz.3.png,4 1550 | patient091_frame01.nz.4.png,4 1551 | patient091_frame01.nz.5.png,4 1552 | patient091_frame01.nz.6.png,4 1553 | patient091_frame01.nz.7.png,4 1554 | patient091_frame09.nz.0.png,4 1555 | patient091_frame09.nz.1.png,4 1556 | patient091_frame09.nz.2.png,4 1557 | patient091_frame09.nz.3.png,4 1558 | patient091_frame09.nz.4.png,4 1559 | patient091_frame09.nz.5.png,4 1560 | patient091_frame09.nz.6.png,4 1561 | patient091_frame09.nz.7.png,4 1562 | patient093_frame01.nz.0.png,4 1563 | patient093_frame01.nz.1.png,4 1564 | patient093_frame01.nz.2.png,4 1565 | patient093_frame01.nz.3.png,4 1566 | patient093_frame01.nz.4.png,4 1567 | patient093_frame01.nz.5.png,4 1568 | patient093_frame01.nz.6.png,4 1569 | patient093_frame01.nz.7.png,4 1570 | patient093_frame01.nz.8.png,4 1571 | patient093_frame01.nz.9.png,4 1572 | patient093_frame14.nz.0.png,4 1573 | patient093_frame14.nz.1.png,4 1574 | patient093_frame14.nz.2.png,4 1575 | patient093_frame14.nz.3.png,4 1576 | patient093_frame14.nz.4.png,4 1577 | patient093_frame14.nz.5.png,4 1578 | patient093_frame14.nz.6.png,4 1579 | patient093_frame14.nz.7.png,4 1580 | patient093_frame14.nz.8.png,4 1581 | patient093_frame14.nz.9.png,4 1582 | patient094_frame01.nz.0.png,4 1583 | patient094_frame01.nz.1.png,4 1584 | patient094_frame01.nz.2.png,4 1585 | patient094_frame01.nz.3.png,4 1586 | patient094_frame01.nz.4.png,4 1587 | patient094_frame01.nz.5.png,4 1588 | patient094_frame01.nz.6.png,4 1589 | patient094_frame01.nz.7.png,4 1590 | patient094_frame01.nz.8.png,4 1591 | patient094_frame01.nz.9.png,4 1592 | patient094_frame07.nz.0.png,4 1593 | patient094_frame07.nz.1.png,4 1594 | patient094_frame07.nz.2.png,4 1595 | patient094_frame07.nz.3.png,4 1596 | patient094_frame07.nz.4.png,4 1597 | patient094_frame07.nz.5.png,4 1598 | patient094_frame07.nz.6.png,4 1599 | patient094_frame07.nz.7.png,4 1600 | patient094_frame07.nz.8.png,4 1601 | patient094_frame07.nz.9.png,4 1602 | patient095_frame01.nz.0.png,4 1603 | patient095_frame01.nz.1.png,4 1604 | patient095_frame01.nz.10.png,4 1605 | patient095_frame01.nz.11.png,4 1606 | patient095_frame01.nz.12.png,4 1607 | patient095_frame01.nz.13.png,4 1608 | patient095_frame01.nz.2.png,4 1609 | patient095_frame01.nz.3.png,4 1610 | patient095_frame01.nz.4.png,4 1611 | patient095_frame01.nz.5.png,4 1612 | patient095_frame01.nz.6.png,4 1613 | patient095_frame01.nz.7.png,4 1614 | patient095_frame01.nz.8.png,4 1615 | patient095_frame01.nz.9.png,4 1616 | patient095_frame12.nz.0.png,4 1617 | patient095_frame12.nz.1.png,4 1618 | patient095_frame12.nz.10.png,4 1619 | patient095_frame12.nz.11.png,4 1620 | patient095_frame12.nz.12.png,4 1621 | patient095_frame12.nz.13.png,4 1622 | patient095_frame12.nz.2.png,4 1623 | patient095_frame12.nz.3.png,4 1624 | patient095_frame12.nz.4.png,4 1625 | patient095_frame12.nz.5.png,4 1626 | patient095_frame12.nz.6.png,4 1627 | patient095_frame12.nz.7.png,4 1628 | patient095_frame12.nz.8.png,4 1629 | patient095_frame12.nz.9.png,4 1630 | patient097_frame01.nz.0.png,4 
1631 | patient097_frame01.nz.1.png,4 1632 | patient097_frame01.nz.2.png,4 1633 | patient097_frame01.nz.3.png,4 1634 | patient097_frame01.nz.4.png,4 1635 | patient097_frame01.nz.5.png,4 1636 | patient097_frame01.nz.6.png,4 1637 | patient097_frame01.nz.7.png,4 1638 | patient097_frame11.nz.0.png,4 1639 | patient097_frame11.nz.1.png,4 1640 | patient097_frame11.nz.2.png,4 1641 | patient097_frame11.nz.3.png,4 1642 | patient097_frame11.nz.4.png,4 1643 | patient097_frame11.nz.5.png,4 1644 | patient097_frame11.nz.6.png,4 1645 | patient097_frame11.nz.7.png,4 1646 | patient098_frame01.nz.0.png,4 1647 | patient098_frame01.nz.1.png,4 1648 | patient098_frame01.nz.2.png,4 1649 | patient098_frame01.nz.3.png,4 1650 | patient098_frame01.nz.4.png,4 1651 | patient098_frame01.nz.5.png,4 1652 | patient098_frame01.nz.6.png,4 1653 | patient098_frame09.nz.0.png,4 1654 | patient098_frame09.nz.1.png,4 1655 | patient098_frame09.nz.2.png,4 1656 | patient098_frame09.nz.3.png,4 1657 | patient098_frame09.nz.4.png,4 1658 | patient098_frame09.nz.5.png,4 1659 | patient098_frame09.nz.6.png,4 1660 | patient100_frame01.nz.0.png,4 1661 | patient100_frame01.nz.1.png,4 1662 | patient100_frame01.nz.2.png,4 1663 | patient100_frame01.nz.3.png,4 1664 | patient100_frame01.nz.4.png,4 1665 | patient100_frame01.nz.5.png,4 1666 | patient100_frame01.nz.6.png,4 1667 | patient100_frame01.nz.7.png,4 1668 | patient100_frame13.nz.0.png,4 1669 | patient100_frame13.nz.1.png,4 1670 | patient100_frame13.nz.2.png,4 1671 | patient100_frame13.nz.3.png,4 1672 | patient100_frame13.nz.4.png,4 1673 | patient100_frame13.nz.5.png,4 1674 | patient100_frame13.nz.6.png,4 1675 | patient100_frame13.nz.7.png,4 -------------------------------------------------------------------------------- /DIRNet-tensorflow/misc/DIRNet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/misc/DIRNet.png -------------------------------------------------------------------------------- /DIRNet-tensorflow/models.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from Resnet_model import imagenet_resnet_v2 3 | from WarpST import WarpST 4 | from AffineST import AffineST 5 | from ops import * 6 | import scipy.misc 7 | 8 | 9 | class CNN(object): 10 | def __init__(self, name, is_train): 11 | self.name = name 12 | self.is_train = is_train 13 | self.reuse = None 14 | 15 | def __call__(self, x): 16 | # localisation network 17 | with tf.variable_scope(self.name, reuse=self.reuse): 18 | # x = conv2d(x, "conv1", 512, 3, 1, 19 | # "SAME", True, tf.nn.elu, self.is_train) 20 | # x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 21 | # 22 | # x = conv2d(x, "conv2", 256, 3, 1, 23 | # "SAME", True, tf.nn.elu, self.is_train) 24 | # x = conv2d(x, "conv3", 128, 3, 1, 25 | # "SAME", True, tf.nn.elu, self.is_train) 26 | # x = conv2d(x, "conv4", 128, 3, 1, 27 | # "SAME", True, tf.nn.elu, self.is_train) 28 | # x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 29 | # x = conv2d(x, "conv5", 64, 3, 1, 30 | # "SAME", False, tf.nn.elu, self.is_train) 31 | # x = conv2d(x, "conv6", 32, 3, 1, 32 | # "SAME", False, tf.nn.elu, self.is_train) 33 | # x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 34 | # x = conv2d(x, "out", 2, 3, 1, 35 | # "SAME", False, None, self.is_train) 36 | # x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 37 | x = conv2d(x, "conv1", 64, 3, 1, 38 
| "SAME", True, tf.nn.elu, self.is_train) 39 | x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 40 | 41 | x = conv2d(x, "conv2", 128, 3, 1, 42 | "SAME", True, tf.nn.elu, self.is_train) 43 | x = conv2d(x, "out1", 128, 3, 1, 44 | "SAME", True, tf.nn.elu, self.is_train) 45 | x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 46 | x = conv2d(x, "out2", 2, 3, 1, 47 | "SAME", False, None, self.is_train) 48 | x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 49 | 50 | if self.reuse is None: 51 | self.var_list = tf.get_collection( 52 | tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) 53 | self.saver = tf.train.Saver(self.var_list) 54 | self.reuse = True 55 | # resnetmodel = Resnet_model.imagenet_resnet_v2(18, 5, use_as_loc=True, data_format=None) 56 | # with tf.variable_scope(self.name, reuse=self.reuse): 57 | # x = resnetmodel(x, self.is_train) 58 | return x 59 | 60 | def save(self, sess, ckpt_path): 61 | self.saver.save(sess, ckpt_path) 62 | 63 | def restore(self, sess, ckpt_path): 64 | self.saver.restore(sess, ckpt_path) 65 | 66 | 67 | class Disease_Classifier(object): 68 | def __init__(self, name, is_train): 69 | self.name = name 70 | self.is_train = is_train 71 | self.reuse = None 72 | 73 | def __call__(self, x): 74 | # classifier network 75 | # TODO: think of of reasonable network architecture 76 | with tf.variable_scope(self.name, reuse=self.reuse): 77 | # x = conv2d(x, "conv1", 64, 3, 1, 78 | # "SAME", True, tf.nn.elu, self.is_train) 79 | # x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 80 | # 81 | # x = conv2d(x, "conv2", 128, 3, 1, 82 | # "SAME", True, tf.nn.elu, self.is_train) 83 | # x = conv2d(x, "out1", 128, 3, 1, 84 | # "SAME", True, tf.nn.elu, self.is_train) 85 | # x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 86 | # x = conv2d(x, "out2", 2, 3, 1, 87 | # "SAME", False, None, self.is_train) 88 | # x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") 89 | # # # reshape to [batchsize,features_count] 90 | print(x.shape) 91 | # x = tf.reshape(x, [-1, 14 * 16 * 2]) 92 | x = tf.reshape(x, [-1, 31 * 28 * 2]) 93 | print(x.shape) 94 | # dense layer for classification 95 | x = tf.layers.dense(x, units=512, activation=tf.nn.sigmoid) 96 | 97 | # dropout = tf.layers.dropout( 98 | # inputs=x, rate=0.4) 99 | 100 | # to get back to our 5 classes 101 | x = tf.layers.dense(x, units=5, activation=tf.nn.sigmoid) 102 | 103 | if self.reuse is None: 104 | self.var_list = tf.get_collection( 105 | tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) 106 | self.saver = tf.train.Saver(self.var_list) 107 | self.reuse = True 108 | return x 109 | 110 | def save(self, sess, ckpt_path): 111 | self.saver.save(sess, ckpt_path) 112 | 113 | def restore(self, sess, ckpt_path): 114 | self.saver.restore(sess, ckpt_path) 115 | 116 | 117 | class ResNet(object): 118 | def __init__(self, sess, config, name, is_train): 119 | self.sess = sess 120 | self.name = name 121 | self.is_train = is_train 122 | 123 | # image shape for grayscale images 124 | im_shape = [config.batch_size] + config.im_size + [1] 125 | # x => moving image 126 | self.x = tf.placeholder(tf.float32, im_shape) 127 | # y => fixed image 128 | self.y = tf.placeholder(tf.float32, im_shape) 129 | self.labels = tf.placeholder(tf.int32, [config.batch_size]) 130 | # x and y concatenated in color channel 131 | self.xy = tf.concat([self.x, self.y], 3) 132 | 133 | self.model = imagenet_resnet_v2(18, 5, data_format='channels_first') 134 | self.logits = self.model(self.x, is_training=True) 135 | 136 | # create predictions => 
take the class with the highest likelihood from the logits 137 | self.prediction = tf.argmax(self.logits, 1) 138 | self.var_list = tf.get_collection( 139 | tf.GraphKeys.GLOBAL_VARIABLES) 140 | self.saver = tf.train.Saver(self.var_list) 141 | 142 | if self.is_train: 143 | # loss definition: classification loss only 144 | self.loss = self.disease_loss(self.labels, self.logits) 145 | # self.loss = mse(self.y, self.z) 146 | 147 | self.optim = tf.train.AdamOptimizer(config.lr) 148 | self.train = self.optim.minimize( 149 | self.loss) 150 | 151 | # self.sess.run( 152 | # tf.variables_initializer(self.vCNN.var_list)) 153 | self.sess.run(tf.global_variables_initializer()) 154 | 155 | def save(self, dir_path): 156 | self.saver.save(self.sess, dir_path + "/model_class.ckpt") 157 | 158 | def restore(self, dir_path): 159 | self.saver.restore(self.sess, dir_path + "/model_class.ckpt") 160 | 161 | def fit(self, batch_x, batch_y, batch_labels): 162 | _, loss, pred = \ 163 | self.sess.run([self.train, self.loss, self.prediction], 164 | {self.x: batch_x, self.y: batch_y, self.labels: batch_labels}) 165 | return loss, pred 166 | 167 | def disease_loss(self, labels, logits): 168 | """ 169 | :param labels: batch of labels 170 | :type labels: 1-D numpy array 171 | :param logits: logits from classifier network 172 | :type logits: 2-D tensor [batch_size, num_classes] 173 | :return: softmax_cross_entropy loss 174 | :rtype: scalar tensor 175 | """ 176 | # transform to one_hot vector and calc softmax_cross_entropy 177 | onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=5) 178 | loss = tf.losses.softmax_cross_entropy( 179 | onehot_labels=onehot_labels, logits=logits) 180 | return loss 181 | 182 | def deploy_with_labels(self, x, y, labels): 183 | """ 184 | :param x: batch of moving images 185 | :type x: numpy array [batch_size,height,width,color_channels] 186 | :param y: batch of fixed images 187 | :type y: numpy array [batch_size,height,width,color_channels] 188 | :param labels: corresponding labels 189 | :type labels: numpy array [batch_size] 190 | """ 191 | pred = self.sess.run([self.prediction], {self.x: x, self.y: y}) 192 | # print(z[0]) 193 | pred = int(pred[0]) 194 | # for i in range(labels.shape[0]): 195 | # print("label: ", labels[i], "prediction: ", pred) 196 | return pred 197 | 198 | 199 | class DIRNet(object): 200 | def __init__(self, sess, config, name, is_train): 201 | self.sess = sess 202 | self.name = name 203 | self.is_train = is_train 204 | 205 | # image shape for grayscale images 206 | im_shape = [config.batch_size] + config.im_size + [1] 207 | # x => moving image 208 | self.x = tf.placeholder(tf.float32, im_shape) 209 | # y => fixed image 210 | self.y = tf.placeholder(tf.float32, im_shape) 211 | self.labels = tf.placeholder(tf.int32, [config.batch_size]) 212 | # x and y concatenated in color channel 213 | self.xy = tf.concat([self.x, self.y], 3) 214 | 215 | self.vCNN = CNN("vector_CNN", is_train=self.is_train) 216 | self.ClassifierNetwork = Disease_Classifier("disease_classifier", is_train=self.is_train) 217 | 218 | # calc v => featuremap from the localisation network 219 | self.v = self.vCNN(self.xy) 220 | self.z = None 221 | if config.use_AffineST: 222 | self.z = AffineST(self.x, self.v, config.im_size) 223 | else: 224 | # warp using deformable transformation 225 | # z contains the warped image 226 | self.z = WarpST(self.x, self.v, config.im_size) 227 | 228 | # y and z concatenated in color channel 229 | self.yz = tf.concat([self.z, self.y], 3) 230 | # calc disease features 231 | self.d_features =
self.ClassifierNetwork(self.v) 232 | # self.d_features = self.ClassifierNetwork(self.yz) 233 | # create predictions => take the class with the highest likelihood from the logits 234 | self.prediction = tf.argmax(self.d_features, 1) 235 | 236 | if self.is_train: 237 | # loss definition and weighting of the 2 loss functions 238 | self.loss = ncc(self.y, self.z) - self.disease_loss(self.labels, self.d_features) # similarity of fixed and warped image minus classification loss 239 | # self.loss = mse(self.y, self.z) 240 | 241 | self.optim = tf.train.AdamOptimizer(config.lr) 242 | self.train = self.optim.minimize( 243 | - self.loss) 244 | 245 | # self.sess.run( 246 | # tf.variables_initializer(self.vCNN.var_list)) 247 | self.sess.run(tf.global_variables_initializer()) 248 | 249 | def fit(self, batch_x, batch_y, batch_labels): 250 | _, loss, pred = \ 251 | self.sess.run([self.train, self.loss, self.prediction], 252 | {self.x: batch_x, self.y: batch_y, self.labels: batch_labels}) 253 | return loss, pred 254 | 255 | def disease_loss(self, labels, logits): 256 | """ 257 | :param labels: batch of labels 258 | :type labels: 1-D numpy array 259 | :param logits: logits from classifier network 260 | :type logits: 2-D tensor [batch_size, num_classes] 261 | :return: softmax_cross_entropy loss 262 | :rtype: scalar tensor 263 | """ 264 | # transform to one_hot vector and calc softmax_cross_entropy 265 | onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=5) 266 | loss = tf.losses.softmax_cross_entropy( 267 | onehot_labels=onehot_labels, logits=logits) 268 | return loss 269 | 270 | def calc_rmse(self, x, y): 271 | ''' 272 | calculates the root mean squared error of two arrays 273 | :param x: first array 274 | :param y: second array, same shape as x 275 | :return: scalar root mean squared error 276 | ''' 277 | error = np.subtract(x, y) 278 | squared = np.square(error) 279 | avg = np.average(squared) 280 | rooted = np.sqrt(avg) 281 | return rooted 282 | 283 | def calc_rmse_all(self, x, y, dir_path, save_images): 284 | ''' 285 | calculates the mean rmse over all image pairs in x and y, 286 | both before registration (y vs. x) and after registration (y vs. z) 287 | :param x: numpy array of all moving images with dim-4 288 | :param y: numpy array of all fixed images with dim-4 289 | :param dir_path: directory to which the images are saved 290 | :param save_images: saves all images if true 291 | :return: mean rmse over all registered pairs (y,z) 292 | ''' 293 | rmse_original_res = 0 294 | rmse_registered_res = 0 295 | counter = 0 296 | # print(x.shape) 297 | for i in range(x.shape[0]): 298 | 299 | # calc transformed image 300 | z = self.sess.run(self.z, 301 | {self.x: np.expand_dims(x[i, :, :, :], 0), self.y: np.expand_dims(y[i, :, :, :], 0)}) 302 | 303 | # demean images 304 | z = z - np.mean(z[0, :, :, 0]) 305 | x_new = x[i, :, :, 0] - np.mean(x[i, :, :, 0]) 306 | y_new = y[i, :, :, 0] - np.mean(y[i, :, :, 0]) 307 | 308 | # calc rmse for (y,x) and (y,z) 309 | rmse_original = self.calc_rmse(y_new, x_new) 310 | rmse_registered = self.calc_rmse(y_new, z[0, :, :, 0]) 311 | 312 | # filter outliers ?
313 | # if abs(rmse_registered-rmse_original)<3: 314 | 315 | counter += 1 316 | rmse_original_res += rmse_original 317 | rmse_registered_res += rmse_registered 318 | 319 | # save x,y and registered image 320 | if save_images: 321 | scipy.misc.imsave(dir_path + "/{:02d}_x.tif".format(i + 1), x[i, :, :, 0]) 322 | scipy.misc.imsave(dir_path + "/{:02d}_y.tif".format(i + 1), y[i, :, :, 0]) 323 | scipy.misc.imsave(dir_path + "/{:02d}_z.tif".format(i + 1), z[0, :, :, 0]) 324 | (a, b) = ((rmse_original_res / counter), (rmse_registered_res / counter)) 325 | # print(" orig rmse {0}, registered rmse {1}".format(a,b)) 326 | return b 327 | 328 | def deploy_with_labels(self, x, y, labels): 329 | """ 330 | :param x: batch of moving images 331 | :type x: numpy array [batch_size,height,width,color_channels] 332 | :param y: batch of fixed images 333 | :type y: numpy array [batch_size,height,width,color_channels] 334 | :param labels: corresponding labels 335 | :type labels: numpy array [batch_size] 336 | """ 337 | pred, transformed = self.sess.run([self.prediction, self.z], {self.x: x, self.y: y}) 338 | # print(z[0]) 339 | pred = int(pred[0]) 340 | # for i in range(labels.shape[0]): 341 | # print("label: ", labels[i], "prediction: ", pred) 342 | return pred 343 | 344 | def deploy(self, dir_path, x, y): 345 | ''' 346 | saves the moving, fixed and warped images plus difference images for a batch of input images 347 | :param x: numpy array of all moving images with dim-4 348 | :param y: numpy array of all fixed images with dim-4 349 | :param dir_path: directory to which the images are saved 350 | ''' 351 | z = self.sess.run(self.z, {self.x: x, self.y: y}) 352 | for i in range(z.shape[0]): 353 | # demean images 354 | z_new = z[i, :, :, 0] - np.mean(z[i, :, :, 0]) 355 | x_new = x[i, :, :, 0] - np.mean(x[i, :, :, 0]) 356 | y_new = y[i, :, :, 0] - np.mean(y[i, :, :, 0]) 357 | 358 | # create difference image x-y and ignore low values 359 | array = np.subtract(x_new, y_new) 360 | low_values_flags = array < .8 361 | array[low_values_flags] = 0 362 | array = array[:, :] 363 | 364 | scipy.misc.imsave(dir_path + "/{:02d}_x-y.tif".format(i + 1), array) 365 | scipy.misc.imsave(dir_path + "/{:02d}_x.tif".format(i + 1), x_new) 366 | scipy.misc.imsave(dir_path + "/{:02d}_y.tif".format(i + 1), y_new) 367 | scipy.misc.imsave(dir_path + "/{:02d}_z.tif".format(i + 1), z_new) 368 | 369 | # create difference image z-y and ignore low values 370 | array = np.subtract(z_new, y_new) 371 | low_values_flags = array < .8 372 | array[low_values_flags] = 0 373 | scipy.misc.imsave(dir_path + "/{:02d}_z-y.tif".format(i + 1), array[:, :]) 374 | 375 | def save(self, dir_path): 376 | self.ClassifierNetwork.save(self.sess, dir_path + "/model_class.ckpt") 377 | self.vCNN.save(self.sess, dir_path + "/model_reg.ckpt") 378 | 379 | def restore(self, dir_path): 380 | self.ClassifierNetwork.restore(self.sess, dir_path + "/model_class.ckpt") 381 | self.vCNN.restore(self.sess, dir_path + "/model_reg.ckpt") 382 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/ops.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import os 3 | import skimage.io 4 | import numpy as np 5 | 6 | def conv2d(x, name, dim, k, s, p, bn, af, is_train): 7 | with tf.variable_scope(name): 8 | w = tf.get_variable('weight', [k, k, x.get_shape()[-1], dim], 9 | initializer=tf.truncated_normal_initializer(stddev=0.01)) 10 | x = tf.nn.conv2d(x, w, [1, s, s, 1], p) 11 | 12 | if bn: 13 | x = batch_norm(x,
"bn", is_train=is_train) 14 | else : 15 | b = tf.get_variable('biases', [dim], 16 | initializer=tf.constant_initializer(0.)) 17 | x += b 18 | 19 | if af: 20 | x = af(x) 21 | 22 | return x 23 | 24 | def batch_norm(x, name, momentum=0.9, epsilon=1e-5, is_train=True): 25 | return tf.contrib.layers.batch_norm(x, 26 | decay=momentum, 27 | updates_collections=None, 28 | epsilon=epsilon, 29 | scale=True, 30 | is_training=is_train, 31 | scope=name) 32 | 33 | def ncc(x, y): 34 | mean_x = tf.reduce_mean(x, [1,2,3], keep_dims=True) 35 | mean_y = tf.reduce_mean(y, [1,2,3], keep_dims=True) 36 | mean_x2 = tf.reduce_mean(tf.square(x), [1,2,3], keep_dims=True) 37 | mean_y2 = tf.reduce_mean(tf.square(y), [1,2,3], keep_dims=True) 38 | stddev_x = tf.reduce_sum(tf.sqrt( 39 | mean_x2 - tf.square(mean_x)), [1,2,3], keep_dims=True) 40 | stddev_y = tf.reduce_sum(tf.sqrt( 41 | mean_y2 - tf.square(mean_y)), [1,2,3], keep_dims=True) 42 | return tf.reduce_mean((x - mean_x) * (y - mean_y) / (stddev_x * stddev_y)) 43 | 44 | def mse(x, y): 45 | return tf.reduce_mean(tf.square(x - y)) 46 | 47 | def mkdir(dir_path): 48 | try : 49 | os.makedirs(dir_path) 50 | except: pass 51 | 52 | def save_image_with_scale(path, arr): 53 | arr = np.clip(arr, 0., 1.) 54 | arr = arr * 255. 55 | arr = arr.astype(np.uint8) 56 | skimage.io.imsave(path, arr) 57 | -------------------------------------------------------------------------------- /DIRNet-tensorflow/tf_playground.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HPI-DeepLearning/DIRNet/2817314fa79979ee2edae21e11d0cd45b8594ec8/DIRNet-tensorflow/tf_playground.py -------------------------------------------------------------------------------- /DIRNet-tensorflow/train.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from models import DIRNet,ResNet 3 | from config import get_config 4 | from data import DIRNetDatahandler 5 | import numpy as np 6 | from ops import mkdir 7 | 8 | 9 | def main(): 10 | tf.reset_default_graph() 11 | train_ResNet() 12 | # sess_config = tf.ConfigProto() 13 | # sess_config.gpu_options.allow_growth = True 14 | # sess = tf.Session(config=sess_config) 15 | # config = get_config(is_train=True) 16 | # mkdir(config.tmp_dir) 17 | # mkdir(config.ckpt_dir) 18 | # 19 | # reg = DIRNet(sess, config, "DIRNet", is_train=True) 20 | # # reg.restore(config.ckpt_dir) 21 | # dh = DIRNetDatahandler( config=config) 22 | # 23 | # amnt_pics = np.shape(dh.d_data)[0] 24 | # for epoch in range(13): 25 | # loss_sum = 0 26 | # acc = 0 27 | # for i in range(amnt_pics): 28 | # batch_x, batch_y, batch_labels = dh.get_pair_by_idx(i) 29 | # # loss = reg.fit((1, batch_x[0], batch_x[1], batch_x[2]), 30 | # # (1, batch_y[0], batch_y[1], batch_y[2])) 31 | # loss, prediction = reg.fit(batch_x, batch_y, batch_labels) 32 | # loss_sum += loss 33 | # prediction = int(prediction[0]) 34 | # truth = int(batch_labels[0]) 35 | # # print("pred {} truth {}".format(prediction, truth)) 36 | # if prediction == truth: 37 | # acc += 1 38 | # print("epoch {0}: Loss: {1:.4f} Acc: {2:.4f}".format(epoch, loss_sum / amnt_pics, acc / amnt_pics)) 39 | # # loss_sum = 0 40 | # # acc = 0 41 | # # amnt_eva = np.shape(dh.d_data_eval)[0] 42 | # # for i in range(amnt_eva): 43 | # # batch_x, batch_y, batch_labels = dh.get_eval_pair_by_idx(i) 44 | # # # loss = reg.fit((1, batch_x[0], batch_x[1], batch_x[2]), 45 | # # # (1, batch_y[0], batch_y[1], batch_y[2])) 46 | # # loss, prediction = 
reg.deploy_with_labels(batch_x, batch_y, batch_labels) 47 | # # loss_sum += loss 48 | # # prediction = int(prediction[0]) 49 | # # truth = int(batch_labels[0]) 50 | # # # print("pred {} truth {}".format(prediction, truth)) 51 | # # if prediction == truth: 52 | # # acc += 1 53 | # # print("eval {0}: Loss: {1:.4f} Acc: {2:.4f}".format(epoch, loss_sum / amnt_eva, acc / amnt_eva)) 54 | # 55 | # 56 | # if (epoch + 1) % 5 == 0: 57 | # # if (epoch+1) % config.checkpoint_distance == 0: 58 | # # reg.deploy(config.tmp_dir, batch_x, batch_y) 59 | # print('saving model...') 60 | # reg.save(config.ckpt_dir) 61 | # 62 | # 63 | # amnt_eva = np.shape(dh.d_data_eval)[0] 64 | # acc = 0 65 | # for i in range(amnt_eva): 66 | # batch_x, batch_y, batch_labels = dh.get_eval_pair_by_idx(i) 67 | # prev_x = batch_x 68 | # # loss = reg.fit((1, batch_x[0], batch_x[1], batch_x[2]), 69 | # # (1, batch_y[0], batch_y[1], batch_y[2])) 70 | # prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels) 71 | # truth = int(batch_labels[0]) 72 | # # print("pred {} truth {}".format(prediction, truth)) 73 | # if prediction == truth: 74 | # acc += 1 75 | # print("Acc: {0:.4f}".format(acc / amnt_eva)) 76 | # reg.calc_rmse_all(y=dh.d_data_eval, x=dh.s_data_eval,dir_path='', save_images=False) 77 | # # for i in range(config.iteration): 78 | # # # create new random batch 79 | # # batch_x, batch_y, batch_labels = dh.sample_pair(config.batch_size) 80 | # # 81 | # # # run sess => minimize loss 82 | # # loss = reg.fit(batch_x, batch_y,batch_labels) 83 | # # 84 | # # print("iter {:>6d} : {}".format(i + 1, loss)) 85 | # # 86 | # # if (i + 1) % config.checkpoint_distance == 0: 87 | # # # reg.deploy(config.tmp_dir, batch_x, batch_y) 88 | # # reg.save(config.ckpt_dir) 89 | 90 | def train_ResNet(): 91 | sess_config = tf.ConfigProto() 92 | sess_config.gpu_options.allow_growth = True 93 | sess = tf.Session(config=sess_config) 94 | config = get_config(is_train=True) 95 | mkdir(config.tmp_dir) 96 | mkdir(config.ckpt_dir) 97 | 98 | reg = ResNet(sess, config, "DIRNet", is_train=True) 99 | # reg.restore(config.ckpt_dir) 100 | dh = DIRNetDatahandler(config=config) 101 | 102 | amnt_pics = np.shape(dh.d_data)[0] 103 | for epoch in range(5): 104 | loss_sum = 0 105 | acc = 0 106 | for i in range(amnt_pics - 1): 107 | batch_x, batch_y, batch_labels = dh.get_pair_by_idx(i) 108 | 109 | 110 | # loss = reg.fit((1, batch_x[0], batch_x[1], batch_x[2]), 111 | # (1, batch_y[0], batch_y[1], batch_y[2])) 112 | loss, prediction = reg.fit(batch_x, batch_y, batch_labels) 113 | loss2, prediction2 = reg.fit(batch_y, batch_x, batch_labels) # train in both directions 114 | loss_sum += (loss + loss2) / 2 115 | prediction = int(prediction[0]) 116 | truth = int(batch_labels[0]) 117 | # print("pred {} truth {}".format(prediction, truth)) 118 | if prediction == truth: 119 | acc += 1 120 | if prediction2[0] == truth: 121 | acc += 1 122 | print("epoch {0}: Loss: {1:.4f} Acc: {2:.4f}".format(epoch, loss_sum / (amnt_pics - 1), acc / ((amnt_pics - 1) * 2))) 123 | 124 | if (epoch + 1) % 5 == 0: 125 | # if (epoch+1) % config.checkpoint_distance == 0: 126 | # reg.deploy(config.tmp_dir, batch_x, batch_y) 127 | print('saving model...') 128 | # reg.save(config.ckpt_dir) 129 | 130 | amnt_pics = np.shape(dh.d_data)[0] 131 | acc = 0 132 | prev_x = np.empty(shape=(1, 222, 247)) 133 | amnt_eva = np.shape(dh.d_data_eval)[0] 134 | for i in range(amnt_eva): 135 | batch_x, batch_y, batch_labels = dh.get_eval_pair_by_idx(i) 136 | if np.array_equal(prev_x, batch_x): 137 | print('weird') 138 | prev_x = batch_x 139 | # loss =
reg.fit((1, batch_x[0], batch_x[1], batch_x[2]), 140 | # (1, batch_y[0], batch_y[1], batch_y[2])) 141 | prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels) 142 | print(prediction, "::", batch_labels[0]) 143 | truth = int(batch_labels[0]) 144 | # print("pred {} truth {}".format(prediction, truth)) 145 | if prediction == truth: 146 | acc += 1 147 | print("Acc: {0:.4f}".format(acc / amnt_eva)) 148 | if __name__ == "__main__": 149 | main() 150 | --------------------------------------------------------------------------------
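As a quick cross-check of the `ncc` loss in DIRNet-tensorflow/ops.py: per image in the batch it normalizes both inputs by their mean and standard deviation and averages the pointwise product, so identical images score 1 and inverted images score -1. The following minimal NumPy sketch mirrors that computation; `ncc_np` is a hypothetical helper used only for illustration and is not part of the repository.

import numpy as np

def ncc_np(x, y):
    # x, y: [batch, height, width, channels], same layout as the TF version
    mean_x = x.mean(axis=(1, 2, 3), keepdims=True)
    mean_y = y.mean(axis=(1, 2, 3), keepdims=True)
    # per-image standard deviation via E[x^2] - E[x]^2, as in ops.ncc
    stddev_x = np.sqrt((x ** 2).mean(axis=(1, 2, 3), keepdims=True) - mean_x ** 2)
    stddev_y = np.sqrt((y ** 2).mean(axis=(1, 2, 3), keepdims=True) - mean_y ** 2)
    # mean over batch and pixels of the normalized product
    return ((x - mean_x) * (y - mean_y) / (stddev_x * stddev_y)).mean()

x = np.random.rand(2, 8, 8, 1)
print(ncc_np(x, x))      # ~ 1.0: identical images are perfectly correlated
print(ncc_np(x, 1 - x))  # ~ -1.0: inverted images are perfectly anti-correlated

Because the metric is invariant to shifts and rescalings of intensity, maximising it during training rewards alignment of image structure between the fixed image y and the warped image z rather than raw pixel equality.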