├── .DS_Store
├── README.md
├── data
│   ├── __pycache__
│   │   ├── download_datasets.cpython-37.pyc
│   │   ├── pf_dataset.cpython-37.pyc
│   │   ├── synth_dataset.cpython-37.pyc
│   │   └── test_dataset.cpython-37.pyc
│   ├── synth_dataset.py
│   └── test_dataset.py
├── geotnf
│   ├── __pycache__
│   │   ├── point_tnf.cpython-37(1).pyc
│   │   ├── point_tnf.cpython-37.pyc
│   │   ├── transformation.cpython-37(1).pyc
│   │   ├── transformation.cpython-37.pyc
│   │   └── transformation_high_res.cpython-37.pyc
│   ├── point_tnf.py
│   ├── transformation.py
│   └── transformation_high_res.py
├── image
│   ├── __pycache__
│   │   ├── normalization.cpython-37(1).pyc
│   │   └── normalization.cpython-37.pyc
│   └── normalization.py
├── jsonData
│   ├── TCIA_FUSION.json
│   └── reg_aaa0069.json
├── model
│   ├── ProsRegNet_model.py
│   ├── __pycache__
│   │   ├── ProsRegNet_model.cpython-37.pyc
│   │   ├── cnn_geometric_model.cpython-37(1).pyc
│   │   ├── cnn_geometric_model.cpython-37.pyc
│   │   ├── loss.cpython-37(1).pyc
│   │   └── loss.cpython-37.pyc
│   └── loss.py
├── parse_registration_json.py
├── parse_study_dict.py
├── pictures
│   └── pipeline.png
├── preprocess.py
├── register_images.py
├── registration_pipeline.ipynb
├── train.py
├── training_data
│   ├── .DS_Store
│   ├── affine
│   │   ├── test.csv
│   │   └── train.csv
│   └── tps
│       ├── test.csv
│       └── train.csv
└── util
    ├── __pycache__
    │   ├── torch_util.cpython-37.pyc
    │   └── train_test_fn.cpython-37.pyc
    ├── torch_util.py
    └── train_test_fn.py

/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/.DS_Store
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ProsRegNet: A Deep Learning Framework for Registration of MRI and Histopathology Images of the Prostate.
2 | 
3 | ![](pictures/pipeline.png)
4 | 
5 | This is the PyTorch implementation of the following paper:
6 | 
7 | Shao, Wei, et al. "ProsRegNet: A Deep Learning Framework for Registration of MRI and Histopathology Images of the Prostate." [[Medical Image Analysis (MedIA)](https://doi.org/10.1016/j.media.2020.101919)]
8 | 
9 | 
10 | ### Introduction
11 | Our source code has been modified from [cnngeometric_pytorch](https://github.com/ignacio-rocco/cnngeometric_pytorch), and has been tested successfully on Linux Mint with CUDA 10.0, an RTX 2080 Ti GPU, Anaconda Python 3.7, and PyTorch 1.3.0.
12 | 
13 | The code is for research purposes only. If you have any questions regarding how to use this code, feel free to contact Wei Shao (weishao@stanford.edu).
14 | 
15 | ### Dependencies
16 | PyTorch 1.3.0
17 | 
18 | CUDA 10.0
19 | 
20 | Anaconda Python 3.7
21 | 
22 | SimpleITK
23 | 
24 | OpenCV (cv2)
25 | 
26 | scikit-image (skimage)
27 | 
28 | ### Usage
29 | 1. Clone the repository:
30 | ```
31 | git clone https://github.com/pimed/ProsRegNet.git
32 | cd ProsRegNet
33 | ```
34 | 2. Download the [[training dataset](https://drive.google.com/file/d/1W3eV50pDGBKKz1XX6o6Fi7wzgAHZZBlr/view?usp=sharing)]:
35 | ```
36 | Unzip the compressed folder named "datasets"; it contains two subfolders: "training" and "testing".
37 | ```
38 | The small training dataset consists of MRI and histopathology image slices of 25 subjects from [[The Cancer Imaging Archive PROSTATE-MRI dataset](https://wiki.cancerimagingarchive.net/display/Public/PROSTATE-MRI)]. The small testing dataset consists of one subject from [[The Cancer Imaging Archive Prostate Fused-MRI-Pathology dataset](https://wiki.cancerimagingarchive.net/display/Public/Prostate+Fused-MRI-Pathology)].
39 | 
40 | 
41 | 3. Train the affine and deformable registration models (optional):
42 | ```
43 | python train.py --geometric-model affine
44 | python train.py --geometric-model tps
45 | ```
46 | 
47 | 4. Evaluation:
48 | ```
49 | Run the registration_pipeline.ipynb Jupyter notebook.
50 | ```
51 | 
52 | ### Models trained with a larger dataset (see details in our [MedIA paper](https://doi.org/10.1016/j.media.2020.101919))
53 | [[Trained ProsRegNet affine model](https://drive.google.com/file/d/1REqMqNVLHRnFfuqzJIWrqQgctnaauSO1/view?usp=sharing)]
54 | [[Trained ProsRegNet deformable model](https://drive.google.com/file/d/1j1ai3RG6blpE6Zz9fmazoMsTyCQvGR9z/view?usp=sharing)]
55 | 
56 | ### BibTeX
57 | 
58 | If you use this code, please cite the following papers:
59 | 
60 | ```bibtex
61 | @article{Shao_2021,
62 |     doi = {10.1016/j.media.2020.101919},
63 |     url = {https://doi.org/10.1016%2Fj.media.2020.101919},
64 |     year = 2021,
65 |     month = {feb},
66 |     publisher = {Elsevier {BV}},
67 |     volume = {68},
68 |     pages = {101919},
69 |     author = {Wei Shao and Linda Banh and Christian A. Kunder and Richard E. Fan and Simon J.C. Soerensen and Jeffrey B. Wang and Nikola C. Teslovich and Nikhil Madhuripan and Anugayathri Jawahar and Pejman Ghanouni and James D. Brooks and Geoffrey A. Sonn and Mirabela Rusu},
70 |     title = {{ProsRegNet}: A deep learning framework for registration of {MRI} and histopathology images of the prostate},
71 |     journal = {Medical Image Analysis}
72 | }
73 | ```
74 | 
75 | and
76 | 
77 | ```bibtex
78 | @InProceedings{Rocco17,
79 |     author = {Rocco, I. and Arandjelovi\'c, R. and Sivic, J.},
80 |     title = {Convolutional neural network architecture for geometric matching},
81 |     booktitle = {{Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}},
82 |     year = {2017},
83 | }
84 | ```
85 | 
--------------------------------------------------------------------------------
/data/__pycache__/download_datasets.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/data/__pycache__/download_datasets.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/pf_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/data/__pycache__/pf_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/synth_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/data/__pycache__/synth_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/__pycache__/test_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/data/__pycache__/test_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/data/synth_dataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | import torch
3 | import os
4 | from os.path import exists, join, basename
5 | from skimage import io
6 | import pandas as pd
7 | import numpy as np
8 | from torch.utils.data import Dataset
9 | from geotnf.transformation import GeometricTnf
10 | from torch.autograd import Variable
11 | 
12 | class SynthDataset(Dataset):
13 |     """
14 | 
15 |     Synthetically transformed pairs dataset for training with strong supervision
16 | 
17 |     Args:
18 |         csv_file (string): Path to the csv file with image names and transformations.
19 |         training_image_path (string): Directory with all the images.
20 |         transform (callable): Transformation for post-processing the training pair (e.g. image normalization)
21 | 
22 |     Returns:
23 |         Dict: {'source_image': source_image, 'target_image': target_image, 'theta': desired transformation}
24 | 
25 |     """
26 | 
27 |     def __init__(self, csv_file, training_image_path, output_size=(240,240), geometric_model='affine', transform=None,
28 |                  random_sample=False, random_t=0.5, random_s=0.5, random_alpha=1/6, random_t_tps=0.4):
29 |         # random_sample indicates whether the deformation coefficients are sampled randomly (rather than read from the csv file)
30 |         self.random_sample = random_sample
31 |         self.random_t = random_t
32 |         self.random_t_tps = random_t_tps
33 |         self.random_alpha = random_alpha
34 |         self.random_s = random_s
35 |         self.out_h, self.out_w = output_size
36 |         # read csv file
37 |         self.train_data = pd.read_csv(csv_file)
38 |         self.img_A_names = self.train_data.iloc[:,0]
39 |         self.img_B_names = self.train_data.iloc[:,1]
40 |         self.theta_array = self.train_data.iloc[:, 2:].values.astype('float')
41 |         # copy arguments
42 |         self.training_image_path = training_image_path
43 |         self.transform = transform
44 |         self.geometric_model = geometric_model
45 |         # affine transform used to rescale images
46 |         self.affineTnf = GeometricTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False)
47 | 
48 |     def __len__(self):
49 |         return len(self.train_data)
50 | 
51 |     def __getitem__(self, idx):
52 |         # read image
53 |         img_A_name = os.path.join(self.training_image_path, self.img_A_names[idx])
54 |         image_A = io.imread(img_A_name)
55 | 
56 |         img_B_name = os.path.join(self.training_image_path, self.img_B_names[idx])
57 |         image_B = io.imread(img_B_name)
58 | 
59 |         # read theta
60 |         if self.random_sample==False:
61 |             theta = self.theta_array[idx, :]
62 | 
63 |             if self.geometric_model=='affine':
64 |                 # reshape theta to 2x3 matrix [A|t] where
65 |                 # first row corresponds to X and second to Y
66 |                 theta = theta[[3,2,5,1,0,4]].reshape(2,3)
67 |             elif self.geometric_model=='tps':
68 |                 theta = np.expand_dims(np.expand_dims(theta,1),2)
69 | 
70 |         # make arrays float tensor for subsequent processing
71 |         image_A = torch.Tensor(image_A.astype(np.float32))
72 |         image_B = torch.Tensor(image_B.astype(np.float32))
73 |         theta = torch.Tensor(theta.astype(np.float32))
74 | 
75 |         # permute order of image to CHW
76 |         image_A = image_A.transpose(1,2).transpose(0,1)
77 |         image_B = image_B.transpose(1,2).transpose(0,1)
78 | 
79 |         # Resize image using bilinear sampling with identity affine tnf
80 |         if image_A.size()[1]!=self.out_h or image_A.size()[2]!=self.out_w:  # dims are (C,H,W) after the permute
81 |             image_A = self.affineTnf(Variable(image_A.unsqueeze(0),requires_grad=False)).data.squeeze(0)
82 | 
83 |         if image_B.size()[1]!=self.out_h or image_B.size()[2]!=self.out_w:
84 |             image_B = self.affineTnf(Variable(image_B.unsqueeze(0),requires_grad=False)).data.squeeze(0)
85 | 
86 |         sample = {'image_A': image_A, 'image_B': image_B, 'theta': theta}
87 | 
88 |         if self.transform:
89 |             sample = self.transform(sample)
90 | 
91 |         return sample
92 | 
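The snippet below is a minimal usage sketch (not part of the repository) showing how SynthDataset is typically combined with SynthPairTnf from geotnf/transformation.py to build supervised training pairs. The csv path comes from the training_data folder above; the image directory is an assumption based on the "datasets" download described in the README.

```
# Minimal sketch: generating synthetic affine training pairs with SynthDataset.
# Assumed paths: training_data/affine/train.csv ships with the repo, and
# datasets/training/ is where the README's downloaded data is unzipped.
import torch
from torch.utils.data import DataLoader
from data.synth_dataset import SynthDataset
from geotnf.transformation import SynthPairTnf
from image.normalization import NormalizeImageDict

use_cuda = torch.cuda.is_available()
dataset = SynthDataset(csv_file='training_data/affine/train.csv',
                       training_image_path='datasets/training/',
                       geometric_model='affine',
                       transform=NormalizeImageDict(['image_A', 'image_B']))
loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
pair_tnf = SynthPairTnf(geometric_model='affine', use_cuda=use_cuda)

batch = next(iter(loader))         # dict with 'image_A', 'image_B', 'theta'
pair = pair_tnf(batch)             # warps image_B by theta (on GPU if available)
print(pair['source_image'].shape,  # (16, 3, 240, 240)
      pair['target_image'].shape,
      pair['theta_GT'].shape)      # (16, 2, 3) for the affine model
```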
--------------------------------------------------------------------------------
/data/test_dataset.py:
--------------------------------------------------------------------------------
1 | 
2 | """
3 | The following code is adapted from: https://github.com/ignacio-rocco/cnngeometric_pytorch.
4 | """
5 | 
6 | 
7 | from __future__ import print_function, division
8 | import os
9 | import torch
10 | from torch.autograd import Variable
11 | from skimage import io
12 | import cv2
13 | import pandas as pd
14 | import numpy as np
15 | from torch.utils.data import Dataset
16 | from geotnf.transformation import GeometricTnf
17 | 
18 | class TestDataset(Dataset):
19 | 
20 |     """
21 | 
22 |     Test image dataset
23 | 
24 | 
25 |     Args:
26 |         csv_file (string): Path to the csv file with image names and transformations.
27 |         training_image_path (string): Directory with the images.
28 |         output_size (2-tuple): Desired output size
29 |         transform (callable): Transformation for post-processing the training pair (e.g. image normalization)
30 | 
31 |     """
32 | 
33 |     def __init__(self, csv_file, training_image_path, output_size=(240,240), transform=None):
34 | 
35 |         self.out_h, self.out_w = output_size
36 |         self.train_data = pd.read_csv(csv_file)
37 |         self.source_image_names = self.train_data.iloc[:,0]
38 |         self.target_image_names = self.train_data.iloc[:,1]
39 |         self.training_image_path = training_image_path
40 |         self.transform = transform
41 |         # no cuda as dataset is called from CPU threads in dataloader and produces conflict
42 |         self.affineTnf = GeometricTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False)
43 | 
44 |     def __len__(self):
45 |         return len(self.train_data)
46 | 
47 |     def __getitem__(self, idx):
48 |         # get pre-processed images
49 |         source_image,moving_img_size = self.get_image(self.source_image_names,idx)
50 |         target_image,fixed_img_size = self.get_image(self.target_image_names,idx)
51 | 
52 |         sample = {'source_image': source_image, 'target_image': target_image, 'moving_im_size': moving_img_size, 'fixed_im_size': fixed_img_size}
53 | 
54 |         if self.transform:
55 |             sample = self.transform(sample)
56 | 
57 |         return sample
58 | 
59 |     def get_image(self,img_name_list,idx):
60 |         img_name = os.path.join(self.training_image_path, img_name_list[idx])
61 |         image = io.imread(img_name)
62 | 
63 |         # convert grayscale images to 3-channel RGB
64 |         if len(image.shape)==2:
65 |             image_rgb = np.zeros((image.shape[0],image.shape[1],3))
66 |             image_rgb[:,:,0] = image
67 |             image_rgb[:,:,1] = image
68 |             image_rgb[:,:,2] = image
69 |             image = image_rgb
70 | 
71 |         # get image size
72 |         im_size = np.asarray(image.shape)
73 | 
74 |         # convert to torch Variable
75 |         image = np.expand_dims(image.transpose((2,0,1)),0)
76 |         image = torch.Tensor(image.astype(np.float32))
77 |         image_var = Variable(image,requires_grad=False)
78 | 
79 |         # Resize image using bilinear sampling with identity affine tnf
80 |         image = self.affineTnf(image_var).data.squeeze(0)
81 | 
82 |         im_size = torch.Tensor(im_size.astype(np.float32))
83 | 
84 |         return (image, im_size)
85 | 
86 | 
--------------------------------------------------------------------------------
/geotnf/__pycache__/point_tnf.cpython-37(1).pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/geotnf/__pycache__/point_tnf.cpython-37(1).pyc
--------------------------------------------------------------------------------
/geotnf/__pycache__/point_tnf.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/geotnf/__pycache__/point_tnf.cpython-37.pyc
--------------------------------------------------------------------------------
/geotnf/__pycache__/transformation.cpython-37(1).pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/geotnf/__pycache__/transformation.cpython-37(1).pyc
--------------------------------------------------------------------------------
/geotnf/__pycache__/transformation.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/geotnf/__pycache__/transformation.cpython-37.pyc
--------------------------------------------------------------------------------
/geotnf/__pycache__/transformation_high_res.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/geotnf/__pycache__/transformation_high_res.cpython-37.pyc
--------------------------------------------------------------------------------
/geotnf/point_tnf.py:
--------------------------------------------------------------------------------
1 | """
2 | The following code is from: https://github.com/ignacio-rocco/cnngeometric_pytorch.
3 | """
4 | 
5 | import torch
6 | from torch.autograd import Variable
7 | import numpy as np
8 | from geotnf.transformation import TpsGridGen
9 | 
10 | class PointTnf(object):
11 |     """
12 | 
13 |     Class with functions for transforming a set of points with affine/tps transformations
14 | 
15 |     """
16 |     def __init__(self, use_cuda=True):
17 |         self.use_cuda=use_cuda
18 |         self.tpsTnf = TpsGridGen(use_cuda=self.use_cuda)
19 | 
20 |     def tpsPointTnf(self,theta,points):
21 |         # points are expected in [B,2,N], where first row is X and second row is Y
22 |         # reshape points for applying Tps transformation
23 |         points=points.unsqueeze(3).transpose(1,3)
24 |         # apply transformation
25 |         warped_points = self.tpsTnf.apply_transformation(theta,points)
26 |         # undo reshaping
27 |         warped_points=warped_points.transpose(3,1).squeeze(3)
28 |         return warped_points
29 | 
30 |     def affPointTnf(self,theta,points):
31 |         theta_mat = theta.view(-1,2,3)
32 |         warped_points = torch.bmm(theta_mat[:,:,:2],points)
33 |         warped_points += theta_mat[:,:,2].unsqueeze(2).expand_as(warped_points)
34 |         return warped_points
35 | 
36 | def PointsToUnitCoords(P,im_size):
37 |     h,w = im_size[:,0],im_size[:,1]
38 |     NormAxis = lambda x,L: (x-1-(L-1)/2)*2/(L-1)
39 |     P_norm = P.clone()
40 |     # normalize X (first row of P, scaled by the image width)
41 |     P_norm[:,0,:] = NormAxis(P[:,0,:],w.unsqueeze(1).expand_as(P[:,0,:]))
42 |     # normalize Y (second row of P, scaled by the image height)
43 |     P_norm[:,1,:] = NormAxis(P[:,1,:],h.unsqueeze(1).expand_as(P[:,1,:]))
44 |     return P_norm
45 | 
46 | def PointsToPixelCoords(P,im_size):
47 |     h,w = im_size[:,0],im_size[:,1]
48 |     NormAxis = lambda x,L: x*(L-1)/2+1+(L-1)/2
49 |     P_norm = P.clone()
50 |     # denormalize X (first row of P, scaled by the image width)
51 |     P_norm[:,0,:] = NormAxis(P[:,0,:],w.unsqueeze(1).expand_as(P[:,0,:]))
52 |     # denormalize Y (second row of P, scaled by the image height)
53 |     P_norm[:,1,:] = NormAxis(P[:,1,:],h.unsqueeze(1).expand_as(P[:,1,:]))
54 |     return P_norm
--------------------------------------------------------------------------------
/geotnf/transformation.py:
--------------------------------------------------------------------------------
1 | """
2 | The following code is adapted from:
https://github.com/ignacio-rocco/cnngeometric_pytorch. 3 | """ 4 | 5 | from __future__ import print_function, division 6 | import os 7 | import sys 8 | from skimage import io 9 | import pandas as pd 10 | import numpy as np 11 | import torch 12 | from torch.nn.modules.module import Module 13 | from torch.utils.data import Dataset 14 | from torch.autograd import Variable 15 | import torch.nn.functional as F 16 | from image.normalization import NormalizeImageDict, normalize_image 17 | 18 | class GeometricTnf(object): 19 | """ 20 | 21 | Geometric transfromation to an image batch (wrapped in a PyTorch Variable) 22 | ( can be used with no transformation to perform bilinear resizing ) 23 | 24 | """ 25 | def __init__(self, geometric_model='affine', out_h=240, out_w=240, use_cuda=True): 26 | self.out_h = out_h 27 | self.out_w = out_w 28 | self.use_cuda = use_cuda 29 | if geometric_model=='affine': 30 | self.gridGen = AffineGridGen(out_h, out_w) 31 | elif geometric_model=='tps': 32 | self.gridGen = TpsGridGen(out_h, out_w, use_cuda=use_cuda) 33 | self.theta_identity = torch.Tensor(np.expand_dims(np.array([[1,0,0],[0,1,0]]),0).astype(np.float32)) 34 | if use_cuda: 35 | self.theta_identity = self.theta_identity.cuda() 36 | 37 | def __call__(self, image_batch, theta_batch=None, padding_factor=0.0, crop_factor=1.0): 38 | b, c, h, w = image_batch.size() 39 | if theta_batch is None: 40 | theta_batch = self.theta_identity 41 | theta_batch = theta_batch.expand(b,2,3) 42 | theta_batch = Variable(theta_batch,requires_grad=False) 43 | 44 | sampling_grid = self.gridGen(theta_batch) 45 | 46 | # rescale grid according to crop_factor and padding_factor 47 | # sampling_grid.data = sampling_grid.data*padding_factor*crop_factor 48 | sampling_grid.data = sampling_grid.data*crop_factor/(1+2*padding_factor) 49 | 50 | # print("original image batch size:") 51 | # print(image_batch.shape) 52 | # sample transformed image 53 | warped_image_batch = F.grid_sample(image_batch, sampling_grid,padding_mode='border') 54 | # print("warped image 88888888888:") 55 | # print(warped_image_batch.shape) 56 | # print(sampling_grid.shape) 57 | 58 | return warped_image_batch 59 | 60 | 61 | class SynthPairTnf(object): 62 | """ 63 | 64 | Generate a synthetically warped training pair using an affine transformation. 
65 | 66 | """ 67 | def __init__(self, use_cuda=True, geometric_model='affine', crop_factor=16/16, output_size=(240,240), padding_factor = 0.0): 68 | assert isinstance(use_cuda, (bool)) 69 | assert isinstance(crop_factor, (float)) 70 | assert isinstance(output_size, (tuple)) 71 | assert isinstance(padding_factor, (float)) 72 | self.use_cuda=use_cuda 73 | self.crop_factor = crop_factor 74 | self.padding_factor = padding_factor 75 | self.out_h, self.out_w = output_size 76 | self.rescalingTnf = GeometricTnf('affine', self.out_h, self.out_w, 77 | use_cuda = self.use_cuda) 78 | self.geometricTnf = GeometricTnf(geometric_model, self.out_h, self.out_w, 79 | use_cuda = self.use_cuda) 80 | 81 | 82 | def __call__(self, batch): 83 | image_batch_A, image_batch_B, theta_batch = batch['image_A'], batch['image_B'], batch['theta'] 84 | if self.use_cuda: 85 | image_batch_A = image_batch_A.cuda() 86 | image_batch_B = image_batch_B.cuda() 87 | theta_batch = theta_batch.cuda() 88 | 89 | b, c, h, w = image_batch_A.size() 90 | 91 | # generate symmetrically padded image for bigger sampling region 92 | image_batch_A = self.symmetricImagePad(image_batch_A,self.padding_factor) 93 | image_batch_B = self.symmetricImagePad(image_batch_B,self.padding_factor) 94 | 95 | # convert to variables 96 | image_batch_A = Variable(image_batch_A,requires_grad=False) 97 | image_batch_B = Variable(image_batch_B,requires_grad=False) 98 | theta_batch = Variable(theta_batch,requires_grad=False) 99 | 100 | # print("before crop") 101 | # print(image_batch_A.shape) 102 | # print(image_batch_B.shape) 103 | 104 | # get cropped image 105 | cropped_image_batch = self.rescalingTnf(image_batch_A,None,self.padding_factor,self.crop_factor) # Identity is used as no theta given 106 | # get transformed image 107 | warped_image_batch = self.geometricTnf(image_batch_B,theta_batch, 108 | self.padding_factor,self.crop_factor) # Identity is used as no theta given 109 | 110 | # print("cropped_image_size") 111 | # print(cropped_image_batch.shape) 112 | # print(warped_image_batch.shape) 113 | 114 | Ones = torch.ones(cropped_image_batch.size()) 115 | Zeros = torch.zeros(cropped_image_batch.size()) 116 | 117 | if self.use_cuda: 118 | Ones = Ones.cuda() 119 | Zeros = Zeros.cuda() 120 | 121 | cropped_mask_batch = torch.where(cropped_image_batch > 0.1*Ones, Ones, Zeros) 122 | warped_mask_batch = torch.where(warped_image_batch > 0.1*Ones, Ones, Zeros) 123 | 124 | if self.use_cuda: 125 | cropped_mask_batch = cropped_mask_batch.cuda() 126 | warped_mask_batch = warped_mask_batch.cuda() 127 | 128 | #mask1 = 255*normalize_image(warped_mask_batch,forward=False) 129 | #mask1 = mask1.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 130 | 131 | #print(mask1.shape) 132 | 133 | #io.imsave('warped_mask.jpg', mask1) 134 | 135 | 136 | 137 | 138 | return {'source_image': cropped_image_batch, 'target_image': warped_image_batch, 'source_mask': cropped_mask_batch, 'target_mask': warped_mask_batch,'theta_GT': theta_batch} 139 | 140 | def symmetricImagePad(self,image_batch, padding_factor): 141 | b, c, h, w = image_batch.size() 142 | pad_h, pad_w = int(h*padding_factor), int(w*padding_factor) 143 | idx_pad_left = torch.LongTensor(range(pad_w-1,-1,-1)) 144 | idx_pad_right = torch.LongTensor(range(w-1,w-pad_w-1,-1)) 145 | idx_pad_top = torch.LongTensor(range(pad_h-1,-1,-1)) 146 | idx_pad_bottom = torch.LongTensor(range(h-1,h-pad_h-1,-1)) 147 | if self.use_cuda: 148 | idx_pad_left = idx_pad_left.cuda() 149 | idx_pad_right = idx_pad_right.cuda() 150 | idx_pad_top = 
idx_pad_top.cuda() 151 | idx_pad_bottom = idx_pad_bottom.cuda() 152 | image_batch = torch.cat((image_batch.index_select(3,idx_pad_left),image_batch, 153 | image_batch.index_select(3,idx_pad_right)),3) 154 | image_batch = torch.cat((image_batch.index_select(2,idx_pad_top),image_batch, 155 | image_batch.index_select(2,idx_pad_bottom)),2) 156 | return image_batch 157 | 158 | 159 | class AffineGridGen(Module): 160 | def __init__(self, out_h=240, out_w=240, out_ch = 3): 161 | super(AffineGridGen, self).__init__() 162 | self.out_h = out_h 163 | self.out_w = out_w 164 | self.out_ch = out_ch 165 | 166 | def forward(self, theta): 167 | theta = theta.contiguous() 168 | batch_size = theta.size()[0] 169 | out_size = torch.Size((batch_size,self.out_ch,self.out_h,self.out_w)) 170 | return F.affine_grid(theta, out_size) 171 | 172 | class TpsGridGen(Module): 173 | def __init__(self, out_h=240, out_w=240, use_regular_grid=True, grid_size=6, reg_factor=1, use_cuda=True): 174 | super(TpsGridGen, self).__init__() 175 | self.out_h, self.out_w = out_h, out_w 176 | self.reg_factor = reg_factor 177 | self.use_cuda = use_cuda 178 | 179 | # create grid in numpy 180 | self.grid = np.zeros( [self.out_h, self.out_w, 3], dtype=np.float32) 181 | # sampling grid with dim-0 coords (Y) 182 | self.grid_X,self.grid_Y = np.meshgrid(np.linspace(-1,1,out_w),np.linspace(-1,1,out_h)) 183 | # grid_X,grid_Y: size [1,H,W,1,1] 184 | self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3) 185 | self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3) 186 | self.grid_X = Variable(self.grid_X,requires_grad=False) 187 | self.grid_Y = Variable(self.grid_Y,requires_grad=False) 188 | if use_cuda: 189 | self.grid_X = self.grid_X.cuda() 190 | self.grid_Y = self.grid_Y.cuda() 191 | 192 | # initialize regular grid for control points P_i 193 | if use_regular_grid: 194 | axis_coords = np.linspace(-1,1,grid_size) 195 | self.N = grid_size*grid_size 196 | P_Y,P_X = np.meshgrid(axis_coords,axis_coords) 197 | P_X = np.reshape(P_X,(-1,1)) # size (N,1) 198 | P_Y = np.reshape(P_Y,(-1,1)) # size (N,1) 199 | P_X = torch.FloatTensor(P_X) 200 | P_Y = torch.FloatTensor(P_Y) 201 | self.Li = Variable(self.compute_L_inverse(P_X,P_Y).unsqueeze(0),requires_grad=False) 202 | self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4) 203 | self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4) 204 | self.P_X = Variable(self.P_X,requires_grad=False) 205 | self.P_Y = Variable(self.P_Y,requires_grad=False) 206 | if use_cuda: 207 | self.P_X = self.P_X.cuda() 208 | self.P_Y = self.P_Y.cuda() 209 | 210 | 211 | def forward(self, theta): 212 | 213 | warped_grid = self.apply_transformation(theta,torch.cat((self.grid_X,self.grid_Y),3)) 214 | 215 | return warped_grid 216 | 217 | def compute_L_inverse(self,X,Y): 218 | N = X.size()[0] # num of points (along dim 0) 219 | # construct matrix K 220 | Xmat = X.expand(N,N) 221 | Ymat = Y.expand(N,N) 222 | P_dist_squared = torch.pow(Xmat-Xmat.transpose(0,1),2)+torch.pow(Ymat-Ymat.transpose(0,1),2) 223 | P_dist_squared[P_dist_squared==0]=1 # make diagonal 1 to avoid NaN in log computation 224 | #### add regularization 225 | ### K = torch.mul(P_dist_squared,torch.log(P_dist_squared)) 226 | K = torch.mul(P_dist_squared,torch.log(P_dist_squared)) + 0.0*torch.eye(N) 227 | # construct matrix L 228 | O = torch.FloatTensor(N,1).fill_(1) 229 | Z = torch.FloatTensor(3,3).fill_(0) 230 | P = torch.cat((O,X,Y),1) 231 | L = torch.cat((torch.cat((K,P),1),torch.cat((P.transpose(0,1),Z),1)),0) 232 | 
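        # L is the standard (N+3)x(N+3) thin-plate-spline system matrix [[K, P], [P^T, 0]];
        # inverting it once here lets apply_transformation recover the non-linear weights W
        # and the affine coefficients A for any warped control points Q by matrix multiplication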
Li = torch.inverse(L) 233 | if self.use_cuda: 234 | Li = Li.cuda() 235 | return Li 236 | 237 | def apply_transformation(self,theta,points): 238 | #### the following code will be commented for experiments 239 | if theta.dim()==2: 240 | theta = theta.unsqueeze(2).unsqueeze(3) 241 | 242 | # points should be in the [B,H,W,2] format, 243 | # where points[:,:,:,0] are the X coords 244 | # and points[:,:,:,1] are the Y coords 245 | 246 | # input are the corresponding control points P_i 247 | batch_size = theta.size()[0] 248 | # split theta into point coordinates 249 | Q_X=theta[:,:self.N,:,:].squeeze(3) 250 | ##############################################################Q_Y=theta[:,self.N:,:,:].squeeze(3) 251 | Q_Y=theta[:,self.N:2*self.N,:,:].squeeze(3) 252 | 253 | 254 | # get spatial dimensions of points 255 | points_b = points.size()[0] 256 | points_h = points.size()[1] 257 | points_w = points.size()[2] 258 | 259 | # repeat pre-defined control points along spatial dimensions of points to be transformed 260 | P_X = self.P_X.expand((1,points_h,points_w,1,self.N)) 261 | P_Y = self.P_Y.expand((1,points_h,points_w,1,self.N)) 262 | 263 | # compute weigths for non-linear part 264 | W_X = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_X) 265 | # W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y) 266 | W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y) 267 | 268 | # reshape 269 | # W_X,W,Y: size [B,H,W,1,N] 270 | W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 271 | W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 272 | # compute weights for affine part 273 | A_X = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_X) 274 | #A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y) 275 | A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y) 276 | 277 | # reshape 278 | # A_X,A,Y: size [B,H,W,1,3] 279 | A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 280 | A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 281 | 282 | # compute distance P_i - (grid_X,grid_Y) 283 | # grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch 284 | points_X_for_summation = points[:,:,:,0].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,0].size()+(1,self.N)) 285 | points_Y_for_summation = points[:,:,:,1].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,1].size()+(1,self.N)) 286 | 287 | if points_b==1: 288 | delta_X = points_X_for_summation-P_X 289 | delta_Y = points_Y_for_summation-P_Y 290 | else: 291 | # use expanded P_X,P_Y in batch dimension 292 | delta_X = points_X_for_summation-P_X.expand_as(points_X_for_summation) 293 | delta_Y = points_Y_for_summation-P_Y.expand_as(points_Y_for_summation) 294 | 295 | dist_squared = torch.pow(delta_X,2)+torch.pow(delta_Y,2) 296 | # U: size [1,H,W,1,N] 297 | dist_squared[dist_squared==0]=1 # avoid NaN in log computation 298 | U = torch.mul(dist_squared,torch.log(dist_squared)) 299 | 300 | # expand grid in batch dimension if necessary 301 | points_X_batch = points[:,:,:,0].unsqueeze(3) 302 | points_Y_batch = points[:,:,:,1].unsqueeze(3) 303 | if points_b==1: 304 | points_X_batch = points_X_batch.expand((batch_size,)+points_X_batch.size()[1:]) 305 | points_Y_batch = points_Y_batch.expand((batch_size,)+points_Y_batch.size()[1:]) 306 | 307 | points_X_prime = 
A_X[:,:,:,:,0]+ \ 308 | torch.mul(A_X[:,:,:,:,1],points_X_batch) + \ 309 | torch.mul(A_X[:,:,:,:,2],points_Y_batch) + \ 310 | torch.sum(torch.mul(W_X,U.expand_as(W_X)),4) 311 | 312 | points_Y_prime = A_Y[:,:,:,:,0]+ \ 313 | torch.mul(A_Y[:,:,:,:,1],points_X_batch) + \ 314 | torch.mul(A_Y[:,:,:,:,2],points_Y_batch) + \ 315 | torch.sum(torch.mul(W_Y,U.expand_as(W_Y)),4) 316 | 317 | return torch.cat((points_X_prime,points_Y_prime),3) 318 | 319 | -------------------------------------------------------------------------------- /geotnf/transformation_high_res.py: -------------------------------------------------------------------------------- 1 | """ 2 | The following code is adapted from: https://github.com/ignacio-rocco/cnngeometric_pytorch. 3 | """ 4 | 5 | 6 | from __future__ import print_function, division 7 | import os 8 | import sys 9 | from skimage import io 10 | import pandas as pd 11 | import numpy as np 12 | import torch 13 | from torch.nn.modules.module import Module 14 | from torch.utils.data import Dataset 15 | from torch.autograd import Variable 16 | import torch.nn.functional as F 17 | 18 | half_out_size = 128 19 | 20 | class GeometricTnf_high_res(object): 21 | """ 22 | 23 | Geometric transfromation to an image batch (wrapped in a PyTorch Variable) 24 | ( can be used with no transformation to perform bilinear resizing ) 25 | 26 | """ 27 | def __init__(self, geometric_model='affine', out_h=2*half_out_size, out_w=2*half_out_size, use_cuda=True): 28 | self.out_h = out_h 29 | self.out_w = out_w 30 | self.use_cuda = use_cuda 31 | if geometric_model=='affine': 32 | self.gridGen = AffineGridGen(out_h, out_w) 33 | elif geometric_model=='tps': 34 | self.gridGen = TpsGridGen(out_h, out_w, use_cuda=use_cuda) 35 | self.theta_identity = torch.Tensor(np.expand_dims(np.array([[1,0,0],[0,1,0]]),0).astype(np.float32)) 36 | if use_cuda: 37 | self.theta_identity = self.theta_identity.cuda() 38 | 39 | def __call__(self, image_batch, theta_batch=None, padding_factor=0.0, crop_factor=1.0): 40 | b, c, h, w = image_batch.size() 41 | if theta_batch is None: 42 | theta_batch = self.theta_identity 43 | theta_batch = theta_batch.expand(b,2,3) 44 | theta_batch = Variable(theta_batch,requires_grad=False) 45 | 46 | sampling_grid = self.gridGen(theta_batch) 47 | 48 | # rescale grid according to crop_factor and padding_factor 49 | # sampling_grid.data = sampling_grid.data*padding_factor*crop_factor 50 | sampling_grid.data = sampling_grid.data*crop_factor/(1+2*padding_factor) 51 | 52 | # print("original image batch size:") 53 | # print(image_batch.shape) 54 | # sample transformed image 55 | warped_image_batch = F.grid_sample(image_batch, sampling_grid) 56 | # print("warped image 88888888888:") 57 | # print(warped_image_batch.shape) 58 | # print(sampling_grid.shape) 59 | 60 | return warped_image_batch 61 | 62 | 63 | class SynthPairTnf(object): 64 | """ 65 | 66 | Generate a synthetically warped training pair using an affine transformation. 
67 | 68 | """ 69 | def __init__(self, use_cuda=True, geometric_model='affine', crop_factor=16/16, output_size=(1000,1000), padding_factor = 0.2): 70 | assert isinstance(use_cuda, (bool)) 71 | assert isinstance(crop_factor, (float)) 72 | assert isinstance(output_size, (tuple)) 73 | assert isinstance(padding_factor, (float)) 74 | self.use_cuda=use_cuda 75 | self.crop_factor = crop_factor 76 | self.padding_factor = padding_factor 77 | self.out_h, self.out_w = output_size 78 | self.rescalingTnf = GeometricTnf_high_res('affine', self.out_h, self.out_w, 79 | use_cuda = self.use_cuda) 80 | self.GeometricTnf_high_res = GeometricTnf_high_res(geometric_model, self.out_h, self.out_w, 81 | use_cuda = self.use_cuda) 82 | 83 | 84 | def __call__(self, batch): 85 | image_batch_A, image_batch_B, theta_batch = batch['image_A'], batch['image_B'], batch['theta'] 86 | if self.use_cuda: 87 | image_batch_A = image_batch_A.cuda() 88 | image_batch_B = image_batch_B.cuda() 89 | theta_batch = theta_batch.cuda() 90 | 91 | b, c, h, w = image_batch_A.size() 92 | 93 | # generate symmetrically padded image for bigger sampling region 94 | image_batch_A = self.symmetricImagePad(image_batch_A,self.padding_factor) 95 | image_batch_B = self.symmetricImagePad(image_batch_B,self.padding_factor) 96 | 97 | # convert to variables 98 | image_batch_A = Variable(image_batch_A,requires_grad=False) 99 | image_batch_B = Variable(image_batch_B,requires_grad=False) 100 | theta_batch = Variable(theta_batch,requires_grad=False) 101 | 102 | # print("before crop") 103 | # print(image_batch_A.shape) 104 | # print(image_batch_B.shape) 105 | 106 | # get cropped image 107 | cropped_image_batch = self.rescalingTnf(image_batch_A,None,self.padding_factor,self.crop_factor) # Identity is used as no theta given 108 | # get transformed image 109 | warped_image_batch = self.GeometricTnf_high_res(image_batch_B,theta_batch, 110 | self.padding_factor,self.crop_factor) # Identity is used as no theta given 111 | 112 | # print("cropped_image_size") 113 | # print(cropped_image_batch.shape) 114 | # print(warped_image_batch.shape) 115 | 116 | 117 | return {'source_image': cropped_image_batch, 'target_image': warped_image_batch, 'theta_GT': theta_batch} 118 | 119 | def symmetricImagePad(self,image_batch, padding_factor): 120 | b, c, h, w = image_batch.size() 121 | pad_h, pad_w = int(h*padding_factor), int(w*padding_factor) 122 | idx_pad_left = torch.LongTensor(range(pad_w-1,-1,-1)) 123 | idx_pad_right = torch.LongTensor(range(w-1,w-pad_w-1,-1)) 124 | idx_pad_top = torch.LongTensor(range(pad_h-1,-1,-1)) 125 | idx_pad_bottom = torch.LongTensor(range(h-1,h-pad_h-1,-1)) 126 | if self.use_cuda: 127 | idx_pad_left = idx_pad_left.cuda() 128 | idx_pad_right = idx_pad_right.cuda() 129 | idx_pad_top = idx_pad_top.cuda() 130 | idx_pad_bottom = idx_pad_bottom.cuda() 131 | image_batch = torch.cat((image_batch.index_select(3,idx_pad_left),image_batch, 132 | image_batch.index_select(3,idx_pad_right)),3) 133 | image_batch = torch.cat((image_batch.index_select(2,idx_pad_top),image_batch, 134 | image_batch.index_select(2,idx_pad_bottom)),2) 135 | return image_batch 136 | 137 | 138 | class AffineGridGen(Module): 139 | def __init__(self, out_h=2*half_out_size, out_w=2*half_out_size, out_ch = 3): 140 | super(AffineGridGen, self).__init__() 141 | self.out_h = out_h 142 | self.out_w = out_w 143 | self.out_ch = out_ch 144 | 145 | def forward(self, theta): 146 | theta = theta.contiguous() 147 | batch_size = theta.size()[0] 148 | out_size = 
torch.Size((batch_size,self.out_ch,self.out_h,self.out_w)) 149 | return F.affine_grid(theta, out_size) 150 | 151 | class TpsGridGen(Module): 152 | def __init__(self, out_h=2*half_out_size, out_w=2*half_out_size, use_regular_grid=True, grid_size=6, reg_factor=0, use_cuda=True): 153 | super(TpsGridGen, self).__init__() 154 | self.out_h, self.out_w = out_h, out_w 155 | self.reg_factor = reg_factor 156 | self.use_cuda = use_cuda 157 | 158 | # create grid in numpy 159 | self.grid = np.zeros( [self.out_h, self.out_w, 3], dtype=np.float32) 160 | # sampling grid with dim-0 coords (Y) 161 | self.grid_X,self.grid_Y = np.meshgrid(np.linspace(-1,1,out_w),np.linspace(-1,1,out_h)) 162 | # grid_X,grid_Y: size [1,H,W,1,1] 163 | self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3) 164 | self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3) 165 | self.grid_X = Variable(self.grid_X,requires_grad=False) 166 | self.grid_Y = Variable(self.grid_Y,requires_grad=False) 167 | if use_cuda: 168 | self.grid_X = self.grid_X.cuda() 169 | self.grid_Y = self.grid_Y.cuda() 170 | 171 | # initialize regular grid for control points P_i 172 | if use_regular_grid: 173 | axis_coords = np.linspace(-1,1,grid_size) 174 | self.N = grid_size*grid_size 175 | P_Y,P_X = np.meshgrid(axis_coords,axis_coords) 176 | P_X = np.reshape(P_X,(-1,1)) # size (N,1) 177 | P_Y = np.reshape(P_Y,(-1,1)) # size (N,1) 178 | P_X = torch.FloatTensor(P_X) 179 | P_Y = torch.FloatTensor(P_Y) 180 | self.Li = Variable(self.compute_L_inverse(P_X,P_Y).unsqueeze(0),requires_grad=False) 181 | self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4) 182 | self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4) 183 | self.P_X = Variable(self.P_X,requires_grad=False) 184 | self.P_Y = Variable(self.P_Y,requires_grad=False) 185 | if use_cuda: 186 | self.P_X = self.P_X.cuda() 187 | self.P_Y = self.P_Y.cuda() 188 | 189 | 190 | def forward(self, theta): 191 | 192 | warped_grid = self.apply_transformation(theta,torch.cat((self.grid_X,self.grid_Y),3)) 193 | 194 | return warped_grid 195 | 196 | def compute_L_inverse(self,X,Y): 197 | N = X.size()[0] # num of points (along dim 0) 198 | # construct matrix K 199 | Xmat = X.expand(N,N) 200 | Ymat = Y.expand(N,N) 201 | P_dist_squared = torch.pow(Xmat-Xmat.transpose(0,1),2)+torch.pow(Ymat-Ymat.transpose(0,1),2) 202 | P_dist_squared[P_dist_squared==0]=1 # make diagonal 1 to avoid NaN in log computation 203 | #### add regularization 204 | ### K = torch.mul(P_dist_squared,torch.log(P_dist_squared)) 205 | K = torch.mul(P_dist_squared,torch.log(P_dist_squared)) + 0.0*torch.eye(N) 206 | # construct matrix L 207 | O = torch.FloatTensor(N,1).fill_(1) 208 | Z = torch.FloatTensor(3,3).fill_(0) 209 | P = torch.cat((O,X,Y),1) 210 | L = torch.cat((torch.cat((K,P),1),torch.cat((P.transpose(0,1),Z),1)),0) 211 | Li = torch.inverse(L) 212 | if self.use_cuda: 213 | Li = Li.cuda() 214 | return Li 215 | 216 | def apply_transformation(self,theta,points): 217 | #### the following code will be commented for experiments 218 | if theta.dim()==2: 219 | theta = theta.unsqueeze(2).unsqueeze(3) 220 | 221 | # points should be in the [B,H,W,2] format, 222 | # where points[:,:,:,0] are the X coords 223 | # and points[:,:,:,1] are the Y coords 224 | 225 | # input are the corresponding control points P_i 226 | batch_size = theta.size()[0] 227 | # split theta into point coordinates 228 | Q_X=theta[:,:self.N,:,:].squeeze(3) 229 | 
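        # theta packs 2*N values per sample: the first N are the X coordinates and the
        # next N the Y coordinates of the warped control points (N = grid_size**2 = 36,
        # matching output_dim = 72 in ProsRegNet's TPS regression head)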
##############################################################Q_Y=theta[:,self.N:,:,:].squeeze(3) 230 | Q_Y=theta[:,self.N:2*self.N,:,:].squeeze(3) 231 | 232 | 233 | # get spatial dimensions of points 234 | points_b = points.size()[0] 235 | points_h = points.size()[1] 236 | points_w = points.size()[2] 237 | 238 | # repeat pre-defined control points along spatial dimensions of points to be transformed 239 | P_X = self.P_X.expand((1,points_h,points_w,1,self.N)) 240 | P_Y = self.P_Y.expand((1,points_h,points_w,1,self.N)) 241 | 242 | # compute weigths for non-linear part 243 | W_X = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_X) 244 | # W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y) 245 | W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y) 246 | 247 | # reshape 248 | # W_X,W,Y: size [B,H,W,1,N] 249 | W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 250 | W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 251 | # compute weights for affine part 252 | A_X = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_X) 253 | #A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y) 254 | A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y) 255 | 256 | # reshape 257 | # A_X,A,Y: size [B,H,W,1,3] 258 | A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 259 | A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1) 260 | 261 | # compute distance P_i - (grid_X,grid_Y) 262 | # grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch 263 | points_X_for_summation = points[:,:,:,0].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,0].size()+(1,self.N)) 264 | points_Y_for_summation = points[:,:,:,1].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,1].size()+(1,self.N)) 265 | 266 | if points_b==1: 267 | delta_X = points_X_for_summation-P_X 268 | delta_Y = points_Y_for_summation-P_Y 269 | else: 270 | # use expanded P_X,P_Y in batch dimension 271 | delta_X = points_X_for_summation-P_X.expand_as(points_X_for_summation) 272 | delta_Y = points_Y_for_summation-P_Y.expand_as(points_Y_for_summation) 273 | 274 | dist_squared = torch.pow(delta_X,2)+torch.pow(delta_Y,2) 275 | # U: size [1,H,W,1,N] 276 | dist_squared[dist_squared==0]=1 # avoid NaN in log computation 277 | U = torch.mul(dist_squared,torch.log(dist_squared)) 278 | 279 | # expand grid in batch dimension if necessary 280 | points_X_batch = points[:,:,:,0].unsqueeze(3) 281 | points_Y_batch = points[:,:,:,1].unsqueeze(3) 282 | if points_b==1: 283 | points_X_batch = points_X_batch.expand((batch_size,)+points_X_batch.size()[1:]) 284 | points_Y_batch = points_Y_batch.expand((batch_size,)+points_Y_batch.size()[1:]) 285 | 286 | points_X_prime = A_X[:,:,:,:,0]+ \ 287 | torch.mul(A_X[:,:,:,:,1],points_X_batch) + \ 288 | torch.mul(A_X[:,:,:,:,2],points_Y_batch) + \ 289 | torch.sum(torch.mul(W_X,U.expand_as(W_X)),4) 290 | 291 | points_Y_prime = A_Y[:,:,:,:,0]+ \ 292 | torch.mul(A_Y[:,:,:,:,1],points_X_batch) + \ 293 | torch.mul(A_Y[:,:,:,:,2],points_Y_batch) + \ 294 | torch.sum(torch.mul(W_Y,U.expand_as(W_Y)),4) 295 | 296 | return torch.cat((points_X_prime,points_Y_prime),3) 297 | 298 | -------------------------------------------------------------------------------- /image/__pycache__/normalization.cpython-37(1).pyc: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/image/__pycache__/normalization.cpython-37(1).pyc
--------------------------------------------------------------------------------
/image/__pycache__/normalization.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/image/__pycache__/normalization.cpython-37.pyc
--------------------------------------------------------------------------------
/image/normalization.py:
--------------------------------------------------------------------------------
1 | """
2 | The following code is adapted from: https://github.com/ignacio-rocco/cnngeometric_pytorch.
3 | """
4 | 
5 | import torch
6 | from torchvision import transforms
7 | from torch.autograd import Variable
8 | 
9 | class NormalizeImageDict(object):
10 |     """
11 | 
12 |     Normalizes Tensor images in dictionary
13 | 
14 |     Args:
15 |         image_keys (list): dict. keys of the images to be normalized
16 |         normalizeRange (bool): if True the image is divided by 255.0
17 | 
18 |     """
19 | 
20 |     def __init__(self,image_keys,normalizeRange=True):
21 |         self.image_keys = image_keys
22 |         self.normalizeRange=normalizeRange
23 |         self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
24 |                                               std=[0.229, 0.224, 0.225])
25 | 
26 |     def __call__(self, sample):
27 |         for key in self.image_keys:
28 |             if self.normalizeRange:
29 |                 sample[key] /= 255.0
30 |             #sample[key] = self.normalize(sample[key])
31 |         return sample
32 | 
33 | 
34 | def normalize_image(image, forward=True, mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]):
35 | #def normalize_image(image, forward=True, mean=[0.5, 0.5, 0.5],std=[0.25, 0.25, 0.25]):
36 |     im_size = image.size()
37 |     mean=torch.FloatTensor(mean).unsqueeze(1).unsqueeze(2)
38 |     std=torch.FloatTensor(std).unsqueeze(1).unsqueeze(2)
39 |     if image.is_cuda:
40 |         mean = mean.cuda()
41 |         std = std.cuda()
42 |     if isinstance(image,torch.Tensor):
43 |         mean = Variable(mean,requires_grad=False)
44 |         std = Variable(std,requires_grad=False)
45 |     if forward:
46 |         if len(im_size)==3:
47 |             result = image.sub(mean.expand(im_size)).div(std.expand(im_size))
48 |         elif len(im_size)==4:
49 |             result = image.sub(mean.unsqueeze(0).expand(im_size)).div(std.unsqueeze(0).expand(im_size))
50 |     else:
51 |         if len(im_size)==3:
52 |             result = image.mul(std.expand(im_size)).add(mean.expand(im_size))
53 |         elif len(im_size)==4:
54 |             result = image.mul(std.unsqueeze(0).expand(im_size)).add(mean.unsqueeze(0).expand(im_size))
55 | 
56 |     return result
--------------------------------------------------------------------------------
/jsonData/TCIA_FUSION.json:
--------------------------------------------------------------------------------
1 | {
2 |     "version": "0.1",
3 |     "type": "registration",
4 |     "method": {
5 |         "type": "3DRegistration",
6 |         "params": "-da -dd"
7 |     },
8 |     "output_path": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\results\\",
9 |     "studies2process": {
10 |         "aaa0069": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\jsonData\\reg_aaa0069.json"
11 |     },
12 |     "studies": {
13 |         "aaa0069": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\jsonData\\reg_aaa0069.json"
14 |     }
15 | }
--------------------------------------------------------------------------------
/jsonData/reg_aaa0069.json:
--------------------------------------------------------------------------------
1 | {
2 | 
"id": "aaa0069", 3 | "invivo-accession": "", 4 | "exvivo-accession": "", 5 | "fixed": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\datasets\\testing\\MRI\\aaa0069-T2_AXIAL_SM_FOV.mha", 6 | "fixed-segmentation": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\datasets\\testing\\MRI\\aaa0069-T2_AXIAL_SM_FOV-prostate_label.nii.gz", 7 | "fixed-landmarks2": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\datasets\\testing\\MRI\\aaa0069-T2_AXIAL_SM_FOV-urethra_label.nii.gz", 8 | "fixed-landmarks3": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\datasets\\testing\\MRI\\aaa0069-T2-AXIAL-SM-FOV_cancer_label.mha", 9 | "fixed-landmarks1": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\datasets\\testing\\MRI\\aaa0069_fixed_image-label.nii.gz", 10 | "moving-type": "stack", 11 | "moving": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\datasets\\testing\\Histology\\aaa0069.json", 12 | "T2w": "C:\\Users\\weishao\\Desktop\\Code_ProsRegNet\\datasets\\testing\\MRI\\aaa0069-T2_AXIAL_SM_FOV.mha", 13 | "ADC": "", 14 | "DWI": "" 15 | } -------------------------------------------------------------------------------- /model/ProsRegNet_model.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | If you use this code, please cite the following papers: 4 | (1) Shao, Wei, et al. "ProsRegNet: A Deep Learning Framework for Registration of MRI and Histopathology Images of the Prostate." Medical Image Analysis. 2020. 5 | (2) Rocco, Ignacio, Relja Arandjelovic, and Josef Sivic. "Convolutional neural network architecture for geometric matching." Proceedings of CVPR. 2017. 6 | 7 | The following code is adapted from: https://github.com/ignacio-rocco/cnngeometric_pytorch. 8 | """ 9 | 10 | 11 | from __future__ import print_function, division 12 | import torch 13 | import torch.nn as nn 14 | from torch.autograd import Variable 15 | import torchvision.models as models 16 | 17 | class FeatureExtraction(torch.nn.Module): 18 | def __init__(self, use_cuda=True, feature_extraction_cnn='resnet101', last_layer=''): 19 | super(FeatureExtraction, self).__init__() 20 | if feature_extraction_cnn == 'vgg': 21 | self.model = models.vgg16(pretrained=True) 22 | # keep feature extraction network up to indicated layer 23 | vgg_feature_layers=['conv1_1','relu1_1','conv1_2','relu1_2','pool1','conv2_1', 24 | 'relu2_1','conv2_2','relu2_2','pool2','conv3_1','relu3_1', 25 | 'conv3_2','relu3_2','conv3_3','relu3_3','pool3','conv4_1', 26 | 'relu4_1','conv4_2','relu4_2','conv4_3','relu4_3','pool4', 27 | 'conv5_1','relu5_1','conv5_2','relu5_2','conv5_3','relu5_3','pool5'] 28 | if last_layer=='': 29 | last_layer = 'pool4' 30 | last_layer_idx = vgg_feature_layers.index(last_layer) 31 | self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx+1]) 32 | 33 | if feature_extraction_cnn == 'resnet101': 34 | self.model = models.resnet101(pretrained=True) 35 | resnet_feature_layers = ['conv1', 36 | 'bn1', 37 | 'relu', 38 | 'maxpool', 39 | 'layer1', 40 | 'layer2', 41 | 'layer3', 42 | 'layer4'] 43 | if last_layer=='': 44 | last_layer = 'layer3' 45 | last_layer_idx = resnet_feature_layers.index(last_layer) 46 | resnet_module_list = [self.model.conv1, 47 | self.model.bn1, 48 | self.model.relu, 49 | self.model.maxpool, 50 | self.model.layer1, 51 | self.model.layer2, 52 | self.model.layer3, 53 | self.model.layer4] 54 | 55 | self.model = nn.Sequential(*resnet_module_list[:last_layer_idx+1]) 56 | 57 | # freeze parameters 58 | for param in self.model.parameters(): 59 | # save lots of memory 60 | 
param.requires_grad = False 61 | #param.requires_grad = True 62 | # move to GPU 63 | if use_cuda: 64 | self.model.cuda() 65 | 66 | def forward(self, image_batch): 67 | return self.model(image_batch) 68 | 69 | class FeatureL2Norm(torch.nn.Module): 70 | def __init__(self): 71 | super(FeatureL2Norm, self).__init__() 72 | 73 | def forward(self, feature): 74 | epsilon = 1e-6 75 | norm = torch.pow(torch.sum(torch.pow(feature,2),1)+epsilon,0.5).unsqueeze(1).expand_as(feature) 76 | return torch.div(feature,norm) 77 | 78 | class FeatureCorrelation(torch.nn.Module): 79 | def __init__(self): 80 | super(FeatureCorrelation, self).__init__() 81 | 82 | def forward(self, feature_A, feature_B): 83 | b,c,h,w = feature_A.size() 84 | # reshape features for matrix multiplication 85 | feature_A = feature_A.transpose(2,3).contiguous().view(b,c,h*w) 86 | feature_B = feature_B.view(b,c,h*w).transpose(1,2) 87 | # perform matrix mult. 88 | feature_mul = torch.bmm(feature_B,feature_A) 89 | correlation_tensor = feature_mul.view(b,h,w,h*w).transpose(2,3).transpose(1,2) 90 | return correlation_tensor 91 | 92 | class FeatureRegression(nn.Module): 93 | def __init__(self, output_dim=6, use_cuda=True): 94 | super(FeatureRegression, self).__init__() 95 | self.conv = nn.Sequential( 96 | nn.Conv2d(225, 128, kernel_size=7, padding=0), 97 | nn.BatchNorm2d(128), 98 | nn.ReLU(inplace=True), 99 | nn.Conv2d(128, 64, kernel_size=5, padding=0), 100 | nn.BatchNorm2d(64), 101 | nn.ReLU(inplace=True), 102 | ) 103 | self.linear = nn.Linear(64 * 5 * 5, output_dim) 104 | 105 | if use_cuda: 106 | self.conv.cuda() 107 | self.linear.cuda() 108 | 109 | def forward(self, x): 110 | 111 | x = self.conv(x) 112 | x = x.view(x.size(0), -1) 113 | x = self.linear(x) 114 | return x 115 | 116 | class ProsRegNet(nn.Module): 117 | def __init__(self, geometric_model='affine', normalize_features=True, normalize_matches=True, batch_normalization=True, use_cuda=True, feature_extraction_cnn='resnet101'): 118 | super(ProsRegNet, self).__init__() 119 | self.use_cuda = use_cuda 120 | self.normalize_features = normalize_features 121 | self.normalize_matches = normalize_matches 122 | self.FeatureExtraction = FeatureExtraction(use_cuda=self.use_cuda, feature_extraction_cnn=feature_extraction_cnn) 123 | self.FeatureL2Norm = FeatureL2Norm() 124 | self.FeatureCorrelation = FeatureCorrelation() 125 | if geometric_model=='affine': 126 | output_dim = 6 127 | elif geometric_model=='tps': 128 | output_dim = 72 129 | self.FeatureRegression = FeatureRegression(output_dim,use_cuda=self.use_cuda) 130 | self.ReLU = nn.ReLU(inplace=True) 131 | 132 | def forward(self, tnf_batch): 133 | # do feature extraction 134 | feature_A = self.FeatureExtraction(tnf_batch['source_image']) 135 | feature_B = self.FeatureExtraction(tnf_batch['target_image']) 136 | # normalize 137 | if self.normalize_features: 138 | feature_A = self.FeatureL2Norm(feature_A) 139 | feature_B = self.FeatureL2Norm(feature_B) 140 | # do feature correlation 141 | correlation = self.FeatureCorrelation(feature_A,feature_B) 142 | # normalize 143 | if self.normalize_matches: 144 | correlation = self.FeatureL2Norm(self.ReLU(correlation)) 145 | # do regression to tnf parameters theta 146 | theta = self.FeatureRegression(correlation) 147 | 148 | if theta.shape[1] == 6: 149 | temp = torch.tensor([1.0,0,0,0,1.0,0]) 150 | adjust = temp.repeat(theta.shape[0],1) 151 | adjust = adjust.cuda() 152 | theta = 0.1*theta + adjust 153 | theta = theta.reshape(theta.size()[0],2,3) 154 | theta = theta.cuda() 155 | 156 | if theta.shape[1] == 
72: 157 | temp = torch.tensor([-1.0,-1.0,-1.0,-1.0,-1.0,-1.0, 158 | -0.6,-0.6,-0.6,-0.6,-0.6,-0.6, 159 | -0.2,-0.2,-0.2,-0.2,-0.2,-0.2, 160 | 0.2,0.2,0.2,0.2,0.2,0.2, 161 | 0.6,0.6,0.6,0.6,0.6,0.6, 162 | 1.0,1.0,1.0,1.0,1.0,1.0, 163 | -1.0,-0.6,-0.2,0.2,0.6,1.0, 164 | -1.0,-0.6,-0.2,0.2,0.6,1.0, 165 | -1.0,-0.6,-0.2,0.2,0.6,1.0, 166 | -1.0,-0.6,-0.2,0.2,0.6,1.0, 167 | -1.0,-0.6,-0.2,0.2,0.6,1.0, 168 | -1.0,-0.6,-0.2,0.2,0.6,1.0]) 169 | adjust = temp.repeat(theta.shape[0],1) 170 | adjust = adjust.cuda() 171 | theta = 0.1*theta + adjust 172 | theta = theta.cuda() 173 | 174 | return theta 175 | 176 | -------------------------------------------------------------------------------- /model/__pycache__/ProsRegNet_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/model/__pycache__/ProsRegNet_model.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/cnn_geometric_model.cpython-37(1).pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/model/__pycache__/cnn_geometric_model.cpython-37(1).pyc -------------------------------------------------------------------------------- /model/__pycache__/cnn_geometric_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/model/__pycache__/cnn_geometric_model.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/loss.cpython-37(1).pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/model/__pycache__/loss.cpython-37(1).pyc -------------------------------------------------------------------------------- /model/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/model/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /model/loss.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import numpy as np 3 | import torch 4 | import torch.nn as nn 5 | from torch.autograd import Variable 6 | from geotnf.point_tnf import PointTnf 7 | from geotnf.transformation import GeometricTnf 8 | from skimage import io 9 | 10 | class SSDLoss(nn.Module): 11 | def __init__(self, geometric_model='affine', use_cuda=True): 12 | super(SSDLoss, self).__init__() 13 | self.geometric_model = geometric_model 14 | self.use_cuda = use_cuda 15 | 16 | def forward(self, theta, theta_GT, tnf_batch): 17 | ### compute square root of ssd 18 | A = tnf_batch['target_image'] 19 | geometricTnf = GeometricTnf(self.geometric_model, 240, 240, use_cuda = self.use_cuda) 20 | 21 | B = geometricTnf(tnf_batch['source_image'],theta) 22 | 23 | ssd = torch.sum(torch.sum(torch.sum(torch.pow(A - B,2),dim=3),dim=2),dim=1) 24 | ssd = torch.sum(ssd)/(A.shape[0]*A.shape[1]*A.shape[2]*A.shape[3]) 25 | ssd = torch.sqrt(ssd) 26 | 27 | 28 | return ssd 29 | 
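Below is a minimal sketch (not part of the repository) of how SSDLoss can be exercised on its own. The 240x240 size matches the GeometricTnf hard-coded inside forward, and the identity affine theta is just an illustrative choice.

```
# Minimal sketch: evaluating SSDLoss on dummy data with an identity affine theta.
import torch
from model.loss import SSDLoss

use_cuda = torch.cuda.is_available()
device = 'cuda' if use_cuda else 'cpu'
loss_fn = SSDLoss(geometric_model='affine', use_cuda=use_cuda)

source = torch.rand(4, 3, 240, 240, device=device)
target = torch.rand(4, 3, 240, 240, device=device)
# identity 2x3 affine parameters, shape (B, 2, 3)
theta = torch.tensor([[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]], device=device).unsqueeze(0).repeat(4, 1, 1)

# theta_GT is accepted but unused inside SSDLoss.forward
ssd = loss_fn(theta, None, {'source_image': source, 'target_image': target})
print(ssd.item())  # root-mean-square intensity difference after warping
```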
-------------------------------------------------------------------------------- /parse_registration_json.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | class ParserRegistrationJson: 4 | def __init__(self,filename): 5 | self.filename = filename 6 | self.dict = None 7 | self.version = None 8 | self.method = None 9 | self.studies = None 10 | self.ToProcess = None 11 | self.study_filenames = None 12 | self.output_path= None 13 | self.do_affine = True 14 | self.do_deformable = None 15 | self.fast_execution= None 16 | self.use_imaging_constraints = None 17 | self.do_reconstruction = None 18 | 19 | self.ReadJson() 20 | 21 | 22 | def ReadJson(self): 23 | with open(self.filename) as f: 24 | self.dict = json.load(f) 25 | try: 26 | self.version = self.dict['version'] 27 | except Exception as e: 28 | print(e) 29 | 30 | 31 | try: 32 | self.method = self.dict['method'] 33 | try: 34 | self.do_affine = self.dict['method']['do_affine'] 35 | except Exception as e: 36 | print(e) 37 | try: 38 | self.do_deformable = self.dict['method']['do_deformable'] 39 | except Exception as e: 40 | print(e) 41 | try: 42 | self.do_reconstruction = self.dict['method']['do_reconstruction'] 43 | except Exception as e: 44 | print(e) 45 | try: 46 | self.fast_execution = self.dict['method']['fast_execution'] 47 | except Exception as e: 48 | print(e) 49 | 50 | try: 51 | self.use_imaging_constraints = self.dict['method']['use_imaging_constraints'] 52 | except Exception as e: 53 | print(e) 54 | 55 | except Exception as e: 56 | print(e) 57 | 58 | 59 | try: 60 | self.study_filenames = self.dict['studies'] 61 | self.studies = {} 62 | for s in self.study_filenames: 63 | fn = self.study_filenames[s] 64 | try: 65 | print("Reading", s, "Study Json",fn) 66 | with open(fn) as fs: 67 | studyDict = json.load(fs) 68 | 69 | self.studies[s]=studyDict 70 | except Exception as ee: 71 | 72 | print(ee) 73 | except Exception as e: 74 | print(e) 75 | 76 | try: 77 | self.output_path = self.dict['output_path'] 78 | except Exception as e: 79 | print(e) 80 | 81 | try: 82 | self.ToProcess = self.dict['studies2process'] 83 | except Exception as e: 84 | print(e) 85 | 86 | 87 | 88 | def PrintJson(self): 89 | for d in self.dict: 90 | print(d,self.dict[d]) 91 | -------------------------------------------------------------------------------- /parse_study_dict.py: -------------------------------------------------------------------------------- 1 | import SimpleITK as sitk 2 | import json 3 | 4 | class ParserStudyDict: 5 | def __init__(self,studyDict): 6 | self.dict = studyDict 7 | 8 | self.id = None 9 | self.exvivo_accession = None 10 | self.invivo_accession = None 11 | self.fixed_filename = None 12 | self.fixed_segmentation_filename= None 13 | self.fixed_landmark1_filename = None 14 | self.fixed_landmark2_filename = None 15 | self.moving_type = None 16 | self.moving_filename = None 17 | self.moving_dict = None 18 | 19 | self.T2_filename = None 20 | self.ADC_filename = None 21 | self.DWI_filename = None 22 | 23 | self.SetFromDict() 24 | 25 | def SetFromDict(self): 26 | try: 27 | self.fixed_filename = self.dict['fixed'] 28 | except Exception as e: 29 | print(e) 30 | 31 | try: 32 | self.fixed_segmentation_filename = self.dict['fixed-segmentation'] 33 | except Exception as e: 34 | print(e) 35 | 36 | 37 | try: 38 | self.fixed_landmark1_filename = self.dict['fixed-landmarks1'] 39 | except Exception as e: 40 | print(e) 41 | 42 | try: 43 | self.fixed_landmark2_filename = self.dict['fixed-landmarks2'] 44 | 
except Exception as e:
45 |             print(e)
46 | 
47 | 
48 |         try:
49 |             self.moving_type = self.dict['moving-type']
50 | 
51 |         except Exception as e:
52 |             print(e)
53 | 
54 |         try:
55 |             self.moving_filename = self.dict['moving']
56 |         except Exception as e:
57 |             print(e)
58 | 
59 |         try:
60 |             self.id = self.dict['id']
61 |         except Exception as e:
62 |             print(e)
63 | 
64 |         try:
65 |             self.invivo_accession = self.dict['invivo-accession']
66 |         except Exception as e:
67 |             print(e)
68 | 
69 |         try:
70 |             self.exvivo_accession = self.dict['exvivo-accession']
71 |         except Exception as e:
72 |             print(e)
73 | 
74 |         try:
75 |             self.T2_filename = self.dict['T2w']
76 |         except Exception as e:
77 |             print(e)
78 | 
79 |         try:
80 |             self.ADC_filename = self.dict['ADC']
81 |         except Exception as e:
82 |             print(e)
83 | 
84 |         try:
85 |             self.DWI_filename = self.dict['DWI']
86 |         except Exception as e:
87 |             print(e)
88 | 
89 |     def ReadImage(self, fn):
90 |         im = None
91 |         if fn:
92 |             try:
93 |                 im = sitk.ReadImage(fn)
94 |             except Exception as e:
95 |                 print(e)
96 |                 print("Fixed image could not be read from", fn)
97 |                 im = None
98 |         else:
99 |             print("Fixed filename is not available and the fixed image couldn't be read")
100 |             im = None
101 | 
102 |         return im
103 | 
104 |     def ReadMovingImage(self):
105 | 
106 |         if self.moving_type and self.moving_type.lower()=="stack":
107 |             #print(self.moving_filename)
108 |             with open(self.moving_filename) as f:
109 |                 self.moving_dict = json.load(f)
110 |         else:
111 |             self.moving_dict = None
112 | 
113 |         return self.moving_dict
--------------------------------------------------------------------------------
/pictures/pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/pictures/pipeline.png
--------------------------------------------------------------------------------
/preprocess.py:
--------------------------------------------------------------------------------
1 | import json
2 | import numpy as np
3 | import cv2
4 | import SimpleITK as sitk
5 | from collections import OrderedDict
6 | from geotnf.transformation import GeometricTnf
7 | import torch
8 | from image.normalization import NormalizeImageDict, normalize_image
9 | import os
10 | from matplotlib import pyplot as plt
11 | 
12 | 
13 | def transformAndSaveRegion(preprocess_moving_dest, case, slice, s, region, theta, dH, dW, h, w, x, y, x_offset, y_offset):
14 |     rotated = np.zeros((w + 2*x_offset, h + 2*y_offset, 3))
15 |     try:
16 |         path = s['regions'][region]['filename']
17 |         ann = cv2.imread(path)  # annotation image for this region
18 |         # if flip is 1, flip image horizontally
19 |         try:
20 |             if s['transform']['flip'] == 1:
21 |                 ann = cv2.flip(ann, 1)
22 |         except:
23 |             pass
24 | 
25 |         ann = np.pad(ann,((ann.shape[0],ann.shape[0]),(ann.shape[1],ann.shape[1]),(0,0)),'constant', constant_values=0)
26 | 
27 |         rows, cols, channels = ann.shape
28 |         M = cv2.getRotationMatrix2D((cols/2,rows/2),theta,1)
29 |         rotated_ann = cv2.warpAffine(ann,M,(cols,rows))
30 | 
31 | 
32 |         # find edge and downsample
33 |         ann = cv2.resize(rotated_ann, (dH, dW), interpolation=cv2.INTER_CUBIC)
34 |         ann[ann > 5] = 1
35 | 
36 |         # set edge to outline
37 |         region3d= np.zeros((w + 2*x_offset, h + 2*y_offset, 3))
38 | 
39 |         region3d[x_offset:w + x_offset, y_offset:h + y_offset,:] = (ann[x:x+w,y:y+h]>0)*255
40 | 
41 |         rotated = region3d
42 |     except:
43 |         pass
44 | 
45 |     try:
46 |         os.mkdir(preprocess_moving_dest + case)
47 |     except:
48 |         pass
49 | 
50 |     outputPath = preprocess_moving_dest + case + '\\' + 
region + '_' + case + '_' + slice +'.png' 51 | cv2.imwrite(outputPath, rotated) 52 | 53 | 54 | # preprocess_hist into hist slices here 55 | def preprocess_hist(moving_dict, pre_process_moving_dest, case): 56 | for slice in moving_dict: 57 | s = moving_dict[slice] 58 | 59 | # Read image 60 | img = cv2.imread(s['filename'], ) 61 | 62 | # multiply by mask 63 | prosPath = s['regions']['region00']['filename'] 64 | region00 = cv2.imread(prosPath) 65 | img = img*(region00/255) 66 | # if flip is 1, flip image horizontally 67 | try: 68 | if s['transform']['flip'] == 1: 69 | img = cv2.flip(img, 1) 70 | region00 = cv2.flip(region00, 1) 71 | except: 72 | pass 73 | # rotate image 74 | try: 75 | theta = -s['transform']['rotation_angle'] 76 | except: 77 | theta = 0 78 | 79 | 80 | img = np.pad(img,((img.shape[0],img.shape[0]),(img.shape[1],img.shape[1]),(0,0)),'constant', constant_values=0) 81 | region00 = np.pad(region00,((region00.shape[0],region00.shape[0]),(region00.shape[1],region00.shape[1]),(0,0)),'constant', constant_values=0) 82 | 83 | 84 | rows, cols, channels = img.shape 85 | M = cv2.getRotationMatrix2D((cols/2,rows/2),theta,1) 86 | rotated_hist = cv2.warpAffine(img,M,(cols,rows),borderValue = (0,0,0) ) 87 | rotated_region00 = cv2.warpAffine(region00,M,(cols,rows)) 88 | 89 | 90 | dH = int(rotated_hist.shape[1]/4) 91 | dW = int(rotated_hist.shape[0]/4) 92 | 93 | # downsample image, this has to be consistent with the size of MRI 94 | # dSize = rotated_hist.shape[0]/720 ; # downsample size 95 | # dH = int(rotated_hist.shape[1]/dSize) #downsampled height 96 | # dW = int(rotated_hist.shape[0]/dSize) #downsampled width 97 | # dH = 3000 98 | # dW = 3000 99 | # imgResize = cv2.resize(rotated_hist, (dH, dW), interpolation=cv2.INTER_CUBIC) 100 | 101 | 102 | rotated_hist = cv2.resize(rotated_hist, (dH, dW), interpolation=cv2.INTER_CUBIC) 103 | rotated_region00 = cv2.resize(rotated_region00, (dH, dW), interpolation=cv2.INTER_CUBIC) 104 | 105 | 106 | # create a bounding box around slice 107 | points = np.argwhere(rotated_region00[:,:,0] != 0) 108 | points = np.fliplr(points) # store them in x,y coordinates instead of row,col indices 109 | y, x, h, w = cv2.boundingRect(points) # create a rectangle around those points 110 | 111 | crop = rotated_hist[x:x+w, y:y+h,:] 112 | 113 | if h>w: 114 | y_offset = int(h*0.15) 115 | x_offset = int((h - w + 2*y_offset)/2) 116 | else: 117 | y_offset = int(h*0.2) 118 | x_offset = int((h - w + 2*y_offset)/2) 119 | 120 | transformAndSaveRegion(pre_process_moving_dest, case, slice, s, 'region00', theta, dH, dW, h, w,x,y,x_offset,y_offset) 121 | transformAndSaveRegion(pre_process_moving_dest, case, slice, s, 'region01', theta, dH, dW, h, w,x,y,x_offset,y_offset) 122 | transformAndSaveRegion(pre_process_moving_dest, case, slice, s, 'region10', theta, dH, dW, h, w,x,y,x_offset,y_offset) 123 | transformAndSaveRegion(pre_process_moving_dest, case, slice, s, 'region09', theta, dH, dW, h, w,x,y,x_offset,y_offset) 124 | 125 | # pad image 126 | h = h + 2*y_offset 127 | w = w + 2*x_offset 128 | 129 | 130 | 131 | padHist = np.zeros((w, h, 3)) 132 | 133 | padHist[x_offset:crop.shape[0]+x_offset, y_offset:crop.shape[1]+y_offset, :] = crop 134 | 135 | # Write images, with new filename 136 | cv2.imwrite(pre_process_moving_dest + case + '\\hist_' + case + '_' + slice +'.png', padHist) 137 | 138 | #preprocess mri mha files to slices here 139 | def preprocess_mri(fixed_img_mha, fixed_seg, pre_process_fixed_dest, coord, case): 140 | imMri = sitk.ReadImage(fixed_img_mha) 141 | imMri = 
sitk.GetArrayFromImage(imMri)
142 |     imMriMask = sitk.ReadImage(fixed_seg)
143 |     #### resample mri mask to be the same size as mri
144 |     if imMri.shape[1] != sitk.GetArrayFromImage(imMriMask).shape[1] or imMri.shape[2] != sitk.GetArrayFromImage(imMriMask).shape[2]:
145 |         mri_ori = sitk.ReadImage(fixed_img_mha)
146 |         resampler = sitk.ResampleImageFilter()
147 |         resampler.SetReferenceImage(mri_ori)
148 |         imMriMask = resampler.Execute(imMriMask)
149 |         print("input mri and mri mask have different sizes")
150 | 
151 |     imMriMask = sitk.GetArrayFromImage(imMriMask)
152 | 
153 |     coord[case] = {}
154 |     coord[case]['x_offset'] = []
155 |     coord[case]['y_offset'] = []
156 |     coord[case]['x'] = []
157 |     coord[case]['y'] = []
158 |     coord[case]['h'] = []
159 |     coord[case]['w'] = []
160 |     coord[case]['slice'] = []
161 | 
162 |     for slice in range(imMri.shape[0]):
163 |         if np.sum(np.ndarray.flatten(imMriMask[slice, :, :])) == 0:
164 |             continue
165 | 
166 |         mri = imMri[slice, :, :]*imMriMask[slice, :, :]
167 | 
168 |         mri_mask = imMriMask[slice, :, :] * 255
169 | 
170 |         # create a bounding box around slice
171 |         points = np.argwhere(mri_mask != 0)
172 |         points = np.fliplr(points) # store them in x,y coordinates instead of row,col indices
173 |         y, x, h, w = cv2.boundingRect(points) # create a rectangle around those points
174 | 
175 | 
176 | 
177 |         imMri[slice, :, :] = imMri[slice, :, :] / int(np.max(imMri[slice, :, :]) / 255)
178 | 
179 |         if h>w:
180 |             y_offset = int(h*0.15)
181 |             x_offset = int((h - w + 2*y_offset)/2)
182 |         else:
183 |             y_offset = int(h*0.2)
184 |             x_offset = int((h - w + 2*y_offset)/2)
185 | 
186 |         coord[case]['x'].append(x)
187 |         coord[case]['y'].append(y)
188 |         coord[case]['h'].append(h)
189 |         coord[case]['w'].append(w)
190 |         coord[case]['slice'].append(slice)
191 |         coord[case]['x_offset'].append(x_offset)
192 |         coord[case]['y_offset'].append(y_offset)
193 | 
194 |         crop = mri[x - x_offset:x+w+x_offset, y - y_offset:y+h +y_offset]
195 | 
196 |         h = h + 2*y_offset
197 |         w = w + 2*x_offset
198 | 
199 |         crop = crop*25.5/(np.max(crop)/10)
200 | 
201 |         # optional upsampling factor (1 keeps the cropped slice at its original resolution)
202 |         ups = 1
203 |         upsHeight = int(h*ups)
204 |         upsWidth = int(w*ups)
205 | 
206 |         upsMri = cv2.resize(crop.astype('float32'), (upsHeight, upsWidth), interpolation=cv2.INTER_CUBIC)
207 | 
208 |         # save x, y, x_offset, y_offset, h, w for each slice in dictionary 'coord' (coordinates)
209 | 
210 |         try:
211 |             os.mkdir(pre_process_fixed_dest + case)
212 |         except:
213 |             pass
214 | 
215 |         # write to a file
216 |         cv2.imwrite(pre_process_fixed_dest + case + '\\mri_' + case + '_' + str(slice).zfill(2) +'.jpg', upsMri)
217 | 
218 | 
219 |         cv2.imwrite(pre_process_fixed_dest + case + '\\mriUncropped_' + case + '_' + str(slice).zfill(2) +'.jpg', imMri[slice, :, :])
220 |         cv2.imwrite(pre_process_fixed_dest + case + '\\mriMask_' + case + '_' + str(slice).zfill(2) +'.jpg', np.uint8(mri_mask))
221 | 
222 |     coord = OrderedDict(coord)
223 | 
224 |     return coord
--------------------------------------------------------------------------------
/register_images.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | import os
3 | import argparse
4 | import torch
5 | import torch.nn as nn
6 | from torch.utils.data import Dataset, DataLoader
7 | from model.ProsRegNet_model import ProsRegNet
8 | from image.normalization import NormalizeImageDict, normalize_image
9 | from util.torch_util import BatchTensorToVars, str_to_bool
10 | from geotnf.transformation import GeometricTnf
11 
| from geotnf.transformation_high_res import GeometricTnf_high_res 12 | from geotnf.point_tnf import * 13 | import matplotlib.pyplot as plt 14 | from skimage import io 15 | import warnings 16 | from torchvision.transforms import Normalize 17 | from collections import OrderedDict 18 | import cv2 19 | warnings.filterwarnings('ignore') 20 | import SimpleITK as sitk 21 | import sys 22 | sys.path.insert(0, '../parse_data/parse_json') 23 | from parse_registration_json import ParserRegistrationJson 24 | from parse_study_dict import ParserStudyDict 25 | import time 26 | import json 27 | import numpy as np 28 | from preprocess import * 29 | from random import randrange 30 | 31 | tr = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0] 32 | 33 | half_out_size = 500 34 | 35 | # normalize image 36 | def preprocess_image(image): 37 | resizeCNN = GeometricTnf(out_h=240, out_w=240, use_cuda = False) 38 | 39 | # convert to torch Variable 40 | image = np.expand_dims(image.transpose((2,0,1)),0) 41 | image = torch.Tensor(image.astype(np.float32)/255.0) 42 | image_var = Variable(image,requires_grad=False) 43 | 44 | # Resize image using bilinear sampling with identity affine tnf 45 | image_var = resizeCNN(image_var) 46 | 47 | # Normalize image 48 | image_var = normalize_image(image_var) 49 | 50 | return image_var 51 | 52 | def preprocess_image_high_res(image): 53 | resizeCNN = GeometricTnf(out_h=half_out_size*2, out_w=half_out_size*2, use_cuda = False) 54 | 55 | # convert to torch Variable 56 | image = np.expand_dims(image.transpose((2,0,1)),0) 57 | image = torch.Tensor(image.astype(np.float32)/255.0) 58 | image_var = Variable(image,requires_grad=False) 59 | 60 | # Resize image using bilinear sampling with identity affine tnf 61 | image_var = resizeCNN(image_var) 62 | 63 | # Normalize image 64 | image_var = normalize_image(image_var) 65 | 66 | return image_var 67 | 68 | ### return high resolution image 69 | def preprocess_image_high_res_back(image): 70 | # convert to torch Variable 71 | image = np.expand_dims(image.transpose((2,0,1)),0) 72 | image = torch.Tensor(image.astype(np.float32)/255.0) 73 | image_var = Variable(image,requires_grad=False) 74 | 75 | # Normalize image 76 | image_var = normalize_image(image_var) 77 | 78 | return image_var 79 | 80 | 81 | # load pre-trained models here 82 | def load_models(feature_extraction_cnn, model_aff_path, model_tps_path, do_deformable=True): 83 | # feature_extraction_cnn = 'resnet101' 84 | # if feature_extraction_cnn=='resnet101': 85 | # model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss.pth.tar' 86 | # model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss.pth.tar' 87 | # elif feature_extraction_cnn=='resnet101': 88 | # model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss_resnet_random.pth.tar' 89 | # model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss_resnet_random.pth.tar' 90 | 91 | use_cuda = torch.cuda.is_available() 92 | 93 | #use_cuda = False 94 | 95 | do_aff = not model_aff_path=='' 96 | # do_tps = not model_tps_path=='' 97 | do_tps = do_deformable 98 | 99 | # Create model 100 | print('Creating CNN model...') 101 | if do_aff: 102 | model_aff = ProsRegNet(use_cuda=use_cuda,geometric_model='affine',feature_extraction_cnn=feature_extraction_cnn) 103 | if do_tps: 104 | model_tps = ProsRegNet(use_cuda=use_cuda,geometric_model='tps',feature_extraction_cnn=feature_extraction_cnn) 105 | 106 | # Load trained weights 107 | print('Loading trained model weights...') 108 | if do_aff: 
109 | checkpoint = torch.load(model_aff_path, map_location=lambda storage, loc: storage) 110 | checkpoint['state_dict'] = OrderedDict([(k.replace('resnet101', 'model'), v) for k, v in checkpoint['state_dict'].items()]) 111 | model_aff.load_state_dict(checkpoint['state_dict']) 112 | if do_tps: 113 | checkpoint = torch.load(model_tps_path, map_location=lambda storage, loc: storage) 114 | checkpoint['state_dict'] = OrderedDict([(k.replace('resnet101', 'model'), v) for k, v in checkpoint['state_dict'].items()]) 115 | model_tps.load_state_dict(checkpoint['state_dict']) 116 | 117 | model_cache = (model_aff, model_tps, do_aff, do_tps, use_cuda) 118 | 119 | return model_cache 120 | 121 | # run the cnn on our images and return 3D images 122 | def runCnn(model_cache, source_image_path, target_image_path, region01, region00, region10, region09): 123 | model_aff, model_tps, do_aff, do_tps, use_cuda = model_cache 124 | 125 | tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda) 126 | affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda) 127 | 128 | tpsTnf_high_res = GeometricTnf_high_res(geometric_model='tps', use_cuda=use_cuda) 129 | affTnf_high_res = GeometricTnf_high_res(geometric_model='affine', use_cuda=use_cuda) 130 | 131 | source_image = io.imread(source_image_path) 132 | target_image = io.imread(target_image_path) 133 | 134 | 135 | # copy MRI image to 3 channels 136 | target_image3d = np.zeros((target_image.shape[0], target_image.shape[1], 3), dtype=int) 137 | target_image3d[:, :, 0] = target_image 138 | target_image3d[:, :, 1] = target_image 139 | target_image3d[:, :, 2] = target_image 140 | target_image = np.copy(target_image3d) 141 | 142 | 143 | 144 | #### begin new code, affine registration using the masks only 145 | source_image_mask = np.copy(source_image) 146 | source_image_mask[np.any(source_image_mask > 5, axis=-1)] = 255 147 | target_image_mask = np.copy(target_image) 148 | target_image_mask[np.any(target_image_mask > 5, axis=-1)] = 255 149 | source_image_mask_var = preprocess_image(source_image_mask) 150 | target_image_mask_var = preprocess_image(target_image_mask) 151 | 152 | if use_cuda: 153 | source_image_mask_var = source_image_mask_var.cuda() 154 | target_image_mask_var = target_image_mask_var.cuda() 155 | batch_mask = {'source_image': source_image_mask_var, 'target_image':target_image_mask_var} 156 | #### end new code 157 | 158 | source_image_var = preprocess_image(source_image) 159 | target_image_var = preprocess_image(target_image) 160 | region01_image_var = preprocess_image(region01) 161 | region00_image_var = preprocess_image(region00) 162 | region10_image_var = preprocess_image(region10) 163 | region09_image_var = preprocess_image(region09) 164 | 165 | 166 | source_image_var_high_res = preprocess_image_high_res(source_image) 167 | target_image_var_high_res = preprocess_image_high_res(target_image) 168 | region01_image_var_high_res = preprocess_image_high_res(region01) 169 | region00_image_var_high_res = preprocess_image_high_res(region00) 170 | region10_image_var_high_res = preprocess_image_high_res(region10) 171 | region09_image_var_high_res = preprocess_image_high_res(region09) 172 | 173 | if use_cuda: 174 | source_image_var = source_image_var.cuda() 175 | target_image_var = target_image_var.cuda() 176 | region01_image_var = region01_image_var.cuda() 177 | region00_image_var = region00_image_var.cuda() 178 | region10_image_var = region10_image_var.cuda() 179 | region09_image_var = region09_image_var.cuda() 180 | source_image_var_high_res = 
source_image_var_high_res.cuda() 181 | target_image_var_high_res = target_image_var_high_res.cuda() 182 | region01_image_var_high_res = region01_image_var_high_res.cuda() 183 | region00_image_var_high_res = region00_image_var_high_res.cuda() 184 | region10_image_var_high_res = region10_image_var_high_res.cuda() 185 | region09_image_var_high_res = region09_image_var_high_res.cuda() 186 | 187 | batch = {'source_image': source_image_var, 'target_image':target_image_var} 188 | batch_high_res = {'source_image': source_image_var_high_res, 'target_image':target_image_var_high_res} 189 | 190 | if do_aff: 191 | model_aff.eval() 192 | if do_tps: 193 | model_tps.eval() 194 | 195 | # Evaluate models 196 | if do_aff: 197 | #theta_aff=model_aff(batch) 198 | #### affine registration using the masks only 199 | theta_aff=model_aff(batch_mask) 200 | warped_image_aff_high_res = affTnf_high_res(batch_high_res['source_image'], theta_aff.view(-1,2,3)) 201 | warped_image_aff = affTnf(batch['source_image'], theta_aff.view(-1,2,3)) 202 | warped_region01_aff_high_res = affTnf_high_res(region01_image_var_high_res, theta_aff.view(-1,2,3)) 203 | warped_region00_aff_high_res = affTnf_high_res(region00_image_var_high_res, theta_aff.view(-1,2,3)) 204 | warped_region10_aff_high_res = affTnf_high_res(region10_image_var_high_res, theta_aff.view(-1,2,3)) 205 | warped_region09_aff_high_res = affTnf_high_res(region09_image_var_high_res, theta_aff.view(-1,2,3)) 206 | 207 | ###>>>>>>>>>>>> do affine registration one more time<<<<<<<<<<<< 208 | warped_mask_aff = affTnf(source_image_mask_var, theta_aff.view(-1,2,3)) 209 | theta_aff=model_aff({'source_image': warped_mask_aff, 'target_image': target_image_mask_var}) 210 | warped_image_aff_high_res = affTnf_high_res(warped_image_aff_high_res, theta_aff.view(-1,2,3)) 211 | warped_image_aff = affTnf(warped_image_aff, theta_aff.view(-1,2,3)) 212 | warped_region01_aff_high_res = affTnf_high_res(warped_region01_aff_high_res, theta_aff.view(-1,2,3)) 213 | warped_region00_aff_high_res = affTnf_high_res(warped_region00_aff_high_res, theta_aff.view(-1,2,3)) 214 | warped_region10_aff_high_res = affTnf_high_res(warped_region10_aff_high_res, theta_aff.view(-1,2,3)) 215 | warped_region09_aff_high_res = affTnf_high_res(warped_region09_aff_high_res, theta_aff.view(-1,2,3)) 216 | ###>>>>>>>>>>>> do affine registration one more time<<<<<<<<<<<< 217 | 218 | if do_aff and do_tps: 219 | theta_aff_tps=model_tps({'source_image': warped_image_aff, 'target_image': batch['target_image']}) 220 | warped_image_aff_tps_high_res = tpsTnf_high_res(warped_image_aff_high_res,theta_aff_tps) 221 | warped_region01_aff_tps_high_res = tpsTnf_high_res(warped_region01_aff_high_res, theta_aff_tps) 222 | warped_region00_aff_tps_high_res = tpsTnf_high_res(warped_region00_aff_high_res, theta_aff_tps) 223 | warped_region10_aff_tps_high_res = tpsTnf_high_res(warped_region10_aff_high_res, theta_aff_tps) 224 | warped_region09_aff_tps_high_res = tpsTnf_high_res(warped_region09_aff_high_res, theta_aff_tps) 225 | 226 | 227 | 228 | # Un-normalize images and convert to numpy 229 | if do_aff: 230 | warped_image_aff_np_high_res = normalize_image(warped_image_aff_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 231 | warped_region01_aff_np_high_res = normalize_image(warped_region01_aff_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 232 | warped_region00_aff_np_high_res = 
normalize_image(warped_region00_aff_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 233 | warped_region10_aff_np_high_res = normalize_image(warped_region10_aff_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 234 | warped_region09_aff_np_high_res = normalize_image(warped_region09_aff_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 235 | 236 | 237 | if do_aff and do_tps: 238 | warped_image_aff_tps_np_high_res = normalize_image(warped_image_aff_tps_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 239 | warped_region01_aff_tps_np_high_res = normalize_image(warped_region01_aff_tps_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 240 | warped_region00_aff_tps_np_high_res = normalize_image(warped_region00_aff_tps_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 241 | warped_region10_aff_tps_np_high_res = normalize_image(warped_region10_aff_tps_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 242 | warped_region09_aff_tps_np_high_res = normalize_image(warped_region09_aff_tps_high_res,forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() 243 | 244 | 245 | 246 | warped_image_aff_np_high_res[warped_image_aff_np_high_res < 0] = 0 247 | warped_image_aff_tps_np_high_res[warped_image_aff_tps_np_high_res < 0] = 0 248 | 249 | return warped_image_aff_tps_np_high_res, warped_region01_aff_tps_np_high_res, warped_region00_aff_tps_np_high_res, warped_region10_aff_tps_np_high_res, warped_region09_aff_tps_np_high_res 250 | # return warped_image_aff_np_high_res, warped_region01_aff_np_high_res, warped_region00_aff_np_high_res, warped_region10_aff_np_high_res, warped_region09_aff_np_high_res 251 | 252 | 253 | # output results to .nii.gz volumes 254 | def output_results(outputPath, inputStack, sid, fn, imSpatialInfo, extension = "nii.gz"): 255 | mriOrigin, mriSpace, mriDirection = imSpatialInfo 256 | sitkIm = sitk.GetImageFromArray(inputStack) 257 | sitkIm.SetOrigin(mriOrigin) 258 | sitkIm.SetSpacing(mriSpace) 259 | sitkIm.SetDirection(mriDirection) 260 | try: 261 | os.mkdir(outputPath + sid) 262 | except: 263 | pass 264 | #sitkIm.SetDirection(tr) 265 | sitk.WriteImage(sitkIm, outputPath + sid + '\\' + sid + fn + extension) 266 | 267 | 268 | def output_results_high_res(preprocess_moving_dest,preprocess_fixed_dest,outputPath, inputStack, sid, fn, imSpatialInfo, coord, imMri, extension = "nii.gz"): 269 | 270 | 271 | hist_case = [] 272 | hist_case = getFiles(preprocess_moving_dest, 'hist', sid) 273 | 274 | #count = min(len(hist_case), len(mri_case)) 275 | 276 | #padding_factor = (round(((coord[sid]['h'][int(count/2)]+2*coord[sid]['y_offset'][int(count/2)]))/((coord[sid]['h'][0]+2*coord[sid]['y_offset'][0])))) 277 | 278 | x = coord[sid]['x'][0] - coord[sid]['x_offset'][0] 279 | y = coord[sid]['y'][0] - coord[sid]['y_offset'][0] 280 | 281 | h = coord[sid]['h'][0] 282 | w = coord[sid]['w'][0] 283 | 284 | y_s = (h+2*coord[sid]['y_offset'][0])/(half_out_size*2) 285 | x_s = (w+2*coord[sid]['x_offset'][0])/(half_out_size*2) 286 | 287 | 288 | padding_factor = int(round(max(np.add(coord[sid]['h'],np.multiply(2,coord[sid]['y_offset'])))/(coord[sid]['h'][0]+2*coord[sid]['y_offset'][0]))) 289 | 290 | physical = imMri.TransformContinuousIndexToPhysicalPoint([y - padding_factor*half_out_size*y_s,x - padding_factor*half_out_size*x_s,coord[sid]['slice'][0]]) 291 | 292 | 
mriOrigin, mriSpace, mriDirection = imSpatialInfo 293 | sitkIm = sitk.GetImageFromArray(inputStack) 294 | sitkIm.SetOrigin(physical) 295 | sitkIm.SetSpacing((mriSpace[0]*y_s,mriSpace[1]*x_s,mriSpace[2])) 296 | sitkIm.SetDirection(mriDirection) 297 | try: 298 | os.mkdir(outputPath + sid) 299 | except: 300 | pass 301 | #sitkIm.SetDirection(tr) 302 | sitk.WriteImage(sitkIm, outputPath + sid + '\\' + sid + fn + extension) 303 | 304 | def getFiles(file_dest, keyword, sid): 305 | cases = [] 306 | files = [pos for pos in sorted(os.listdir(file_dest)) if keyword in pos] 307 | 308 | for f in files: 309 | if sid in f: 310 | cases.append(f) 311 | 312 | return cases 313 | 314 | def register(preprocess_moving_dest, preprocess_fixed_dest, coord, model_cache, sid): 315 | ####### grab files that were preprocessed 316 | 317 | mri_files = [pos_mri for pos_mri in sorted(os.listdir(preprocess_fixed_dest)) if pos_mri.endswith('.jpg')] 318 | 319 | hist_case = [] 320 | mri_case = [] 321 | mri_highRes = [] 322 | mri_mask = [] 323 | cancer_case = [] 324 | region00_case = [] 325 | 326 | 327 | hist_case = getFiles(preprocess_moving_dest, 'hist', sid) 328 | cancer_case = getFiles(preprocess_moving_dest, 'region01', sid) 329 | region00_case = getFiles(preprocess_moving_dest, 'region00', sid) 330 | region10_case = getFiles(preprocess_moving_dest, 'region10', sid) 331 | region09_case = getFiles(preprocess_moving_dest, 'region09', sid) 332 | 333 | 334 | 335 | for mri_file in mri_files: 336 | if sid in mri_file: 337 | if 'Uncropped_' in mri_file: 338 | mri_highRes.append(mri_file) 339 | elif 'mriMask' in mri_file: 340 | mri_mask.append(mri_file) 341 | else: 342 | mri_case.append(mri_file) 343 | 344 | w, h, _ = (cv2.imread(preprocess_fixed_dest + mri_highRes[0])).shape 345 | count = min(len(hist_case), len(mri_case)) 346 | 347 | padding_factor = int(round(max(np.add(coord[sid]['h'],np.multiply(2,coord[sid]['y_offset'])))/(coord[sid]['h'][0]+2*coord[sid]['y_offset'][0]))) 348 | 349 | 350 | volumeShape_highRes = (count, half_out_size*(2+2*padding_factor), half_out_size*(2+2*padding_factor), 3) 351 | out3Dhist_highRes = np.zeros(volumeShape_highRes) 352 | out3Dmri_highRes = np.zeros((count, w, h, 3)) 353 | out3Dcancer_highRes = np.zeros(volumeShape_highRes[:-1]) 354 | out3D_region00 = np.zeros(volumeShape_highRes[:-1]) 355 | out3D_region10 = np.zeros(volumeShape_highRes[:-1]) 356 | out3D_region09 = np.zeros(volumeShape_highRes[:-1]) 357 | out3Dmri_mask = np.zeros((count, w, h, 3)[:-1]) 358 | 359 | ###### START ALIGNMENT 360 | for idx in range(count): 361 | source_image_path= preprocess_moving_dest + hist_case[idx] 362 | target_image_path= preprocess_fixed_dest + mri_case[idx] 363 | 364 | x = coord[sid]['x'][0] 365 | y = coord[sid]['y'][0] 366 | x_offset = coord[sid]['x_offset'][0] 367 | y_offset = coord[sid]['y_offset'][0] 368 | h = coord[sid]['h'][0] 369 | w = coord[sid]['w'][0] 370 | 371 | 372 | 373 | y_s = (h+2*y_offset)/(half_out_size*2) 374 | x_s = (w+2*x_offset)/(half_out_size*2) 375 | 376 | 377 | x_prime = coord[sid]['x'][idx] 378 | y_prime = coord[sid]['y'][idx] 379 | x_offset_prime = coord[sid]['x_offset'][idx] 380 | y_offset_prime = coord[sid]['y_offset'][idx] 381 | h_prime = coord[sid]['h'][idx] 382 | w_prime = coord[sid]['w'][idx] 383 | 384 | w_new = (w_prime + 2*x_offset_prime)/x_s 385 | h_new = (h_prime + 2*y_offset_prime)/y_s 386 | 387 | start_y = int(padding_factor*half_out_size + (y_prime - y_offset_prime - y + y_offset)/y_s) 388 | start_x = int(padding_factor*half_out_size + (x_prime - x_offset_prime - x + 
x_offset)/x_s) 389 | 390 | 391 | imMri_highRes = cv2.imread(preprocess_fixed_dest + mri_highRes[idx]) 392 | imCancer = cv2.imread(preprocess_moving_dest + cancer_case[idx]) 393 | imRegion00 = cv2.imread(preprocess_moving_dest + region00_case[idx]) 394 | imRegion10 = cv2.imread(preprocess_moving_dest + region10_case[idx]) 395 | imRegion09 = cv2.imread(preprocess_moving_dest + region09_case[idx]) 396 | imMriMask = cv2.imread(preprocess_fixed_dest + mri_mask[idx]) 397 | 398 | out3Dmri_highRes[idx, :, :,:] = np.uint8(imMri_highRes) 399 | out3Dmri_mask[idx, :, :] = np.uint8((imMriMask[:, :, 0] > 255/2.0)) 400 | 401 | ######## REGISTER 402 | affTps, cancerAffTps, region00_aff_tps, region10_aff_tps, region09_aff_tps= runCnn(model_cache, source_image_path, target_image_path, imCancer, imRegion00, imRegion10, imRegion09) 403 | 404 | ####### region 00 405 | region00_aff_tps = cv2.resize(region00_aff_tps*255, (int(h_new), int(w_new)), interpolation=cv2.INTER_CUBIC) 406 | region00_aff_tps = region00_aff_tps >255/1.5 407 | 408 | out3D_region00[idx, start_x:region00_aff_tps.shape[0]+start_x, start_y:region00_aff_tps.shape[1]+start_y] = np.uint8(region00_aff_tps[:, :,0]) 409 | 410 | ####### region 10 411 | region10_aff_tps = cv2.resize(region10_aff_tps*255, (int(h_new), int(w_new)), interpolation=cv2.INTER_CUBIC) 412 | region10_aff_tps = region10_aff_tps >255/1.5 413 | 414 | out3D_region10[idx, start_x:region10_aff_tps.shape[0]+start_x, start_y:region10_aff_tps.shape[1]+start_y] = np.uint8(region10_aff_tps[:, :,0]) 415 | 416 | ####### region 09 417 | region09_aff_tps = cv2.resize(region09_aff_tps*255, (int(h_new), int(w_new)), interpolation=cv2.INTER_CUBIC) 418 | region09_aff_tps = region09_aff_tps >255/1.5 419 | out3D_region09[idx, start_x:region09_aff_tps.shape[0]+start_x, start_y:region09_aff_tps.shape[1]+start_y] = np.uint8(region09_aff_tps[:,:,0]) 420 | 421 | affTps = cv2.resize(affTps*255, (int(h_new), int(w_new)), interpolation=cv2.INTER_CUBIC) 422 | 423 | region00_image3d = np.zeros((affTps.shape[0], affTps.shape[1], 3), dtype=int) 424 | region00_image3d[:, :, 0] = region00_aff_tps[:, :,0] 425 | region00_image3d[:, :, 1] = region00_aff_tps[:, :,0] 426 | region00_image3d[:, :, 2] = region00_aff_tps[:, :,0] 427 | 428 | points = np.argwhere(region00_image3d == 0) 429 | 430 | for x in range(0,points.shape[0]): 431 | affTps[tuple(points[x])] = 0 432 | out3Dhist_highRes[idx, start_x:affTps.shape[0]+start_x, start_y:affTps.shape[1]+start_y,:] = np.uint8(affTps[:, :,:]) 433 | 434 | cancerAffTps = cv2.resize(cancerAffTps*255, (int(h_new), int(w_new)), interpolation=cv2.INTER_CUBIC) 435 | out3Dcancer_highRes[idx, start_x:cancerAffTps.shape[0]+start_x, start_y:cancerAffTps.shape[1]+start_y] = np.uint8(cancerAffTps[:, :,0]>255/1.5) 436 | output3D_cache = (out3Dhist_highRes, out3Dmri_highRes, out3Dcancer_highRes, out3D_region00, out3D_region10, out3D_region09, out3Dmri_mask) 437 | 438 | return output3D_cache 439 | 440 | 441 | # entire pipeline together with preprocessing, registration, and outputting results 442 | def main(): 443 | ###### INPUTS 444 | parser = argparse.ArgumentParser(description='Parse data') 445 | parser.add_argument('-v','--verbose', action='store_true', 446 | help='verbose output') 447 | 448 | parser.add_argument('-i','--in_path', type=str, required=True, 449 | default=".",help="json file") 450 | 451 | parser.add_argument('-pm','--preprocess_moving', action='store_true', 452 | help='preprocess moving') 453 | 454 | parser.add_argument('-pf','--preprocess_fixed', action='store_true', 455 | 
help='preprocess fixed')
456 | 
457 |     parser.add_argument('-r','--register', action='store_true',
458 |                         help='run deep learning registration')
459 | 
460 |     parser.add_argument('-e','--extension', type=str, required=False,
461 |                         default="nii.gz", help="extension to save registered volumes (default: nii.gz)")
462 | 
463 |     opt = parser.parse_args()
464 | 
465 | 
466 |     verbose = opt.verbose
467 |     preprocess_moving = opt.preprocess_moving
468 |     preprocess_fixed = opt.preprocess_fixed
469 |     run_registration = opt.register
470 | 
471 |     timings = {}
472 | 
473 |     if verbose:
474 |         print("Reading", opt.in_path)
475 | 
476 |     json_obj = ParserRegistrationJson(opt.in_path)
477 | 
478 |     if opt.extension:
479 |         extension = opt.extension
480 |     else:
481 |         extension = 'nii.gz'
482 | 
483 |     try:
484 |         with open('coord.txt') as f:
485 |             coord = json.load(f)
486 |     except:
487 |         coord = {}
488 | 
489 |     ############### START REGISTRATION HERE
490 |     studies = json_obj.studies
491 |     toProcess = json_obj.ToProcess
492 |     outputPath = json_obj.output_path
493 |     #cases = toProcess.keys()
494 | 
495 |     ###### PREPROCESSING DESTINATIONS ######################################
496 |     preprocess_moving_dest = outputPath + '\\preprocess\\hist\\'
497 |     preprocess_fixed_dest = outputPath + '\\preprocess\\mri\\'
498 | 
499 |     # start doing preprocessing on each case and register
500 |     for s in json_obj.studies:
501 |         if json_obj.ToProcess:
502 |             if not (s in json_obj.ToProcess):
503 |                 print("Skipping", s)
504 |                 continue
505 | 
506 |         print("x"*30, "Processing", s,"x"*30)
507 |         studyDict = json_obj.studies[s]
508 | 
509 | 
510 |         studyParser = ParserStudyDict(studyDict)
511 | 
512 |         sid = studyParser.id
513 |         fixed_img_mha = studyParser.fixed_filename
514 |         fixed_seg = studyParser.fixed_segmentation_filename
515 |         moving_dict = studyParser.ReadMovingImage()
516 | 
517 |         ###### PREPROCESSING HISTOLOGY HERE #############################################################
518 |         if preprocess_moving:
519 |             print('Preprocessing moving sid:', sid, '...')
520 |             preprocess_hist(moving_dict, preprocess_moving_dest, sid)
521 |             print('Finished preprocessing', sid)
522 | 
523 |         ###### PREPROCESSING MRI HERE #############################################################
524 |         if preprocess_fixed:
525 |             print("Preprocessing fixed case:", sid, '...')
526 | 
527 |             coord = preprocess_mri(fixed_img_mha, fixed_seg, preprocess_fixed_dest, coord, sid)
528 | 
529 |             print("Finished processing fixed mha", sid)
530 | 
531 |             with open('coord.txt', 'w') as json_file:
532 |                 json.dump(coord, json_file)
533 |         ##### ALIGNMENT HERE ########################################################################
534 |         if run_registration:
535 | 
536 |             ######## LOAD MODELS
537 |             print('.'*30, 'Begin deep learning registration for ' + sid + '.'*30)
538 | 
539 |             try:
540 |                 model_cache
541 |             except NameError:
542 |                 feature_extraction_cnn = 'resnet101'
543 | 
544 |                 if feature_extraction_cnn=='resnet101':
545 |                     model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss.pth.tar'
546 |                     model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss.pth.tar'
547 |                 # model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss_resnet_random.pth.tar'
548 |                 # model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss_resnet_random.pth.tar'
549 | 
550 | 
551 |                 model_cache = load_models(feature_extraction_cnn, model_aff_path, model_tps_path, do_deformable=True)
552 | 
553 |             start = time.time()
554 |             output3D_cache = register(preprocess_moving_dest + sid + '\\', preprocess_fixed_dest + sid + '\\', coord, model_cache, sid)
555 |             end = time.time()
556 |             out3Dhist_highRes, out3Dmri_highRes, out3Dcancer_highRes, out3D_region00, out3D_region10, out3D_region09, out3Dmri_mask = output3D_cache
557 |             print("Registration done in {:6.3f}(min)".format((end-start)/60.0))
558 |             imMri = sitk.ReadImage(fixed_img_mha)
559 |             mriOrigin = imMri[:,:,coord[sid]['slice'][0]:coord[sid]['slice'][-1]].GetOrigin()
560 |             mriSpace = imMri.GetSpacing()
561 |             mriDirection = imMri.GetDirection()
562 | 
563 |             imSpatialInfo = (mriOrigin, mriSpace, mriDirection)
564 | 
565 |             # write output hist 3D volume to .nii.gz format
566 |             fn_moving_highRes = '_moving_rgb.'
567 |             output_results(outputPath, out3Dhist_highRes, sid, fn_moving_highRes, imSpatialInfo, extension = "nii.gz")
568 | 
569 |             #write output mri 3D volume to .nii.gz format
570 |             fn_fixed_highRes = '_fixed_image.'
571 |             output_results(outputPath, out3Dmri_highRes, sid, fn_fixed_highRes, imSpatialInfo, extension = "nii.gz")
572 | 
573 |             #write output cancer outline 3D volume to .nii.gz format
574 |             fn_cancer_highRes = '_moved_region01_label.'
575 |             output_results(outputPath, out3Dcancer_highRes, sid, fn_cancer_highRes, imSpatialInfo, extension = "nii.gz")
576 | 
577 |             timings[s] = (end-start)/60.0
578 |             print('Done!')
579 | 
580 |     return timings
581 | 
582 | if __name__=="__main__":
583 |     timings = main()
584 | 
585 |     print("studyID",",", "Runtime (min)")
586 |     for s in timings:
587 |         print(s,",", timings[s])
--------------------------------------------------------------------------------
/registration_pipeline.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "metadata": {},
6 |    "source": [
7 |     "##### import libraries"
8 |    ]
9 |   },
10 |   {
11 |    "cell_type": "code",
12 |    "execution_count": 1,
13 |    "metadata": {},
14 |    "outputs": [],
15 |    "source": [
16 |     "import json \n",
17 |     "import cv2\n",
18 |     "from matplotlib import pyplot as plt\n",
19 |     "import numpy as np\n",
20 |     "from preprocess import *\n",
21 |     "import SimpleITK as sitk\n",
22 |     "from collections import OrderedDict\n",
23 |     "import os\n",
24 |     "from register_images import *\n",
25 |     "import sys\n",
26 |     "sys.path.insert(0, '../')\n",
27 |     "from parse_registration_json import ParserRegistrationJson\n",
28 |     "from parse_study_dict import ParserStudyDict\n",
29 |     "import time"
30 |    ]
31 |   },
32 |   {
33 |    "cell_type": "code",
34 |    "execution_count": 2,
35 |    "metadata": {},
36 |    "outputs": [
37 |     {
38 |      "name": "stdout",
39 |      "output_type": "stream",
40 |      "text": [
41 |       "'do_affine'\n",
42 |       "'do_deformable'\n",
43 |       "'do_reconstruction'\n",
44 |       "'fast_execution'\n",
45 |       "'use_imaging_constraints'\n",
46 |       "Reading aaa0069 Study Json C:\Users\weishao\Desktop\Code_ProsRegNet\jsonData\reg_aaa0069.json\n"
47 |      ]
48 |     }
49 |    ],
50 |    "source": [
51 |     "####### INPUTS\n",
52 |     "json_path = \"jsonData/TCIA_FUSION.json\"\n",
53 |     "preprocess_moving = True\n",
54 |     "preprocess_fixed = True\n",
55 |     "run_registration = True\n",
56 |     "extension = 'nii.gz'\n",
57 |     "timings = {}\n",
58 |     "\n",
59 |     "try:\n",
60 |     "    with open('coord.txt') as f:\n",
61 |     "        coord = json.load(f) \n",
62 |     "    \n",
63 |     "except:\n",
64 |     "    coord = {}\n",
65 |     "\n",
66 |     "############### START REGISTRATION HERE\n",
67 |     "\n",
68 |     "json_obj = ParserRegistrationJson(json_path)\n",
69 |     "\n",
70 |     "studies = json_obj.studies\n",
71 |     "toProcess = json_obj.ToProcess\n",
72 |     "outputPath 
= json_obj.output_path\n", 73 | "#cases = toProcess.keys()\n", 74 | "\n", 75 | "if not os.path.isdir(outputPath):\n", 76 | " os.mkdir(outputPath) \n", 77 | "\n", 78 | "###### PREPROCESSING DESTINATIONS ######################################\n", 79 | "preprocess_moving_dest = outputPath + 'preprocess\\\\hist\\\\'\n", 80 | "preprocess_fixed_dest = outputPath + 'preprocess\\\\mri\\\\'\n", 81 | "\n", 82 | "if not os.path.isdir(outputPath + 'preprocess\\\\'):\n", 83 | " os.mkdir(outputPath + 'preprocess\\\\')\n", 84 | "\n", 85 | "if not os.path.isdir(preprocess_moving_dest):\n", 86 | " os.mkdir(preprocess_moving_dest)\n", 87 | " \n", 88 | "if not os.path.isdir(preprocess_fixed_dest):\n", 89 | " os.mkdir(preprocess_fixed_dest)" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 3, 95 | "metadata": { 96 | "scrolled": true 97 | }, 98 | "outputs": [ 99 | { 100 | "name": "stdout", 101 | "output_type": "stream", 102 | "text": [ 103 | "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx Processing aaa0069 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 104 | "Preprocessing moving sid: aaa0069 ...\n", 105 | "Finished preprocessing aaa0069\n", 106 | "Preprocessing fixed case: aaa0069 ...\n", 107 | "Finished processing fixed mha aaa0069\n", 108 | ".............................. Begin deep learning registration for aaa0069..............................\n", 109 | "Creating CNN model...\n", 110 | "Loading trained model weights...\n", 111 | "Registration done in 0.156(min)\n", 112 | "Done!\n" 113 | ] 114 | } 115 | ], 116 | "source": [ 117 | "# start doing preprocessing on each case and register\n", 118 | "for s in json_obj.studies:\n", 119 | " if json_obj.ToProcess:\n", 120 | " if not (s in json_obj.ToProcess):\n", 121 | " print(\"Skipping\", s)\n", 122 | " continue\n", 123 | "\n", 124 | " print(\"x\"*30, \"Processing\", s,\"x\"*30)\n", 125 | " studyDict = json_obj.studies[s] \n", 126 | "\n", 127 | "\n", 128 | " studyParser = ParserStudyDict(studyDict)\n", 129 | " \n", 130 | " sid = studyParser.id\n", 131 | " fixed_img_mha = studyParser.fixed_filename\n", 132 | " fixed_seg = studyParser.fixed_segmentation_filename\n", 133 | " moving_dict = studyParser.ReadMovingImage()\n", 134 | "\n", 135 | " ###### PREPROCESSING HISTOLOGY HERE #############################################################\n", 136 | " if preprocess_moving == True: \n", 137 | " print('Preprocessing moving sid:', sid, '...')\n", 138 | " preprocess_hist(moving_dict, preprocess_moving_dest, sid)\n", 139 | " print('Finished preprocessing', sid)\n", 140 | "\n", 141 | " ###### PREPROCESSING MRI HERE #############################################################\n", 142 | " if preprocess_fixed == True:\n", 143 | " print (\"Preprocessing fixed case:\", sid, '...')\n", 144 | "\n", 145 | " coord = preprocess_mri(fixed_img_mha, fixed_seg, preprocess_fixed_dest, coord, sid)\n", 146 | "\n", 147 | " print(\"Finished processing fixed mha\", sid)\n", 148 | "\n", 149 | " with open('coord.txt', 'w') as json_file: \n", 150 | " json.dump(coord, json_file)\n", 151 | " ##### ALIGNMENT HERE ########################################################################\n", 152 | " if run_registration == True: \n", 153 | " \n", 154 | " ######## LOAD MODELS\n", 155 | " print('.'*30, 'Begin deep learning registration for ' + sid + '.'*30)\n", 156 | "\n", 157 | " try:\n", 158 | " model_cache\n", 159 | " except NameError:\n", 160 | " feature_extraction_cnn = 'resnet101'\n", 161 | "\n", 162 | " model_aff_path = 
'C:/Users/weishao/Desktop/Code_ProsRegNet/trained_models/best_CombinedLoss_affine_resnet101.pth.tar'\n", 163 | " model_tps_path = 'C:/Users/weishao/Desktop/Code_ProsRegNet/trained_models/best_CombinedLoss_tps_resnet101.pth.tar'\n", 164 | "\n", 165 | " model_cache = load_models(feature_extraction_cnn, model_aff_path, model_tps_path, do_deformable=True)\n", 166 | " \n", 167 | " start = time.time()\n", 168 | " output3D_cache = register(preprocess_moving_dest + sid + '\\\\', preprocess_fixed_dest + sid + '\\\\', coord, model_cache, sid)\n", 169 | " out3Dhist_highRes, out3Dmri_highRes, out3Dcancer_highRes, out3D_region00, out3D_region10, out3D_region09, out3Dmri_mask = output3D_cache\n", 170 | " end = time.time()\n", 171 | " print(\"Registration done in {:6.3f}(min)\".format((end-start)/60.0))\n", 172 | " imMri = sitk.ReadImage(fixed_img_mha)\n", 173 | " mriOrigin = imMri[:,:,coord[sid]['slice'][0]:coord[sid]['slice'][-1]].GetOrigin()\n", 174 | " mriSpace = imMri.GetSpacing()\n", 175 | " mriDirection = imMri.GetDirection()\n", 176 | "\n", 177 | " imSpatialInfo = (mriOrigin, mriSpace, mriDirection)\n", 178 | "\n", 179 | " # write output hist 3D volume to .nii.gz format\n", 180 | " fn_moving_highRes = '_moved_highres_rgb.'\n", 181 | " output_results_high_res(preprocess_moving_dest + sid + '\\\\',preprocess_fixed_dest,outputPath, out3Dhist_highRes, sid, fn_moving_highRes, imSpatialInfo, coord, imMri, extension = \"nii.gz\")\n", 182 | "\n", 183 | " #write output mri 3D volume to .nii.gz format\n", 184 | " fn_fixed_highRes = '_fixed_image.'\n", 185 | " output_results(outputPath, out3Dmri_highRes, sid, fn_fixed_highRes, imSpatialInfo, extension = \"nii.gz\")\n", 186 | "\n", 187 | " #write output cancer outline 3D volume to .nii.gz format\n", 188 | " fn_cancer_highRes = '_moved_highres_region01_label.'\n", 189 | " output_results_high_res(preprocess_moving_dest + sid + '\\\\',preprocess_fixed_dest,outputPath, out3Dcancer_highRes, sid, fn_cancer_highRes, imSpatialInfo, coord, imMri, extension = \"nii.gz\")\n", 190 | " \n", 191 | " #write region00\n", 192 | " fn_region00 = '_moved_highres_region00_label.'\n", 193 | " output_results_high_res(preprocess_moving_dest + sid + '\\\\',preprocess_fixed_dest,outputPath, out3D_region00, sid, fn_region00, imSpatialInfo, coord, imMri, extension = \"nii.gz\")\n", 194 | " \n", 195 | " #write region10\n", 196 | " fn_region00 = '_moved_highres_region10_label.'\n", 197 | " output_results_high_res(preprocess_moving_dest + sid + '\\\\',preprocess_fixed_dest,outputPath, out3D_region10, sid, fn_region00, imSpatialInfo, coord, imMri, extension = \"nii.gz\")\n", 198 | " \n", 199 | " #write region09\n", 200 | " fn_region00 = '_moved_highres_region09_label.'\n", 201 | " output_results_high_res(preprocess_moving_dest + sid + '\\\\',preprocess_fixed_dest,outputPath, out3D_region09, sid, fn_region00, imSpatialInfo, coord, imMri, extension = \"nii.gz\")\n", 202 | " \n", 203 | " #write mriMask\n", 204 | " fn_mriMask = '_fixed_mask_label.'\n", 205 | " output_results(outputPath, out3Dmri_mask, sid, fn_mriMask, imSpatialInfo, extension = \"nii.gz\")\n", 206 | "\n", 207 | " timings[s] = (end-start)/60.0\n", 208 | " print('Done!')" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": null, 214 | "metadata": {}, 215 | "outputs": [], 216 | "source": [ 217 | "json.dump(timings, open(\"timings.txt\",'w'))" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": null, 223 | "metadata": {}, 224 | "outputs": [], 225 | "source": [] 226 | } 227 | ], 228 
| "metadata": { 229 | "kernelspec": { 230 | "display_name": "Python 3", 231 | "language": "python", 232 | "name": "python3" 233 | }, 234 | "language_info": { 235 | "codemirror_mode": { 236 | "name": "ipython", 237 | "version": 3 238 | }, 239 | "file_extension": ".py", 240 | "mimetype": "text/x-python", 241 | "name": "python", 242 | "nbconvert_exporter": "python", 243 | "pygments_lexer": "ipython3", 244 | "version": "3.7.3" 245 | } 246 | }, 247 | "nbformat": 4, 248 | "nbformat_minor": 2 249 | } 250 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | If you use this code, please cite the following papers: 4 | (1) Shao, Wei, et al. "ProsRegNet: A Deep Learning Framework for Registration of MRI and Histopathology Images of the Prostate." Medical Image Analysis. 2020. 5 | (2) Rocco, Ignacio, Relja Arandjelovic, and Josef Sivic. "Convolutional neural network architecture for geometric matching." Proceedings of CVPR. 2017. 6 | 7 | The following code is adapted from: https://github.com/ignacio-rocco/cnngeometric_pytorch. 8 | """ 9 | 10 | 11 | from __future__ import print_function, division 12 | import argparse 13 | import os 14 | from os.path import exists, join, basename 15 | import torch 16 | import torch.nn as nn 17 | import torch.optim as optim 18 | from torch.utils.data import Dataset, DataLoader 19 | from model.ProsRegNet_model import ProsRegNet 20 | from model.loss import SSDLoss 21 | from data.synth_dataset import SynthDataset 22 | from geotnf.transformation import SynthPairTnf 23 | from image.normalization import NormalizeImageDict 24 | from util.train_test_fn import train, test 25 | from util.torch_util import save_checkpoint, str_to_bool 26 | import numpy as np 27 | from collections import OrderedDict 28 | from torch.optim.lr_scheduler import StepLR 29 | 30 | 31 | # Argument parsing 32 | parser = argparse.ArgumentParser(description='ProsRegNet PyTorch implementation') 33 | # Paths 34 | parser.add_argument('--training-tnf-csv', type=str, default='', help='path to training transformation csv folder') 35 | parser.add_argument('--training-image-path', type=str, default='', help='path to folder containing training images') 36 | parser.add_argument('--trained-models-dir', type=str, default='trained_models', help='path to trained models folder') 37 | parser.add_argument('--trained-models-fn', type=str, default='CombinedLoss', help='trained model filename') 38 | 39 | parser.add_argument('--pretrained-model-aff', type=str, default='', help='path to a pretrained affine network') 40 | parser.add_argument('--pretrained-model-tps', type=str, default='', help='path to a pretrained tps network') 41 | 42 | 43 | # Optimization parameters 44 | parser.add_argument('--lr', type=float, default=0.0003, help='learning rate') 45 | parser.add_argument('--gamma', type=float, default=0.95, help='gamma') 46 | parser.add_argument('--momentum', type=float, default=0.9, help='momentum constant') 47 | parser.add_argument('--num-epochs', type=int, default=50, help='number of training epochs') 48 | parser.add_argument('--batch-size', type=int, default=64,help='training batch size') 49 | parser.add_argument('--weight-decay', type=float, default=0, help='weight decay constant') 50 | parser.add_argument('--seed', type=int, default=1, help='Pseudo-RNG seed') 51 | # Model parameters 52 | parser.add_argument('--geometric-model', type=str, default='affine', help='geometric model to be 
regressed at output: affine or tps')
53 | parser.add_argument('--use-mse-loss', type=str_to_bool, nargs='?', const=True, default=False, help='Use MSE loss on tnf. parameters')
54 | parser.add_argument('--feature-extraction-cnn', type=str, default='resnet101', help='Feature extraction architecture: vgg/resnet101')
55 | # Synthetic dataset parameters
56 | parser.add_argument('--random-sample', type=str_to_bool, nargs='?', const=True, default=False, help='sample random transformations')
57 | 
58 | args = parser.parse_args()
59 | 
60 | use_cuda = torch.cuda.is_available()
61 | 
62 | print("Use Cuda? ", use_cuda)
63 | 
64 | if use_cuda:
65 |     torch.cuda.set_device(0)
66 |     print("cuda:", torch.cuda.current_device())
67 | 
68 | do_aff = args.pretrained_model_aff != ''
69 | do_tps = args.pretrained_model_tps != ''
70 | 
71 | 
72 | # Seed
73 | if use_cuda:
74 |     torch.cuda.manual_seed(args.seed)
75 | 
76 | if args.training_image_path == '':
77 |     args.training_image_path = 'datasets/training/'
78 | if args.training_tnf_csv == '' and args.geometric_model=='affine':
79 |     args.training_tnf_csv = 'training_data/affine'
80 | elif args.training_tnf_csv == '' and args.geometric_model=='tps':
81 |     args.training_tnf_csv = 'training_data/tps'
82 | 
83 | # CNN model and loss
84 | print('Creating CNN model...')
85 | 
86 | model = ProsRegNet(use_cuda=use_cuda,geometric_model=args.geometric_model,feature_extraction_cnn=args.feature_extraction_cnn)
87 | 
88 | if args.geometric_model == 'affine' and do_aff:
89 |     checkpoint = torch.load(args.pretrained_model_aff, map_location=lambda storage, loc: storage)
90 |     checkpoint['state_dict'] = OrderedDict([(k.replace(args.feature_extraction_cnn, 'model'), v) for k, v in checkpoint['state_dict'].items()])
91 |     model.load_state_dict(checkpoint['state_dict'])
92 | 
93 | if args.geometric_model == 'tps' and do_tps:
94 |     checkpoint = torch.load(args.pretrained_model_tps, map_location=lambda storage, loc: storage)
95 |     checkpoint['state_dict'] = OrderedDict([(k.replace(args.feature_extraction_cnn, 'model'), v) for k, v in checkpoint['state_dict'].items()])
96 |     model.load_state_dict(checkpoint['state_dict'])
97 | 
98 | 
99 | if args.use_mse_loss:
100 |     print('Using MSE loss...')
101 |     loss = nn.MSELoss()
102 | else:
103 |     print('Using SSD loss...')
104 |     loss = SSDLoss(use_cuda=use_cuda,geometric_model=args.geometric_model)
105 | 
106 | 
107 | # Dataset and dataloader
108 | dataset = SynthDataset(geometric_model=args.geometric_model,
109 |                        csv_file=os.path.join(args.training_tnf_csv,'train.csv'),
110 |                        training_image_path=args.training_image_path,
111 |                        transform=NormalizeImageDict(['image_A','image_B']),
112 |                        random_sample=args.random_sample)
113 | 
114 | dataloader = DataLoader(dataset, batch_size=args.batch_size,
115 |                         shuffle=True, num_workers=4)
116 | 
117 | dataset_test = SynthDataset(geometric_model=args.geometric_model,
118 |                             csv_file=os.path.join(args.training_tnf_csv,'test.csv'),
119 |                             training_image_path=args.training_image_path,
120 |                             transform=NormalizeImageDict(['image_A','image_B']),
121 |                             random_sample=args.random_sample)
122 | 
123 | dataloader_test = DataLoader(dataset_test, batch_size=args.batch_size,
124 |                              shuffle=True, num_workers=4)
125 | 
126 | pair_generation_tnf = SynthPairTnf(geometric_model=args.geometric_model,use_cuda=use_cuda)
127 | 
128 | # Optimizer: only the regression head is trained; the ResNet feature extractor is frozen in ProsRegNet_model.py
129 | optimizer = optim.Adam(model.FeatureRegression.parameters(), lr=args.lr)
130 | 
131 | scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
132 | 
133 | # Train
134 | if args.use_mse_loss:
135 |     checkpoint_name = 
os.path.join(args.trained_models_dir, 136 | args.trained_models_fn + '_' + args.geometric_model + '_mse_loss' + args.feature_extraction_cnn + '.pth.tar') 137 | 138 | else: 139 | checkpoint_name = os.path.join(args.trained_models_dir, 140 | args.trained_models_fn + '_' + args.geometric_model + '_' + args.feature_extraction_cnn + '.pth.tar') 141 | 142 | best_test_loss = float("inf") 143 | 144 | print('Starting training...') 145 | 146 | epochArray = np.zeros(args.num_epochs) 147 | trainLossArray = np.zeros(args.num_epochs) 148 | testLossArray = np.zeros(args.num_epochs) 149 | 150 | 151 | for epoch in range(1, args.num_epochs+1): 152 | train_loss = train(epoch,model,loss,optimizer,dataloader,pair_generation_tnf,log_interval=10) 153 | test_loss = test(model,loss,dataloader_test,pair_generation_tnf,use_cuda=use_cuda, geometric_model=args.geometric_model) 154 | 155 | scheduler.step() 156 | 157 | epochArray[epoch-1] = epoch 158 | trainLossArray[epoch-1] = train_loss 159 | testLossArray[epoch-1] = test_loss 160 | 161 | # remember best loss 162 | is_best = test_loss < best_test_loss 163 | best_test_loss = min(test_loss, best_test_loss) 164 | save_checkpoint({ 165 | 'epoch': epoch + 1, 166 | 'args': args, 167 | 'state_dict': model.state_dict(), 168 | 'best_test_loss': best_test_loss, 169 | 'optimizer' : optimizer.state_dict(), 170 | }, is_best,checkpoint_name) 171 | print('Done!') 172 | 173 | if args.use_mse_loss: 174 | np.savetxt(os.path.join(args.trained_models_dir, 175 | args.trained_models_fn + '_' + args.geometric_model + '_mse_loss' + args.feature_extraction_cnn + '.csv'), np.transpose((epochArray, trainLossArray, testLossArray)), delimiter=',') 176 | else: 177 | np.savetxt(os.path.join(args.trained_models_dir, 178 | args.trained_models_fn + '_' + args.geometric_model + args.feature_extraction_cnn + '.csv'), np.transpose((epochArray, trainLossArray, testLossArray)), delimiter=',') 179 | -------------------------------------------------------------------------------- /training_data/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/training_data/.DS_Store -------------------------------------------------------------------------------- /training_data/affine/test.csv: -------------------------------------------------------------------------------- 1 | ImageA,ImageB,A11,A12,A21,A22,tx,ty 2 | region00_TCIA-0021_slice1.png,region00_TCIA-0021_slice1.png,1.05E+00,5.58E-02,4.01E-02,1.04E+00,-2.23E-02,-6.32E-02 3 | region00_TCIA-0021_slice2.png,region00_TCIA-0021_slice2.png,1.11E+00,-3.31E-02,2.22E-02,1.15E+00,-3.91E-02,-4.90E-02 4 | region00_TCIA-0021_slice3.png,region00_TCIA-0021_slice3.png,9.39E-01,2.57E-02,-2.91E-02,1.11E+00,2.67E-02,1.12E-02 5 | region00_TCIA-0021_slice4.png,region00_TCIA-0021_slice4.png,9.49E-01,3.89E-02,-7.33E-03,9.06E-01,9.74E-03,5.25E-02 6 | region00_TCIA-0021_slice5.png,region00_TCIA-0021_slice5.png,1.03E+00,-1.87E-02,1.95E-02,1.05E+00,-2.22E-02,-1.52E-03 7 | region00_TCIA-0021_slice6.png,region00_TCIA-0021_slice6.png,8.61E-01,3.67E-02,3.21E-02,9.53E-01,-4.09E-03,-2.22E-02 8 | region00_TCIA-0022_slice1.png,region00_TCIA-0022_slice1.png,1.09E+00,-5.03E-03,-3.20E-02,1.09E+00,-1.22E-02,-6.46E-02 9 | region00_TCIA-0022_slice2.png,region00_TCIA-0022_slice2.png,1.04E+00,1.92E-02,-1.26E-02,9.69E-01,-1.31E-02,-2.61E-02 10 | region00_TCIA-0022_slice3.png,region00_TCIA-0022_slice3.png,1.11E+00,3.81E-02,1.30E-02,9.02E-01,6.79E-02,-4.38E-02 11 | 
region00_TCIA-0022_slice4.png,region00_TCIA-0022_slice4.png,8.71E-01,6.55E-03,-4.12E-02,9.53E-01,6.71E-02,-3.15E-02 12 | region00_TCIA-0023_slice1.png,region00_TCIA-0023_slice1.png,1.03E+00,3.21E-02,4.83E-02,1.02E+00,4.34E-02,6.90E-03 13 | region00_TCIA-0023_slice2.png,region00_TCIA-0023_slice2.png,9.66E-01,-1.71E-02,4.62E-02,1.00E+00,-6.41E-02,-4.99E-02 14 | region00_TCIA-0023_slice3.png,region00_TCIA-0023_slice3.png,1.05E+00,8.44E-04,1.68E-02,1.06E+00,-7.13E-02,6.24E-02 15 | region00_TCIA-0023_slice4.png,region00_TCIA-0023_slice4.png,9.87E-01,-3.87E-02,1.58E-02,1.13E+00,-2.70E-02,-6.93E-02 16 | region00_TCIA-0024_slice1.png,region00_TCIA-0024_slice1.png,1.11E+00,7.55E-04,2.76E-02,1.13E+00,-5.16E-02,-7.35E-02 17 | region00_TCIA-0024_slice2.png,region00_TCIA-0024_slice2.png,8.63E-01,1.73E-02,4.67E-02,9.19E-01,4.41E-02,-3.58E-02 18 | region00_TCIA-0024_slice3.png,region00_TCIA-0024_slice3.png,1.02E+00,4.41E-02,-9.23E-03,8.94E-01,-2.73E-02,2.15E-02 19 | region00_TCIA-0025_slice1.png,region00_TCIA-0025_slice1.png,8.66E-01,-3.82E-02,-1.30E-02,1.05E+00,4.47E-02,7.38E-02 20 | region00_TCIA-0025_slice2.png,region00_TCIA-0025_slice2.png,1.12E+00,-2.92E-02,5.21E-03,8.66E-01,-5.34E-02,4.94E-02 21 | region00_TCIA-0025_slice3.png,region00_TCIA-0025_slice3.png,9.96E-01,-4.98E-02,3.03E-02,8.81E-01,-4.13E-03,3.49E-02 22 | region00_TCIA-0026_slice1.png,region00_TCIA-0026_slice1.png,8.62E-01,1.12E-02,-3.05E-02,1.01E+00,1.50E-02,-6.34E-02 23 | region00_TCIA-0026_slice2.png,region00_TCIA-0026_slice2.png,1.04E+00,4.87E-02,-8.69E-03,8.84E-01,-5.96E-02,-5.30E-02 24 | -------------------------------------------------------------------------------- /training_data/affine/train.csv: -------------------------------------------------------------------------------- 1 | ImageA,ImageB,A11,A12,A21,A22,tx,ty 2 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,8.57E-01,1.48E-02,-1.46E-02,9.04E-01,-1.84E-02,2.08E-02 3 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,1.05E+00,5.56E-02,-3.68E-02,1.01E+00,-9.13E-02,-3.92E-02 4 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,8.83E-01,1.59E-02,1.83E-02,9.39E-01,1.68E-02,5.96E-02 5 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,1.04E+00,-2.53E-02,4.54E-02,1.12E+00,4.09E-02,9.76E-02 6 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,8.49E-01,-2.95E-02,-3.29E-03,8.10E-01,8.48E-02,8.27E-02 7 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,1.03E+00,-4.51E-02,1.41E-02,1.14E+00,4.49E-02,7.29E-02 8 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,9.28E-01,4.37E-02,-3.95E-02,9.77E-01,-9.72E-02,-1.36E-02 9 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,1.09E+00,-4.26E-02,9.86E-03,1.06E+00,7.01E-02,5.43E-02 10 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,1.11E+00,-2.37E-02,4.39E-02,1.09E+00,-2.16E-02,1.02E-02 11 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,9.25E-01,-3.52E-02,4.23E-02,1.14E+00,6.11E-03,8.46E-02 12 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,9.53E-01,-6.07E-03,-6.12E-02,1.12E+00,-4.08E-02,2.68E-02 13 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,1.05E+00,1.92E-02,-5.43E-02,9.33E-01,7.34E-02,-9.03E-02 14 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,1.01E+00,-3.51E-02,1.34E-02,9.40E-01,-2.99E-02,-4.68E-02 15 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,9.72E-01,5.41E-02,-4.73E-02,1.11E+00,-7.21E-02,2.74E-02 16 | 
region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,8.77E-01,-3.52E-02,-2.80E-02,9.51E-01,-6.57E-02,4.74E-02 17 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,1.03E+00,-4.83E-02,4.67E-02,1.05E+00,5.24E-02,-5.60E-02 18 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,1.04E+00,2.19E-02,3.14E-02,8.64E-01,-4.32E-02,-7.75E-03 19 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,1.11E+00,-1.45E-02,-7.65E-03,8.59E-01,1.96E-02,-4.32E-02 20 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,1.18E+00,-2.94E-02,-2.36E-02,9.41E-01,7.35E-02,7.66E-02 21 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,9.64E-01,-1.32E-02,-7.15E-03,8.67E-01,-3.65E-02,-2.34E-02 22 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,8.83E-01,-4.35E-02,1.59E-02,9.08E-01,4.34E-02,-4.87E-02 23 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,1.17E+00,-3.14E-02,3.64E-03,8.69E-01,9.23E-02,9.98E-02 24 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,9.54E-01,-6.34E-03,-2.58E-02,9.73E-01,3.86E-02,-8.11E-02 25 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,8.67E-01,4.51E-02,2.87E-02,9.38E-01,-2.99E-02,5.90E-02 26 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,1.18E+00,2.77E-02,7.03E-03,1.15E+00,-9.00E-02,7.86E-02 27 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,1.20E+00,2.23E-02,-3.46E-03,9.71E-01,-7.54E-03,-7.05E-02 28 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.04E+00,-1.05E-03,5.46E-02,1.11E+00,-4.51E-02,5.46E-02 29 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,8.32E-01,3.75E-02,3.72E-02,1.05E+00,-4.73E-02,-8.84E-02 30 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,9.78E-01,-2.25E-02,-1.16E-02,1.07E+00,2.88E-02,2.21E-02 31 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.03E+00,-9.76E-03,-3.35E-02,1.14E+00,7.13E-03,2.05E-03 32 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,1.06E+00,6.86E-03,6.68E-03,1.19E+00,3.91E-02,6.47E-02 33 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,8.61E-01,1.94E-02,-3.88E-02,8.03E-01,9.22E-03,-4.08E-02 34 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,8.11E-01,3.12E-02,1.51E-02,9.31E-01,-1.15E-02,2.71E-03 35 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,1.06E+00,5.56E-02,1.38E-02,9.06E-01,6.83E-02,2.37E-02 36 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,8.63E-01,4.96E-03,3.39E-03,1.09E+00,-4.82E-02,-3.47E-03 37 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,1.07E+00,-3.71E-02,9.08E-03,9.09E-01,8.25E-02,-2.45E-02 38 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,9.55E-01,2.19E-02,-2.38E-02,9.64E-01,6.86E-02,-3.61E-02 39 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,9.10E-01,-4.06E-02,4.97E-02,9.02E-01,-9.38E-02,4.81E-02 40 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,9.34E-01,4.27E-02,1.02E-02,1.01E+00,6.38E-02,-7.57E-02 41 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,8.38E-01,-4.28E-03,2.52E-02,8.29E-01,8.19E-02,3.28E-02 42 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,9.57E-01,-3.61E-02,-1.39E-02,8.44E-01,-4.91E-02,2.58E-02 43 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,8.99E-01,-2.53E-02,-7.64E-04,1.04E+00,-5.96E-02,5.70E-02 44 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.10E+00,6.96E-02,3.37E-02,1.08E+00,9.53E-02,-1.27E-02 45 | 
region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,8.02E-01,-3.43E-02,-4.76E-02,1.02E+00,2.80E-02,-4.07E-02 46 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,1.19E+00,4.24E-02,-3.25E-02,1.02E+00,-6.39E-02,-4.72E-02 47 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,9.00E-01,5.51E-03,-5.18E-02,1.00E+00,-4.13E-02,4.07E-02 48 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,9.94E-01,5.81E-03,-2.73E-02,9.38E-01,8.32E-02,-7.59E-02 49 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.17E+00,2.70E-02,-2.55E-02,1.01E+00,-6.36E-02,6.65E-02 50 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,1.05E+00,5.91E-03,-2.95E-02,9.29E-01,6.37E-02,6.84E-02 51 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,1.04E+00,-1.82E-02,2.55E-03,1.10E+00,5.33E-02,7.53E-04 52 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,1.07E+00,-1.57E-02,-3.48E-02,1.01E+00,3.92E-02,9.73E-02 53 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,8.77E-01,3.18E-02,3.02E-02,1.17E+00,-9.86E-02,-6.07E-02 54 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,9.42E-01,-1.57E-02,-1.07E-02,1.18E+00,7.48E-02,-2.34E-02 55 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.14E+00,-2.70E-04,4.31E-02,8.05E-01,4.05E-02,-1.53E-02 56 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,1.04E+00,-2.17E-02,3.65E-02,1.05E+00,-2.21E-02,-9.11E-02 57 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,8.75E-01,-2.05E-02,-2.34E-02,1.15E+00,-1.80E-02,2.55E-02 58 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,1.02E+00,-2.74E-03,3.64E-02,1.07E+00,-1.14E-02,-6.70E-02 59 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,1.06E+00,6.45E-02,2.11E-02,1.08E+00,-1.27E-03,1.82E-02 60 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,8.57E-01,4.74E-03,-1.35E-02,1.20E+00,2.36E-02,8.19E-02 61 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,1.12E+00,7.05E-03,5.56E-02,1.11E+00,-3.04E-02,3.85E-02 62 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,9.68E-01,-3.52E-02,1.56E-02,1.17E+00,2.58E-02,6.63E-02 63 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,8.68E-01,-2.68E-02,1.04E-02,9.71E-01,7.27E-02,4.92E-02 64 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,1.01E+00,3.01E-03,7.05E-03,1.08E+00,3.96E-02,-2.57E-02 65 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,1.18E+00,-3.37E-02,-2.04E-02,8.92E-01,7.08E-02,-4.66E-02 66 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.03E+00,4.45E-02,-4.54E-02,9.38E-01,-3.90E-02,-3.24E-02 67 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,9.62E-01,4.78E-02,7.83E-03,8.36E-01,3.61E-03,5.89E-02 68 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,1.13E+00,-3.03E-02,-3.91E-02,1.03E+00,-1.57E-02,5.83E-02 69 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,1.09E+00,-1.93E-02,-3.08E-02,9.91E-01,8.29E-02,-5.70E-02 70 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,1.06E+00,3.44E-02,-3.29E-02,1.18E+00,8.36E-02,7.37E-02 71 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,9.96E-01,2.68E-02,-4.89E-02,1.10E+00,8.95E-02,7.10E-02 72 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,8.99E-01,-3.40E-02,-4.12E-02,1.05E+00,-6.69E-02,-7.05E-02 73 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,1.12E+00,-1.92E-02,3.62E-02,1.06E+00,3.41E-02,2.22E-02 74 | 
region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,1.02E+00,-4.84E-03,2.08E-02,8.91E-01,3.75E-02,1.92E-02 75 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,8.65E-01,5.78E-03,-2.36E-03,1.12E+00,4.35E-03,-2.74E-03 76 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,9.21E-01,1.74E-02,-3.69E-02,1.07E+00,-5.37E-02,-2.79E-02 77 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,8.99E-01,1.09E-02,2.87E-03,9.47E-01,-5.46E-02,2.56E-02 78 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,8.55E-01,-2.48E-02,-2.68E-02,9.16E-01,-7.71E-02,-8.66E-02 79 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,8.20E-01,1.41E-02,-1.47E-02,9.08E-01,7.58E-02,-2.82E-02 80 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,8.79E-01,-3.12E-02,2.13E-02,8.96E-01,-8.67E-02,-3.42E-02 81 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,1.20E+00,-1.26E-02,5.54E-02,1.00E+00,-6.44E-03,5.52E-02 82 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,9.82E-01,3.80E-02,-9.67E-03,1.12E+00,9.80E-02,8.14E-02 83 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,9.74E-01,2.39E-02,5.18E-03,9.01E-01,6.21E-02,-6.92E-02 84 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,8.53E-01,6.60E-03,1.59E-02,1.19E+00,-6.45E-02,2.84E-02 85 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,8.60E-01,1.33E-02,1.68E-02,1.16E+00,-2.63E-02,8.89E-02 86 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,1.06E+00,2.33E-02,1.62E-02,9.34E-01,-2.80E-03,6.97E-02 87 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,1.13E+00,3.46E-02,-8.14E-03,1.15E+00,2.96E-02,8.27E-02 88 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,9.58E-01,-9.15E-04,-2.92E-02,8.03E-01,-9.97E-02,-7.27E-02 89 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,8.27E-01,9.31E-03,1.82E-02,1.11E+00,-1.50E-03,-2.60E-02 90 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,1.12E+00,3.53E-02,1.99E-02,1.20E+00,-2.84E-03,-1.80E-02 91 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.08E+00,5.62E-02,-6.94E-03,9.20E-01,-6.20E-02,8.75E-03 92 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,1.14E+00,1.71E-02,2.85E-02,9.42E-01,-5.27E-02,-4.12E-02 93 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,8.61E-01,-2.25E-02,3.17E-02,9.04E-01,2.17E-02,-5.91E-02 94 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,1.19E+00,-1.63E-02,5.23E-02,1.08E+00,5.75E-02,-7.32E-02 95 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,9.55E-01,-2.53E-02,-5.85E-02,1.11E+00,-8.34E-02,-6.10E-02 96 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,1.17E+00,6.91E-02,2.33E-02,9.01E-01,8.36E-02,-5.22E-02 97 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,9.57E-01,5.37E-02,-3.97E-02,1.10E+00,3.80E-02,9.64E-02 98 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.15E+00,2.10E-02,1.92E-02,1.17E+00,9.35E-02,1.72E-02 99 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,1.06E+00,-1.40E-03,1.55E-02,1.19E+00,-2.47E-02,4.06E-02 100 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,8.26E-01,4.77E-02,-4.91E-02,9.76E-01,-7.07E-02,5.28E-02 101 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,9.03E-01,3.64E-02,4.28E-02,9.77E-01,-1.35E-02,-8.35E-02 102 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,8.15E-01,-1.47E-02,-3.47E-02,8.84E-01,2.28E-03,1.94E-02 103 | 
region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,1.07E+00,-1.47E-02,4.08E-02,1.16E+00,-2.52E-02,-5.78E-02 104 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.20E+00,2.42E-02,-2.35E-02,8.38E-01,-1.21E-02,-1.99E-02 105 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,1.02E+00,5.32E-02,-3.11E-02,8.85E-01,-2.44E-02,-6.53E-02 106 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,8.91E-01,1.24E-02,-3.27E-02,8.27E-01,-4.91E-02,-6.39E-02 107 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,8.26E-01,-9.36E-03,2.90E-02,8.63E-01,-8.57E-02,-2.35E-02 108 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,1.19E+00,-2.29E-02,-3.37E-02,1.09E+00,-6.83E-02,-7.87E-03 109 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.11E+00,-1.63E-02,2.16E-02,1.17E+00,-6.78E-02,5.97E-03 110 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,8.46E-01,-1.86E-02,3.40E-02,1.04E+00,-7.26E-02,-7.48E-02 111 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,9.96E-01,-4.18E-02,9.83E-03,9.16E-01,-4.56E-02,-9.51E-02 112 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,1.02E+00,3.89E-04,2.22E-02,9.45E-01,6.03E-02,4.47E-02 113 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,1.14E+00,4.60E-02,-3.64E-02,8.60E-01,-2.78E-02,-1.59E-02 114 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,1.05E+00,1.71E-02,-4.20E-02,9.01E-01,4.14E-02,-3.57E-02 115 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.04E+00,3.05E-02,-4.26E-02,1.07E+00,-2.05E-03,-6.53E-02 116 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,8.20E-01,1.93E-02,-2.49E-02,8.71E-01,8.69E-02,9.02E-02 117 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,1.02E+00,2.01E-02,1.23E-02,1.13E+00,8.34E-03,-1.73E-02 118 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,1.19E+00,5.30E-02,-9.30E-03,1.12E+00,3.59E-02,-5.11E-02 119 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,1.14E+00,-1.44E-02,1.34E-03,1.20E+00,-9.94E-02,7.35E-02 120 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,8.16E-01,-2.04E-02,-5.74E-02,1.00E+00,2.19E-02,5.86E-02 121 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,9.89E-01,-2.72E-02,-5.20E-02,9.13E-01,8.48E-02,-3.51E-02 122 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,1.02E+00,2.21E-02,2.90E-02,8.43E-01,5.85E-02,-9.01E-03 123 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,1.05E+00,1.79E-02,1.69E-02,1.07E+00,5.27E-02,8.74E-02 124 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,8.98E-01,1.63E-02,3.52E-02,1.03E+00,-5.56E-02,-5.04E-02 125 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,1.01E+00,5.87E-03,4.84E-02,1.19E+00,-7.46E-02,-9.43E-02 126 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.14E+00,-5.13E-02,1.20E-02,9.22E-01,-7.84E-02,1.39E-02 127 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,1.18E+00,3.52E-02,-4.23E-02,1.13E+00,4.82E-02,3.06E-02 128 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,8.80E-01,-1.85E-02,-5.32E-02,1.08E+00,3.65E-02,4.57E-02 129 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,9.94E-01,1.87E-02,3.45E-03,1.17E+00,-7.44E-02,5.10E-02 130 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,8.35E-01,5.04E-03,3.29E-02,8.81E-01,-7.29E-02,-3.87E-02 131 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,1.19E+00,2.04E-02,-3.65E-02,1.07E+00,5.90E-02,-3.24E-02 132 | 
region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,8.06E-01,7.42E-03,-6.28E-03,9.61E-01,-7.11E-02,-7.45E-02 133 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,1.18E+00,1.51E-02,1.09E-02,8.03E-01,3.94E-02,-2.28E-03 134 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,1.07E+00,-4.83E-02,2.97E-02,1.15E+00,5.05E-03,-3.52E-02 135 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,8.60E-01,2.10E-02,5.66E-03,9.72E-01,2.63E-02,-4.89E-02 136 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,9.38E-01,1.23E-02,1.53E-02,1.13E+00,-1.21E-02,-7.30E-02 137 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,8.63E-01,1.55E-02,4.09E-02,1.04E+00,2.78E-02,-2.95E-02 138 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,1.10E+00,-1.92E-03,4.31E-02,9.74E-01,7.96E-03,-1.79E-02 139 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,1.13E+00,4.25E-02,-5.51E-02,9.93E-01,-7.68E-02,2.10E-03 140 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,8.43E-01,4.51E-02,-3.38E-02,8.54E-01,-3.42E-02,-5.33E-02 141 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,1.02E+00,-3.35E-02,1.07E-02,8.67E-01,7.08E-02,-8.43E-02 142 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,8.82E-01,8.57E-03,-2.69E-03,9.22E-01,4.08E-02,6.33E-02 143 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,1.18E+00,-5.18E-02,3.97E-02,8.00E-01,6.33E-02,9.19E-02 144 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,1.04E+00,-1.90E-02,1.93E-02,1.00E+00,9.66E-02,-6.07E-02 145 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,9.67E-01,-2.35E-02,-1.66E-02,9.04E-01,-3.34E-02,-2.88E-02 146 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,1.06E+00,2.12E-02,4.04E-02,1.19E+00,-4.39E-02,-5.83E-02 147 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,1.16E+00,-5.08E-02,1.11E-02,8.67E-01,9.78E-02,-8.84E-02 148 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.18E+00,-2.89E-02,2.48E-02,1.17E+00,-9.82E-03,-9.17E-02 149 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,1.04E+00,-9.77E-03,4.79E-02,9.37E-01,1.98E-02,8.15E-02 150 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,1.15E+00,1.10E-02,4.16E-02,1.13E+00,1.87E-02,-2.22E-02 151 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.03E+00,-3.89E-02,2.15E-03,8.25E-01,6.64E-02,8.29E-02 152 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,9.73E-01,2.84E-02,6.50E-03,1.16E+00,-5.64E-02,2.57E-02 153 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,1.09E+00,-2.69E-02,-3.76E-03,8.78E-01,-3.34E-02,-8.98E-02 154 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,9.06E-01,-1.41E-03,4.71E-02,1.00E+00,-3.56E-02,1.36E-02 155 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,1.06E+00,-4.41E-02,-4.32E-02,1.03E+00,-9.60E-02,4.98E-02 156 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,8.16E-01,4.86E-03,1.33E-02,1.01E+00,-7.26E-02,1.06E-02 157 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,1.01E+00,-3.12E-03,3.37E-02,9.60E-01,9.71E-02,-9.82E-02 158 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.11E+00,1.89E-02,9.63E-03,1.06E+00,8.46E-02,-5.29E-02 159 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,8.93E-01,2.42E-02,-9.65E-04,1.02E+00,-5.56E-02,7.29E-02 160 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,1.12E+00,1.80E-02,7.35E-03,9.46E-01,-7.46E-02,-5.36E-02 161 | 
region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,8.71E-01,4.49E-02,3.13E-02,1.14E+00,-6.00E-02,7.94E-02 162 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,1.02E+00,4.58E-03,1.49E-02,1.17E+00,-8.54E-03,5.44E-02 163 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,8.11E-01,-1.52E-02,1.77E-02,8.44E-01,-4.45E-02,-4.00E-02 164 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.12E+00,6.11E-02,1.56E-02,8.51E-01,2.25E-02,4.17E-02 165 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,8.14E-01,-3.75E-02,-2.13E-03,8.24E-01,-7.41E-02,5.88E-02 166 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,1.14E+00,2.50E-02,-4.35E-02,9.49E-01,-9.56E-03,3.43E-02 167 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,9.13E-01,5.31E-02,-5.25E-02,8.39E-01,1.16E-02,9.87E-02 168 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,8.72E-01,-3.19E-02,-1.81E-02,8.35E-01,-9.07E-02,5.68E-02 169 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,8.70E-01,-4.46E-02,-2.86E-02,1.10E+00,8.98E-02,3.59E-02 170 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,1.02E+00,2.11E-02,-2.68E-02,8.96E-01,-1.83E-02,1.96E-02 171 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,1.07E+00,5.09E-03,2.69E-02,8.94E-01,8.40E-02,-9.56E-03 172 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,1.02E+00,-3.40E-02,4.70E-03,1.04E+00,6.56E-02,-5.35E-03 173 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,8.49E-01,5.19E-03,3.94E-02,1.05E+00,-2.49E-02,-7.39E-02 174 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,8.98E-01,2.21E-02,4.51E-02,9.47E-01,1.56E-02,3.54E-02 175 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.02E+00,2.30E-02,2.26E-02,8.22E-01,-2.78E-02,-3.37E-02 176 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,1.16E+00,7.87E-03,4.14E-02,1.18E+00,1.30E-02,-5.94E-02 177 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,1.16E+00,-2.84E-02,-1.85E-02,8.42E-01,3.69E-02,-2.75E-03 178 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,1.13E+00,3.48E-02,-3.38E-02,1.06E+00,-3.27E-02,-6.18E-02 179 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,8.79E-01,8.82E-03,-3.13E-02,1.15E+00,9.82E-02,9.05E-03 180 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,1.03E+00,3.41E-02,-5.20E-02,9.55E-01,4.95E-02,5.31E-02 181 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,9.17E-01,4.78E-02,-1.32E-03,8.91E-01,-8.45E-02,-2.30E-02 182 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,1.02E+00,-2.87E-02,-2.73E-02,8.61E-01,-3.95E-02,1.60E-02 183 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,1.17E+00,8.41E-03,9.32E-03,1.07E+00,2.08E-03,2.55E-02 184 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,1.18E+00,4.87E-02,2.61E-02,9.74E-01,4.63E-02,-3.53E-02 185 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,9.67E-01,3.20E-03,-5.39E-02,1.10E+00,3.23E-03,5.34E-02 186 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.11E+00,4.52E-02,-1.47E-02,1.09E+00,7.59E-02,3.53E-02 187 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,1.17E+00,5.86E-03,2.08E-02,8.62E-01,9.00E-02,-7.03E-02 188 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,8.94E-01,-3.89E-02,4.39E-02,8.26E-01,9.47E-04,4.96E-03 189 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,9.99E-01,-2.82E-03,-3.69E-03,8.15E-01,-5.34E-02,-5.92E-02 190 | 
region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,1.12E+00,5.68E-02,1.07E-02,9.03E-01,-3.69E-02,-6.01E-02 191 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,1.20E+00,-3.09E-02,-3.46E-02,9.14E-01,4.44E-02,1.73E-03 192 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,8.65E-01,-5.74E-03,-4.70E-02,9.76E-01,5.75E-02,1.28E-02 193 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,9.11E-01,4.21E-02,-3.46E-02,1.10E+00,5.82E-02,3.90E-02 194 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,1.02E+00,3.95E-02,4.32E-02,1.19E+00,7.11E-02,-5.63E-02 195 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,9.40E-01,6.17E-03,2.44E-02,1.17E+00,8.73E-02,8.61E-02 196 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,9.84E-01,-5.44E-03,6.10E-02,1.14E+00,8.91E-02,9.34E-02 197 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,1.01E+00,6.78E-03,-2.05E-02,1.07E+00,-8.06E-02,8.69E-02 198 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,9.03E-01,-3.15E-02,-6.90E-03,8.29E-01,9.28E-02,-6.49E-02 199 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,1.01E+00,-3.45E-02,2.95E-02,8.42E-01,1.70E-02,-4.02E-02 200 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,8.52E-01,-1.85E-02,4.00E-02,1.07E+00,-6.21E-02,-7.64E-02 201 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,1.04E+00,5.50E-02,2.52E-02,1.01E+00,2.88E-02,9.89E-02 202 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,1.11E+00,-7.38E-03,1.62E-02,1.07E+00,-9.54E-02,2.42E-02 203 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,8.70E-01,-2.29E-02,-1.52E-02,1.05E+00,8.81E-02,3.01E-02 204 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,9.82E-01,-4.75E-02,7.38E-03,8.42E-01,-4.84E-02,-4.81E-02 205 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,1.04E+00,-4.28E-02,-3.24E-02,1.03E+00,2.14E-03,5.94E-02 206 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,9.08E-01,3.06E-02,3.62E-02,1.06E+00,6.99E-02,3.99E-02 207 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,1.01E+00,-3.83E-03,4.60E-02,1.13E+00,4.97E-02,-6.68E-02 208 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,8.15E-01,-2.97E-02,2.55E-02,8.37E-01,6.00E-02,3.08E-02 209 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,1.10E+00,-5.11E-02,2.36E-02,9.38E-01,7.49E-02,3.57E-02 210 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,1.10E+00,5.87E-02,-2.90E-02,9.60E-01,8.87E-02,9.88E-02 211 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.03E+00,4.19E-02,4.48E-02,1.04E+00,9.81E-02,-5.73E-02 212 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,1.15E+00,-3.48E-02,-3.28E-02,1.02E+00,3.92E-02,-4.93E-02 213 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,8.79E-01,6.68E-03,1.46E-03,8.88E-01,4.44E-02,-4.46E-02 214 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,1.15E+00,-5.23E-02,-1.23E-02,1.04E+00,-9.64E-02,-4.73E-02 215 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,8.79E-01,2.57E-02,-1.63E-02,9.32E-01,-5.70E-02,1.29E-02 216 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,1.06E+00,4.96E-02,1.28E-02,9.73E-01,-9.50E-02,-3.73E-02 217 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,1.09E+00,-3.51E-03,-1.86E-03,1.08E+00,6.40E-02,9.24E-02 218 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.05E+00,-1.92E-02,-2.11E-02,1.19E+00,9.63E-02,-8.46E-02 219 | 
region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,1.01E+00,3.08E-02,2.22E-02,1.08E+00,-1.24E-02,2.43E-02 220 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,1.13E+00,3.22E-02,-4.72E-02,1.06E+00,4.99E-02,-3.70E-02 221 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,1.05E+00,-1.79E-02,-1.07E-02,1.05E+00,7.55E-02,4.90E-02 222 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,1.00E+00,-1.74E-02,4.13E-02,8.68E-01,7.67E-02,-8.00E-02 223 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,1.11E+00,-2.12E-02,-3.38E-02,9.31E-01,-3.96E-03,4.91E-02 224 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.15E+00,2.41E-03,2.19E-02,8.69E-01,-4.66E-02,-4.26E-02 225 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,9.54E-01,2.92E-03,1.96E-02,8.46E-01,4.81E-02,8.05E-02 226 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,1.14E+00,1.28E-02,-3.09E-03,9.20E-01,-7.46E-02,1.90E-02 227 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,1.04E+00,-9.66E-03,3.95E-02,1.16E+00,4.37E-02,8.37E-02 228 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,8.06E-01,3.94E-03,2.68E-02,1.00E+00,-1.54E-02,9.07E-02 229 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.17E+00,-3.81E-02,2.33E-02,8.54E-01,8.69E-02,-5.86E-02 230 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,1.06E+00,-5.27E-02,1.62E-02,1.19E+00,3.74E-02,3.52E-02 231 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,9.74E-01,4.06E-02,-4.36E-02,1.19E+00,2.62E-02,7.92E-02 232 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,1.12E+00,3.68E-02,8.72E-03,1.16E+00,9.37E-03,-7.28E-02 233 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,8.44E-01,-1.80E-02,2.92E-02,9.26E-01,5.24E-02,5.87E-02 234 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,1.14E+00,6.66E-02,-5.03E-02,8.25E-01,-8.86E-04,-6.25E-02 235 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,8.18E-01,-1.50E-02,4.45E-03,1.13E+00,3.63E-02,-2.67E-02 236 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,1.14E+00,2.37E-02,4.03E-02,1.14E+00,2.68E-02,7.57E-02 237 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,1.10E+00,-3.15E-02,-1.05E-02,9.74E-01,9.19E-02,6.40E-02 238 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,1.12E+00,-5.87E-02,4.25E-02,8.93E-01,-9.32E-02,-7.70E-02 239 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,1.12E+00,3.59E-02,1.33E-02,8.56E-01,-5.83E-02,5.11E-02 240 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,1.14E+00,3.25E-02,-1.17E-02,9.44E-01,8.10E-02,-8.55E-02 241 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,1.16E+00,3.65E-02,3.78E-02,1.02E+00,7.73E-02,-3.39E-02 242 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,9.33E-01,2.26E-02,-3.61E-02,1.04E+00,-6.27E-02,5.42E-02 243 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,1.12E+00,-7.61E-03,-5.79E-03,1.04E+00,1.15E-03,-9.79E-02 244 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,1.04E+00,3.81E-03,4.84E-02,1.11E+00,8.88E-02,-4.81E-02 245 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,1.08E+00,8.07E-04,5.76E-02,8.98E-01,4.57E-02,-1.72E-02 246 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.17E+00,-2.79E-03,-3.67E-02,8.59E-01,9.04E-02,-2.96E-02 247 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,1.08E+00,-6.24E-02,4.38E-02,1.17E+00,-1.13E-02,-4.59E-02 248 | 
region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,1.14E+00,-3.55E-02,-5.78E-03,1.04E+00,-6.67E-02,2.41E-03 249 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,1.12E+00,-1.97E-02,3.73E-02,8.63E-01,-5.56E-02,-5.16E-02 250 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,9.32E-01,-2.17E-03,3.62E-02,1.14E+00,-8.70E-02,6.46E-02 251 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,8.86E-01,1.37E-02,4.77E-02,1.18E+00,4.82E-02,-9.93E-02 252 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,9.47E-01,-2.82E-02,3.19E-02,8.78E-01,-7.17E-02,1.01E-02 253 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,1.18E+00,4.62E-02,2.39E-02,9.37E-01,9.62E-02,2.30E-02 254 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,8.73E-01,-3.78E-02,-2.40E-02,8.65E-01,-7.09E-02,-7.33E-02 255 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,9.71E-01,4.76E-03,-5.01E-02,8.84E-01,-4.19E-02,8.87E-02 256 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,1.13E+00,2.96E-02,2.29E-04,1.09E+00,-6.62E-02,-1.56E-02 257 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,8.10E-01,4.79E-03,3.48E-02,8.60E-01,-4.97E-02,5.59E-03 258 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,8.66E-01,3.45E-02,-2.92E-02,9.52E-01,-4.45E-02,-1.45E-02 259 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,9.67E-01,-2.00E-02,2.06E-02,9.13E-01,4.23E-03,9.01E-02 260 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,1.06E+00,4.82E-02,7.26E-03,8.73E-01,1.59E-03,6.74E-03 261 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,9.36E-01,-6.36E-03,-3.21E-02,9.36E-01,7.15E-02,3.00E-02 262 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,8.87E-01,-2.63E-02,-5.76E-03,1.09E+00,-6.54E-02,-2.47E-03 263 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,9.47E-01,-2.86E-02,-3.31E-02,8.04E-01,-5.33E-02,-2.72E-02 264 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,9.99E-01,1.97E-02,1.43E-02,8.39E-01,-7.72E-03,6.23E-04 265 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,8.22E-01,-4.68E-04,1.56E-02,9.90E-01,4.34E-02,9.58E-02 266 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,1.05E+00,9.98E-03,-2.32E-02,9.70E-01,9.10E-03,-4.13E-02 267 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,1.03E+00,-5.40E-02,1.05E-02,1.12E+00,7.13E-02,-4.27E-03 268 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.09E+00,1.65E-02,-3.54E-03,1.08E+00,6.62E-02,-6.29E-02 269 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,1.14E+00,-6.22E-03,-4.97E-02,8.32E-01,-3.51E-03,4.34E-02 270 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,1.05E+00,-1.38E-02,-2.31E-02,8.18E-01,6.65E-02,-5.85E-02 271 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,8.04E-01,1.96E-02,2.73E-02,1.16E+00,-1.62E-02,2.41E-02 272 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,1.19E+00,-2.14E-02,-1.48E-02,1.04E+00,-4.23E-02,6.52E-02 273 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,9.87E-01,1.23E-02,4.53E-02,1.02E+00,2.45E-02,7.64E-02 274 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,1.17E+00,2.33E-02,-1.72E-02,9.02E-01,-7.65E-02,1.84E-02 275 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,1.00E+00,-3.94E-02,2.42E-02,8.99E-01,-6.28E-02,-4.61E-02 276 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,8.98E-01,4.94E-02,2.04E-02,9.31E-01,4.65E-02,4.99E-02 277 | 
region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,9.02E-01,2.20E-03,-5.66E-02,1.18E+00,1.24E-03,9.23E-03 278 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.18E+00,-4.57E-02,-7.36E-03,1.03E+00,-4.50E-02,9.48E-03 279 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,1.16E+00,2.31E-02,3.05E-02,9.84E-01,8.79E-02,-2.21E-02 280 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,9.10E-01,-4.29E-02,-1.11E-03,9.35E-01,3.10E-02,4.59E-02 281 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,1.00E+00,-2.97E-03,2.84E-02,8.93E-01,1.15E-02,8.21E-02 282 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,9.13E-01,-1.33E-02,3.34E-02,9.25E-01,-4.47E-02,-7.94E-02 283 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,1.19E+00,-3.25E-02,-2.44E-02,9.65E-01,4.41E-02,8.74E-02 284 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.02E+00,2.86E-02,-2.47E-02,1.02E+00,-7.73E-02,-5.54E-02 285 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,1.16E+00,-4.13E-02,-4.09E-03,8.85E-01,9.08E-03,-3.55E-02 286 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,8.09E-01,2.83E-02,-2.80E-02,1.00E+00,5.69E-02,-8.61E-02 287 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,1.15E+00,-3.60E-03,6.05E-03,1.12E+00,8.77E-02,-2.64E-02 288 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,1.09E+00,-2.76E-02,-7.22E-04,1.11E+00,2.76E-02,-5.51E-02 289 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.08E+00,-3.88E-02,-4.92E-03,1.06E+00,7.74E-02,3.96E-02 290 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,1.07E+00,4.10E-02,-2.52E-02,9.92E-01,5.88E-02,7.64E-02 291 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,1.03E+00,1.47E-02,2.83E-02,1.14E+00,8.80E-02,-5.51E-02 292 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,1.05E+00,-8.23E-03,3.25E-02,1.08E+00,-2.45E-02,-7.15E-02 293 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,1.18E+00,1.50E-02,2.54E-02,1.09E+00,7.27E-02,-5.26E-02 294 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,1.10E+00,-3.66E-02,2.31E-03,1.11E+00,-8.47E-02,4.84E-02 295 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.13E+00,1.12E-02,1.31E-02,1.09E+00,-4.71E-02,2.58E-03 296 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,8.52E-01,1.25E-02,-4.69E-02,9.73E-01,-4.70E-02,7.22E-02 297 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,8.52E-01,3.54E-02,-4.39E-02,8.00E-01,-2.52E-02,2.72E-02 298 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,8.90E-01,1.20E-02,1.02E-02,1.08E+00,2.52E-02,1.26E-02 299 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,1.12E+00,-3.05E-02,3.64E-02,9.33E-01,7.92E-02,-1.86E-02 300 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,9.69E-01,1.91E-02,-1.89E-02,9.17E-01,-6.36E-02,5.99E-02 301 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,8.45E-01,-3.29E-02,1.18E-02,1.05E+00,-4.04E-02,5.53E-02 302 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,8.69E-01,-2.95E-02,-3.28E-02,8.71E-01,-9.66E-03,-9.76E-02 303 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,8.65E-01,3.57E-02,2.85E-02,8.13E-01,7.24E-02,2.73E-02 304 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,9.25E-01,-4.89E-02,-1.41E-02,8.87E-01,5.42E-02,-3.02E-02 305 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,8.63E-01,4.82E-03,4.07E-02,9.35E-01,-5.42E-02,-6.11E-02 306 | 
region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.06E+00,-4.54E-02,-3.17E-02,1.18E+00,-6.28E-02,5.58E-03 307 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,9.30E-01,2.75E-02,-1.08E-02,9.32E-01,5.13E-02,5.87E-02 308 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,1.19E+00,-1.23E-02,-5.90E-02,1.11E+00,7.20E-02,-3.64E-02 309 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,1.04E+00,-1.44E-03,-4.72E-02,9.41E-01,1.48E-03,-6.39E-02 310 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,8.95E-01,1.17E-02,-5.02E-02,1.18E+00,-6.20E-02,-1.38E-02 311 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,1.13E+00,3.27E-02,-1.67E-02,8.70E-01,6.51E-02,3.60E-02 312 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,9.39E-01,-2.65E-02,4.53E-02,1.13E+00,-1.66E-02,-2.70E-02 313 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,9.88E-01,-1.33E-03,3.47E-02,8.51E-01,4.07E-02,7.45E-02 314 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,8.07E-01,6.49E-03,1.92E-02,9.59E-01,-4.09E-02,-2.78E-02 315 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,1.10E+00,-1.80E-02,8.64E-03,8.84E-01,-6.93E-02,-2.75E-02 316 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,9.89E-01,2.23E-02,-3.19E-02,1.13E+00,9.53E-03,9.37E-02 317 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,8.10E-01,2.87E-02,-3.72E-02,8.96E-01,-6.15E-03,5.03E-02 318 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,1.09E+00,4.47E-02,-5.05E-02,1.15E+00,-8.86E-02,-1.41E-02 319 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,9.00E-01,3.57E-02,1.72E-02,1.13E+00,-8.31E-04,8.24E-02 320 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,8.81E-01,2.22E-02,-3.19E-02,9.54E-01,-6.65E-04,-4.41E-02 321 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,9.03E-01,-1.06E-02,-2.91E-02,1.09E+00,-6.76E-02,-2.71E-02 322 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,1.15E+00,-4.28E-03,3.47E-02,1.08E+00,-9.38E-02,-5.91E-02 323 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,8.46E-01,-1.95E-02,-1.39E-02,1.06E+00,6.97E-02,-9.13E-02 324 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,8.22E-01,4.02E-03,5.85E-02,9.41E-01,-1.77E-02,3.40E-02 325 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,1.20E+00,1.20E-02,-4.85E-02,1.18E+00,-6.12E-02,-4.66E-02 326 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,1.04E+00,-4.30E-02,1.38E-02,8.18E-01,6.78E-02,-3.05E-02 327 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,9.17E-01,-1.30E-02,3.20E-04,1.01E+00,-8.50E-02,-5.92E-03 328 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.08E+00,3.15E-02,-2.93E-02,8.20E-01,6.09E-02,-8.70E-02 329 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,1.18E+00,-2.28E-02,-1.16E-02,9.19E-01,-1.17E-02,-7.32E-02 330 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,9.15E-01,-3.29E-02,-2.73E-02,1.07E+00,8.35E-02,6.02E-02 331 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.05E+00,-3.47E-02,-3.84E-02,1.14E+00,-1.70E-02,-2.70E-02 332 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,8.49E-01,-3.62E-02,-3.91E-02,8.21E-01,-6.61E-02,-2.29E-02 333 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,1.09E+00,-6.14E-02,4.86E-02,9.60E-01,6.88E-02,1.92E-02 334 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,1.17E+00,5.00E-02,2.09E-02,9.23E-01,-9.36E-02,2.72E-02 335 | 
region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,1.07E+00,2.05E-02,-1.67E-02,8.99E-01,4.15E-02,1.45E-02 336 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,9.50E-01,-2.12E-02,4.64E-03,8.65E-01,2.64E-02,-2.94E-02 337 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,1.18E+00,-3.11E-02,-5.49E-03,1.02E+00,6.59E-02,-6.68E-02 338 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.07E+00,-4.93E-02,2.81E-02,1.13E+00,9.42E-02,1.67E-02 339 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,1.02E+00,3.53E-02,-2.66E-02,1.04E+00,-1.40E-02,8.39E-02 340 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,1.17E+00,-5.73E-02,4.51E-03,8.54E-01,3.64E-02,-5.10E-02 341 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,8.37E-01,3.31E-04,1.72E-02,1.15E+00,1.71E-02,1.45E-02 342 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,9.44E-01,-3.52E-02,-3.53E-02,8.98E-01,4.71E-02,5.37E-02 343 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,9.29E-01,-4.03E-02,-1.37E-03,1.14E+00,-3.05E-02,-6.20E-02 344 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.02E+00,-2.74E-02,-3.74E-02,8.81E-01,5.75E-02,-8.14E-02 345 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,9.89E-01,-6.22E-02,4.71E-02,9.76E-01,-5.64E-02,-6.98E-02 346 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,1.09E+00,-3.03E-02,3.46E-02,8.12E-01,-5.79E-02,3.36E-02 347 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,1.03E+00,1.64E-02,-5.29E-03,1.15E+00,-7.38E-02,9.70E-02 348 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,8.97E-01,3.55E-02,3.77E-02,8.63E-01,-8.14E-02,2.13E-02 349 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.07E+00,5.06E-03,-2.22E-02,1.01E+00,9.13E-02,-8.36E-02 350 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,9.12E-01,4.46E-02,-1.15E-02,9.58E-01,-5.26E-03,7.20E-02 351 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,9.18E-01,2.57E-02,3.05E-02,9.86E-01,-8.05E-02,2.76E-02 352 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,8.47E-01,-2.82E-02,3.15E-02,1.01E+00,-3.65E-02,2.18E-02 353 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,8.60E-01,-1.88E-02,1.11E-02,1.02E+00,-8.97E-02,-6.89E-02 354 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,1.19E+00,-2.99E-02,-2.36E-02,9.12E-01,7.61E-02,4.25E-02 355 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.03E+00,2.35E-02,5.33E-02,8.38E-01,-3.35E-02,1.73E-02 356 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,9.91E-01,3.51E-02,3.60E-02,1.14E+00,-9.20E-02,5.19E-02 357 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,8.93E-01,3.18E-02,2.84E-02,8.73E-01,-3.40E-02,2.04E-03 358 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,8.28E-01,-1.44E-02,4.23E-02,9.89E-01,-3.26E-02,-4.33E-02 359 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,1.17E+00,-2.04E-02,7.24E-03,1.13E+00,2.01E-02,-9.94E-03 360 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,9.08E-01,-1.27E-02,2.45E-02,8.58E-01,4.58E-02,-3.91E-02 361 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,9.85E-01,3.08E-02,1.61E-02,1.10E+00,8.34E-02,-9.66E-02 362 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,1.16E+00,-1.95E-03,-2.67E-02,1.00E+00,-6.86E-02,-4.90E-02 363 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,8.65E-01,-5.13E-02,1.04E-02,9.57E-01,-3.76E-02,1.02E-02 364 | 
region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,9.81E-01,3.21E-02,1.74E-02,1.03E+00,2.90E-02,-7.87E-02 365 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,8.25E-01,4.14E-03,3.58E-02,8.07E-01,-6.08E-02,2.07E-05 366 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.20E+00,-4.67E-02,4.07E-02,1.04E+00,-1.65E-02,1.62E-03 367 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,9.38E-01,1.37E-02,-3.29E-02,1.11E+00,-2.73E-02,9.63E-02 368 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,1.10E+00,1.77E-02,1.21E-02,1.17E+00,-7.15E-02,1.63E-02 369 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,9.61E-01,7.22E-03,-6.07E-03,9.08E-01,2.64E-02,-4.77E-02 370 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,9.48E-01,2.65E-02,-2.42E-02,9.83E-01,-1.15E-02,8.98E-02 371 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,1.10E+00,1.20E-03,-4.48E-02,9.04E-01,-9.35E-02,7.89E-04 372 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,9.27E-01,-2.81E-02,4.42E-02,9.82E-01,6.30E-02,-8.28E-02 373 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,9.72E-01,2.50E-02,3.58E-02,1.07E+00,-9.81E-02,5.01E-02 374 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,9.75E-01,2.53E-02,2.81E-02,8.02E-01,6.79E-02,8.40E-02 375 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,1.19E+00,-4.08E-02,1.64E-02,8.09E-01,7.85E-02,8.00E-02 376 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,8.55E-01,4.15E-03,-4.06E-02,1.11E+00,6.79E-02,-8.22E-02 377 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,1.07E+00,2.90E-02,2.61E-02,1.02E+00,-8.83E-02,-8.26E-02 378 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,1.14E+00,-4.91E-02,5.45E-03,1.07E+00,-1.70E-02,7.21E-02 379 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,9.94E-01,-5.34E-02,-1.79E-02,1.09E+00,-2.18E-02,-9.22E-02 380 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,1.12E+00,-3.10E-02,1.80E-02,1.11E+00,7.20E-02,3.51E-03 381 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,1.01E+00,1.51E-02,-1.17E-02,1.11E+00,4.93E-02,2.29E-02 382 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,9.59E-01,-2.98E-02,2.15E-02,1.17E+00,9.94E-02,-5.12E-02 383 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,9.37E-01,-2.82E-02,-2.23E-02,8.02E-01,-5.38E-02,5.93E-02 384 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,1.03E+00,1.51E-02,2.33E-02,1.12E+00,6.38E-02,-3.50E-03 385 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,1.09E+00,4.47E-02,-6.24E-03,9.51E-01,-8.00E-02,5.27E-02 386 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,1.04E+00,-5.96E-02,-3.47E-02,1.13E+00,8.97E-02,6.17E-02 387 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,9.55E-01,-3.10E-02,-2.30E-02,8.48E-01,-8.25E-02,-5.79E-02 388 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.10E+00,1.11E-02,-1.31E-02,9.66E-01,-9.05E-02,3.95E-02 389 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,8.64E-01,2.79E-02,4.40E-02,1.04E+00,4.80E-02,4.08E-02 390 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,1.03E+00,-3.15E-02,3.44E-02,8.99E-01,5.60E-02,-3.47E-02 391 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.17E+00,2.53E-02,3.39E-02,1.04E+00,8.17E-02,9.08E-02 392 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,1.01E+00,-2.98E-02,-1.69E-02,8.43E-01,-6.90E-02,-1.92E-02 393 | 
region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,9.98E-01,1.03E-02,-2.39E-02,1.15E+00,9.65E-02,9.44E-03 394 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,8.22E-01,-3.22E-03,9.49E-03,9.62E-01,-6.57E-02,7.84E-02 395 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,9.58E-01,-5.57E-03,-2.72E-02,1.04E+00,-9.95E-02,-6.28E-02 396 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,1.13E+00,-2.63E-02,-1.60E-02,1.07E+00,7.66E-03,-9.61E-02 397 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,1.06E+00,-4.46E-02,3.48E-02,1.20E+00,-8.70E-02,-3.85E-02 398 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,8.65E-01,-3.53E-02,-1.55E-03,8.61E-01,9.41E-02,-3.89E-02 399 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,1.07E+00,1.25E-02,3.49E-03,1.18E+00,-2.70E-02,-7.66E-02 400 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,1.16E+00,2.26E-02,-2.04E-02,8.70E-01,-6.73E-02,8.94E-02 401 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,8.32E-01,-9.97E-04,3.32E-02,9.55E-01,7.54E-02,9.24E-02 402 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,1.08E+00,1.63E-03,-1.29E-02,9.04E-01,-1.61E-02,-1.31E-02 403 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,1.20E+00,-8.57E-04,5.07E-03,9.16E-01,-3.76E-02,5.38E-02 404 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.09E+00,-4.32E-02,2.61E-02,8.75E-01,-8.23E-02,2.27E-02 405 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,9.11E-01,-1.69E-02,-4.61E-02,1.10E+00,-5.56E-02,-7.99E-02 406 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,8.88E-01,2.39E-02,-3.88E-02,9.21E-01,3.85E-03,-4.13E-02 407 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,1.10E+00,2.24E-02,7.34E-03,8.77E-01,-2.84E-02,2.63E-02 408 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,1.03E+00,1.53E-02,-9.50E-03,9.85E-01,-6.09E-02,7.61E-02 409 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.08E+00,3.52E-02,1.07E-02,9.74E-01,6.50E-02,-2.89E-02 410 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,8.38E-01,-3.15E-02,2.80E-02,1.18E+00,-7.76E-02,-1.53E-03 411 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,1.16E+00,7.93E-03,4.62E-02,1.17E+00,6.90E-03,9.52E-02 412 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,8.05E-01,1.01E-02,-9.03E-03,1.08E+00,-5.21E-02,-6.89E-02 413 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,8.74E-01,1.65E-02,2.56E-02,1.18E+00,7.02E-02,-3.35E-02 414 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,1.00E+00,1.32E-03,2.93E-02,8.15E-01,7.84E-02,9.89E-02 415 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,9.56E-01,9.33E-03,-4.53E-02,1.17E+00,-2.95E-02,8.76E-03 416 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,1.16E+00,-5.53E-02,2.86E-02,9.84E-01,1.88E-02,-1.83E-02 417 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,9.58E-01,1.91E-02,-2.87E-02,9.38E-01,-4.28E-02,4.84E-02 418 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,9.46E-01,-6.07E-03,-2.00E-02,8.79E-01,9.20E-02,-2.38E-02 419 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,8.64E-01,1.51E-02,2.08E-02,9.99E-01,-2.17E-02,-5.28E-02 420 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,1.16E+00,3.67E-03,-3.57E-02,9.84E-01,-3.39E-02,6.34E-02 421 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,1.04E+00,-3.50E-02,4.47E-02,1.07E+00,-2.19E-02,-1.10E-02 422 | 
region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,1.14E+00,-7.07E-02,-9.01E-03,1.18E+00,-5.62E-03,-4.12E-02 423 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,9.29E-01,5.64E-02,-4.80E-02,1.18E+00,-3.79E-02,-2.59E-02 424 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,8.10E-01,-2.28E-02,-2.78E-02,9.84E-01,1.47E-02,6.63E-02 425 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,8.24E-01,1.17E-02,-2.85E-02,1.15E+00,5.62E-02,1.98E-02 426 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.11E+00,5.08E-02,3.73E-02,1.10E+00,-3.39E-02,-8.80E-02 427 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,9.36E-01,-4.17E-03,4.65E-02,8.81E-01,-4.21E-02,6.57E-02 428 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,8.19E-01,4.93E-03,3.12E-02,1.09E+00,7.96E-02,3.55E-02 429 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,1.01E+00,3.83E-02,4.13E-02,9.86E-01,-5.08E-02,4.33E-02 430 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,1.12E+00,-1.40E-02,-3.04E-02,1.15E+00,-6.37E-02,6.33E-02 431 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,1.05E+00,-2.82E-02,-3.44E-02,8.54E-01,8.05E-02,-3.06E-02 432 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,1.17E+00,2.39E-02,3.93E-02,9.14E-01,-2.05E-03,3.64E-02 433 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,9.18E-01,-4.88E-02,-8.55E-04,8.76E-01,-1.03E-02,-6.72E-03 434 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,1.16E+00,-1.32E-03,-2.08E-02,9.17E-01,5.40E-02,-1.18E-02 435 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,9.18E-01,-2.39E-02,-2.25E-02,9.33E-01,8.03E-02,4.19E-04 436 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,8.46E-01,-4.11E-02,4.84E-02,8.43E-01,2.63E-03,-7.86E-03 437 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,1.04E+00,3.34E-02,3.12E-02,9.25E-01,-5.32E-02,-9.01E-02 438 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,8.94E-01,-5.71E-04,1.96E-02,8.72E-01,-6.38E-02,-6.02E-02 439 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,1.07E+00,2.94E-02,4.62E-02,9.24E-01,-4.70E-02,1.56E-02 440 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,1.11E+00,4.93E-05,4.38E-02,1.01E+00,-9.98E-02,-7.88E-02 441 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,1.06E+00,-3.79E-02,-1.97E-02,8.84E-01,-4.86E-02,9.68E-02 442 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,9.38E-01,1.06E-02,3.81E-02,8.48E-01,5.74E-02,-8.06E-02 443 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,1.19E+00,-1.87E-03,-4.04E-02,9.96E-01,2.65E-02,-3.63E-02 444 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,9.75E-01,3.40E-02,2.84E-02,8.09E-01,6.99E-02,-8.99E-02 445 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,9.20E-01,1.60E-02,-3.24E-03,8.45E-01,-4.14E-02,6.54E-04 446 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,8.04E-01,1.77E-02,3.51E-02,8.62E-01,-8.97E-02,-7.21E-02 447 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,8.48E-01,-2.07E-02,1.18E-02,8.92E-01,-4.18E-02,1.56E-02 448 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.03E+00,-5.39E-02,6.84E-02,1.09E+00,9.33E-02,-6.35E-03 449 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,8.03E-01,-1.52E-02,1.98E-03,1.04E+00,-5.66E-02,-4.66E-02 450 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,1.06E+00,-2.05E-02,2.69E-02,1.11E+00,9.88E-02,6.41E-02 451 | 
region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.08E+00,4.18E-02,-1.50E-02,8.52E-01,-5.74E-02,-7.81E-02 452 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,9.20E-01,9.51E-03,9.99E-03,9.20E-01,2.51E-02,1.02E-05 453 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,1.01E+00,3.35E-02,-2.86E-02,8.88E-01,-2.96E-03,-2.62E-02 454 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,1.11E+00,-1.43E-02,2.51E-02,8.41E-01,4.31E-02,1.38E-02 455 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,8.35E-01,3.39E-03,9.62E-03,1.08E+00,-1.01E-02,-4.17E-02 456 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,1.00E+00,-3.44E-02,3.58E-02,8.54E-01,2.90E-02,-6.11E-02 457 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,1.20E+00,-4.60E-03,1.79E-02,1.01E+00,-5.73E-02,8.26E-02 458 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.17E+00,5.19E-02,1.55E-02,1.07E+00,2.89E-02,9.36E-02 459 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,1.08E+00,2.80E-02,-1.60E-02,8.78E-01,-9.36E-02,-4.55E-03 460 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,9.81E-01,-2.17E-03,2.77E-03,9.88E-01,-4.21E-02,8.66E-02 461 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,1.02E+00,-8.82E-03,3.85E-02,1.02E+00,1.74E-02,9.91E-02 462 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,8.97E-01,-6.06E-04,-4.88E-02,9.62E-01,-9.70E-02,-9.92E-02 463 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,1.02E+00,4.33E-02,-1.55E-02,8.50E-01,8.16E-02,-5.01E-02 464 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.17E+00,-2.43E-02,-3.70E-02,1.04E+00,1.44E-02,-1.85E-02 465 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,1.19E+00,4.53E-02,-9.68E-03,9.63E-01,9.48E-02,-2.02E-02 466 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,8.63E-01,2.28E-02,2.11E-02,1.19E+00,-5.55E-02,-1.67E-02 467 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,1.00E+00,-3.45E-02,2.74E-02,8.64E-01,7.12E-02,4.95E-02 468 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,1.18E+00,3.15E-02,-1.09E-02,9.34E-01,-4.52E-02,-2.41E-02 469 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.11E+00,-2.81E-02,2.54E-02,9.44E-01,-6.95E-02,-8.26E-02 470 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,9.70E-01,3.25E-02,-5.62E-02,1.04E+00,-4.93E-03,3.01E-02 471 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,1.16E+00,3.22E-02,1.26E-02,8.59E-01,-1.42E-02,3.73E-02 472 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,1.18E+00,2.25E-03,-9.90E-03,9.64E-01,3.94E-02,-2.31E-02 473 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,1.08E+00,-3.81E-02,5.29E-02,1.17E+00,4.18E-02,7.45E-03 474 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,9.01E-01,-3.59E-02,-1.81E-02,1.13E+00,-1.29E-02,-8.23E-03 475 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.20E+00,-7.48E-02,2.81E-02,1.08E+00,6.41E-02,5.62E-02 476 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,9.77E-01,3.02E-02,-3.34E-03,1.02E+00,-2.89E-02,8.39E-02 477 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,1.15E+00,-1.98E-03,1.27E-02,1.10E+00,2.25E-02,5.26E-02 478 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,1.06E+00,4.05E-02,-1.76E-02,9.37E-01,3.33E-02,-3.11E-02 479 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,1.01E+00,6.17E-03,1.61E-02,1.18E+00,4.69E-02,7.43E-02 480 | 
region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,1.14E+00,3.38E-02,-1.60E-02,8.47E-01,-5.68E-02,1.36E-02 481 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,1.06E+00,5.00E-02,3.44E-02,1.07E+00,-2.24E-02,6.93E-02 482 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,1.12E+00,-1.77E-02,-4.44E-02,8.93E-01,-1.25E-02,-6.16E-02 483 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,9.06E-01,5.24E-03,-3.06E-03,1.19E+00,-7.68E-02,7.19E-03 484 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,9.09E-01,2.83E-02,-3.03E-02,8.73E-01,7.05E-02,9.99E-02 485 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,9.33E-01,-2.17E-02,3.34E-02,8.43E-01,-7.97E-02,-6.65E-02 486 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,8.18E-01,-4.41E-03,-4.62E-02,1.11E+00,-7.78E-02,6.76E-02 487 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,1.18E+00,8.69E-03,-3.16E-02,1.20E+00,-8.01E-03,6.40E-02 488 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,1.11E+00,-6.60E-03,1.62E-02,1.02E+00,3.35E-04,8.21E-02 489 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,8.97E-01,3.28E-02,-1.14E-02,9.70E-01,1.19E-02,3.73E-02 490 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,8.87E-01,-2.39E-02,1.35E-02,1.08E+00,-6.70E-02,6.35E-03 491 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,1.06E+00,3.46E-02,-2.56E-02,9.33E-01,-8.69E-02,4.51E-02 492 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,9.48E-01,-9.69E-03,2.81E-02,9.32E-01,1.98E-02,-5.84E-02 493 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,1.03E+00,2.79E-02,3.07E-02,8.82E-01,3.74E-02,5.92E-02 494 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,1.05E+00,-3.56E-02,-4.15E-02,9.83E-01,-6.79E-02,9.87E-02 495 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,1.12E+00,5.15E-02,1.26E-02,8.88E-01,9.45E-02,1.50E-02 496 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,9.50E-01,8.59E-03,-5.75E-02,1.05E+00,3.58E-02,8.05E-02 497 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,8.42E-01,-4.91E-02,2.09E-02,1.11E+00,6.07E-02,6.90E-02 498 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,1.04E+00,-5.15E-02,3.51E-02,1.14E+00,-3.87E-02,-4.24E-02 499 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,8.51E-01,1.36E-02,-3.88E-03,1.20E+00,5.01E-02,-7.46E-02 500 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,9.96E-01,1.35E-02,3.35E-02,1.14E+00,9.13E-02,-2.41E-02 501 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,8.69E-01,-3.77E-02,-4.43E-02,8.47E-01,-4.18E-02,3.69E-03 502 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,1.02E+00,1.66E-02,-2.38E-02,1.03E+00,4.77E-02,-1.79E-02 503 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,8.77E-01,-1.34E-02,-3.73E-02,1.05E+00,-6.26E-02,-1.46E-02 504 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,1.07E+00,-1.59E-02,-1.47E-03,8.03E-01,-3.05E-02,2.79E-02 505 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,9.29E-01,4.13E-02,1.36E-02,9.78E-01,4.70E-02,-1.92E-02 506 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,8.86E-01,4.69E-02,6.83E-03,8.99E-01,-3.21E-02,6.69E-02 507 | region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,9.19E-01,-3.59E-02,-1.60E-02,9.23E-01,-7.97E-03,-8.61E-02 508 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.14E+00,4.53E-02,3.27E-02,1.08E+00,-2.27E-02,7.86E-02 509 | 
region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,1.09E+00,1.46E-02,1.82E-02,8.76E-01,-5.49E-03,-4.91E-02 510 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,8.46E-01,-2.88E-02,-3.33E-02,9.00E-01,6.47E-02,-8.38E-02 511 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.08E+00,-4.67E-02,4.20E-02,9.53E-01,-2.62E-03,-2.07E-02 512 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,1.12E+00,1.14E-02,2.93E-02,9.32E-01,5.67E-02,9.32E-02 513 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,1.02E+00,-3.49E-02,4.99E-02,1.00E+00,2.29E-02,-5.80E-02 514 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,8.82E-01,1.53E-02,-3.21E-03,8.80E-01,-6.75E-02,-6.04E-02 515 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,1.12E+00,1.91E-02,3.33E-02,9.90E-01,3.22E-02,6.79E-02 516 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,8.74E-01,5.08E-02,-5.78E-02,1.19E+00,5.86E-02,-4.73E-02 517 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,9.85E-01,2.14E-02,1.50E-02,8.97E-01,-6.83E-03,-3.46E-02 518 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.13E+00,-4.85E-02,5.15E-02,8.08E-01,-6.31E-02,2.73E-02 519 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,8.12E-01,2.32E-02,-1.81E-02,9.07E-01,8.39E-02,2.95E-02 520 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,9.92E-01,-5.50E-02,-2.74E-02,8.34E-01,1.28E-02,-9.88E-02 521 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,1.05E+00,2.21E-02,-4.40E-02,8.63E-01,-3.55E-02,-9.21E-02 522 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,9.89E-01,1.75E-02,-3.84E-02,8.88E-01,-3.81E-02,1.92E-02 523 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,1.07E+00,6.40E-03,-1.23E-02,1.16E+00,6.30E-02,-4.78E-03 524 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.02E+00,-2.78E-02,5.00E-02,8.19E-01,7.46E-02,7.03E-02 525 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,9.66E-01,8.59E-03,2.29E-02,9.43E-01,3.26E-02,-2.93E-02 526 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,1.02E+00,-5.29E-02,2.70E-03,1.03E+00,-6.79E-02,-3.63E-02 527 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,1.04E+00,1.01E-02,-4.48E-02,9.66E-01,-5.25E-02,4.34E-02 528 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,8.68E-01,-1.40E-02,-2.44E-02,1.02E+00,5.34E-02,1.89E-02 529 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,8.45E-01,-2.96E-02,-1.49E-03,1.19E+00,7.95E-02,4.63E-02 530 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,1.01E+00,4.86E-03,-2.93E-02,1.05E+00,5.65E-02,5.27E-02 531 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,1.00E+00,4.24E-02,3.07E-02,8.76E-01,7.80E-02,-6.54E-02 532 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,1.06E+00,-2.53E-02,-1.36E-03,9.27E-01,1.75E-02,5.92E-02 533 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,1.06E+00,-1.68E-02,1.13E-02,1.17E+00,-6.60E-02,6.80E-02 534 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,9.27E-01,2.48E-02,2.44E-03,9.05E-01,-8.65E-02,4.39E-02 535 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.11E+00,-4.26E-02,6.56E-02,9.97E-01,-7.68E-02,-5.67E-02 536 | region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,8.18E-01,-2.26E-02,-2.65E-02,8.57E-01,-6.33E-02,-6.56E-02 537 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,1.05E+00,-2.66E-02,4.64E-02,1.13E+00,9.02E-02,4.57E-02 538 | 
region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,1.20E+00,-2.12E-02,-9.02E-03,8.38E-01,2.53E-02,-6.87E-02 539 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,1.05E+00,3.91E-02,-4.20E-03,1.03E+00,-6.24E-02,5.96E-02 540 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,8.29E-01,-1.39E-02,-9.67E-03,1.12E+00,-8.26E-02,4.95E-02 541 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,1.20E+00,2.89E-02,3.98E-02,8.62E-01,7.54E-02,-5.25E-02 542 | region00_TCIA-0001_slice1.png,region00_TCIA-0001_slice1.png,8.67E-01,-3.67E-02,-2.57E-02,9.95E-01,9.79E-02,-3.25E-02 543 | region00_TCIA-0001_slice2.png,region00_TCIA-0001_slice2.png,9.08E-01,3.35E-02,-1.89E-03,1.08E+00,-2.39E-02,1.48E-02 544 | region00_TCIA-0002_slice1.png,region00_TCIA-0002_slice1.png,9.23E-01,-4.39E-02,7.99E-03,8.10E-01,4.77E-02,9.99E-03 545 | region00_TCIA-0002_slice2.png,region00_TCIA-0002_slice2.png,1.19E+00,6.26E-02,-2.03E-02,1.14E+00,1.72E-02,5.80E-02 546 | region00_TCIA-0004_slice1.png,region00_TCIA-0004_slice1.png,1.07E+00,1.51E-02,-3.01E-02,1.08E+00,-3.76E-02,-2.88E-02 547 | region00_TCIA-0004_slice2.png,region00_TCIA-0004_slice2.png,9.61E-01,-2.48E-03,1.77E-02,1.06E+00,-1.69E-02,3.52E-03 548 | region00_TCIA-0004_slice3.png,region00_TCIA-0004_slice3.png,1.08E+00,1.52E-02,-9.95E-03,1.01E+00,-3.10E-02,-1.06E-02 549 | region00_TCIA-0005_slice1.png,region00_TCIA-0005_slice1.png,1.00E+00,-1.26E-02,-1.58E-02,8.56E-01,6.83E-02,-6.66E-02 550 | region00_TCIA-0005_slice2.png,region00_TCIA-0005_slice2.png,1.18E+00,4.67E-02,1.22E-02,1.01E+00,6.53E-02,2.96E-02 551 | region00_TCIA-0005_slice3.png,region00_TCIA-0005_slice3.png,1.20E+00,3.31E-02,2.45E-02,8.50E-01,5.28E-02,-8.78E-02 552 | region00_TCIA-0006_slice1.png,region00_TCIA-0006_slice1.png,1.09E+00,5.08E-02,-1.35E-02,1.08E+00,6.74E-02,-7.65E-02 553 | region00_TCIA-0006_slice2.png,region00_TCIA-0006_slice2.png,1.00E+00,-4.75E-03,4.55E-02,9.20E-01,1.27E-02,-4.27E-02 554 | region00_TCIA-0006_slice3.png,region00_TCIA-0006_slice3.png,9.76E-01,-8.58E-03,5.94E-02,1.08E+00,-7.15E-02,-5.08E-02 555 | region00_TCIA-0007_slice1.png,region00_TCIA-0007_slice1.png,9.34E-01,-4.96E-02,5.52E-03,8.66E-01,4.01E-02,-8.56E-02 556 | region00_TCIA-0007_slice2.png,region00_TCIA-0007_slice2.png,1.12E+00,2.66E-02,4.71E-03,8.41E-01,-9.48E-02,-2.11E-02 557 | region00_TCIA-0007_slice3.png,region00_TCIA-0007_slice3.png,1.14E+00,2.33E-03,-3.96E-02,1.09E+00,-7.84E-02,-7.47E-02 558 | region00_TCIA-0008_slice1.png,region00_TCIA-0008_slice1.png,9.42E-01,-3.22E-02,-5.84E-03,9.13E-01,-6.13E-02,-4.90E-02 559 | region00_TCIA-0008_slice2.png,region00_TCIA-0008_slice2.png,9.27E-01,1.64E-02,-1.91E-02,8.26E-01,9.47E-02,6.08E-02 560 | region00_TCIA-0008_slice3.png,region00_TCIA-0008_slice3.png,9.89E-01,-1.94E-02,-5.53E-03,9.06E-01,5.68E-02,8.42E-03 561 | region00_TCIA-0009_slice1.png,region00_TCIA-0009_slice1.png,8.91E-01,-2.22E-02,-4.36E-02,1.19E+00,-4.67E-02,-1.45E-02 562 | region00_TCIA-0009_slice2.png,region00_TCIA-0009_slice2.png,1.19E+00,6.13E-02,2.37E-02,1.07E+00,9.46E-02,8.04E-02 563 | region00_TCIA-0009_slice3.png,region00_TCIA-0009_slice3.png,8.40E-01,2.88E-02,1.56E-02,8.72E-01,3.59E-02,-2.64E-02 564 | region00_TCIA-0009_slice4.png,region00_TCIA-0009_slice4.png,8.48E-01,-7.93E-03,4.57E-02,1.14E+00,2.70E-02,-4.04E-02 565 | region00_TCIA-0010_slice1.png,region00_TCIA-0010_slice1.png,9.80E-01,-2.80E-02,-3.32E-02,1.16E+00,5.65E-02,8.93E-02 566 | region00_TCIA-0010_slice2.png,region00_TCIA-0010_slice2.png,1.01E+00,-2.60E-02,4.43E-02,9.63E-01,2.81E-03,-3.14E-02 567 | 
region00_TCIA-0010_slice3.png,region00_TCIA-0010_slice3.png,9.12E-01,-1.26E-02,-2.15E-02,1.15E+00,-6.92E-02,-3.72E-02 568 | region00_TCIA-0010_slice4.png,region00_TCIA-0010_slice4.png,1.06E+00,3.64E-02,5.47E-02,1.17E+00,-7.71E-03,2.05E-02 569 | region00_TCIA-0011_slice1.png,region00_TCIA-0011_slice1.png,1.12E+00,-1.70E-02,-5.10E-02,1.20E+00,-6.33E-03,-7.05E-02 570 | region00_TCIA-0011_slice2.png,region00_TCIA-0011_slice2.png,8.51E-01,-2.27E-02,-3.97E-02,1.10E+00,-7.04E-02,-6.12E-02 571 | region00_TCIA-0011_slice3.png,region00_TCIA-0011_slice3.png,1.10E+00,-3.50E-02,3.72E-02,9.11E-01,-7.97E-02,5.73E-02 572 | region00_TCIA-0012_slice1.png,region00_TCIA-0012_slice1.png,8.93E-01,-9.08E-03,-5.98E-02,9.61E-01,3.37E-02,2.24E-02 573 | region00_TCIA-0012_slice2.png,region00_TCIA-0012_slice2.png,1.04E+00,-4.16E-02,5.38E-03,8.88E-01,-9.57E-02,3.43E-02 574 | region00_TCIA-0013_slice1.png,region00_TCIA-0013_slice1.png,1.15E+00,-2.92E-03,-4.66E-02,8.95E-01,1.68E-02,4.92E-02 575 | region00_TCIA-0013_slice2.png,region00_TCIA-0013_slice2.png,8.20E-01,3.73E-02,-2.47E-02,9.68E-01,9.51E-03,5.60E-02 576 | region00_TCIA-0013_slice3.png,region00_TCIA-0013_slice3.png,1.06E+00,-2.94E-02,2.93E-02,1.02E+00,8.11E-02,-3.95E-03 577 | region00_TCIA-0013_slice4.png,region00_TCIA-0013_slice4.png,8.69E-01,9.10E-04,2.82E-02,9.67E-01,6.02E-02,8.19E-02 578 | region00_TCIA-0014_slice1.png,region00_TCIA-0014_slice1.png,1.11E+00,-1.48E-02,2.65E-02,9.86E-01,2.29E-02,3.34E-02 579 | region00_TCIA-0014_slice2.png,region00_TCIA-0014_slice2.png,9.61E-01,-3.03E-02,3.74E-02,9.32E-01,-2.02E-02,5.55E-02 580 | region00_TCIA-0014_slice3.png,region00_TCIA-0014_slice3.png,1.09E+00,-6.51E-03,-3.76E-02,9.93E-01,9.47E-02,-2.49E-02 581 | region00_TCIA-0014_slice4.png,region00_TCIA-0014_slice4.png,1.15E+00,-9.56E-03,-2.12E-03,8.84E-01,9.65E-02,-4.45E-02 582 | region00_TCIA-0015_slice1.png,region00_TCIA-0015_slice1.png,1.03E+00,-3.69E-02,-1.02E-02,8.60E-01,7.04E-02,-9.94E-02 583 | region00_TCIA-0015_slice2.png,region00_TCIA-0015_slice2.png,1.06E+00,5.32E-02,3.38E-02,1.07E+00,-8.99E-02,-2.03E-02 584 | region00_TCIA-0015_slice3.png,region00_TCIA-0015_slice3.png,1.20E+00,-5.36E-02,-4.05E-02,1.03E+00,4.11E-02,-6.02E-03 585 | region00_TCIA-0015_slice4.png,region00_TCIA-0015_slice4.png,8.22E-01,1.96E-02,-3.90E-02,1.07E+00,2.28E-03,-2.98E-02 586 | region00_TCIA-0016_slice1.png,region00_TCIA-0016_slice1.png,1.10E+00,3.17E-02,8.28E-03,1.16E+00,-7.24E-02,-2.04E-02 587 | region00_TCIA-0016_slice2.png,region00_TCIA-0016_slice2.png,9.61E-01,-1.88E-02,7.62E-03,9.13E-01,-8.99E-02,-8.22E-03 588 | region00_TCIA-0016_slice3.png,region00_TCIA-0016_slice3.png,9.95E-01,-1.00E-02,2.96E-02,8.69E-01,-9.72E-03,-1.50E-02 589 | region00_TCIA-0017_slice1.png,region00_TCIA-0017_slice1.png,1.06E+00,6.16E-02,-2.79E-02,1.18E+00,-2.93E-02,-4.74E-02 590 | region00_TCIA-0017_slice2.png,region00_TCIA-0017_slice2.png,8.63E-01,-4.54E-02,-4.64E-03,8.56E-01,9.30E-02,-8.09E-02 591 | region00_TCIA-0017_slice3.png,region00_TCIA-0017_slice3.png,1.07E+00,1.73E-04,4.95E-02,1.04E+00,-6.02E-02,-8.18E-02 592 | region00_TCIA-0018_slice1.png,region00_TCIA-0018_slice1.png,9.32E-01,1.38E-02,-2.31E-02,8.50E-01,-6.63E-03,3.90E-02 593 | region00_TCIA-0018_slice2.png,region00_TCIA-0018_slice2.png,9.77E-01,-1.72E-02,-5.68E-02,9.81E-01,2.80E-02,-4.48E-02 594 | region00_TCIA-0018_slice3.png,region00_TCIA-0018_slice3.png,1.07E+00,-2.67E-02,-1.71E-02,1.12E+00,-2.77E-02,-9.32E-02 595 | region00_TCIA-0018_slice4.png,region00_TCIA-0018_slice4.png,1.07E+00,-4.09E-02,3.53E-02,9.42E-01,-4.51E-02,-2.42E-02 596 | 
region00_TCIA-0019_slice1.png,region00_TCIA-0019_slice1.png,1.15E+00,1.84E-02,-4.96E-02,8.82E-01,9.70E-02,3.70E-02 597 | region00_TCIA-0019_slice2.png,region00_TCIA-0019_slice2.png,8.40E-01,9.95E-03,-3.85E-02,8.91E-01,-4.53E-02,-2.98E-02 598 | region00_TCIA-0019_slice3.png,region00_TCIA-0019_slice3.png,1.12E+00,-1.30E-02,4.09E-02,1.00E+00,8.51E-03,-1.15E-02 599 | region00_TCIA-0020_slice1.png,region00_TCIA-0020_slice1.png,9.08E-01,2.38E-02,3.06E-02,1.11E+00,4.70E-02,1.88E-02 600 | region00_TCIA-0020_slice2.png,region00_TCIA-0020_slice2.png,9.30E-01,1.88E-02,-2.77E-03,8.02E-01,-9.78E-03,-8.20E-02 601 | region00_TCIA-0020_slice3.png,region00_TCIA-0020_slice3.png,1.17E+00,-7.25E-03,-3.56E-02,1.07E+00,8.77E-02,-6.08E-02 602 | -------------------------------------------------------------------------------- /training_data/tps/test.csv: -------------------------------------------------------------------------------- 1 | ImageA,ImageB,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22,t23,t24,t25,t26,t27,t28,t29,t30,t31,t32,t33,t34,t35,t36,t37,t38,t39,t40,t41,t42,t43,t44,t45,t46,t47,t48,t49,t50,t51,t52,t53,t54,t55,t56,t57,t58,t59,t60,t61,t62,t63,t64,t65,t66,t67,t68,t69,t70,t71,t72 2 | hist_TCIA-0021_slice1.png,hist_TCIA-0021_slice1.png,-9.79E-01,-9.06E-01,-9.34E-01,-9.30E-01,-1.04E+00,-9.60E-01,-6.14E-01,-5.39E-01,-6.07E-01,-6.92E-01,-6.98E-01,-6.37E-01,-2.42E-01,-2.19E-01,-1.75E-01,-2.03E-01,-2.14E-01,-1.27E-01,1.80E-01,1.19E-01,2.71E-01,1.24E-01,1.77E-01,2.44E-01,5.30E-01,6.38E-01,6.73E-01,5.99E-01,5.57E-01,6.69E-01,1.00E+00,1.07E+00,9.95E-01,9.67E-01,9.19E-01,9.67E-01,-9.05E-01,-6.91E-01,-2.80E-01,2.54E-01,5.75E-01,1.07E+00,-9.76E-01,-6.42E-01,-1.42E-01,1.37E-01,5.94E-01,1.07E+00,-1.01E+00,-6.52E-01,-1.67E-01,1.74E-01,6.86E-01,9.69E-01,-9.37E-01,-5.67E-01,-1.64E-01,2.09E-01,5.31E-01,1.04E+00,-9.67E-01,-5.59E-01,-2.18E-01,1.33E-01,6.91E-01,9.54E-01,-1.03E+00,-6.66E-01,-2.55E-01,1.46E-01,6.62E-01,9.95E-01 3 | hist_TCIA-0021_slice2.png,hist_TCIA-0021_slice2.png,-1.04E+00,-1.07E+00,-1.04E+00,-9.64E-01,-9.48E-01,-9.64E-01,-6.89E-01,-5.59E-01,-6.55E-01,-6.63E-01,-5.37E-01,-5.59E-01,-2.48E-01,-1.43E-01,-2.78E-01,-1.80E-01,-1.75E-01,-2.01E-01,2.27E-01,1.87E-01,2.61E-01,1.50E-01,2.45E-01,1.70E-01,6.07E-01,5.72E-01,6.40E-01,5.33E-01,6.26E-01,6.27E-01,1.10E+00,9.55E-01,9.27E-01,1.02E+00,9.74E-01,1.07E+00,-1.04E+00,-5.13E-01,-1.64E-01,2.49E-01,5.34E-01,1.08E+00,-1.09E+00,-6.67E-01,-1.93E-01,1.27E-01,5.05E-01,9.89E-01,-9.26E-01,-6.78E-01,-2.41E-01,1.29E-01,6.39E-01,1.08E+00,-1.03E+00,-6.87E-01,-2.38E-01,2.57E-01,6.41E-01,1.07E+00,-1.10E+00,-5.30E-01,-2.72E-01,1.29E-01,5.20E-01,1.05E+00,-1.05E+00,-5.20E-01,-1.85E-01,2.82E-01,6.91E-01,1.02E+00 4 | hist_TCIA-0021_slice3.png,hist_TCIA-0021_slice3.png,-1.05E+00,-9.91E-01,-9.63E-01,-9.80E-01,-9.65E-01,-1.07E+00,-5.42E-01,-5.28E-01,-6.04E-01,-6.76E-01,-5.31E-01,-5.08E-01,-2.17E-01,-1.60E-01,-1.81E-01,-1.12E-01,-2.58E-01,-2.04E-01,2.43E-01,1.50E-01,1.59E-01,1.10E-01,1.23E-01,2.21E-01,6.15E-01,6.36E-01,6.40E-01,6.70E-01,5.72E-01,6.39E-01,9.69E-01,9.40E-01,1.02E+00,1.06E+00,1.01E+00,1.01E+00,-9.25E-01,-6.75E-01,-1.13E-01,1.55E-01,6.35E-01,1.07E+00,-1.07E+00,-5.90E-01,-2.79E-01,2.29E-01,6.24E-01,1.08E+00,-1.09E+00,-5.07E-01,-2.99E-01,2.55E-01,6.04E-01,1.06E+00,-1.00E+00,-6.68E-01,-2.03E-01,1.05E-01,6.69E-01,1.05E+00,-1.05E+00,-6.31E-01,-2.38E-01,2.28E-01,6.18E-01,9.02E-01,-9.43E-01,-6.61E-01,-1.21E-01,2.37E-01,6.61E-01,9.21E-01 5 | 
hist_TCIA-0021_slice4.png,hist_TCIA-0021_slice4.png,-9.68E-01,-9.19E-01,-1.08E+00,-1.01E+00,-1.03E+00,-9.75E-01,-6.02E-01,-6.99E-01,-6.27E-01,-6.11E-01,-6.23E-01,-5.98E-01,-2.05E-01,-2.96E-01,-2.33E-01,-1.71E-01,-1.54E-01,-1.21E-01,1.53E-01,1.56E-01,1.56E-01,1.94E-01,2.14E-01,1.61E-01,5.34E-01,5.00E-01,5.88E-01,5.33E-01,5.07E-01,6.09E-01,1.00E+00,9.39E-01,9.12E-01,9.74E-01,1.04E+00,9.38E-01,-9.10E-01,-5.56E-01,-1.01E-01,2.45E-01,5.17E-01,1.10E+00,-1.07E+00,-6.03E-01,-2.32E-01,1.12E-01,5.36E-01,9.91E-01,-1.05E+00,-5.47E-01,-1.79E-01,1.20E-01,6.88E-01,9.89E-01,-1.06E+00,-5.98E-01,-2.25E-01,1.23E-01,5.10E-01,9.73E-01,-1.04E+00,-5.77E-01,-2.64E-01,2.84E-01,5.89E-01,9.14E-01,-1.09E+00,-5.64E-01,-1.04E-01,1.85E-01,5.57E-01,1.09E+00 6 | hist_TCIA-0021_slice5.png,hist_TCIA-0021_slice5.png,-1.01E+00,-1.06E+00,-1.05E+00,-9.42E-01,-9.77E-01,-9.48E-01,-6.54E-01,-6.74E-01,-6.62E-01,-6.94E-01,-5.65E-01,-5.20E-01,-2.35E-01,-2.84E-01,-1.15E-01,-2.02E-01,-2.60E-01,-1.70E-01,1.31E-01,2.31E-01,2.58E-01,2.07E-01,2.17E-01,2.96E-01,5.71E-01,6.60E-01,5.24E-01,6.22E-01,6.97E-01,5.21E-01,9.64E-01,1.08E+00,9.28E-01,9.39E-01,1.00E+00,1.09E+00,-9.60E-01,-6.00E-01,-2.71E-01,1.14E-01,6.49E-01,1.09E+00,-9.05E-01,-5.60E-01,-1.56E-01,2.26E-01,6.29E-01,1.04E+00,-9.86E-01,-5.67E-01,-2.00E-01,2.39E-01,5.14E-01,1.04E+00,-1.00E+00,-5.28E-01,-2.99E-01,2.65E-01,5.62E-01,9.14E-01,-9.97E-01,-6.25E-01,-2.37E-01,2.67E-01,6.32E-01,1.05E+00,-1.10E+00,-5.84E-01,-2.38E-01,2.97E-01,5.54E-01,9.80E-01 7 | hist_TCIA-0021_slice6.png,hist_TCIA-0021_slice6.png,-1.08E+00,-1.09E+00,-1.03E+00,-1.08E+00,-9.40E-01,-1.00E+00,-5.00E-01,-6.18E-01,-5.11E-01,-5.90E-01,-6.23E-01,-5.86E-01,-1.21E-01,-2.65E-01,-2.10E-01,-2.83E-01,-2.33E-01,-1.01E-01,2.36E-01,2.74E-01,2.20E-01,2.14E-01,1.92E-01,2.05E-01,5.91E-01,5.52E-01,5.56E-01,5.72E-01,5.70E-01,5.08E-01,1.05E+00,9.97E-01,9.22E-01,1.03E+00,1.00E+00,1.01E+00,-1.02E+00,-6.12E-01,-2.75E-01,2.32E-01,5.73E-01,9.58E-01,-1.01E+00,-6.76E-01,-1.44E-01,2.28E-01,6.48E-01,9.84E-01,-9.17E-01,-6.21E-01,-2.64E-01,1.14E-01,6.81E-01,9.54E-01,-9.04E-01,-5.87E-01,-2.48E-01,1.06E-01,5.29E-01,1.02E+00,-1.06E+00,-5.53E-01,-1.40E-01,2.92E-01,5.53E-01,1.02E+00,-9.91E-01,-6.23E-01,-2.82E-01,1.32E-01,5.20E-01,9.16E-01 8 | hist_TCIA-0022_slice1.png,hist_TCIA-0022_slice1.png,-1.01E+00,-9.31E-01,-9.86E-01,-1.02E+00,-1.09E+00,-9.69E-01,-5.14E-01,-6.50E-01,-6.07E-01,-5.85E-01,-6.48E-01,-6.03E-01,-2.22E-01,-2.94E-01,-2.98E-01,-2.42E-01,-2.69E-01,-2.07E-01,2.93E-01,2.81E-01,2.80E-01,1.47E-01,2.05E-01,1.62E-01,5.23E-01,5.90E-01,6.19E-01,5.29E-01,6.47E-01,5.95E-01,9.00E-01,1.04E+00,1.06E+00,9.11E-01,9.98E-01,9.23E-01,-1.03E+00,-6.13E-01,-2.88E-01,1.63E-01,5.62E-01,9.01E-01,-9.91E-01,-6.72E-01,-1.07E-01,1.39E-01,5.97E-01,9.22E-01,-9.75E-01,-6.50E-01,-2.37E-01,1.31E-01,6.81E-01,1.05E+00,-9.39E-01,-6.17E-01,-1.64E-01,1.79E-01,6.47E-01,1.07E+00,-1.08E+00,-6.85E-01,-1.87E-01,2.09E-01,5.49E-01,1.07E+00,-1.01E+00,-6.55E-01,-1.67E-01,2.42E-01,6.01E-01,1.00E+00 9 | 
hist_TCIA-0022_slice2.png,hist_TCIA-0022_slice2.png,-9.84E-01,-1.09E+00,-1.08E+00,-9.24E-01,-9.19E-01,-1.00E+00,-5.55E-01,-5.66E-01,-6.91E-01,-5.26E-01,-5.77E-01,-5.63E-01,-1.38E-01,-1.37E-01,-2.61E-01,-2.81E-01,-2.18E-01,-2.35E-01,2.79E-01,1.77E-01,2.23E-01,2.31E-01,2.27E-01,2.79E-01,6.54E-01,6.67E-01,6.16E-01,6.48E-01,6.00E-01,6.06E-01,9.66E-01,9.76E-01,9.78E-01,1.09E+00,9.53E-01,1.09E+00,-1.09E+00,-6.22E-01,-2.02E-01,1.64E-01,5.27E-01,9.85E-01,-1.02E+00,-6.94E-01,-1.54E-01,2.21E-01,6.25E-01,1.00E+00,-1.03E+00,-6.89E-01,-2.04E-01,1.25E-01,6.38E-01,9.89E-01,-1.09E+00,-5.49E-01,-1.13E-01,1.20E-01,5.26E-01,1.09E+00,-9.90E-01,-5.11E-01,-2.06E-01,2.04E-01,5.85E-01,9.38E-01,-1.00E+00,-6.63E-01,-1.31E-01,2.68E-01,6.08E-01,1.03E+00 10 | hist_TCIA-0022_slice3.png,hist_TCIA-0022_slice3.png,-1.04E+00,-1.09E+00,-1.01E+00,-9.56E-01,-9.91E-01,-9.60E-01,-5.87E-01,-6.29E-01,-6.19E-01,-5.12E-01,-6.90E-01,-6.37E-01,-1.37E-01,-2.15E-01,-1.71E-01,-1.55E-01,-2.26E-01,-1.01E-01,2.02E-01,2.06E-01,1.32E-01,1.57E-01,1.55E-01,1.21E-01,5.85E-01,6.83E-01,5.05E-01,5.14E-01,6.36E-01,6.36E-01,1.01E+00,1.08E+00,1.08E+00,9.75E-01,9.30E-01,1.07E+00,-1.06E+00,-6.34E-01,-2.17E-01,1.63E-01,5.85E-01,9.73E-01,-9.67E-01,-5.16E-01,-2.64E-01,1.07E-01,6.20E-01,9.63E-01,-1.09E+00,-5.26E-01,-1.09E-01,2.71E-01,5.49E-01,9.74E-01,-9.86E-01,-6.92E-01,-1.29E-01,2.21E-01,6.83E-01,9.60E-01,-9.39E-01,-5.72E-01,-2.30E-01,1.45E-01,5.79E-01,9.38E-01,-1.07E+00,-5.08E-01,-1.02E-01,1.04E-01,6.93E-01,9.70E-01 11 | hist_TCIA-0022_slice4.png,hist_TCIA-0022_slice4.png,-1.10E+00,-9.18E-01,-9.57E-01,-9.72E-01,-9.91E-01,-9.87E-01,-6.42E-01,-6.04E-01,-5.36E-01,-5.18E-01,-6.59E-01,-5.45E-01,-2.21E-01,-1.78E-01,-1.87E-01,-1.21E-01,-2.36E-01,-1.26E-01,1.23E-01,1.14E-01,2.59E-01,2.87E-01,1.04E-01,2.13E-01,5.86E-01,5.56E-01,5.84E-01,5.04E-01,5.97E-01,6.26E-01,1.06E+00,1.02E+00,9.95E-01,9.93E-01,9.02E-01,9.83E-01,-1.09E+00,-5.69E-01,-2.45E-01,2.16E-01,5.38E-01,9.32E-01,-1.07E+00,-6.97E-01,-2.19E-01,2.78E-01,5.94E-01,9.88E-01,-9.51E-01,-6.64E-01,-1.63E-01,2.93E-01,5.35E-01,1.01E+00,-9.24E-01,-6.04E-01,-2.49E-01,1.31E-01,5.48E-01,9.72E-01,-1.01E+00,-6.20E-01,-1.45E-01,2.19E-01,5.90E-01,9.84E-01,-9.03E-01,-5.13E-01,-1.52E-01,2.77E-01,6.68E-01,1.08E+00 12 | hist_TCIA-0023_slice1.png,hist_TCIA-0023_slice1.png,-1.09E+00,-1.02E+00,-1.04E+00,-9.23E-01,-1.07E+00,-1.02E+00,-5.48E-01,-6.73E-01,-6.76E-01,-5.78E-01,-6.11E-01,-5.70E-01,-1.16E-01,-1.36E-01,-2.04E-01,-1.87E-01,-1.47E-01,-1.03E-01,1.50E-01,2.40E-01,1.19E-01,1.39E-01,1.02E-01,2.73E-01,5.27E-01,5.99E-01,6.77E-01,6.40E-01,5.64E-01,6.81E-01,9.57E-01,9.07E-01,1.05E+00,9.54E-01,1.09E+00,9.06E-01,-1.08E+00,-5.57E-01,-1.82E-01,2.54E-01,5.09E-01,9.73E-01,-9.83E-01,-5.43E-01,-2.33E-01,1.84E-01,5.75E-01,1.00E+00,-9.16E-01,-6.09E-01,-1.14E-01,2.36E-01,6.95E-01,9.31E-01,-9.33E-01,-6.75E-01,-2.72E-01,1.80E-01,5.44E-01,9.69E-01,-9.33E-01,-6.80E-01,-2.01E-01,1.79E-01,5.88E-01,9.40E-01,-9.33E-01,-5.59E-01,-2.04E-01,2.71E-01,5.89E-01,9.82E-01 13 | 
hist_TCIA-0023_slice2.png,hist_TCIA-0023_slice2.png,-9.81E-01,-9.91E-01,-1.08E+00,-1.03E+00,-9.69E-01,-9.68E-01,-6.17E-01,-6.77E-01,-6.58E-01,-6.05E-01,-6.46E-01,-6.20E-01,-2.64E-01,-1.49E-01,-2.64E-01,-2.43E-01,-2.46E-01,-1.18E-01,2.93E-01,2.59E-01,1.46E-01,1.46E-01,1.68E-01,1.24E-01,6.80E-01,6.75E-01,5.59E-01,6.81E-01,6.63E-01,5.29E-01,1.02E+00,9.80E-01,1.02E+00,9.84E-01,9.37E-01,9.57E-01,-9.58E-01,-5.08E-01,-1.48E-01,2.79E-01,5.16E-01,9.95E-01,-1.09E+00,-5.08E-01,-2.43E-01,1.82E-01,6.24E-01,1.05E+00,-1.00E+00,-5.35E-01,-2.20E-01,2.36E-01,6.81E-01,1.05E+00,-1.05E+00,-6.60E-01,-1.92E-01,2.12E-01,6.13E-01,9.96E-01,-9.99E-01,-5.49E-01,-2.43E-01,1.80E-01,5.71E-01,9.59E-01,-1.05E+00,-6.50E-01,-2.95E-01,1.71E-01,5.13E-01,9.31E-01 14 | hist_TCIA-0023_slice3.png,hist_TCIA-0023_slice3.png,-9.87E-01,-9.69E-01,-1.06E+00,-1.06E+00,-1.10E+00,-9.77E-01,-5.79E-01,-5.64E-01,-5.47E-01,-5.17E-01,-6.91E-01,-5.56E-01,-1.27E-01,-2.76E-01,-2.03E-01,-1.57E-01,-1.76E-01,-1.78E-01,2.61E-01,1.62E-01,2.88E-01,2.43E-01,2.89E-01,1.39E-01,6.68E-01,6.90E-01,5.74E-01,5.29E-01,5.34E-01,6.06E-01,1.10E+00,1.08E+00,9.56E-01,9.43E-01,1.03E+00,1.08E+00,-1.06E+00,-6.86E-01,-2.60E-01,1.74E-01,6.64E-01,1.05E+00,-9.52E-01,-5.18E-01,-2.10E-01,1.22E-01,5.69E-01,1.07E+00,-1.04E+00,-5.56E-01,-1.88E-01,1.50E-01,6.62E-01,1.07E+00,-9.53E-01,-6.63E-01,-1.57E-01,1.25E-01,6.80E-01,9.25E-01,-9.85E-01,-6.85E-01,-1.60E-01,1.82E-01,5.02E-01,9.71E-01,-1.02E+00,-6.20E-01,-1.81E-01,2.11E-01,5.14E-01,9.42E-01 15 | hist_TCIA-0023_slice4.png,hist_TCIA-0023_slice4.png,-1.07E+00,-9.42E-01,-1.07E+00,-1.06E+00,-1.06E+00,-1.08E+00,-5.76E-01,-5.89E-01,-6.44E-01,-6.92E-01,-6.45E-01,-6.18E-01,-2.60E-01,-1.59E-01,-2.43E-01,-2.18E-01,-1.84E-01,-2.90E-01,1.37E-01,2.74E-01,2.72E-01,1.98E-01,2.80E-01,1.92E-01,6.29E-01,6.12E-01,5.61E-01,5.83E-01,5.04E-01,5.48E-01,9.68E-01,9.68E-01,1.10E+00,9.43E-01,9.28E-01,9.18E-01,-1.10E+00,-6.07E-01,-2.90E-01,1.51E-01,5.98E-01,1.01E+00,-1.04E+00,-6.69E-01,-1.49E-01,2.25E-01,6.19E-01,9.18E-01,-1.02E+00,-6.20E-01,-1.34E-01,2.66E-01,5.05E-01,9.10E-01,-9.84E-01,-6.75E-01,-2.66E-01,2.03E-01,5.77E-01,9.08E-01,-9.67E-01,-6.02E-01,-2.47E-01,2.15E-01,6.72E-01,1.08E+00,-1.07E+00,-5.58E-01,-1.57E-01,1.99E-01,6.98E-01,9.86E-01 16 | hist_TCIA-0024_slice1.png,hist_TCIA-0024_slice1.png,-1.00E+00,-1.09E+00,-9.50E-01,-1.10E+00,-9.21E-01,-9.75E-01,-5.77E-01,-5.86E-01,-6.51E-01,-5.19E-01,-6.29E-01,-5.22E-01,-1.59E-01,-1.91E-01,-2.67E-01,-1.03E-01,-1.64E-01,-1.22E-01,2.79E-01,1.03E-01,1.41E-01,2.81E-01,2.94E-01,2.46E-01,5.90E-01,5.36E-01,6.05E-01,5.77E-01,6.14E-01,6.88E-01,9.75E-01,9.36E-01,1.06E+00,9.24E-01,1.06E+00,9.29E-01,-1.09E+00,-6.89E-01,-3.00E-01,2.01E-01,6.47E-01,9.49E-01,-9.10E-01,-5.80E-01,-1.23E-01,1.41E-01,5.76E-01,1.06E+00,-1.06E+00,-6.05E-01,-2.14E-01,1.65E-01,5.77E-01,9.15E-01,-1.03E+00,-5.89E-01,-2.74E-01,2.01E-01,5.51E-01,1.00E+00,-1.07E+00,-6.92E-01,-1.88E-01,2.60E-01,6.75E-01,1.06E+00,-1.10E+00,-6.67E-01,-1.38E-01,2.29E-01,5.73E-01,1.02E+00 17 | 
hist_TCIA-0024_slice2.png,hist_TCIA-0024_slice2.png,-9.24E-01,-9.87E-01,-1.02E+00,-1.10E+00,-9.42E-01,-9.20E-01,-5.10E-01,-6.47E-01,-5.64E-01,-5.78E-01,-5.37E-01,-6.06E-01,-2.23E-01,-2.11E-01,-2.44E-01,-1.97E-01,-1.83E-01,-1.28E-01,1.91E-01,2.59E-01,1.94E-01,1.41E-01,2.69E-01,1.43E-01,6.23E-01,5.82E-01,5.45E-01,6.39E-01,6.05E-01,5.85E-01,1.06E+00,9.51E-01,9.41E-01,1.07E+00,9.50E-01,1.02E+00,-9.83E-01,-6.21E-01,-2.46E-01,1.23E-01,6.71E-01,9.59E-01,-1.02E+00,-6.00E-01,-1.85E-01,1.63E-01,5.29E-01,1.02E+00,-1.03E+00,-6.53E-01,-2.93E-01,1.86E-01,6.36E-01,1.10E+00,-1.04E+00,-5.72E-01,-2.33E-01,2.35E-01,5.08E-01,1.06E+00,-9.16E-01,-6.42E-01,-2.25E-01,2.61E-01,5.01E-01,9.69E-01,-1.09E+00,-6.31E-01,-1.68E-01,2.45E-01,5.70E-01,9.36E-01 18 | hist_TCIA-0024_slice3.png,hist_TCIA-0024_slice3.png,-9.50E-01,-1.06E+00,-1.10E+00,-9.88E-01,-9.79E-01,-1.09E+00,-5.31E-01,-6.71E-01,-6.04E-01,-5.17E-01,-5.58E-01,-6.66E-01,-1.18E-01,-1.64E-01,-2.21E-01,-2.30E-01,-2.49E-01,-2.63E-01,2.11E-01,2.31E-01,1.64E-01,1.81E-01,2.19E-01,1.60E-01,5.70E-01,6.87E-01,6.74E-01,5.08E-01,5.54E-01,6.02E-01,9.70E-01,9.75E-01,9.53E-01,1.07E+00,9.90E-01,1.05E+00,-1.04E+00,-5.11E-01,-1.32E-01,1.43E-01,5.60E-01,1.02E+00,-9.23E-01,-5.68E-01,-2.34E-01,2.71E-01,5.33E-01,9.74E-01,-9.91E-01,-6.95E-01,-1.21E-01,2.23E-01,5.37E-01,9.57E-01,-1.10E+00,-6.46E-01,-2.17E-01,2.12E-01,6.44E-01,1.05E+00,-9.04E-01,-5.96E-01,-1.40E-01,2.07E-01,6.64E-01,1.06E+00,-1.02E+00,-5.64E-01,-1.16E-01,1.19E-01,5.25E-01,9.30E-01 19 | hist_TCIA-0025_slice1.png,hist_TCIA-0025_slice1.png,-1.02E+00,-1.02E+00,-1.05E+00,-1.07E+00,-1.09E+00,-9.44E-01,-5.77E-01,-5.17E-01,-5.47E-01,-5.01E-01,-6.05E-01,-5.92E-01,-2.78E-01,-2.85E-01,-1.73E-01,-1.47E-01,-1.99E-01,-2.70E-01,1.63E-01,1.29E-01,2.91E-01,1.11E-01,1.22E-01,1.88E-01,5.94E-01,5.94E-01,6.59E-01,5.54E-01,5.88E-01,6.50E-01,9.86E-01,9.86E-01,1.09E+00,9.10E-01,1.08E+00,9.09E-01,-9.54E-01,-5.44E-01,-2.34E-01,2.75E-01,5.22E-01,9.99E-01,-9.36E-01,-5.58E-01,-1.58E-01,1.96E-01,6.49E-01,9.09E-01,-1.04E+00,-5.74E-01,-1.51E-01,2.36E-01,5.99E-01,1.07E+00,-9.91E-01,-5.09E-01,-2.86E-01,2.20E-01,6.24E-01,9.48E-01,-1.06E+00,-5.94E-01,-2.53E-01,2.30E-01,6.89E-01,9.63E-01,-9.30E-01,-6.28E-01,-1.77E-01,2.69E-01,5.97E-01,9.65E-01 20 | hist_TCIA-0025_slice2.png,hist_TCIA-0025_slice2.png,-1.01E+00,-9.78E-01,-1.08E+00,-9.96E-01,-1.05E+00,-9.76E-01,-6.65E-01,-5.17E-01,-6.90E-01,-5.99E-01,-5.02E-01,-5.27E-01,-2.46E-01,-1.20E-01,-1.98E-01,-2.65E-01,-1.41E-01,-2.55E-01,1.16E-01,1.16E-01,2.79E-01,2.73E-01,1.80E-01,2.15E-01,6.65E-01,6.89E-01,5.47E-01,5.15E-01,5.59E-01,6.24E-01,9.14E-01,9.32E-01,1.01E+00,9.61E-01,9.45E-01,9.42E-01,-1.02E+00,-5.67E-01,-2.02E-01,1.16E-01,5.31E-01,9.90E-01,-1.01E+00,-6.33E-01,-2.08E-01,1.93E-01,5.84E-01,9.01E-01,-9.67E-01,-5.72E-01,-2.08E-01,2.78E-01,5.96E-01,9.52E-01,-9.99E-01,-6.81E-01,-2.08E-01,1.35E-01,5.47E-01,1.06E+00,-1.01E+00,-5.29E-01,-2.71E-01,2.30E-01,5.19E-01,1.08E+00,-9.38E-01,-5.70E-01,-1.83E-01,1.14E-01,6.71E-01,1.04E+00 21 | 
hist_TCIA-0025_slice3.png,hist_TCIA-0025_slice3.png,-9.22E-01,-1.04E+00,-9.93E-01,-9.34E-01,-9.78E-01,-1.07E+00,-5.33E-01,-6.00E-01,-6.15E-01,-6.42E-01,-6.43E-01,-6.87E-01,-2.27E-01,-2.96E-01,-1.85E-01,-1.47E-01,-1.38E-01,-2.00E-01,1.24E-01,1.78E-01,1.95E-01,1.76E-01,1.82E-01,2.43E-01,5.92E-01,6.84E-01,5.46E-01,5.83E-01,6.35E-01,5.73E-01,9.30E-01,1.00E+00,1.00E+00,9.30E-01,1.06E+00,9.91E-01,-9.06E-01,-6.63E-01,-2.29E-01,1.55E-01,6.01E-01,9.06E-01,-1.09E+00,-5.33E-01,-1.07E-01,1.07E-01,5.21E-01,1.04E+00,-1.01E+00,-6.49E-01,-2.86E-01,1.61E-01,5.14E-01,1.02E+00,-1.05E+00,-6.63E-01,-1.21E-01,2.37E-01,6.54E-01,9.46E-01,-1.01E+00,-6.17E-01,-2.62E-01,1.81E-01,6.25E-01,9.66E-01,-1.07E+00,-5.25E-01,-2.33E-01,2.85E-01,6.57E-01,1.09E+00 22 | hist_TCIA-0026_slice1.png,hist_TCIA-0026_slice1.png,-1.06E+00,-1.09E+00,-9.74E-01,-9.40E-01,-9.75E-01,-1.07E+00,-6.14E-01,-5.32E-01,-5.98E-01,-6.05E-01,-5.42E-01,-6.98E-01,-1.87E-01,-1.44E-01,-2.57E-01,-1.66E-01,-2.25E-01,-2.71E-01,1.94E-01,2.95E-01,1.15E-01,2.92E-01,1.91E-01,1.91E-01,5.18E-01,6.42E-01,6.77E-01,5.99E-01,5.25E-01,6.81E-01,1.06E+00,1.08E+00,9.59E-01,1.01E+00,1.05E+00,9.63E-01,-9.97E-01,-6.68E-01,-2.36E-01,2.59E-01,6.10E-01,9.57E-01,-9.75E-01,-6.91E-01,-1.29E-01,1.27E-01,5.85E-01,9.65E-01,-1.09E+00,-5.62E-01,-2.57E-01,2.69E-01,5.47E-01,9.86E-01,-1.00E+00,-6.29E-01,-1.72E-01,2.41E-01,5.23E-01,1.02E+00,-1.07E+00,-6.12E-01,-1.07E-01,2.08E-01,5.38E-01,9.14E-01,-9.10E-01,-5.35E-01,-1.48E-01,2.39E-01,6.45E-01,9.47E-01 23 | hist_TCIA-0026_slice2.png,hist_TCIA-0026_slice2.png,-1.04E+00,-1.04E+00,-9.66E-01,-1.03E+00,-1.09E+00,-9.64E-01,-5.18E-01,-6.57E-01,-6.23E-01,-5.76E-01,-5.53E-01,-6.87E-01,-2.12E-01,-2.70E-01,-2.58E-01,-2.50E-01,-1.98E-01,-2.67E-01,1.02E-01,2.05E-01,1.23E-01,1.19E-01,2.59E-01,2.75E-01,6.96E-01,6.15E-01,6.18E-01,6.21E-01,6.38E-01,6.50E-01,1.02E+00,1.01E+00,1.10E+00,1.09E+00,1.00E+00,1.01E+00,-9.29E-01,-6.21E-01,-1.61E-01,2.49E-01,5.78E-01,9.10E-01,-9.71E-01,-5.77E-01,-2.43E-01,2.50E-01,5.76E-01,9.16E-01,-1.02E+00,-6.63E-01,-1.31E-01,2.49E-01,6.90E-01,1.08E+00,-9.38E-01,-5.26E-01,-1.94E-01,2.83E-01,6.07E-01,9.90E-01,-9.68E-01,-5.92E-01,-2.45E-01,2.47E-01,6.79E-01,1.08E+00,-9.84E-01,-5.75E-01,-2.99E-01,1.77E-01,5.03E-01,9.24E-01 24 | -------------------------------------------------------------------------------- /util/__pycache__/torch_util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/util/__pycache__/torch_util.cpython-37.pyc -------------------------------------------------------------------------------- /util/__pycache__/train_test_fn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pimed/ProsRegNet/f8addf5bb65a5d38d081fa3aa57ca4d056165314/util/__pycache__/train_test_fn.cpython-37.pyc -------------------------------------------------------------------------------- /util/torch_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | The following code is adapted from: https://github.com/ignacio-rocco/cnngeometric_pytorch. 
3 | """ 4 | 5 | import shutil 6 | import torch 7 | from torch.autograd import Variable 8 | from os import makedirs, remove 9 | from os.path import exists, join, basename, dirname 10 | 11 | class BatchTensorToVars(object): 12 | """Convert tensors in dict batch to vars 13 | """ 14 | def __init__(self, use_cuda=True): 15 | self.use_cuda=use_cuda 16 | 17 | def __call__(self, batch): 18 | batch_var = {} 19 | for key,value in batch.items(): 20 | batch_var[key] = Variable(value,requires_grad=False) 21 | if self.use_cuda: 22 | batch_var[key] = batch_var[key].cuda() 23 | 24 | return batch_var 25 | 26 | def save_checkpoint(state, is_best, file): 27 | model_dir = dirname(file) 28 | model_fn = basename(file) 29 | # make dir if needed (should be non-empty) 30 | if model_dir!='' and not exists(model_dir): 31 | makedirs(model_dir) 32 | torch.save(state, file) 33 | if is_best: 34 | shutil.copyfile(file, join(model_dir,'best_' + model_fn)) 35 | 36 | def str_to_bool(v): 37 | if v.lower() in ('yes', 'true', 't', 'y', '1'): 38 | return True 39 | elif v.lower() in ('no', 'false', 'f', 'n', '0'): 40 | return False 41 | else: 42 | raise argparse.ArgumentTypeError('Boolean value expected.') 43 | -------------------------------------------------------------------------------- /util/train_test_fn.py: -------------------------------------------------------------------------------- 1 | """ 2 | The following code is adapted from: https://github.com/ignacio-rocco/cnngeometric_pytorch. 3 | """ 4 | 5 | from __future__ import print_function, division 6 | import torch 7 | from skimage import io 8 | from collections import OrderedDict 9 | from image.normalization import NormalizeImageDict, normalize_image 10 | from geotnf.transformation import GeometricTnf 11 | import torch 12 | import torch.nn as nn 13 | 14 | def train(epoch,model,loss_fn,optimizer,dataloader,pair_generation_tnf,use_cuda=True,log_interval=50): 15 | model.train() 16 | train_loss = 0 17 | for batch_idx, batch in enumerate(dataloader): 18 | optimizer.zero_grad() 19 | tnf_batch = pair_generation_tnf(batch) 20 | theta = model(tnf_batch) 21 | loss = loss_fn(theta,tnf_batch['theta_GT'],tnf_batch) 22 | loss.backward() 23 | optimizer.step() 24 | train_loss += loss.data.cpu().numpy() 25 | if batch_idx % log_interval == 0: 26 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.6f}'.format( 27 | epoch, batch_idx , len(dataloader), 28 | 100. 
24 |                 100. * batch_idx / len(dataloader), loss.data))
25 |     train_loss /= len(dataloader)
26 |     print('Train set: Average loss: {:.6f}'.format(train_loss))
27 |     return train_loss
28 | 
29 | def test(model, loss_fn, dataloader, pair_generation_tnf, use_cuda=True, geometric_model='affine'):
30 |     model.eval()
31 |     test_loss = 0
32 |     dice = 0
33 |     for batch_idx, batch in enumerate(dataloader):
34 |         tnf_batch = pair_generation_tnf(batch)
35 |         theta = model(tnf_batch)
36 |         loss = loss_fn(theta, tnf_batch['theta_GT'], tnf_batch)
37 |         test_loss += loss.data.cpu().numpy()
38 | 
39 |         # warp the source mask with the predicted parameters and compare it to the
40 |         # target mask; only affine predictions need reshaping into a 2x3 matrix
41 |         I = tnf_batch['target_mask']
42 |         geometricTnf = GeometricTnf(geometric_model, 240, 240, use_cuda=use_cuda)
43 |         if geometric_model == 'affine':
44 |             theta = theta.view(-1, 2, 3)
45 |         J = geometricTnf(tnf_batch['source_mask'], theta)
46 | 
47 |         if use_cuda:
48 |             I = I.cuda()
49 |             J = J.cuda()
50 | 
51 |         # Dice coefficient 2*|I*J| / (|I| + |J|), averaged over the batch
52 |         numerator = 2 * torch.sum(torch.sum(torch.sum(I * J, dim=3), dim=2), dim=1)
53 |         denominator = torch.sum(torch.sum(torch.sum(I + J, dim=3), dim=2), dim=1)
54 |         dice = dice + torch.sum(numerator / (denominator + 0.00001)) / I.shape[0]
55 | 
56 |     test_loss /= len(dataloader)
57 |     dice /= len(dataloader)
58 | 
59 |     print('Test set: Average loss: {:.6f}'.format(test_loss))
60 |     print('Test set: Dice: {:.6f}'.format(dice))
61 |     return test_loss
62 | 
--------------------------------------------------------------------------------
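
A note on the ground-truth CSVs above: each row of training_data/affine/*.csv pairs two image filenames with six transformation parameters, and each row of training_data/tps/*.csv pairs them with 72 parameters (header t1-t72, consistent with the x- and y-coordinates of a 6x6 grid of TPS control points, 36 + 36). The sketch below is not part of the repository; it shows one plausible way to read such a row into a theta tensor. The helper name load_theta_row is hypothetical, and the exact meaning and ordering of the coefficients is defined by the repository's synthetic pair generation, not by this sketch.

```python
import numpy as np
import torch

def load_theta_row(csv_row, geometric_model='affine'):
    # Hypothetical helper, for illustration only.
    # csv_row: one data line, e.g. "imgA.png,imgB.png,1.14E+00,-7.07E-02,..."
    fields = csv_row.strip().split(',')
    image_a, image_b = fields[0], fields[1]
    theta = torch.from_numpy(np.asarray(fields[2:], dtype=np.float32))
    # 6 values for the affine model; 72 for TPS (assumed: a 6x6 control-point grid)
    expected = 6 if geometric_model == 'affine' else 72
    assert theta.numel() == expected
    return image_a, image_b, theta
```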